extract colorspace code from picture.c into picture_csp.c

Had to refactor a few functions here and there.

Change-Id: I86fde6fec7c2fc7eb48f0ecf327dbbd2bd40b9d4
Pascal Massimino 2014-07-14 02:04:14 -07:00
parent fbadb48026
commit 736f2a175e
9 changed files with 548 additions and 523 deletions
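
For orientation, a minimal usage sketch (illustration only, not part of the change): it exercises the public import/convert entry points whose implementation moves into picture_csp.c. The helper name ImportAndConvert is hypothetical.

#include "webp/encode.h"

// Hypothetical caller: imports interleaved RGBA into the ARGB buffer, then
// converts to YUV420(+alpha) through the code that now lives in picture_csp.c.
static int ImportAndConvert(const uint8_t* rgba, int width, int height,
                            int rgba_stride, WebPPicture* const pic) {
  if (!WebPPictureInit(pic)) return 0;   // version check + zero-initialization
  pic->width = width;
  pic->height = height;
  pic->use_argb = 1;                     // start from the ARGB representation
  if (!WebPPictureImportRGBA(pic, rgba, rgba_stride)) return 0;
  // Wraps WebPPictureARGBToYUVADithered(pic, WEBP_YUV420, 0.f).
  return WebPPictureARGBToYUVA(pic, WEBP_YUV420);
}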


@@ -64,9 +64,10 @@ LOCAL_SRC_FILES := \
src/enc/histogram.c \
src/enc/iterator.c \
src/enc/picture.c \
+src/enc/picture_csp.c \
src/enc/picture_psnr.c \
-src/enc/picture_tools.c \
src/enc/picture_rescale.c \
+src/enc/picture_tools.c \
src/enc/quant.c \
src/enc/syntax.c \
src/enc/token.c \


@@ -218,9 +218,10 @@ ENC_OBJS = \
$(DIROBJ)\enc\histogram.obj \
$(DIROBJ)\enc\iterator.obj \
$(DIROBJ)\enc\picture.obj \
+$(DIROBJ)\enc\picture_csp.obj \
$(DIROBJ)\enc\picture_psnr.obj \
-$(DIROBJ)\enc\picture_tools.obj \
$(DIROBJ)\enc\picture_rescale.obj \
+$(DIROBJ)\enc\picture_tools.obj \
$(DIROBJ)\enc\quant.obj \
$(DIROBJ)\enc\syntax.obj \
$(DIROBJ)\enc\token.obj \


@@ -143,6 +143,7 @@ ENC_OBJS = \
src/enc/histogram.o \
src/enc/iterator.o \
src/enc/picture.o \
+src/enc/picture_csp.o \
src/enc/picture_psnr.o \
src/enc/picture_rescale.o \
src/enc/picture_tools.o \


@@ -12,6 +12,7 @@ libwebpencode_la_SOURCES += frame.c
libwebpencode_la_SOURCES += histogram.c
libwebpencode_la_SOURCES += iterator.c
libwebpencode_la_SOURCES += picture.c
+libwebpencode_la_SOURCES += picture_csp.c
libwebpencode_la_SOURCES += picture_psnr.c
libwebpencode_la_SOURCES += picture_rescale.c
libwebpencode_la_SOURCES += picture_tools.c


@@ -7,60 +7,80 @@
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
-// WebPPicture utils: colorspace conversion, crop, ...
+// WebPPicture class basis
//
// Author: Skal (pascal.massimino@gmail.com)
#include <assert.h>
#include <stdlib.h>
-#include <math.h>
#include "./vp8enci.h"
-#include "../utils/random.h"
#include "../utils/utils.h"
-#include "../dsp/yuv.h"
-// Uncomment to disable gamma-compression during RGB->U/V averaging
-#define USE_GAMMA_COMPRESSION
-#define HALVE(x) (((x) + 1) >> 1)
-static const union {
-uint32_t argb;
-uint8_t bytes[4];
-} test_endian = { 0xff000000u };
-#define ALPHA_IS_LAST (test_endian.bytes[3] == 0xff)
-static WEBP_INLINE uint32_t MakeARGB32(int r, int g, int b) {
-return (0xff000000u | (r << 16) | (g << 8) | b);
-}
//------------------------------------------------------------------------------
// WebPPicture
//------------------------------------------------------------------------------
-int WebPPictureAlloc(WebPPicture* picture) {
-if (picture != NULL) {
+static void WebPPictureResetBufferARGB(WebPPicture* const picture) {
+picture->memory_argb_ = NULL;
+picture->argb = NULL;
+picture->argb_stride = 0;
+}
+static void WebPPictureResetBufferYUVA(WebPPicture* const picture) {
+picture->memory_ = NULL;
+picture->y = picture->u = picture->v = picture->a = NULL;
+picture->y_stride = picture->uv_stride = 0;
+picture->a_stride = 0;
+}
+void WebPPictureResetBuffers(WebPPicture* const picture) {
+WebPPictureResetBufferARGB(picture);
+WebPPictureResetBufferYUVA(picture);
+}
+int WebPPictureAllocARGB(WebPPicture* const picture, int width, int height) {
+void* memory;
+const uint64_t argb_size = (uint64_t)width * height;
+assert(picture != NULL);
+WebPSafeFree(picture->memory_argb_);
+WebPPictureResetBufferARGB(picture);
+if (width <= 0 || height <= 0) {
+return WebPEncodingSetError(picture, VP8_ENC_ERROR_BAD_DIMENSION);
+}
+// allocate a new buffer.
+memory = WebPSafeMalloc(argb_size, sizeof(*picture->argb));
+if (memory == NULL) {
+return WebPEncodingSetError(picture, VP8_ENC_ERROR_OUT_OF_MEMORY);
+}
+// TODO(skal): align plane to cache line?
+picture->memory_argb_ = memory;
+picture->argb = (uint32_t*)memory;
+picture->argb_stride = width;
+return 1;
+}
+int WebPPictureAllocYUVA(WebPPicture* const picture, int width, int height) {
const WebPEncCSP uv_csp = picture->colorspace & WEBP_CSP_UV_MASK;
const int has_alpha = picture->colorspace & WEBP_CSP_ALPHA_BIT;
-const int width = picture->width;
-const int height = picture->height;
-if (!picture->use_argb) {
const int y_stride = width;
-const int uv_width = HALVE(width);
-const int uv_height = HALVE(height);
+const int uv_width = (width + 1) >> 1;
+const int uv_height = (height + 1) >> 1;
const int uv_stride = uv_width;
int a_width, a_stride;
uint64_t y_size, uv_size, a_size, total_size;
uint8_t* mem;
-// U/V
-switch (uv_csp) {
-case WEBP_YUV420:
-break;
-default:
-return 0;
+assert(picture != NULL);
+WebPSafeFree(picture->memory_);
+WebPPictureResetBufferYUVA(picture);
+if (uv_csp != WEBP_YUV420) {
+return WebPEncodingSetError(picture, VP8_ENC_ERROR_INVALID_CONFIGURATION);
}
// alpha
@@ -75,12 +95,13 @@ int WebPPictureAlloc(WebPPicture* picture) {
// Security and validation checks
if (width <= 0 || height <= 0 || // luma/alpha param error
uv_width < 0 || uv_height < 0) { // u/v param error
-return 0;
+return WebPEncodingSetError(picture, VP8_ENC_ERROR_BAD_DIMENSION);
}
-// Clear previous buffer and allocate a new one.
-WebPPictureFree(picture); // erase previous buffer
+// allocate a new buffer.
mem = (uint8_t*)WebPSafeMalloc(total_size, sizeof(*mem));
-if (mem == NULL) return 0;
+if (mem == NULL) {
+return WebPEncodingSetError(picture, VP8_ENC_ERROR_OUT_OF_MEMORY);
+}
// From now on, we're in the clear, we can no longer fail...
picture->memory_ = (void*)mem;
@@ -102,75 +123,30 @@ int WebPPictureAlloc(WebPPicture* picture) {
mem += a_size;
}
(void)mem; // makes the static analyzer happy
-} else {
-void* memory;
-const uint64_t argb_size = (uint64_t)width * height;
-if (width <= 0 || height <= 0) {
-return 0;
-}
-// Clear previous buffer and allocate a new one.
+return 1;
+}
+int WebPPictureAlloc(WebPPicture* picture) {
+if (picture != NULL) {
+const int width = picture->width;
+const int height = picture->height;
WebPPictureFree(picture); // erase previous buffer
-memory = WebPSafeMalloc(argb_size, sizeof(*picture->argb));
-if (memory == NULL) return 0;
-// TODO(skal): align plane to cache line?
-picture->memory_argb_ = memory;
-picture->argb = (uint32_t*)memory;
-picture->argb_stride = width;
+if (!picture->use_argb) {
+return WebPPictureAllocYUVA(picture, width, height);
+} else {
+return WebPPictureAllocARGB(picture, width, height);
+}
}
}
return 1;
}
-// Remove reference to the ARGB buffer (doesn't free anything).
-static void PictureResetARGB(WebPPicture* const picture) {
-picture->memory_argb_ = NULL;
-picture->argb = NULL;
-picture->argb_stride = 0;
-}
-// Remove reference to the YUVA buffer (doesn't free anything).
-static void PictureResetYUVA(WebPPicture* const picture) {
-picture->memory_ = NULL;
-picture->y = picture->u = picture->v = picture->a = NULL;
-picture->y_stride = picture->uv_stride = 0;
-picture->a_stride = 0;
-}
-// Grab the 'specs' (writer, *opaque, width, height...) from 'src' and copy them
-// into 'dst'. Mark 'dst' as not owning any memory.
-void WebPPictureGrabSpecs(const WebPPicture* const src,
-WebPPicture* const dst) {
-assert(src != NULL && dst != NULL);
-*dst = *src;
-PictureResetYUVA(dst);
-PictureResetARGB(dst);
-}
-// Allocate a new argb buffer, discarding any existing one and preserving
-// the other YUV(A) buffer.
-static int PictureAllocARGB(WebPPicture* const picture) {
-WebPPicture tmp;
-WebPSafeFree(picture->memory_argb_);
-PictureResetARGB(picture);
-picture->use_argb = 1;
-WebPPictureGrabSpecs(picture, &tmp);
-if (!WebPPictureAlloc(&tmp)) {
-return WebPEncodingSetError(picture, VP8_ENC_ERROR_OUT_OF_MEMORY);
-}
-picture->memory_argb_ = tmp.memory_argb_;
-picture->argb = tmp.argb;
-picture->argb_stride = tmp.argb_stride;
-return 1;
-}
-// Release memory owned by 'picture' (both YUV and ARGB buffers).
void WebPPictureFree(WebPPicture* picture) {
if (picture != NULL) {
WebPSafeFree(picture->memory_);
WebPSafeFree(picture->memory_argb_);
-PictureResetYUVA(picture);
-PictureResetARGB(picture);
+WebPPictureResetBuffers(picture);
}
}
@@ -224,387 +200,6 @@ void WebPMemoryWriterClear(WebPMemoryWriter* writer) {
}
}
//------------------------------------------------------------------------------
// Detection of non-trivial transparency
// Returns true if alpha[] has non-0xff values.
static int CheckNonOpaque(const uint8_t* alpha, int width, int height,
int x_step, int y_step) {
if (alpha == NULL) return 0;
while (height-- > 0) {
int x;
for (x = 0; x < width * x_step; x += x_step) {
if (alpha[x] != 0xff) return 1; // TODO(skal): check 4/8 bytes at a time.
}
alpha += y_step;
}
return 0;
}
// Checking for the presence of non-opaque alpha.
int WebPPictureHasTransparency(const WebPPicture* picture) {
if (picture == NULL) return 0;
if (!picture->use_argb) {
return CheckNonOpaque(picture->a, picture->width, picture->height,
1, picture->a_stride);
} else {
int x, y;
const uint32_t* argb = picture->argb;
if (argb == NULL) return 0;
for (y = 0; y < picture->height; ++y) {
for (x = 0; x < picture->width; ++x) {
if (argb[x] < 0xff000000u) return 1; // test any alpha values != 0xff
}
argb += picture->argb_stride;
}
}
return 0;
}
//------------------------------------------------------------------------------
// RGB -> YUV conversion
static int RGBToY(int r, int g, int b, VP8Random* const rg) {
return VP8RGBToY(r, g, b, VP8RandomBits(rg, YUV_FIX));
}
static int RGBToU(int r, int g, int b, VP8Random* const rg) {
return VP8RGBToU(r, g, b, VP8RandomBits(rg, YUV_FIX + 2));
}
static int RGBToV(int r, int g, int b, VP8Random* const rg) {
return VP8RGBToV(r, g, b, VP8RandomBits(rg, YUV_FIX + 2));
}
//------------------------------------------------------------------------------
#if defined(USE_GAMMA_COMPRESSION)
// gamma-compensates loss of resolution during chroma subsampling
#define kGamma 0.80
#define kGammaFix 12 // fixed-point precision for linear values
#define kGammaScale ((1 << kGammaFix) - 1)
#define kGammaTabFix 7 // fixed-point fractional bits precision
#define kGammaTabScale (1 << kGammaTabFix)
#define kGammaTabRounder (kGammaTabScale >> 1)
#define kGammaTabSize (1 << (kGammaFix - kGammaTabFix))
static int kLinearToGammaTab[kGammaTabSize + 1];
static uint16_t kGammaToLinearTab[256];
static int kGammaTablesOk = 0;
static void InitGammaTables(void) {
if (!kGammaTablesOk) {
int v;
const double scale = 1. / kGammaScale;
for (v = 0; v <= 255; ++v) {
kGammaToLinearTab[v] =
(uint16_t)(pow(v / 255., kGamma) * kGammaScale + .5);
}
for (v = 0; v <= kGammaTabSize; ++v) {
const double x = scale * (v << kGammaTabFix);
kLinearToGammaTab[v] = (int)(pow(x, 1. / kGamma) * 255. + .5);
}
kGammaTablesOk = 1;
}
}
static WEBP_INLINE uint32_t GammaToLinear(uint8_t v) {
return kGammaToLinearTab[v];
}
// Convert a linear value 'v' to YUV_FIX+2 fixed-point precision
// U/V value, suitable for RGBToU/V calls.
static WEBP_INLINE int LinearToGamma(uint32_t base_value, int shift) {
const int v = base_value << shift; // final uplifted value
const int tab_pos = v >> (kGammaTabFix + 2); // integer part
const int x = v & ((kGammaTabScale << 2) - 1); // fractional part
const int v0 = kLinearToGammaTab[tab_pos];
const int v1 = kLinearToGammaTab[tab_pos + 1];
const int y = v1 * x + v0 * ((kGammaTabScale << 2) - x); // interpolate
return (y + kGammaTabRounder) >> kGammaTabFix; // descale
}
#else
static void InitGammaTables(void) {}
static WEBP_INLINE uint32_t GammaToLinear(uint8_t v) { return v; }
static WEBP_INLINE int LinearToGamma(uint32_t base_value, int shift) {
return (int)(base_value << shift);
}
#endif // USE_GAMMA_COMPRESSION
//------------------------------------------------------------------------------
#define SUM4(ptr) LinearToGamma( \
GammaToLinear((ptr)[0]) + \
GammaToLinear((ptr)[step]) + \
GammaToLinear((ptr)[rgb_stride]) + \
GammaToLinear((ptr)[rgb_stride + step]), 0) \
#define SUM2H(ptr) \
LinearToGamma(GammaToLinear((ptr)[0]) + GammaToLinear((ptr)[step]), 1)
#define SUM2V(ptr) \
LinearToGamma(GammaToLinear((ptr)[0]) + GammaToLinear((ptr)[rgb_stride]), 1)
#define SUM1(ptr) \
LinearToGamma(GammaToLinear((ptr)[0]), 2)
#define RGB_TO_UV(x, y, SUM) { \
const int src = (2 * (step * (x) + (y) * rgb_stride)); \
const int dst = (x) + (y) * picture->uv_stride; \
const int r = SUM(r_ptr + src); \
const int g = SUM(g_ptr + src); \
const int b = SUM(b_ptr + src); \
picture->u[dst] = RGBToU(r, g, b, &rg); \
picture->v[dst] = RGBToV(r, g, b, &rg); \
}
static int ImportYUVAFromRGBA(const uint8_t* const r_ptr,
const uint8_t* const g_ptr,
const uint8_t* const b_ptr,
const uint8_t* const a_ptr,
int step, // bytes per pixel
int rgb_stride, // bytes per scanline
float dithering,
WebPPicture* const picture) {
const WebPEncCSP uv_csp = picture->colorspace & WEBP_CSP_UV_MASK;
int x, y;
const int width = picture->width;
const int height = picture->height;
const int has_alpha = CheckNonOpaque(a_ptr, width, height, step, rgb_stride);
VP8Random rg;
picture->colorspace = uv_csp;
picture->use_argb = 0;
if (has_alpha) {
picture->colorspace |= WEBP_CSP_ALPHA_BIT;
}
if (!WebPPictureAlloc(picture)) return 0;
VP8InitRandom(&rg, dithering);
InitGammaTables();
// Import luma plane
for (y = 0; y < height; ++y) {
for (x = 0; x < width; ++x) {
const int offset = step * x + y * rgb_stride;
picture->y[x + y * picture->y_stride] =
RGBToY(r_ptr[offset], g_ptr[offset], b_ptr[offset], &rg);
}
}
// Downsample U/V plane
for (y = 0; y < (height >> 1); ++y) {
for (x = 0; x < (width >> 1); ++x) {
RGB_TO_UV(x, y, SUM4);
}
if (width & 1) {
RGB_TO_UV(x, y, SUM2V);
}
}
if (height & 1) {
for (x = 0; x < (width >> 1); ++x) {
RGB_TO_UV(x, y, SUM2H);
}
if (width & 1) {
RGB_TO_UV(x, y, SUM1);
}
}
if (has_alpha) {
assert(step >= 4);
assert(picture->a != NULL);
for (y = 0; y < height; ++y) {
for (x = 0; x < width; ++x) {
picture->a[x + y * picture->a_stride] =
a_ptr[step * x + y * rgb_stride];
}
}
}
return 1;
}
static int Import(WebPPicture* const picture,
const uint8_t* const rgb, int rgb_stride,
int step, int swap_rb, int import_alpha) {
const uint8_t* const r_ptr = rgb + (swap_rb ? 2 : 0);
const uint8_t* const g_ptr = rgb + 1;
const uint8_t* const b_ptr = rgb + (swap_rb ? 0 : 2);
const uint8_t* const a_ptr = import_alpha ? rgb + 3 : NULL;
const int width = picture->width;
const int height = picture->height;
if (!picture->use_argb) {
return ImportYUVAFromRGBA(r_ptr, g_ptr, b_ptr, a_ptr, step, rgb_stride,
0.f /* no dithering */, picture);
}
if (import_alpha) {
picture->colorspace |= WEBP_CSP_ALPHA_BIT;
} else {
picture->colorspace &= ~WEBP_CSP_ALPHA_BIT;
}
if (!WebPPictureAlloc(picture)) return 0;
if (!import_alpha) {
int x, y;
for (y = 0; y < height; ++y) {
for (x = 0; x < width; ++x) {
const int offset = step * x + y * rgb_stride;
const uint32_t argb =
MakeARGB32(r_ptr[offset], g_ptr[offset], b_ptr[offset]);
picture->argb[x + y * picture->argb_stride] = argb;
}
}
} else {
int x, y;
assert(step >= 4);
for (y = 0; y < height; ++y) {
for (x = 0; x < width; ++x) {
const int offset = step * x + y * rgb_stride;
const uint32_t argb = ((uint32_t)a_ptr[offset] << 24) |
(r_ptr[offset] << 16) |
(g_ptr[offset] << 8) |
(b_ptr[offset]);
picture->argb[x + y * picture->argb_stride] = argb;
}
}
}
return 1;
}
#undef SUM4
#undef SUM2V
#undef SUM2H
#undef SUM1
#undef RGB_TO_UV
//------------------------------------------------------------------------------
int WebPPictureImportRGB(WebPPicture* picture,
const uint8_t* rgb, int rgb_stride) {
return Import(picture, rgb, rgb_stride, 3, 0, 0);
}
int WebPPictureImportBGR(WebPPicture* picture,
const uint8_t* rgb, int rgb_stride) {
return Import(picture, rgb, rgb_stride, 3, 1, 0);
}
int WebPPictureImportRGBA(WebPPicture* picture,
const uint8_t* rgba, int rgba_stride) {
return Import(picture, rgba, rgba_stride, 4, 0, 1);
}
int WebPPictureImportBGRA(WebPPicture* picture,
const uint8_t* rgba, int rgba_stride) {
return Import(picture, rgba, rgba_stride, 4, 1, 1);
}
int WebPPictureImportRGBX(WebPPicture* picture,
const uint8_t* rgba, int rgba_stride) {
return Import(picture, rgba, rgba_stride, 4, 0, 0);
}
int WebPPictureImportBGRX(WebPPicture* picture,
const uint8_t* rgba, int rgba_stride) {
return Import(picture, rgba, rgba_stride, 4, 1, 0);
}
//------------------------------------------------------------------------------
// Automatic YUV <-> ARGB conversions.
int WebPPictureYUVAToARGB(WebPPicture* picture) {
if (picture == NULL) return 0;
if (picture->y == NULL || picture->u == NULL || picture->v == NULL) {
return WebPEncodingSetError(picture, VP8_ENC_ERROR_NULL_PARAMETER);
}
if ((picture->colorspace & WEBP_CSP_ALPHA_BIT) && picture->a == NULL) {
return WebPEncodingSetError(picture, VP8_ENC_ERROR_NULL_PARAMETER);
}
if ((picture->colorspace & WEBP_CSP_UV_MASK) != WEBP_YUV420) {
return WebPEncodingSetError(picture, VP8_ENC_ERROR_INVALID_CONFIGURATION);
}
// Allocate a new argb buffer (discarding the previous one).
if (!PictureAllocARGB(picture)) return 0;
// Convert
{
int y;
const int width = picture->width;
const int height = picture->height;
const int argb_stride = 4 * picture->argb_stride;
uint8_t* dst = (uint8_t*)picture->argb;
const uint8_t *cur_u = picture->u, *cur_v = picture->v, *cur_y = picture->y;
WebPUpsampleLinePairFunc upsample = WebPGetLinePairConverter(ALPHA_IS_LAST);
// First row, with replicated top samples.
upsample(cur_y, NULL, cur_u, cur_v, cur_u, cur_v, dst, NULL, width);
cur_y += picture->y_stride;
dst += argb_stride;
// Center rows.
for (y = 1; y + 1 < height; y += 2) {
const uint8_t* const top_u = cur_u;
const uint8_t* const top_v = cur_v;
cur_u += picture->uv_stride;
cur_v += picture->uv_stride;
upsample(cur_y, cur_y + picture->y_stride, top_u, top_v, cur_u, cur_v,
dst, dst + argb_stride, width);
cur_y += 2 * picture->y_stride;
dst += 2 * argb_stride;
}
// Last row (if needed), with replicated bottom samples.
if (height > 1 && !(height & 1)) {
upsample(cur_y, NULL, cur_u, cur_v, cur_u, cur_v, dst, NULL, width);
}
// Insert alpha values if needed, in replacement for the default 0xff ones.
if (picture->colorspace & WEBP_CSP_ALPHA_BIT) {
for (y = 0; y < height; ++y) {
uint32_t* const argb_dst = picture->argb + y * picture->argb_stride;
const uint8_t* const src = picture->a + y * picture->a_stride;
int x;
for (x = 0; x < width; ++x) {
argb_dst[x] = (argb_dst[x] & 0x00ffffffu) | ((uint32_t)src[x] << 24);
}
}
}
}
return 1;
}
int WebPPictureARGBToYUVADithered(WebPPicture* picture, WebPEncCSP colorspace,
float dithering) {
if (picture == NULL) return 0;
if (picture->argb == NULL) {
return WebPEncodingSetError(picture, VP8_ENC_ERROR_NULL_PARAMETER);
} else {
const uint8_t* const argb = (const uint8_t*)picture->argb;
const uint8_t* const r = ALPHA_IS_LAST ? argb + 2 : argb + 1;
const uint8_t* const g = ALPHA_IS_LAST ? argb + 1 : argb + 2;
const uint8_t* const b = ALPHA_IS_LAST ? argb + 0 : argb + 3;
const uint8_t* const a = ALPHA_IS_LAST ? argb + 3 : argb + 0;
// We work on a tmp copy of 'picture', because ImportYUVAFromRGBA()
// would be calling WebPPictureFree(picture) otherwise.
WebPPicture tmp = *picture;
PictureResetARGB(&tmp); // reset ARGB buffer so that it's not free()'d.
tmp.use_argb = 0;
tmp.colorspace = colorspace & WEBP_CSP_UV_MASK;
if (!ImportYUVAFromRGBA(r, g, b, a, 4, 4 * picture->argb_stride, dithering,
&tmp)) {
return WebPEncodingSetError(picture, VP8_ENC_ERROR_OUT_OF_MEMORY);
}
// Copy back the YUV specs into 'picture'.
tmp.argb = picture->argb;
tmp.argb_stride = picture->argb_stride;
tmp.memory_argb_ = picture->memory_argb_;
*picture = tmp;
}
return 1;
}
int WebPPictureARGBToYUVA(WebPPicture* picture, WebPEncCSP colorspace) {
return WebPPictureARGBToYUVADithered(picture, colorspace, 0.f);
}
//------------------------------------------------------------------------------
// Simplest high-level calls:
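
One effect of the refactored allocators above: a failed WebPPictureAlloc() now records a reason in the picture's public error_code field instead of only returning 0. A hypothetical check (illustration only, not from this commit):

#include <stdio.h>
#include "webp/encode.h"

// Illustrative only: provoke an allocation failure and read the reason back.
static void DemoAllocError(void) {
  WebPPicture pic;
  if (!WebPPictureInit(&pic)) return;
  pic.width = 0;                  // deliberately invalid dimension
  pic.height = 240;
  pic.use_argb = 1;
  if (!WebPPictureAlloc(&pic)) {
    // VP8_ENC_ERROR_BAD_DIMENSION, set by the new WebPPictureAllocARGB().
    fprintf(stderr, "alloc failed: error_code=%d\n", (int)pic.error_code);
  }
  WebPPictureFree(&pic);
}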

src/enc/picture_csp.c (new file)

@@ -0,0 +1,408 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// WebPPicture utils for colorspace conversion
//
// Author: Skal (pascal.massimino@gmail.com)
#include <assert.h>
#include <stdlib.h>
#include <math.h>
#include "./vp8enci.h"
#include "../utils/random.h"
#include "../dsp/yuv.h"
// Uncomment to disable gamma-compression during RGB->U/V averaging
#define USE_GAMMA_COMPRESSION
static const union {
uint32_t argb;
uint8_t bytes[4];
} test_endian = { 0xff000000u };
#define ALPHA_IS_LAST (test_endian.bytes[3] == 0xff)
static WEBP_INLINE uint32_t MakeARGB32(int r, int g, int b) {
return (0xff000000u | (r << 16) | (g << 8) | b);
}
//------------------------------------------------------------------------------
// Detection of non-trivial transparency
// Returns true if alpha[] has non-0xff values.
static int CheckNonOpaque(const uint8_t* alpha, int width, int height,
int x_step, int y_step) {
if (alpha == NULL) return 0;
while (height-- > 0) {
int x;
for (x = 0; x < width * x_step; x += x_step) {
if (alpha[x] != 0xff) return 1; // TODO(skal): check 4/8 bytes at a time.
}
alpha += y_step;
}
return 0;
}
// Checking for the presence of non-opaque alpha.
int WebPPictureHasTransparency(const WebPPicture* picture) {
if (picture == NULL) return 0;
if (!picture->use_argb) {
return CheckNonOpaque(picture->a, picture->width, picture->height,
1, picture->a_stride);
} else {
int x, y;
const uint32_t* argb = picture->argb;
if (argb == NULL) return 0;
for (y = 0; y < picture->height; ++y) {
for (x = 0; x < picture->width; ++x) {
if (argb[x] < 0xff000000u) return 1; // test any alpha values != 0xff
}
argb += picture->argb_stride;
}
}
return 0;
}
//------------------------------------------------------------------------------
// RGB -> YUV conversion
static int RGBToY(int r, int g, int b, VP8Random* const rg) {
return VP8RGBToY(r, g, b, VP8RandomBits(rg, YUV_FIX));
}
static int RGBToU(int r, int g, int b, VP8Random* const rg) {
return VP8RGBToU(r, g, b, VP8RandomBits(rg, YUV_FIX + 2));
}
static int RGBToV(int r, int g, int b, VP8Random* const rg) {
return VP8RGBToV(r, g, b, VP8RandomBits(rg, YUV_FIX + 2));
}
//------------------------------------------------------------------------------
#if defined(USE_GAMMA_COMPRESSION)
// gamma-compensates loss of resolution during chroma subsampling
#define kGamma 0.80
#define kGammaFix 12 // fixed-point precision for linear values
#define kGammaScale ((1 << kGammaFix) - 1)
#define kGammaTabFix 7 // fixed-point fractional bits precision
#define kGammaTabScale (1 << kGammaTabFix)
#define kGammaTabRounder (kGammaTabScale >> 1)
#define kGammaTabSize (1 << (kGammaFix - kGammaTabFix))
static int kLinearToGammaTab[kGammaTabSize + 1];
static uint16_t kGammaToLinearTab[256];
static int kGammaTablesOk = 0;
static void InitGammaTables(void) {
if (!kGammaTablesOk) {
int v;
const double scale = 1. / kGammaScale;
for (v = 0; v <= 255; ++v) {
kGammaToLinearTab[v] =
(uint16_t)(pow(v / 255., kGamma) * kGammaScale + .5);
}
for (v = 0; v <= kGammaTabSize; ++v) {
const double x = scale * (v << kGammaTabFix);
kLinearToGammaTab[v] = (int)(pow(x, 1. / kGamma) * 255. + .5);
}
kGammaTablesOk = 1;
}
}
static WEBP_INLINE uint32_t GammaToLinear(uint8_t v) {
return kGammaToLinearTab[v];
}
// Convert a linear value 'v' to YUV_FIX+2 fixed-point precision
// U/V value, suitable for RGBToU/V calls.
static WEBP_INLINE int LinearToGamma(uint32_t base_value, int shift) {
const int v = base_value << shift; // final uplifted value
const int tab_pos = v >> (kGammaTabFix + 2); // integer part
const int x = v & ((kGammaTabScale << 2) - 1); // fractional part
const int v0 = kLinearToGammaTab[tab_pos];
const int v1 = kLinearToGammaTab[tab_pos + 1];
const int y = v1 * x + v0 * ((kGammaTabScale << 2) - x); // interpolate
return (y + kGammaTabRounder) >> kGammaTabFix; // descale
}
#else
static void InitGammaTables(void) {}
static WEBP_INLINE uint32_t GammaToLinear(uint8_t v) { return v; }
static WEBP_INLINE int LinearToGamma(uint32_t base_value, int shift) {
return (int)(base_value << shift);
}
#endif // USE_GAMMA_COMPRESSION
//------------------------------------------------------------------------------
#define SUM4(ptr) LinearToGamma( \
GammaToLinear((ptr)[0]) + \
GammaToLinear((ptr)[step]) + \
GammaToLinear((ptr)[rgb_stride]) + \
GammaToLinear((ptr)[rgb_stride + step]), 0) \
#define SUM2H(ptr) \
LinearToGamma(GammaToLinear((ptr)[0]) + GammaToLinear((ptr)[step]), 1)
#define SUM2V(ptr) \
LinearToGamma(GammaToLinear((ptr)[0]) + GammaToLinear((ptr)[rgb_stride]), 1)
#define SUM1(ptr) \
LinearToGamma(GammaToLinear((ptr)[0]), 2)
#define RGB_TO_UV(x, y, SUM) { \
const int src = (2 * (step * (x) + (y) * rgb_stride)); \
const int dst = (x) + (y) * picture->uv_stride; \
const int r = SUM(r_ptr + src); \
const int g = SUM(g_ptr + src); \
const int b = SUM(b_ptr + src); \
picture->u[dst] = RGBToU(r, g, b, &rg); \
picture->v[dst] = RGBToV(r, g, b, &rg); \
}
static int ImportYUVAFromRGBA(const uint8_t* const r_ptr,
const uint8_t* const g_ptr,
const uint8_t* const b_ptr,
const uint8_t* const a_ptr,
int step, // bytes per pixel
int rgb_stride, // bytes per scanline
float dithering,
WebPPicture* const picture) {
const WebPEncCSP uv_csp = picture->colorspace & WEBP_CSP_UV_MASK;
int x, y;
const int width = picture->width;
const int height = picture->height;
const int has_alpha = CheckNonOpaque(a_ptr, width, height, step, rgb_stride);
VP8Random rg;
picture->colorspace = uv_csp;
picture->use_argb = 0;
if (has_alpha) {
picture->colorspace |= WEBP_CSP_ALPHA_BIT;
}
if (!WebPPictureAllocYUVA(picture, width, height)) return 0;
VP8InitRandom(&rg, dithering);
InitGammaTables();
// Import luma plane
for (y = 0; y < height; ++y) {
for (x = 0; x < width; ++x) {
const int offset = step * x + y * rgb_stride;
picture->y[x + y * picture->y_stride] =
RGBToY(r_ptr[offset], g_ptr[offset], b_ptr[offset], &rg);
}
}
// Downsample U/V plane
for (y = 0; y < (height >> 1); ++y) {
for (x = 0; x < (width >> 1); ++x) {
RGB_TO_UV(x, y, SUM4);
}
if (width & 1) {
RGB_TO_UV(x, y, SUM2V);
}
}
if (height & 1) {
for (x = 0; x < (width >> 1); ++x) {
RGB_TO_UV(x, y, SUM2H);
}
if (width & 1) {
RGB_TO_UV(x, y, SUM1);
}
}
if (has_alpha) {
assert(step >= 4);
assert(picture->a != NULL);
for (y = 0; y < height; ++y) {
for (x = 0; x < width; ++x) {
picture->a[x + y * picture->a_stride] =
a_ptr[step * x + y * rgb_stride];
}
}
}
return 1;
}
static int Import(WebPPicture* const picture,
const uint8_t* const rgb, int rgb_stride,
int step, int swap_rb, int import_alpha) {
const uint8_t* const r_ptr = rgb + (swap_rb ? 2 : 0);
const uint8_t* const g_ptr = rgb + 1;
const uint8_t* const b_ptr = rgb + (swap_rb ? 0 : 2);
const uint8_t* const a_ptr = import_alpha ? rgb + 3 : NULL;
const int width = picture->width;
const int height = picture->height;
if (!picture->use_argb) {
return ImportYUVAFromRGBA(r_ptr, g_ptr, b_ptr, a_ptr, step, rgb_stride,
0.f /* no dithering */, picture);
}
if (import_alpha) {
picture->colorspace |= WEBP_CSP_ALPHA_BIT;
} else {
picture->colorspace &= ~WEBP_CSP_ALPHA_BIT;
}
if (!WebPPictureAlloc(picture)) return 0;
if (!import_alpha) {
int x, y;
for (y = 0; y < height; ++y) {
for (x = 0; x < width; ++x) {
const int offset = step * x + y * rgb_stride;
const uint32_t argb =
MakeARGB32(r_ptr[offset], g_ptr[offset], b_ptr[offset]);
picture->argb[x + y * picture->argb_stride] = argb;
}
}
} else {
int x, y;
assert(step >= 4);
for (y = 0; y < height; ++y) {
for (x = 0; x < width; ++x) {
const int offset = step * x + y * rgb_stride;
const uint32_t argb = ((uint32_t)a_ptr[offset] << 24) |
(r_ptr[offset] << 16) |
(g_ptr[offset] << 8) |
(b_ptr[offset]);
picture->argb[x + y * picture->argb_stride] = argb;
}
}
}
return 1;
}
#undef SUM4
#undef SUM2V
#undef SUM2H
#undef SUM1
#undef RGB_TO_UV
//------------------------------------------------------------------------------
int WebPPictureImportRGB(WebPPicture* picture,
const uint8_t* rgb, int rgb_stride) {
return Import(picture, rgb, rgb_stride, 3, 0, 0);
}
int WebPPictureImportBGR(WebPPicture* picture,
const uint8_t* rgb, int rgb_stride) {
return Import(picture, rgb, rgb_stride, 3, 1, 0);
}
int WebPPictureImportRGBA(WebPPicture* picture,
const uint8_t* rgba, int rgba_stride) {
return Import(picture, rgba, rgba_stride, 4, 0, 1);
}
int WebPPictureImportBGRA(WebPPicture* picture,
const uint8_t* rgba, int rgba_stride) {
return Import(picture, rgba, rgba_stride, 4, 1, 1);
}
int WebPPictureImportRGBX(WebPPicture* picture,
const uint8_t* rgba, int rgba_stride) {
return Import(picture, rgba, rgba_stride, 4, 0, 0);
}
int WebPPictureImportBGRX(WebPPicture* picture,
const uint8_t* rgba, int rgba_stride) {
return Import(picture, rgba, rgba_stride, 4, 1, 0);
}
//------------------------------------------------------------------------------
// Automatic YUV <-> ARGB conversions.
int WebPPictureYUVAToARGB(WebPPicture* picture) {
if (picture == NULL) return 0;
if (picture->y == NULL || picture->u == NULL || picture->v == NULL) {
return WebPEncodingSetError(picture, VP8_ENC_ERROR_NULL_PARAMETER);
}
if ((picture->colorspace & WEBP_CSP_ALPHA_BIT) && picture->a == NULL) {
return WebPEncodingSetError(picture, VP8_ENC_ERROR_NULL_PARAMETER);
}
if ((picture->colorspace & WEBP_CSP_UV_MASK) != WEBP_YUV420) {
return WebPEncodingSetError(picture, VP8_ENC_ERROR_INVALID_CONFIGURATION);
}
// Allocate a new argb buffer (discarding the previous one).
if (!WebPPictureAllocARGB(picture, picture->width, picture->height)) return 0;
picture->use_argb = 1;
// Convert
{
int y;
const int width = picture->width;
const int height = picture->height;
const int argb_stride = 4 * picture->argb_stride;
uint8_t* dst = (uint8_t*)picture->argb;
const uint8_t *cur_u = picture->u, *cur_v = picture->v, *cur_y = picture->y;
WebPUpsampleLinePairFunc upsample = WebPGetLinePairConverter(ALPHA_IS_LAST);
// First row, with replicated top samples.
upsample(cur_y, NULL, cur_u, cur_v, cur_u, cur_v, dst, NULL, width);
cur_y += picture->y_stride;
dst += argb_stride;
// Center rows.
for (y = 1; y + 1 < height; y += 2) {
const uint8_t* const top_u = cur_u;
const uint8_t* const top_v = cur_v;
cur_u += picture->uv_stride;
cur_v += picture->uv_stride;
upsample(cur_y, cur_y + picture->y_stride, top_u, top_v, cur_u, cur_v,
dst, dst + argb_stride, width);
cur_y += 2 * picture->y_stride;
dst += 2 * argb_stride;
}
// Last row (if needed), with replicated bottom samples.
if (height > 1 && !(height & 1)) {
upsample(cur_y, NULL, cur_u, cur_v, cur_u, cur_v, dst, NULL, width);
}
// Insert alpha values if needed, in replacement for the default 0xff ones.
if (picture->colorspace & WEBP_CSP_ALPHA_BIT) {
for (y = 0; y < height; ++y) {
uint32_t* const argb_dst = picture->argb + y * picture->argb_stride;
const uint8_t* const src = picture->a + y * picture->a_stride;
int x;
for (x = 0; x < width; ++x) {
argb_dst[x] = (argb_dst[x] & 0x00ffffffu) | ((uint32_t)src[x] << 24);
}
}
}
}
return 1;
}
int WebPPictureARGBToYUVADithered(WebPPicture* picture, WebPEncCSP colorspace,
float dithering) {
if (picture == NULL) return 0;
if (picture->argb == NULL) {
return WebPEncodingSetError(picture, VP8_ENC_ERROR_NULL_PARAMETER);
} else {
const uint8_t* const argb = (const uint8_t*)picture->argb;
const uint8_t* const r = ALPHA_IS_LAST ? argb + 2 : argb + 1;
const uint8_t* const g = ALPHA_IS_LAST ? argb + 1 : argb + 2;
const uint8_t* const b = ALPHA_IS_LAST ? argb + 0 : argb + 3;
const uint8_t* const a = ALPHA_IS_LAST ? argb + 3 : argb + 0;
picture->colorspace = colorspace;
if (!ImportYUVAFromRGBA(r, g, b, a, 4, 4 * picture->argb_stride, dithering,
picture)) {
return WebPEncodingSetError(picture, VP8_ENC_ERROR_OUT_OF_MEMORY);
}
}
return 1;
}
int WebPPictureARGBToYUVA(WebPPicture* picture, WebPEncCSP colorspace) {
return WebPPictureARGBToYUVADithered(picture, colorspace, 0.f);
}
//------------------------------------------------------------------------------
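
As a plain floating-point reference for the fixed-point SUM4/RGB_TO_UV path above (illustration only, not library code): with gamma compression enabled, each 2x2 chroma block is averaged in roughly linear light, using the kGamma = 0.80 exponent defined in this file, and the result is returned pre-scaled by 4 as the RGBToU/V helpers expect.

#include <math.h>

// Reference sketch of what the table-based GammaToLinear()/LinearToGamma()
// pair approximates for SUM4.
static double GammaToLinearF(double v) { return pow(v / 255., 0.80); }
static double LinearToGammaF(double x) { return 255. * pow(x, 1. / 0.80); }

// Averages one channel of a 2x2 block; the x4 output scale matches the
// "summed over four pixels" convention of the U/V converters.
static double AverageChannel2x2(double c00, double c01,
                                double c10, double c11) {
  const double linear_avg = (GammaToLinearF(c00) + GammaToLinearF(c01) +
                             GammaToLinearF(c10) + GammaToLinearF(c11)) / 4.;
  return 4. * LinearToGammaF(linear_avg);
}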


@@ -20,6 +20,15 @@
#define HALVE(x) (((x) + 1) >> 1)
+// Grab the 'specs' (writer, *opaque, width, height...) from 'src' and copy them
+// into 'dst'. Mark 'dst' as not owning any memory.
+static void PictureGrabSpecs(const WebPPicture* const src,
+WebPPicture* const dst) {
+assert(src != NULL && dst != NULL);
+*dst = *src;
+WebPPictureResetBuffers(dst);
+}
//------------------------------------------------------------------------------
// Picture copying
@@ -57,7 +66,7 @@ int WebPPictureCopy(const WebPPicture* src, WebPPicture* dst) {
if (src == NULL || dst == NULL) return 0;
if (src == dst) return 1;
-WebPPictureGrabSpecs(src, dst);
+PictureGrabSpecs(src, dst);
if (!WebPPictureAlloc(dst)) return 0;
if (!src->use_argb) {
@@ -96,7 +105,7 @@ int WebPPictureView(const WebPPicture* src,
if (!AdjustAndCheckRectangle(src, &left, &top, width, height)) return 0;
if (src != dst) { // beware of aliasing! We don't want to leak 'memory_'.
-WebPPictureGrabSpecs(src, dst);
+PictureGrabSpecs(src, dst);
}
dst->width = width;
dst->height = height;
@@ -127,7 +136,7 @@ int WebPPictureCrop(WebPPicture* pic,
if (pic == NULL) return 0;
if (!AdjustAndCheckRectangle(pic, &left, &top, width, height)) return 0;
-WebPPictureGrabSpecs(pic, &tmp);
+PictureGrabSpecs(pic, &tmp);
tmp.width = width;
tmp.height = height;
if (!WebPPictureAlloc(&tmp)) return 0;
@@ -216,7 +225,7 @@ int WebPPictureRescale(WebPPicture* pic, int width, int height) {
// Check if the overall dimensions still make sense.
if (width <= 0 || height <= 0) return 0;
-WebPPictureGrabSpecs(pic, &tmp);
+PictureGrabSpecs(pic, &tmp);
tmp.width = width;
tmp.height = height;
if (!WebPPictureAlloc(&tmp)) return 0;


@@ -136,10 +136,10 @@ void WebPBlendAlpha(WebPPicture* pic, uint32_t background_rgb) {
if (pic == NULL) return;
if (!pic->use_argb) {
const int uv_width = (pic->width >> 1); // omit last pixel during u/v loop
-const int Y0 = VP8RGBToY(red, green, blue, 1 << (YUV_FIX - 1));
+const int Y0 = VP8RGBToY(red, green, blue, YUV_HALF);
// VP8RGBToU/V expects the u/v values summed over four pixels
-const int U0 = VP8RGBToU(4 * red, 4 * green, 4 * blue, 1 << (YUV_FIX + 1));
-const int V0 = VP8RGBToV(4 * red, 4 * green, 4 * blue, 1 << (YUV_FIX + 1));
+const int U0 = VP8RGBToU(4 * red, 4 * green, 4 * blue, 4 * YUV_HALF);
+const int V0 = VP8RGBToV(4 * red, 4 * green, 4 * blue, 4 * YUV_HALF);
const int has_alpha = pic->colorspace & WEBP_CSP_ALPHA_BIT;
if (!has_alpha || pic->a == NULL) return; // nothing to do
for (y = 0; y < pic->height; ++y) {
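
The rewritten rounding constants above are value-for-value identical to the old ones, assuming YUV_HALF is defined as 1 << (YUV_FIX - 1) as in src/dsp/yuv.h; a tiny self-check, for illustration only:

#include <assert.h>

#define YUV_FIX  16                     // fixed-point precision, per src/dsp/yuv.h
#define YUV_HALF (1 << (YUV_FIX - 1))

static void CheckRounders(void) {
  assert((1 << (YUV_FIX - 1)) == YUV_HALF);       // Y0 rounder, unchanged
  assert((1 << (YUV_FIX + 1)) == 4 * YUV_HALF);   // U0/V0 rounder, unchanged
}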


@@ -553,12 +553,21 @@ void VP8AdjustFilterStrength(VP8EncIterator* const it);
// step of 'delta', given a sharpness parameter 'sharpness'.
int VP8FilterStrengthFromDelta(int sharpness, int delta);
// misc utils for picture_*.c:
-// Grab the 'specs' (writer, *opaque, width, height...) from 'src' and copy them
-// into 'dst'. Mark 'dst' as not owning any memory.
-void WebPPictureGrabSpecs(const WebPPicture* const src,
-WebPPicture* const dst);
+// Remove reference to the ARGB/YUVA buffer (doesn't free anything).
+void WebPPictureResetBuffers(WebPPicture* const picture);
+// Allocates ARGB buffer of given dimension (previous one is always free'd).
+// Preserves the YUV(A) buffer. Returns false in case of error (invalid param,
+// out-of-memory).
+int WebPPictureAllocARGB(WebPPicture* const picture, int width, int height);
+// Allocates YUVA buffer of given dimension (previous one is always free'd).
+// Uses picture->csp to determine whether an alpha buffer is needed.
+// Preserves the ARGB buffer.
+// Returns false in case of error (invalid param, out-of-memory).
+int WebPPictureAllocYUVA(WebPPicture* const picture, int width, int height);
//------------------------------------------------------------------------------
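
A minimal sketch of how the two allocators declared above divide the work (hypothetical internal helper, for illustration only): each frees and reallocates only its own plane set, so refreshing ARGB leaves an existing YUVA buffer alone.

// Hypothetical helper inside a picture_*.c file (which already includes ./vp8enci.h).
static int EnsureARGB(WebPPicture* const pic) {
  if (pic->argb != NULL) return 1;                    // already allocated
  if (!WebPPictureAllocARGB(pic, pic->width, pic->height)) {
    return 0;                                         // error_code already set
  }
  pic->use_argb = 1;
  return 1;
}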