Simplify the gif2webp tool: move the optimization details to util

Specifically:
- Merge OptimizeAndEncodeFrame with WebPFrameCacheAddFrame: they use the same
  if-else structure.
- Move maintenance of 'prev_canvas' and 'curr_canvas' to util.
- Move ReduceTransparency() and FlattenSimilarBlocks() calls to SetFrame(): this is
  in preparation for the next patch, which will try lossless encoding for each
  frame, even when the '-lossy' option is given.
- Make most methods static inside util.

No changes to output expected.

Change-Id: I1f65af25246665508cb20f0f6e338f9aaba9367b
Urvang Joshi 2013-10-14 14:39:46 -07:00
parent cb22155201
commit 38efdc2e9c
3 changed files with 399 additions and 421 deletions
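For orientation, the diffs below boil down to the following per-frame path in gif2webp's main loop. This is a minimal sketch reconstructed from the call sites in the change: the wrapper ProcessImageRecord() is hypothetical (the real code sits inline in main()'s IMAGE_DESC_RECORD_TYPE case), ReadFrame() is the static decoder helper shown in the first diff, and the util header name is assumed; option parsing, the other GIF record types and most error handling are omitted.

#include <stdio.h>
#include "gif_lib.h"          /* giflib */
#include "webp/encode.h"
#include "webp/mux.h"
#include "gif2webp_util.h"    /* assumed name of the util header in this change */

/* Hypothetical wrapper around main()'s IMAGE_DESC_RECORD_TYPE handling.
 * 'frame' is the single canvas-sized WebPPicture allocated in main();
 * 'info' carries the duration/dispose/blend settings parsed from the
 * preceding graphics-control extension. */
static int ProcessImageRecord(GifFileType* const gif,
                              WebPFrameCache* const cache,
                              const WebPConfig* const config,
                              WebPPicture* const frame,
                              WebPMuxFrameInfo* const info,
                              WebPMux* const mux, int verbose) {
  WebPFrameRect gif_rect;
  if (!DGifGetImageDesc(gif)) return 0;
  /* Decode the GIF sub-image in place into the canvas-sized 'frame'. */
  if (!ReadFrame(gif, &gif_rect, frame)) return 0;
  /* One call now does what OptimizeAndEncodeFrame() + DisposeFrame() did:
   * transparency reduction and block flattening (lossy, non-key frames),
   * key-frame selection, encoding of the sub-frame/key-frame candidates,
   * frame disposal, and maintenance of the 'prev_canvas'/'curr_canvas'
   * buffers that the cache now owns. */
  if (!WebPFrameCacheAddFrame(cache, config, &gif_rect, frame, info)) {
    fprintf(stderr, "Error! Cannot encode frame as WebP\n");
    fprintf(stderr, "Error code: %d\n", frame->error_code);
    return 0;
  }
  /* Push frames that can no longer become key frames to the mux. */
  return (WebPFrameCacheFlush(cache, verbose, mux) == WEBP_MUX_OK);
}

Because the cache now owns the canvases, it is created with the canvas dimensions, cache = WebPFrameCacheNew(frame.width, frame.height, kmin, kmax). Remaining frames can be drained with WebPFrameCacheFlushAll(cache, verbose, mux); the End label then frees the picture with WebPPictureFree(&frame) and the cache with WebPFrameCacheDelete(cache).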

View File

@@ -36,9 +36,7 @@
//------------------------------------------------------------------------------
// Global variables gathered in a struct.
static int transparent_index = -1; // Index of transparent color in the map.
static int is_first_frame = 1; // Whether we are processing the first frame.
static void SanitizeKeyFrameIntervals(size_t* const kmin_ptr,
size_t* const kmax_ptr) {
@@ -81,14 +79,15 @@ static void Remap(const uint8_t* const src, const GifFileType* const gif,
for (i = 0; i < len; ++i) {
const GifColorType c = colors[src[i]];
dst[i] = (src[i] == transparent_index) ? TRANSPARENT_COLOR
dst[i] = (src[i] == transparent_index) ? WEBP_UTIL_TRANSPARENT_COLOR
: c.Blue | (c.Green << 8) | (c.Red << 16) | (0xff << 24);
}
}
// Read the GIF image frame.
static int ReadFrame(GifFileType* const gif, WebPFrameRect* const gif_rect,
WebPPicture* const sub_image, WebPPicture* const curr) {
WebPPicture* const webp_frame) {
WebPPicture sub_image;
const GifImageDesc image_desc = gif->Image;
uint32_t* dst = NULL;
uint8_t* tmp = NULL;
@@ -99,13 +98,13 @@ static int ReadFrame(GifFileType* const gif, WebPFrameRect* const gif_rect,
*gif_rect = rect;
// Use a view for the sub-picture:
if (!WebPPictureView(curr, rect.x_offset, rect.y_offset,
rect.width, rect.height, sub_image)) {
if (!WebPPictureView(webp_frame, rect.x_offset, rect.y_offset,
rect.width, rect.height, &sub_image)) {
fprintf(stderr, "Sub-image %dx%d at position %d,%d is invalid!\n",
rect.width, rect.height, rect.x_offset, rect.y_offset);
goto End;
return 0;
}
dst = sub_image->argb;
dst = sub_image.argb;
tmp = (uint8_t*)malloc(rect.width * sizeof(*tmp));
if (tmp == NULL) goto End;
@@ -120,107 +119,29 @@ static int ReadFrame(GifFileType* const gif, WebPFrameRect* const gif_rect,
for (y = interlace_offsets[pass]; y < rect.height;
y += interlace_jumps[pass]) {
if (DGifGetLine(gif, tmp, rect.width) == GIF_ERROR) goto End;
Remap(tmp, gif, dst + y * sub_image->argb_stride, rect.width);
Remap(tmp, gif, dst + y * sub_image.argb_stride, rect.width);
}
}
} else { // Non-interlaced image.
int y;
for (y = 0; y < rect.height; ++y) {
if (DGifGetLine(gif, tmp, rect.width) == GIF_ERROR) goto End;
Remap(tmp, gif, dst + y * sub_image->argb_stride, rect.width);
Remap(tmp, gif, dst + y * sub_image.argb_stride, rect.width);
}
}
ok = 1;
End:
if (!ok) webp_frame->error_code = sub_image.error_code;
WebPPictureFree(&sub_image);
free(tmp);
return ok;
}
// Optimize the image frame for WebP and encode it.
static int OptimizeAndEncodeFrame(
const WebPConfig* const config, const WebPFrameRect* const gif_rect,
WebPPicture* const curr, WebPPicture* const prev_canvas,
WebPPicture* const curr_canvas, WebPPicture* const sub_image,
WebPMuxFrameInfo* const info, WebPFrameCache* const cache) {
WebPFrameRect rect = *gif_rect;
// Snap to even offsets (and adjust dimensions if needed).
rect.width += (rect.x_offset & 1);
rect.height += (rect.y_offset & 1);
rect.x_offset &= ~1;
rect.y_offset &= ~1;
if (!WebPPictureView(curr, rect.x_offset, rect.y_offset,
rect.width, rect.height, sub_image)) {
return 0;
}
info->x_offset = rect.x_offset;
info->y_offset = rect.y_offset;
if (is_first_frame || WebPUtilIsKeyFrame(curr, &rect, prev_canvas)) {
// Add this as a key frame.
if (!WebPFrameCacheAddFrame(cache, config, NULL, NULL, info, sub_image)) {
return 0;
}
// Update prev_canvas by simply copying from 'curr'.
WebPUtilCopyPixels(curr, prev_canvas);
} else {
if (!config->lossless) {
// For lossy compression, it's better to replace transparent pixels of
// 'curr' with actual RGB values, whenever possible.
WebPUtilReduceTransparency(prev_canvas, &rect, curr);
WebPUtilFlattenSimilarBlocks(prev_canvas, &rect, curr);
}
if (!WebPFrameCacheShouldTryKeyFrame(cache)) {
// Add this as a frame rectangle.
if (!WebPFrameCacheAddFrame(cache, config, info, sub_image, NULL, NULL)) {
return 0;
}
// Update prev_canvas by blending 'curr' into it.
WebPUtilBlendPixels(curr, gif_rect, prev_canvas);
} else {
WebPPicture full_image;
WebPMuxFrameInfo full_image_info;
int ok;
// Convert to a key frame.
WebPUtilCopyPixels(curr, curr_canvas);
WebPUtilConvertToKeyFrame(prev_canvas, &rect, curr_canvas);
if (!WebPPictureView(curr_canvas, rect.x_offset, rect.y_offset,
rect.width, rect.height, &full_image)) {
return 0;
}
full_image_info = *info;
full_image_info.x_offset = rect.x_offset;
full_image_info.y_offset = rect.y_offset;
// Add both variants to cache: frame rectangle and key frame.
ok = WebPFrameCacheAddFrame(cache, config, info, sub_image,
&full_image_info, &full_image);
WebPPictureFree(&full_image);
if (!ok) return 0;
// Update prev_canvas by simply copying from 'curr_canvas'.
WebPUtilCopyPixels(curr_canvas, prev_canvas);
}
}
return 1;
}
static void DisposeFrame(WebPMuxAnimDispose dispose_method,
const WebPFrameRect* const gif_rect,
WebPPicture* const frame, WebPPicture* const canvas) {
if (dispose_method == WEBP_MUX_DISPOSE_BACKGROUND) {
WebPUtilClearPic(frame, NULL);
WebPUtilClearPic(canvas, gif_rect);
}
}
static int GetBackgroundColor(const ColorMapObject* const color_map,
GifWord bgcolor_idx, uint32_t* const bgcolor) {
if (transparent_index != -1 && bgcolor_idx == transparent_index) {
*bgcolor = TRANSPARENT_COLOR; // Special case.
*bgcolor = WEBP_UTIL_TRANSPARENT_COLOR; // Special case.
return 1;
} else if (color_map == NULL || color_map->Colors == NULL
|| bgcolor_idx >= color_map->ColorCount) {
@@ -299,13 +220,12 @@ int main(int argc, const char *argv[]) {
FILE* out = NULL;
GifFileType* gif = NULL;
WebPConfig config;
WebPPicture curr_frame;
WebPPicture prev_canvas;
WebPPicture curr_canvas;
WebPMuxFrameInfo frame;
WebPPicture frame;
WebPMuxFrameInfo info;
WebPMuxAnimParams anim = { WHITE_COLOR, 0 };
WebPFrameCache* cache = NULL;
int is_first_frame = 1; // Whether we are processing the first frame.
int done;
int c;
int quiet = 0;
@@ -319,13 +239,12 @@ int main(int argc, const char *argv[]) {
size_t kmin = 0;
size_t kmax = 0;
memset(&frame, 0, sizeof(frame));
frame.id = WEBP_CHUNK_ANMF;
frame.dispose_method = WEBP_MUX_DISPOSE_BACKGROUND;
frame.blend_method = WEBP_MUX_BLEND;
memset(&info, 0, sizeof(info));
info.id = WEBP_CHUNK_ANMF;
info.dispose_method = WEBP_MUX_DISPOSE_BACKGROUND;
info.blend_method = WEBP_MUX_BLEND;
if (!WebPConfigInit(&config) || !WebPPictureInit(&curr_frame) ||
!WebPPictureInit(&prev_canvas) || !WebPPictureInit(&curr_canvas)) {
if (!WebPConfigInit(&config) || !WebPPictureInit(&frame)) {
fprintf(stderr, "Error! Version mismatch!\n");
return -1;
}
@@ -387,9 +306,6 @@ int main(int argc, const char *argv[]) {
}
SanitizeKeyFrameIntervals(&kmin, &kmax);
cache = WebPFrameCacheNew(kmin, kmax);
if (cache == NULL) goto End;
if (!WebPValidateConfig(&config)) {
fprintf(stderr, "Error! Invalid configuration.\n");
goto End;
@@ -411,12 +327,14 @@ int main(int argc, const char *argv[]) {
if (gif == NULL) goto End;
// Allocate current buffer
curr_frame.width = gif->SWidth;
curr_frame.height = gif->SHeight;
curr_frame.use_argb = 1;
if (!WebPPictureAlloc(&curr_frame)) goto End;
if (!WebPPictureCopy(&curr_frame, &prev_canvas)) goto End;
if (!WebPPictureCopy(&curr_frame, &curr_canvas)) goto End;
frame.width = gif->SWidth;
frame.height = gif->SHeight;
frame.use_argb = 1;
if (!WebPPictureAlloc(&frame)) goto End;
// Initialize cache
cache = WebPFrameCacheNew(frame.width, frame.height, kmin, kmax);
if (cache == NULL) goto End;
mux = WebPMuxNew();
if (mux == NULL) {
@@ -432,21 +350,17 @@ int main(int argc, const char *argv[]) {
switch (type) {
case IMAGE_DESC_RECORD_TYPE: {
WebPPicture sub_image;
WebPFrameRect gif_rect;
if (!DGifGetImageDesc(gif)) goto End;
if (!ReadFrame(gif, &gif_rect, &sub_image, &curr_frame)) {
if (!ReadFrame(gif, &gif_rect, &frame)) {
goto End;
}
if (!OptimizeAndEncodeFrame(&config, &gif_rect, &curr_frame,
&prev_canvas, &curr_canvas, &sub_image,
&frame, cache)) {
if (!WebPFrameCacheAddFrame(cache, &config, &gif_rect, &frame, &info)) {
fprintf(stderr, "Error! Cannot encode frame as WebP\n");
fprintf(stderr, "Error code: %d\n", sub_image.error_code);
fprintf(stderr, "Error code: %d\n", frame.error_code);
}
WebPPictureFree(&sub_image);
err = WebPFrameCacheFlush(cache, verbose, mux);
if (err != WEBP_MUX_OK) {
@@ -454,9 +368,6 @@ int main(int argc, const char *argv[]) {
ErrorString(err));
goto End;
}
DisposeFrame(frame.dispose_method, &gif_rect,
&curr_frame, &prev_canvas);
is_first_frame = 0;
break;
}
@@ -475,7 +386,7 @@ int main(int argc, const char *argv[]) {
const int dispose = (flags >> GIF_DISPOSE_SHIFT) & GIF_DISPOSE_MASK;
const int delay = data[2] | (data[3] << 8); // In 10 ms units.
if (data[0] != 4) goto End;
frame.duration = delay * 10; // Duration is in 1 ms units for WebP.
info.duration = delay * 10; // Duration is in 1 ms units for WebP.
if (dispose == 3) {
static int warning_printed = 0;
if (!warning_printed) {
@@ -484,9 +395,9 @@ int main(int argc, const char *argv[]) {
}
// failsafe. TODO(urvang): emulate the correct behaviour by
// recoding the whole frame.
frame.dispose_method = WEBP_MUX_DISPOSE_BACKGROUND;
info.dispose_method = WEBP_MUX_DISPOSE_BACKGROUND;
} else {
frame.dispose_method =
info.dispose_method =
(dispose == 2) ? WEBP_MUX_DISPOSE_BACKGROUND
: WEBP_MUX_DISPOSE_NONE;
}
@@ -497,8 +408,7 @@ int main(int argc, const char *argv[]) {
fprintf(stderr, "GIF decode warning: invalid background color "
"index. Assuming white background.\n");
}
WebPUtilClearPic(&curr_frame, NULL);
WebPUtilClearPic(&prev_canvas, NULL);
WebPUtilClearPic(&frame, NULL);
}
break;
}
@@ -645,9 +555,7 @@ int main(int argc, const char *argv[]) {
End:
WebPDataClear(&webp_data);
WebPMuxDelete(mux);
WebPPictureFree(&curr_frame);
WebPPictureFree(&prev_canvas);
WebPPictureFree(&curr_canvas);
WebPPictureFree(&frame);
WebPFrameCacheDelete(cache);
if (out != NULL && out_file != NULL) fclose(out);

View File

@@ -20,215 +20,7 @@
#define KEYFRAME_NONE -1
//------------------------------------------------------------------------------
// Encoded frame.
// Used to store two candidates of encoded data for an animation frame. One of
// the two will be chosen later.
typedef struct {
WebPMuxFrameInfo sub_frame; // Encoded frame rectangle.
WebPMuxFrameInfo key_frame; // Encoded frame if it was converted to keyframe.
} EncodedFrame;
// Release the data contained by 'encoded_frame'.
static void FrameRelease(EncodedFrame* const encoded_frame) {
WebPDataClear(&encoded_frame->sub_frame.bitstream);
WebPDataClear(&encoded_frame->key_frame.bitstream);
memset(encoded_frame, 0, sizeof(*encoded_frame));
}
//------------------------------------------------------------------------------
// Frame cache.
// Used to store encoded frames that haven't been output yet.
struct WebPFrameCache {
EncodedFrame* encoded_frames; // Array of encoded frames.
size_t size; // Number of allocated data elements.
size_t start; // Start index.
size_t count; // Number of valid data elements.
int flush_count; // If >0, flush_count frames starting from
// 'start' are ready to be added to mux.
int64_t best_delta; // min(canvas size - frame size) over the frames.
// Can be negative in certain cases due to
// transparent pixels in a frame.
int keyframe; // Index of selected keyframe relative to 'start'.
size_t kmin; // Min distance between key frames.
size_t kmax; // Max distance between key frames.
size_t count_since_key_frame; // Frames seen since the last key frame.
};
// Reset the counters in the cache struct. Doesn't touch 'cache->encoded_frames'
// and 'cache->size'.
static void CacheReset(WebPFrameCache* const cache) {
cache->start = 0;
cache->count = 0;
cache->flush_count = 0;
cache->best_delta = DELTA_INFINITY;
cache->keyframe = KEYFRAME_NONE;
}
WebPFrameCache* WebPFrameCacheNew(size_t kmin, size_t kmax) {
WebPFrameCache* cache = (WebPFrameCache*)malloc(sizeof(*cache));
if (cache == NULL) return NULL;
CacheReset(cache);
cache->kmin = kmin;
cache->kmax = kmax;
cache->count_since_key_frame = 0;
assert(kmax > kmin);
cache->size = kmax - kmin;
cache->encoded_frames =
(EncodedFrame*)calloc(cache->size, sizeof(*cache->encoded_frames));
if (cache->encoded_frames == NULL) {
free(cache);
return NULL;
}
return cache;
}
void WebPFrameCacheDelete(WebPFrameCache* const cache) {
if (cache != NULL) {
size_t i;
for (i = 0; i < cache->size; ++i) {
FrameRelease(&cache->encoded_frames[i]);
}
free(cache->encoded_frames);
free(cache);
}
}
static int EncodeFrame(const WebPConfig* const config, WebPPicture* const pic,
WebPData* const encoded_data) {
WebPMemoryWriter memory;
pic->use_argb = 1;
pic->writer = WebPMemoryWrite;
pic->custom_ptr = &memory;
WebPMemoryWriterInit(&memory);
if (!WebPEncode(config, pic)) {
return 0;
}
encoded_data->bytes = memory.mem;
encoded_data->size = memory.size;
return 1;
}
// Returns cached frame at given 'position' index.
static EncodedFrame* CacheGetFrame(const WebPFrameCache* const cache,
size_t position) {
assert(cache->start + position < cache->size);
return &cache->encoded_frames[cache->start + position];
}
// Calculate the penalty incurred if we encode given frame as a key frame
// instead of a sub-frame.
static int64_t KeyFramePenalty(const EncodedFrame* const encoded_frame) {
return ((int64_t)encoded_frame->key_frame.bitstream.size -
encoded_frame->sub_frame.bitstream.size);
}
static int SetFrame(const WebPConfig* const config,
const WebPMuxFrameInfo* const info, WebPPicture* const pic,
WebPMuxFrameInfo* const dst) {
*dst = *info;
if (!EncodeFrame(config, pic, &dst->bitstream)) {
return 0;
}
return 1;
}
int WebPFrameCacheAddFrame(WebPFrameCache* const cache,
const WebPConfig* const config,
const WebPMuxFrameInfo* const sub_frame_info,
WebPPicture* const sub_frame_pic,
const WebPMuxFrameInfo* const key_frame_info,
WebPPicture* const key_frame_pic) {
const size_t position = cache->count;
EncodedFrame* const encoded_frame = CacheGetFrame(cache, position);
assert(position < cache->size);
assert(sub_frame_pic != NULL || key_frame_pic != NULL);
if (sub_frame_pic != NULL && !SetFrame(config, sub_frame_info, sub_frame_pic,
&encoded_frame->sub_frame)) {
return 0;
}
if (key_frame_pic != NULL && !SetFrame(config, key_frame_info, key_frame_pic,
&encoded_frame->key_frame)) {
return 0;
}
++cache->count;
if (sub_frame_pic == NULL && key_frame_pic != NULL) { // Keyframe.
cache->keyframe = position;
cache->flush_count = cache->count;
cache->count_since_key_frame = 0;
} else {
++cache->count_since_key_frame;
if (sub_frame_pic != NULL && key_frame_pic == NULL) { // Non-keyframe.
assert(cache->count_since_key_frame < cache->kmax);
cache->flush_count = cache->count;
} else { // Analyze size difference of the two variants.
const int64_t curr_delta = KeyFramePenalty(encoded_frame);
if (curr_delta <= cache->best_delta) { // Pick this as keyframe.
cache->keyframe = position;
cache->best_delta = curr_delta;
cache->flush_count = cache->count - 1; // We can flush previous frames.
}
if (cache->count_since_key_frame == cache->kmax) {
cache->flush_count = cache->count;
cache->count_since_key_frame = 0;
}
}
}
return 1;
}
WebPMuxError WebPFrameCacheFlush(WebPFrameCache* const cache, int verbose,
WebPMux* const mux) {
while (cache->flush_count > 0) {
WebPMuxFrameInfo* info;
WebPMuxError err;
EncodedFrame* const curr = CacheGetFrame(cache, 0);
// Pick frame or full canvas.
if (cache->keyframe == 0) {
info = &curr->key_frame;
info->blend_method = WEBP_MUX_NO_BLEND;
cache->keyframe = KEYFRAME_NONE;
cache->best_delta = DELTA_INFINITY;
} else {
info = &curr->sub_frame;
info->blend_method = WEBP_MUX_BLEND;
}
// Add to mux.
err = WebPMuxPushFrame(mux, info, 1);
if (err != WEBP_MUX_OK) return err;
if (verbose) {
printf("Added frame. offset:%d,%d duration:%d dispose:%d blend:%d\n",
info->x_offset, info->y_offset, info->duration,
info->dispose_method, info->blend_method);
}
FrameRelease(curr);
++cache->start;
--cache->flush_count;
--cache->count;
if (cache->keyframe != KEYFRAME_NONE) --cache->keyframe;
}
if (cache->count == 0) CacheReset(cache);
return WEBP_MUX_OK;
}
WebPMuxError WebPFrameCacheFlushAll(WebPFrameCache* const cache, int verbose,
WebPMux* const mux) {
cache->flush_count = cache->count; // Force flushing of all frames.
return WebPFrameCacheFlush(cache, verbose, mux);
}
int WebPFrameCacheShouldTryKeyFrame(const WebPFrameCache* const cache) {
return cache->count_since_key_frame >= cache->kmin;
}
//------------------------------------------------------------------------------
// Frame rectangle and related utilities.
// Helper utilities.
static void ClearRectangle(WebPPicture* const picture,
int left, int top, int width, int height) {
@@ -237,12 +29,11 @@ static void ClearRectangle(WebPPicture* const picture,
uint32_t* const dst = picture->argb + j * picture->argb_stride;
int i;
for (i = left; i < left + width; ++i) {
dst[i] = TRANSPARENT_COLOR;
dst[i] = WEBP_UTIL_TRANSPARENT_COLOR;
}
}
}
// Clear pixels in 'picture' within given 'rect' to transparent color.
void WebPUtilClearPic(WebPPicture* const picture,
const WebPFrameRect* const rect) {
if (rect != NULL) {
@@ -264,15 +55,18 @@ static void CopyPlane(const uint8_t* src, int src_stride,
}
}
void WebPUtilCopyPixels(const WebPPicture* const src, WebPPicture* const dst) {
// Copy pixels from 'src' to 'dst' honoring strides. 'src' and 'dst' are assumed
// to be already allocated.
static void CopyPixels(const WebPPicture* const src, WebPPicture* const dst) {
assert(src->width == dst->width && src->height == dst->height);
CopyPlane((uint8_t*)src->argb, 4 * src->argb_stride, (uint8_t*)dst->argb,
4 * dst->argb_stride, 4 * src->width, src->height);
}
void WebPUtilBlendPixels(const WebPPicture* const src,
const WebPFrameRect* const rect,
WebPPicture* const dst) {
// Given 'src' picture and its frame rectangle 'rect', blend it into 'dst'.
static void BlendPixels(const WebPPicture* const src,
const WebPFrameRect* const rect,
WebPPicture* const dst) {
int j;
assert(src->width == dst->width && src->height == dst->height);
for (j = rect->y_offset; j < rect->y_offset + rect->height; ++j) {
@@ -287,9 +81,10 @@ void WebPUtilBlendPixels(const WebPPicture* const src,
}
}
void WebPUtilReduceTransparency(const WebPPicture* const src,
const WebPFrameRect* const rect,
WebPPicture* const dst) {
// Replace transparent pixels within 'dst_rect' of 'dst' by those in the 'src'.
static void ReduceTransparency(const WebPPicture* const src,
const WebPFrameRect* const rect,
WebPPicture* const dst) {
int i, j;
assert(src != NULL && dst != NULL && rect != NULL);
assert(src->width == dst->width && src->height == dst->height);
@@ -306,9 +101,11 @@ void WebPUtilReduceTransparency(const WebPPicture* const src,
}
}
void WebPUtilFlattenSimilarBlocks(const WebPPicture* const src,
const WebPFrameRect* const rect,
WebPPicture* const dst) {
// Replace similar blocks of pixels by a 'see-through' transparent block
// with uniform average color.
static void FlattenSimilarBlocks(const WebPPicture* const src,
const WebPFrameRect* const rect,
WebPPicture* const dst) {
int i, j;
const int block_size = 8;
const int y_start = (rect->y_offset + block_size) & ~(block_size - 1);
@@ -359,9 +156,11 @@ void WebPUtilFlattenSimilarBlocks(const WebPPicture* const src,
//------------------------------------------------------------------------------
// Key frame related utilities.
int WebPUtilIsKeyFrame(const WebPPicture* const curr,
const WebPFrameRect* const curr_rect,
const WebPPicture* const prev) {
// Returns true if 'curr' frame with frame rectangle 'curr_rect' is a key frame,
// that is, it can be decoded independently of 'prev' canvas.
static int IsKeyFrame(const WebPPicture* const curr,
const WebPFrameRect* const curr_rect,
const WebPPicture* const prev) {
int i, j;
int is_key_frame = 1;
@@ -404,9 +203,11 @@ int WebPUtilIsKeyFrame(const WebPPicture* const curr,
return 0;
}
void WebPUtilConvertToKeyFrame(const WebPPicture* const prev,
WebPFrameRect* const rect,
WebPPicture* const curr) {
// Given 'prev' frame and current frame rectangle 'rect', convert 'curr' frame
// to a key frame.
static void ConvertToKeyFrame(const WebPPicture* const prev,
WebPFrameRect* const rect,
WebPPicture* const curr) {
int j;
assert(curr->width == prev->width && curr->height == prev->height);
@@ -431,3 +232,315 @@ void WebPUtilConvertToKeyFrame(const WebPPicture* const prev,
}
//------------------------------------------------------------------------------
// Encoded frame.
// Used to store two candidates of encoded data for an animation frame. One of
// the two will be chosen later.
typedef struct {
WebPMuxFrameInfo sub_frame; // Encoded frame rectangle.
WebPMuxFrameInfo key_frame; // Encoded frame if it was converted to keyframe.
} EncodedFrame;
// Release the data contained by 'encoded_frame'.
static void FrameRelease(EncodedFrame* const encoded_frame) {
WebPDataClear(&encoded_frame->sub_frame.bitstream);
WebPDataClear(&encoded_frame->key_frame.bitstream);
memset(encoded_frame, 0, sizeof(*encoded_frame));
}
//------------------------------------------------------------------------------
// Frame cache.
// Used to store encoded frames that haven't been output yet.
struct WebPFrameCache {
EncodedFrame* encoded_frames; // Array of encoded frames.
size_t size; // Number of allocated data elements.
size_t start; // Start index.
size_t count; // Number of valid data elements.
int flush_count; // If >0, flush_count frames starting from
// 'start' are ready to be added to mux.
int64_t best_delta; // min(canvas size - frame size) over the frames.
// Can be negative in certain cases due to
// transparent pixels in a frame.
int keyframe; // Index of selected keyframe relative to 'start'.
size_t kmin; // Min distance between key frames.
size_t kmax; // Max distance between key frames.
size_t count_since_key_frame; // Frames seen since the last key frame.
WebPPicture prev_canvas; // Previous canvas (properly disposed).
WebPPicture curr_canvas; // Current canvas (temporary buffer).
int is_first_frame; // True if no frames have been added to the cache
// since WebPFrameCacheNew().
};
// Reset the counters in the cache struct. Doesn't touch 'cache->encoded_frames'
// and 'cache->size'.
static void CacheReset(WebPFrameCache* const cache) {
cache->start = 0;
cache->count = 0;
cache->flush_count = 0;
cache->best_delta = DELTA_INFINITY;
cache->keyframe = KEYFRAME_NONE;
}
WebPFrameCache* WebPFrameCacheNew(int width, int height,
size_t kmin, size_t kmax) {
WebPFrameCache* cache = (WebPFrameCache*)malloc(sizeof(*cache));
if (cache == NULL) return NULL;
CacheReset(cache);
cache->is_first_frame = 1;
// Picture buffers.
if (!WebPPictureInit(&cache->prev_canvas) ||
!WebPPictureInit(&cache->curr_canvas)) {
return NULL;
}
cache->prev_canvas.width = width;
cache->prev_canvas.height = height;
cache->prev_canvas.use_argb = 1;
if (!WebPPictureAlloc(&cache->prev_canvas) ||
!WebPPictureCopy(&cache->prev_canvas, &cache->curr_canvas)) {
goto Err;
}
WebPUtilClearPic(&cache->prev_canvas, NULL);
// Cache data.
cache->kmin = kmin;
cache->kmax = kmax;
cache->count_since_key_frame = 0;
assert(kmax > kmin);
cache->size = kmax - kmin;
cache->encoded_frames =
(EncodedFrame*)calloc(cache->size, sizeof(*cache->encoded_frames));
if (cache->encoded_frames == NULL) goto Err;
return cache; // All OK.
Err:
WebPFrameCacheDelete(cache);
return NULL;
}
void WebPFrameCacheDelete(WebPFrameCache* const cache) {
if (cache != NULL) {
size_t i;
for (i = 0; i < cache->size; ++i) {
FrameRelease(&cache->encoded_frames[i]);
}
free(cache->encoded_frames);
WebPPictureFree(&cache->prev_canvas);
WebPPictureFree(&cache->curr_canvas);
free(cache);
}
}
static int EncodeFrame(const WebPConfig* const config, WebPPicture* const pic,
WebPData* const encoded_data) {
WebPMemoryWriter memory;
pic->use_argb = 1;
pic->writer = WebPMemoryWrite;
pic->custom_ptr = &memory;
WebPMemoryWriterInit(&memory);
if (!WebPEncode(config, pic)) {
return 0;
}
encoded_data->bytes = memory.mem;
encoded_data->size = memory.size;
return 1;
}
// Returns cached frame at given 'position' index.
static EncodedFrame* CacheGetFrame(const WebPFrameCache* const cache,
size_t position) {
assert(cache->start + position < cache->size);
return &cache->encoded_frames[cache->start + position];
}
// Calculate the penalty incurred if we encode given frame as a key frame
// instead of a sub-frame.
static int64_t KeyFramePenalty(const EncodedFrame* const encoded_frame) {
return ((int64_t)encoded_frame->key_frame.bitstream.size -
encoded_frame->sub_frame.bitstream.size);
}
static int SetFrame(const WebPConfig* const config, int is_key_frame,
const WebPPicture* const prev_canvas,
WebPPicture* const frame, const WebPFrameRect* const rect,
const WebPMuxFrameInfo* const info,
WebPPicture* const sub_frame,
EncodedFrame* encoded_frame) {
WebPMuxFrameInfo* const dst =
is_key_frame ? &encoded_frame->key_frame : &encoded_frame->sub_frame;
*dst = *info;
if (!config->lossless && !is_key_frame) {
// For lossy compression of a frame, it's better to replace transparent
// pixels of 'curr' with actual RGB values, whenever possible.
ReduceTransparency(prev_canvas, rect, frame);
FlattenSimilarBlocks(prev_canvas, rect, frame);
}
if (!EncodeFrame(config, sub_frame, &dst->bitstream)) {
return 0;
}
return 1;
}
static void DisposeFrame(WebPMuxAnimDispose dispose_method,
const WebPFrameRect* const gif_rect,
WebPPicture* const frame, WebPPicture* const canvas) {
if (dispose_method == WEBP_MUX_DISPOSE_BACKGROUND) {
WebPUtilClearPic(frame, NULL);
WebPUtilClearPic(canvas, gif_rect);
}
}
int WebPFrameCacheAddFrame(WebPFrameCache* const cache,
const WebPConfig* const config,
const WebPFrameRect* const orig_rect,
WebPPicture* const frame,
WebPMuxFrameInfo* const info) {
int ok = 0;
WebPFrameRect rect = *orig_rect;
WebPPicture sub_image; // View extracted from 'frame' with rectangle 'rect'.
WebPPicture* const prev_canvas = &cache->prev_canvas;
const size_t position = cache->count;
EncodedFrame* const encoded_frame = CacheGetFrame(cache, position);
assert(position < cache->size);
// Snap to even offsets (and adjust dimensions if needed).
rect.width += (rect.x_offset & 1);
rect.height += (rect.y_offset & 1);
rect.x_offset &= ~1;
rect.y_offset &= ~1;
if (!WebPPictureView(frame, rect.x_offset, rect.y_offset,
rect.width, rect.height, &sub_image)) {
return 0;
}
info->x_offset = rect.x_offset;
info->y_offset = rect.y_offset;
++cache->count;
if (cache->is_first_frame || IsKeyFrame(frame, &rect, prev_canvas)) {
// Add this as a key frame.
if (!SetFrame(config, 1, NULL, NULL, NULL, info, &sub_image,
encoded_frame)) {
goto End;
}
cache->keyframe = position;
cache->flush_count = cache->count;
cache->count_since_key_frame = 0;
// Update prev_canvas by simply copying from 'curr'.
CopyPixels(frame, prev_canvas);
} else {
++cache->count_since_key_frame;
if (cache->count_since_key_frame <= cache->kmin) {
// Add this as a frame rectangle.
if (!SetFrame(config, 0, prev_canvas, frame, &rect, info, &sub_image,
encoded_frame)) {
goto End;
}
cache->flush_count = cache->count;
// Update prev_canvas by blending 'curr' into it.
BlendPixels(frame, orig_rect, prev_canvas);
} else {
WebPPicture full_image;
WebPMuxFrameInfo full_image_info;
int frame_added;
int64_t curr_delta;
// Add frame rectangle to cache.
if (!SetFrame(config, 0, prev_canvas, frame, &rect, info, &sub_image,
encoded_frame)) {
goto End;
}
// Convert to a key frame.
CopyPixels(frame, &cache->curr_canvas);
ConvertToKeyFrame(prev_canvas, &rect, &cache->curr_canvas);
if (!WebPPictureView(&cache->curr_canvas, rect.x_offset, rect.y_offset,
rect.width, rect.height, &full_image)) {
goto End;
}
full_image_info = *info;
full_image_info.x_offset = rect.x_offset;
full_image_info.y_offset = rect.y_offset;
// Add key frame to cache, too.
frame_added = SetFrame(config, 1, NULL, NULL, NULL, &full_image_info,
&full_image, encoded_frame);
WebPPictureFree(&full_image);
if (!frame_added) goto End;
// Analyze size difference of the two variants.
curr_delta = KeyFramePenalty(encoded_frame);
if (curr_delta <= cache->best_delta) { // Pick this as keyframe.
cache->keyframe = position;
cache->best_delta = curr_delta;
cache->flush_count = cache->count - 1; // We can flush previous frames.
}
if (cache->count_since_key_frame == cache->kmax) {
cache->flush_count = cache->count;
cache->count_since_key_frame = 0;
}
// Update prev_canvas by simply copying from 'curr_canvas'.
CopyPixels(&cache->curr_canvas, prev_canvas);
}
}
DisposeFrame(info->dispose_method, orig_rect, frame, prev_canvas);
cache->is_first_frame = 0;
ok = 1;
End:
WebPPictureFree(&sub_image);
if (!ok) --cache->count; // We reset the count, as the frame addition failed.
return ok;
}
WebPMuxError WebPFrameCacheFlush(WebPFrameCache* const cache, int verbose,
WebPMux* const mux) {
while (cache->flush_count > 0) {
WebPMuxFrameInfo* info;
WebPMuxError err;
EncodedFrame* const curr = CacheGetFrame(cache, 0);
// Pick frame or full canvas.
if (cache->keyframe == 0) {
info = &curr->key_frame;
info->blend_method = WEBP_MUX_NO_BLEND;
cache->keyframe = KEYFRAME_NONE;
cache->best_delta = DELTA_INFINITY;
} else {
info = &curr->sub_frame;
info->blend_method = WEBP_MUX_BLEND;
}
// Add to mux.
err = WebPMuxPushFrame(mux, info, 1);
if (err != WEBP_MUX_OK) return err;
if (verbose) {
printf("Added frame. offset:%d,%d duration:%d dispose:%d blend:%d\n",
info->x_offset, info->y_offset, info->duration,
info->dispose_method, info->blend_method);
}
FrameRelease(curr);
++cache->start;
--cache->flush_count;
--cache->count;
if (cache->keyframe != KEYFRAME_NONE) --cache->keyframe;
}
if (cache->count == 0) CacheReset(cache);
return WEBP_MUX_OK;
}
WebPMuxError WebPFrameCacheFlushAll(WebPFrameCache* const cache, int verbose,
WebPMux* const mux) {
cache->flush_count = cache->count; // Force flushing of all frames.
return WebPFrameCacheFlush(cache, verbose, mux);
}
//------------------------------------------------------------------------------

View File

@@ -22,6 +22,21 @@
extern "C" {
#endif
//------------------------------------------------------------------------------
// Helper utilities.
#define WEBP_UTIL_TRANSPARENT_COLOR 0x00ffffff
struct WebPPicture;
typedef struct {
int x_offset, y_offset, width, height;
} WebPFrameRect;
// Clear pixels in 'picture' within given 'rect' to transparent color.
void WebPUtilClearPic(struct WebPPicture* const picture,
const WebPFrameRect* const rect);
//------------------------------------------------------------------------------
// Frame cache.
@@ -30,23 +45,20 @@ typedef struct WebPFrameCache WebPFrameCache;
// Given the minimum distance between key frames 'kmin' and maximum distance
// between key frames 'kmax', returns an appropriately allocated cache object.
// Use WebPFrameCacheDelete() to deallocate the 'cache'.
WebPFrameCache* WebPFrameCacheNew(size_t kmin, size_t kmax);
WebPFrameCache* WebPFrameCacheNew(int width, int height,
size_t kmin, size_t kmax);
// Release all the frame data from 'cache' and free 'cache'.
void WebPFrameCacheDelete(WebPFrameCache* const cache);
// Add encoded frame in the cache. 'sub_frame_info' and 'sub_frame_pic' are used
// to encode the frame rectangle, while 'key_frame_info' and 'key_frame_pic' are
// used to encode the key frame. Either 'sub_frame_pic' (and 'sub_frame_info')
// or 'key_frame_pic' (and 'key_frame_info') can be NULL; in which case the
// corresponding variant will be omitted.
// Returns true on success.
// Given an image described by 'frame', 'info' and 'orig_rect', optimize it for
// WebP, encode it and add it to 'cache'.
// This takes care of frame disposal too, according to 'info->dispose_method'.
int WebPFrameCacheAddFrame(WebPFrameCache* const cache,
const WebPConfig* const config,
const WebPMuxFrameInfo* const sub_frame_info,
WebPPicture* const sub_frame_pic,
const WebPMuxFrameInfo* const key_frame_info,
WebPPicture* const key_frame_pic);
const WebPFrameRect* const orig_rect,
WebPPicture* const frame,
WebPMuxFrameInfo* const info);
// Flush the *ready* frames from cache and add them to 'mux'. If 'verbose' is
// true, prints the information about these frames.
@@ -57,61 +69,6 @@ WebPMuxError WebPFrameCacheFlush(WebPFrameCache* const cache, int verbose,
WebPMuxError WebPFrameCacheFlushAll(WebPFrameCache* const cache, int verbose,
WebPMux* const mux);
// Returns true if subsequent call to WebPFrameCacheAddFrame() should
// incorporate a potential keyframe.
int WebPFrameCacheShouldTryKeyFrame(const WebPFrameCache* const cache);
//------------------------------------------------------------------------------
// Frame rectangle and related utilities.
#define TRANSPARENT_COLOR 0x00ffffff
typedef struct {
int x_offset, y_offset, width, height;
} WebPFrameRect;
struct WebPPicture;
// Clear pixels in 'picture' within given 'rect' to transparent color.
void WebPUtilClearPic(struct WebPPicture* const picture,
const WebPFrameRect* const rect);
// Copy pixels from 'src' to 'dst' honoring strides. 'src' and 'dst' are assumed
// to be already allocated.
void WebPUtilCopyPixels(const struct WebPPicture* const src,
WebPPicture* const dst);
// Given 'src' picture and its frame rectangle 'rect', blend it into 'dst'.
void WebPUtilBlendPixels(const struct WebPPicture* const src,
const WebPFrameRect* const src_rect,
struct WebPPicture* const dst);
// Replace transparent pixels within 'dst_rect' of 'dst' by those in the 'src'.
void WebPUtilReduceTransparency(const struct WebPPicture* const src,
const WebPFrameRect* const dst_rect,
struct WebPPicture* const dst);
// Replace similar blocks of pixels by a 'see-through' transparent block
// with uniform average color.
void WebPUtilFlattenSimilarBlocks(const WebPPicture* const src,
const WebPFrameRect* const rect,
WebPPicture* const dst);
//------------------------------------------------------------------------------
// Key frame related.
// Returns true if 'curr' frame with frame rectangle 'curr_rect' is a key frame,
// that is, it can be decoded independently of 'prev' canvas.
int WebPUtilIsKeyFrame(const WebPPicture* const curr,
const WebPFrameRect* const curr_rect,
const WebPPicture* const prev);
// Given 'prev' frame and current frame rectangle 'rect', convert 'curr' frame
// to a key frame.
void WebPUtilConvertToKeyFrame(const WebPPicture* const prev,
WebPFrameRect* const rect,
WebPPicture* const curr);
//------------------------------------------------------------------------------
#if defined(__cplusplus) || defined(c_plusplus)