// Copyright 2013 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// Helper structs and methods for gif2webp tool.
//

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "webp/encode.h"
#include "./gif2webp_util.h"

#define DELTA_INFINITY   (1ULL << 32)
#define KEYFRAME_NONE    (-1)
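// DELTA_INFINITY acts as a sentinel larger than any realistic size delta
// between two encoded frames; KEYFRAME_NONE means "no key frame chosen yet".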

//------------------------------------------------------------------------------
// Helper utilities.

static void ClearRectangle(WebPPicture* const picture,
                           int left, int top, int width, int height) {
  int j;
  for (j = top; j < top + height; ++j) {
    uint32_t* const dst = picture->argb + j * picture->argb_stride;
    int i;
    for (i = left; i < left + width; ++i) {
      dst[i] = WEBP_UTIL_TRANSPARENT_COLOR;
    }
  }
}

void WebPUtilClearPic(WebPPicture* const picture,
                      const WebPFrameRect* const rect) {
  if (rect != NULL) {
    ClearRectangle(picture, rect->x_offset, rect->y_offset,
                   rect->width, rect->height);
  } else {
    ClearRectangle(picture, 0, 0, picture->width, picture->height);
  }
}

// TODO: Also used in picture.c. Move to a common location?
// Copy width x height pixels from 'src' to 'dst' honoring the strides.
static void CopyPlane(const uint8_t* src, int src_stride,
                      uint8_t* dst, int dst_stride, int width, int height) {
  while (height-- > 0) {
    memcpy(dst, src, width);
    src += src_stride;
    dst += dst_stride;
  }
}

// Copy pixels from 'src' to 'dst' honoring strides. 'src' and 'dst' are assumed
// to be already allocated.
static void CopyPixels(const WebPPicture* const src, WebPPicture* const dst) {
  assert(src->width == dst->width && src->height == dst->height);
  CopyPlane((uint8_t*)src->argb, 4 * src->argb_stride, (uint8_t*)dst->argb,
            4 * dst->argb_stride, 4 * src->width, src->height);
}

// Given 'src' picture and its frame rectangle 'rect', blend it into 'dst'.
static void BlendPixels(const WebPPicture* const src,
                        const WebPFrameRect* const rect,
                        WebPPicture* const dst) {
  int j;
  assert(src->width == dst->width && src->height == dst->height);
  for (j = rect->y_offset; j < rect->y_offset + rect->height; ++j) {
    int i;
    for (i = rect->x_offset; i < rect->x_offset + rect->width; ++i) {
      const uint32_t src_pixel = src->argb[j * src->argb_stride + i];
      const int src_alpha = src_pixel >> 24;
      if (src_alpha != 0) {
        dst->argb[j * dst->argb_stride + i] = src_pixel;
      }
    }
  }
}

// Replace transparent pixels within 'rect' of 'dst' by those in the 'src'.
static void ReduceTransparency(const WebPPicture* const src,
                               const WebPFrameRect* const rect,
                               WebPPicture* const dst) {
  int i, j;
  assert(src != NULL && dst != NULL && rect != NULL);
  assert(src->width == dst->width && src->height == dst->height);
  for (j = rect->y_offset; j < rect->y_offset + rect->height; ++j) {
    for (i = rect->x_offset; i < rect->x_offset + rect->width; ++i) {
      const uint32_t src_pixel = src->argb[j * src->argb_stride + i];
      const int src_alpha = src_pixel >> 24;
      const uint32_t dst_pixel = dst->argb[j * dst->argb_stride + i];
      const int dst_alpha = dst_pixel >> 24;
      if (dst_alpha == 0 && src_alpha == 0xff) {
        dst->argb[j * dst->argb_stride + i] = src_pixel;
      }
    }
  }
}

// Replace similar blocks of pixels by a 'see-through' transparent block
// with uniform average color.
static void FlattenSimilarBlocks(const WebPPicture* const src,
                                 const WebPFrameRect* const rect,
                                 WebPPicture* const dst) {
  int i, j;
  const int block_size = 8;
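  // Only block_size-aligned blocks that lie entirely inside 'rect' are
  // visited; partial blocks at the rectangle borders are left untouched.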
  const int y_start = (rect->y_offset + block_size) & ~(block_size - 1);
  const int y_end = (rect->y_offset + rect->height) & ~(block_size - 1);
  const int x_start = (rect->x_offset + block_size) & ~(block_size - 1);
  const int x_end = (rect->x_offset + rect->width) & ~(block_size - 1);
  assert(src != NULL && dst != NULL && rect != NULL);
  assert(src->width == dst->width && src->height == dst->height);
  assert((block_size & (block_size - 1)) == 0);  // must be a power of 2
  // Iterate over each block and count similar pixels.
  for (j = y_start; j < y_end; j += block_size) {
    for (i = x_start; i < x_end; i += block_size) {
      int cnt = 0;
      int avg_r = 0, avg_g = 0, avg_b = 0;
      int x, y;
      const uint32_t* const psrc = src->argb + j * src->argb_stride + i;
      uint32_t* const pdst = dst->argb + j * dst->argb_stride + i;
      for (y = 0; y < block_size; ++y) {
        for (x = 0; x < block_size; ++x) {
          const uint32_t src_pixel = psrc[x + y * src->argb_stride];
          const int alpha = src_pixel >> 24;
          if (alpha == 0xff &&
              src_pixel == pdst[x + y * dst->argb_stride]) {
            ++cnt;
            avg_r += (src_pixel >> 16) & 0xff;
            avg_g += (src_pixel >> 8) & 0xff;
            avg_b += (src_pixel >> 0) & 0xff;
          }
        }
      }
      // If we have a fully similar block, we replace it with an
      // average transparent block. This compresses better in lossy mode.
      if (cnt == block_size * block_size) {
        const uint32_t color = (0x00 << 24) |
                               ((avg_r / cnt) << 16) |
                               ((avg_g / cnt) << 8) |
                               ((avg_b / cnt) << 0);
        for (y = 0; y < block_size; ++y) {
          for (x = 0; x < block_size; ++x) {
            pdst[x + y * dst->argb_stride] = color;
          }
        }
      }
    }
  }
}

//------------------------------------------------------------------------------
// Key frame related utilities.

// Returns true if 'curr' frame with frame rectangle 'curr_rect' is a key frame,
// that is, it can be decoded independently of 'prev' canvas.
static int IsKeyFrame(const WebPPicture* const curr,
                      const WebPFrameRect* const curr_rect,
                      const WebPPicture* const prev) {
  int i, j;
  int is_key_frame = 1;

  // If previous canvas (with previous frame disposed) is all transparent,
  // current frame is a key frame.
  for (j = 0; j < prev->height; ++j) {
    const uint32_t* const row = &prev->argb[j * prev->argb_stride];
    for (i = 0; i < prev->width; ++i) {
      if (row[i] & 0xff000000u) {  // has alpha?
        is_key_frame = 0;
        break;
      }
    }
    if (!is_key_frame) break;
  }
  if (is_key_frame) return 1;

  // If current frame covers the whole canvas and does not contain any
  // transparent pixels that depend on previous canvas, then current frame is
  // a key frame.
  if (curr_rect->width == curr->width && curr_rect->height == curr->height) {
    assert(curr_rect->x_offset == 0 && curr_rect->y_offset == 0);
    is_key_frame = 1;
    for (j = 0; j < prev->height; ++j) {
      for (i = 0; i < prev->width; ++i) {
        const uint32_t prev_alpha =
            (prev->argb[j * prev->argb_stride + i]) >> 24;
        const uint32_t curr_alpha =
            (curr->argb[j * curr->argb_stride + i]) >> 24;
        if (curr_alpha != 0xff && prev_alpha != 0) {
          is_key_frame = 0;
          break;
        }
      }
      if (!is_key_frame) break;
    }
    if (is_key_frame) return 1;
  }

  return 0;
}

// Given 'prev' frame and current frame rectangle 'rect', convert 'curr' frame
// to a key frame.
static void ConvertToKeyFrame(const WebPPicture* const prev,
                              WebPFrameRect* const rect,
                              WebPPicture* const curr) {
  int j;
  assert(curr->width == prev->width && curr->height == prev->height);

  // Replace transparent pixels of current canvas with those from previous
  // canvas (with previous frame disposed).
  for (j = 0; j < curr->height; ++j) {
    int i;
    for (i = 0; i < curr->width; ++i) {
      uint32_t* const curr_pixel = curr->argb + j * curr->argb_stride + i;
      const int curr_alpha = *curr_pixel >> 24;
      if (curr_alpha == 0) {
        *curr_pixel = prev->argb[j * prev->argb_stride + i];
      }
    }
  }

  // Frame rectangle now covers the whole canvas.
  rect->x_offset = 0;
  rect->y_offset = 0;
  rect->width = curr->width;
  rect->height = curr->height;
}

//------------------------------------------------------------------------------
// Encoded frame.

// Used to store two candidates of encoded data for an animation frame. One of
// the two will be chosen later.
typedef struct {
  WebPMuxFrameInfo sub_frame;  // Encoded frame rectangle.
  WebPMuxFrameInfo key_frame;  // Encoded frame if it was converted to keyframe.
} EncodedFrame;

// Release the data contained by 'encoded_frame'.
static void FrameRelease(EncodedFrame* const encoded_frame) {
  if (encoded_frame != NULL) {
    WebPDataClear(&encoded_frame->sub_frame.bitstream);
    WebPDataClear(&encoded_frame->key_frame.bitstream);
    memset(encoded_frame, 0, sizeof(*encoded_frame));
  }
}

//------------------------------------------------------------------------------
// Frame cache.

// Used to store encoded frames that haven't been output yet.
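//
// Expected usage (sketch): create the cache with WebPFrameCacheNew(), feed
// each decoded GIF frame to WebPFrameCacheAddFrame(), call
// WebPFrameCacheFlush() to push the frames that are ready into the mux, and
// finish with WebPFrameCacheFlushAll() followed by WebPFrameCacheDelete().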
struct WebPFrameCache {
  EncodedFrame* encoded_frames;  // Array of encoded frames.
  size_t size;                 // Number of allocated data elements.
  size_t start;                // Start index.
  size_t count;                // Number of valid data elements.
  int flush_count;             // If >0, 'flush_count' frames starting from
                               // 'start' are ready to be added to mux.
  int64_t best_delta;          // min(canvas size - frame size) over the frames.
                               // Can be negative in certain cases due to
                               // transparent pixels in a frame.
  int keyframe;                // Index of selected keyframe relative to 'start'.

  size_t kmin;                 // Min distance between key frames.
  size_t kmax;                 // Max distance between key frames.
  size_t count_since_key_frame;  // Frames seen since the last key frame.
  int allow_mixed;             // If true, each frame can be lossy or lossless.
  WebPPicture prev_canvas;     // Previous canvas (properly disposed).
  WebPPicture curr_canvas;     // Current canvas (temporary buffer).
  int is_first_frame;          // True if no frames have been added to the cache
                               // since WebPFrameCacheNew().
};

// Reset the counters in the cache struct. Doesn't touch 'cache->encoded_frames'
// and 'cache->size'.
static void CacheReset(WebPFrameCache* const cache) {
  cache->start = 0;
  cache->count = 0;
  cache->flush_count = 0;
  cache->best_delta = DELTA_INFINITY;
  cache->keyframe = KEYFRAME_NONE;
}

WebPFrameCache* WebPFrameCacheNew(int width, int height,
                                  size_t kmin, size_t kmax, int allow_mixed) {
  WebPFrameCache* cache = (WebPFrameCache*)malloc(sizeof(*cache));
  if (cache == NULL) return NULL;
  CacheReset(cache);
  // sanity init, so we can call WebPFrameCacheDelete():
  cache->encoded_frames = NULL;

  cache->is_first_frame = 1;

  // Picture buffers.
  if (!WebPPictureInit(&cache->prev_canvas) ||
      !WebPPictureInit(&cache->curr_canvas)) {
    free(cache);  // Nothing else has been allocated at this point.
    return NULL;
  }
  cache->prev_canvas.width = width;
  cache->prev_canvas.height = height;
  cache->prev_canvas.use_argb = 1;
  if (!WebPPictureAlloc(&cache->prev_canvas) ||
      !WebPPictureCopy(&cache->prev_canvas, &cache->curr_canvas)) {
    goto Err;
  }
  WebPUtilClearPic(&cache->prev_canvas, NULL);

  // Cache data.
  cache->allow_mixed = allow_mixed;
  cache->kmin = kmin;
  cache->kmax = kmax;
  cache->count_since_key_frame = 0;
  assert(kmax > kmin);
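  // A key frame is forced at least every 'kmax' frames and allowed no sooner
  // than 'kmin' frames after the previous one, so at most 'kmax - kmin'
  // undecided frames need to be buffered at any time.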
  cache->size = kmax - kmin;
  cache->encoded_frames =
      (EncodedFrame*)calloc(cache->size, sizeof(*cache->encoded_frames));
  if (cache->encoded_frames == NULL) goto Err;

  return cache;  // All OK.

 Err:
  WebPFrameCacheDelete(cache);
  return NULL;
}

void WebPFrameCacheDelete(WebPFrameCache* const cache) {
  if (cache != NULL) {
    if (cache->encoded_frames != NULL) {
      size_t i;
      for (i = 0; i < cache->size; ++i) {
        FrameRelease(&cache->encoded_frames[i]);
      }
      free(cache->encoded_frames);
    }
    WebPPictureFree(&cache->prev_canvas);
    WebPPictureFree(&cache->curr_canvas);
    free(cache);
  }
}

static int EncodeFrame(const WebPConfig* const config, WebPPicture* const pic,
                       WebPMemoryWriter* const memory) {
  pic->use_argb = 1;
  pic->writer = WebPMemoryWrite;
  pic->custom_ptr = memory;
  if (!WebPEncode(config, pic)) {
    return 0;
  }
  return 1;
}

static void GetEncodedData(const WebPMemoryWriter* const memory,
                           WebPData* const encoded_data) {
  encoded_data->bytes = memory->mem;
  encoded_data->size = memory->size;
}

#define MIN_COLORS_LOSSY     31  // Don't try lossy below this threshold.
#define MAX_COLORS_LOSSLESS 194  // Don't try lossless above this threshold.
#define MAX_COLOR_COUNT     256  // Power of 2 greater than MAX_COLORS_LOSSLESS.
#define HASH_SIZE (MAX_COLOR_COUNT * 4)
#define HASH_RIGHT_SHIFT 22  // 32 - log2(HASH_SIZE).

// TODO(urvang): Also used in enc/vp8l.c. Move to utils.
// If the number of colors in the 'pic' is at least MAX_COLOR_COUNT, return
// MAX_COLOR_COUNT. Otherwise, return the exact number of colors in the 'pic'.
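// Colors are tracked in a small open-addressed hash table (multiplicative
// hashing with linear probing), so the scan can stop early once
// MAX_COLOR_COUNT distinct colors have been seen.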
static int GetColorCount(const WebPPicture* const pic) {
  int x, y;
  int num_colors = 0;
  uint8_t in_use[HASH_SIZE] = { 0 };
  uint32_t colors[HASH_SIZE];
  static const uint32_t kHashMul = 0x1e35a7bd;
  const uint32_t* argb = pic->argb;
  const int width = pic->width;
  const int height = pic->height;
  uint32_t last_pix = ~argb[0];  // so we're sure that last_pix != argb[0]

  for (y = 0; y < height; ++y) {
    for (x = 0; x < width; ++x) {
      int key;
      if (argb[x] == last_pix) {
        continue;
      }
      last_pix = argb[x];
      key = (kHashMul * last_pix) >> HASH_RIGHT_SHIFT;
      while (1) {
        if (!in_use[key]) {
          colors[key] = last_pix;
          in_use[key] = 1;
          ++num_colors;
          if (num_colors >= MAX_COLOR_COUNT) {
            return MAX_COLOR_COUNT;  // Exact count not needed.
          }
          break;
        } else if (colors[key] == last_pix) {
          break;  // The color is already there.
        } else {
          // Some other color sits here, so do linear conflict resolution.
          ++key;
          key &= (HASH_SIZE - 1);  // Key mask.
        }
      }
    }
    argb += pic->argb_stride;
  }
  return num_colors;
}

#undef MAX_COLOR_COUNT
#undef HASH_SIZE
#undef HASH_RIGHT_SHIFT

static WebPEncodingError SetFrame(const WebPConfig* const config,
                                  int allow_mixed, int is_key_frame,
                                  const WebPPicture* const prev_canvas,
                                  WebPPicture* const frame,
                                  const WebPFrameRect* const rect,
                                  const WebPMuxFrameInfo* const info,
                                  WebPPicture* const sub_frame,
                                  EncodedFrame* encoded_frame) {
  WebPEncodingError error_code = VP8_ENC_OK;
  int try_lossless;
  int try_lossy;
  int try_both;
  WebPMemoryWriter mem1, mem2;
  WebPData* encoded_data;
  WebPMuxFrameInfo* const dst =
      is_key_frame ? &encoded_frame->key_frame : &encoded_frame->sub_frame;
  *dst = *info;
  encoded_data = &dst->bitstream;
  WebPMemoryWriterInit(&mem1);
  WebPMemoryWriterInit(&mem2);

  if (!allow_mixed) {
    try_lossless = config->lossless;
    try_lossy = !try_lossless;
  } else {  // Use a heuristic for trying lossless and/or lossy compression.
    const int num_colors = GetColorCount(sub_frame);
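    // Both codecs are attempted when 'num_colors' falls in the overlap range
    // [MIN_COLORS_LOSSY, MAX_COLORS_LOSSLESS); the smaller output is kept.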
    try_lossless = (num_colors < MAX_COLORS_LOSSLESS);
    try_lossy = (num_colors >= MIN_COLORS_LOSSY);
  }
  try_both = try_lossless && try_lossy;

  if (try_lossless) {
    WebPConfig config_ll = *config;
    config_ll.lossless = 1;
    if (!EncodeFrame(&config_ll, sub_frame, &mem1)) {
      error_code = sub_frame->error_code;
      goto Err;
    }
  }

  if (try_lossy) {
    WebPConfig config_lossy = *config;
    config_lossy.lossless = 0;
    if (!is_key_frame) {
      // For lossy compression of a frame, it's better to replace transparent
      // pixels of 'curr' with actual RGB values, whenever possible.
      ReduceTransparency(prev_canvas, rect, frame);
      // TODO(later): Investigate if this helps lossless compression as well.
      FlattenSimilarBlocks(prev_canvas, rect, frame);
    }
    if (!EncodeFrame(&config_lossy, sub_frame, &mem2)) {
      error_code = sub_frame->error_code;
      goto Err;
    }
  }

  if (try_both) {  // Pick the encoding with smallest size.
    // TODO(later): Perhaps a rough SSIM/PSNR produced by the encoder should
    // also be a criterion, in addition to sizes.
    if (mem1.size <= mem2.size) {
      WebPMemoryWriterClear(&mem2);
      GetEncodedData(&mem1, encoded_data);
    } else {
      WebPMemoryWriterClear(&mem1);
      GetEncodedData(&mem2, encoded_data);
    }
  } else {
    GetEncodedData(try_lossless ? &mem1 : &mem2, encoded_data);
  }
  return error_code;

 Err:
  WebPMemoryWriterClear(&mem1);
  WebPMemoryWriterClear(&mem2);
  return error_code;
}

#undef MIN_COLORS_LOSSY
#undef MAX_COLORS_LOSSLESS

// Returns cached frame at given 'position' index.
static EncodedFrame* CacheGetFrame(const WebPFrameCache* const cache,
                                   size_t position) {
  assert(cache->start + position < cache->size);
  return &cache->encoded_frames[cache->start + position];
}

// Calculate the penalty incurred if we encode given frame as a key frame
// instead of a sub-frame.
static int64_t KeyFramePenalty(const EncodedFrame* const encoded_frame) {
  return ((int64_t)encoded_frame->key_frame.bitstream.size -
          encoded_frame->sub_frame.bitstream.size);
}

static void DisposeFrame(WebPMuxAnimDispose dispose_method,
                         const WebPFrameRect* const gif_rect,
                         WebPPicture* const frame, WebPPicture* const canvas) {
  if (dispose_method == WEBP_MUX_DISPOSE_BACKGROUND) {
    WebPUtilClearPic(frame, NULL);
    WebPUtilClearPic(canvas, gif_rect);
  }
}

int WebPFrameCacheAddFrame(WebPFrameCache* const cache,
                           const WebPConfig* const config,
                           const WebPFrameRect* const orig_rect_ptr,
                           WebPPicture* const frame,
                           WebPMuxFrameInfo* const info) {
  int ok = 0;
  WebPEncodingError error_code = VP8_ENC_OK;
  WebPFrameRect rect;
  WebPPicture sub_image;  // View extracted from 'frame' with rectangle 'rect'.
  WebPPicture* const prev_canvas = &cache->prev_canvas;
  const size_t position = cache->count;
  const int allow_mixed = cache->allow_mixed;
  EncodedFrame* const encoded_frame = CacheGetFrame(cache, position);
  WebPFrameRect orig_rect;
  assert(position < cache->size);

  if (frame == NULL || info == NULL) {
    return 0;
  }

  if (orig_rect_ptr == NULL) {
    orig_rect.width = frame->width;
    orig_rect.height = frame->height;
    orig_rect.x_offset = 0;
    orig_rect.y_offset = 0;
  } else {
    orig_rect = *orig_rect_ptr;
  }

  // Snap to even offsets (and adjust dimensions if needed).
  rect = orig_rect;
  rect.width += (rect.x_offset & 1);
  rect.height += (rect.y_offset & 1);
  rect.x_offset &= ~1;
  rect.y_offset &= ~1;

  if (!WebPPictureView(frame, rect.x_offset, rect.y_offset,
                       rect.width, rect.height, &sub_image)) {
    return 0;
  }
  info->x_offset = rect.x_offset;
  info->y_offset = rect.y_offset;

  ++cache->count;

  if (cache->is_first_frame || IsKeyFrame(frame, &rect, prev_canvas)) {
    // Add this as a key frame.
    error_code = SetFrame(config, allow_mixed, 1, NULL, NULL, NULL,
                          info, &sub_image, encoded_frame);
    if (error_code != VP8_ENC_OK) {
      goto End;
    }
    cache->keyframe = position;
    cache->flush_count = cache->count;
    cache->count_since_key_frame = 0;
    // Update prev_canvas by simply copying from 'curr'.
    CopyPixels(frame, prev_canvas);
  } else {
    ++cache->count_since_key_frame;
    if (cache->count_since_key_frame <= cache->kmin) {
      // Add this as a frame rectangle.
      error_code = SetFrame(config, allow_mixed, 0, prev_canvas, frame,
                            &rect, info, &sub_image, encoded_frame);
      if (error_code != VP8_ENC_OK) {
        goto End;
      }
      cache->flush_count = cache->count;
      // Update prev_canvas by blending 'curr' into it.
      BlendPixels(frame, &orig_rect, prev_canvas);
    } else {
      WebPPicture full_image;
      WebPMuxFrameInfo full_image_info;
      int64_t curr_delta;

      // Add frame rectangle to cache.
      error_code = SetFrame(config, allow_mixed, 0, prev_canvas, frame, &rect,
                            info, &sub_image, encoded_frame);
      if (error_code != VP8_ENC_OK) {
        goto End;
      }
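
      // A key-frame variant of the same frame is also encoded below; the final
      // choice between the sub-frame and the key frame is made at flush time,
      // depending on which frame ends up selected as 'cache->keyframe'.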
      // Convert to a key frame.
      CopyPixels(frame, &cache->curr_canvas);
      ConvertToKeyFrame(prev_canvas, &rect, &cache->curr_canvas);
      if (!WebPPictureView(&cache->curr_canvas, rect.x_offset, rect.y_offset,
                           rect.width, rect.height, &full_image)) {
        goto End;
      }
      full_image_info = *info;
      full_image_info.x_offset = rect.x_offset;
      full_image_info.y_offset = rect.y_offset;

      // Add key frame to cache, too.
      error_code = SetFrame(config, allow_mixed, 1, NULL, NULL, NULL,
                            &full_image_info, &full_image, encoded_frame);
      WebPPictureFree(&full_image);
      if (error_code != VP8_ENC_OK) goto End;

      // Analyze size difference of the two variants.
      curr_delta = KeyFramePenalty(encoded_frame);
      if (curr_delta <= cache->best_delta) {  // Pick this as keyframe.
        cache->keyframe = position;
        cache->best_delta = curr_delta;
        cache->flush_count = cache->count - 1;  // We can flush previous frames.
      }
      if (cache->count_since_key_frame == cache->kmax) {
        cache->flush_count = cache->count;
        cache->count_since_key_frame = 0;
      }

      // Update prev_canvas by simply copying from 'curr_canvas'.
      CopyPixels(&cache->curr_canvas, prev_canvas);
    }
  }

  DisposeFrame(info->dispose_method, &orig_rect, frame, prev_canvas);

  cache->is_first_frame = 0;
  ok = 1;

 End:
  WebPPictureFree(&sub_image);
  if (!ok) {
    FrameRelease(encoded_frame);
    --cache->count;  // We reset the count, as the frame addition failed.
  }
  frame->error_code = error_code;  // report error_code
  assert(ok || error_code != VP8_ENC_OK);
  return ok;
}

WebPMuxError WebPFrameCacheFlush(WebPFrameCache* const cache, int verbose,
                                 WebPMux* const mux) {
  while (cache->flush_count > 0) {
    WebPMuxFrameInfo* info;
    WebPMuxError err;
    EncodedFrame* const curr = CacheGetFrame(cache, 0);
    // Pick frame or full canvas.
    if (cache->keyframe == 0) {
      info = &curr->key_frame;
      info->blend_method = WEBP_MUX_NO_BLEND;
      cache->keyframe = KEYFRAME_NONE;
      cache->best_delta = DELTA_INFINITY;
    } else {
      info = &curr->sub_frame;
      info->blend_method = WEBP_MUX_BLEND;
    }
    // Add to mux.
    err = WebPMuxPushFrame(mux, info, 1);
    if (err != WEBP_MUX_OK) return err;
    if (verbose) {
      printf("Added frame. offset:%d,%d duration:%d dispose:%d blend:%d\n",
             info->x_offset, info->y_offset, info->duration,
             info->dispose_method, info->blend_method);
    }
    FrameRelease(curr);
    ++cache->start;
    --cache->flush_count;
    --cache->count;
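    // 'keyframe' is an index relative to 'start', so it shifts as frames are
    // flushed out.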
    if (cache->keyframe != KEYFRAME_NONE) --cache->keyframe;
  }

  if (cache->count == 0) CacheReset(cache);
  return WEBP_MUX_OK;
}

WebPMuxError WebPFrameCacheFlushAll(WebPFrameCache* const cache, int verbose,
                                    WebPMux* const mux) {
  cache->flush_count = cache->count;  // Force flushing of all frames.
  return WebPFrameCacheFlush(cache, verbose, mux);
}

//------------------------------------------------------------------------------