Mirror of https://github.com/webmproject/libwebp.git
Create a WebPAnimDecoder API.
This is designed for the simple use-case where one wants to decode all frames one by one, in order. Also, use this API in the anim_util library, which is in turn used by the anim_diff tool.

Change-Id: Ie8b653c04e867d40fd23321b3dd41b87689656c7
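For reference, the decode loop this enables looks roughly as follows. This is a minimal sketch based only on the API added in this change; the helper name DecodeAllFrames and the surrounding I/O (getting the bitstream into a WebPData) are illustrative, not part of the library.

    #include <stdio.h>
    #include "webp/demux.h"

    // Decode every frame of an animated WebP, one by one and in order.
    // 'webp_data' must remain valid for the lifetime of the decoder.
    static int DecodeAllFrames(const WebPData* const webp_data) {
      WebPAnimDecoder* const dec = WebPAnimDecoderNew(webp_data);
      WebPAnimInfo anim_info;
      if (dec == NULL || !WebPAnimDecoderGetInfo(dec, &anim_info)) {
        WebPAnimDecoderDelete(dec);
        return 0;
      }
      while (WebPAnimDecoderHasMoreFrames(dec)) {
        uint8_t* frame_rgba;   // Full canvas, anim_info.canvas_width * 4 bytes per row.
        int timestamp;         // In milliseconds; duration = timestamp - previous timestamp.
        if (!WebPAnimDecoderGetNext(dec, &frame_rgba, &timestamp)) break;
        // Render 'frame_rgba' here; it is only valid until the next
        // GetNext()/Reset()/Delete() call on 'dec'.
        printf("frame ending at %d ms\n", timestamp);
      }
      WebPAnimDecoderDelete(dec);
      return 1;
    }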
commit d39dc8f3cc (parent 03fb75221c)
@@ -33,6 +33,7 @@ dec_srcs := \
    src/dec/webp.c \

demux_srcs := \
    src/demux/anim_decode.c \
    src/demux/demux.c \

dsp_dec_srcs := \
@@ -181,6 +181,7 @@ DEC_OBJS = \
    $(DIROBJ)\dec\webp.obj \

DEMUX_OBJS = \
    $(DIROBJ)\demux\anim_decode.obj \
    $(DIROBJ)\demux\demux.obj \

DSP_DEC_OBJS = \
NEWS
@@ -1,3 +1,7 @@
- Next version:
  * New WebPAnimEncoder API for creating animations
  * New WebPAnimDecoder API for decoding animations

- 3/3/15: version 0.4.3
  This is a binary compatible release.
  * Android / gcc / iOS / MSVS build fixes and improvements
@@ -149,237 +149,74 @@ static bool IsWebP(const std::string& file_str) {
                     file_str.length(), NULL, NULL) != 0;
}

// Returns true if the current frame is a key-frame.
static bool IsKeyFrameWebP(const WebPIterator& curr, const WebPIterator& prev,
                           const DecodedFrame* const prev_frame,
                           int canvas_width, int canvas_height) {
  if (prev_frame == NULL) {
    return true;
  } else if ((!curr.has_alpha || curr.blend_method == WEBP_MUX_NO_BLEND) &&
             IsFullFrame(curr.width, curr.height,
                         canvas_width, canvas_height)) {
    return true;
  } else {
    return (prev.dispose_method == WEBP_MUX_DISPOSE_BACKGROUND) &&
           (IsFullFrame(prev.width, prev.height, canvas_width, canvas_height) ||
            prev_frame->is_key_frame);
  }
}

// Blend a single channel of 'src' over 'dst', given their alpha channel values.
static uint8_t BlendChannelWebP(uint32_t src, uint8_t src_a, uint32_t dst,
                                uint8_t dst_a, uint32_t scale, int shift) {
  const uint8_t src_channel = (src >> shift) & 0xff;
  const uint8_t dst_channel = (dst >> shift) & 0xff;
  const uint32_t blend_unscaled = src_channel * src_a + dst_channel * dst_a;
  assert(blend_unscaled < (1ULL << 32) / scale);
  return (blend_unscaled * scale) >> 24;
}

// Blend 'src' over 'dst' assuming they are NOT pre-multiplied by alpha.
static uint32_t BlendPixelWebP(uint32_t src, uint32_t dst) {
  const uint8_t src_a = (src >> 24) & 0xff;

  if (src_a == 0) {
    return dst;
  } else {
    const uint8_t dst_a = (dst >> 24) & 0xff;
    // This is the approximate integer arithmetic for the actual formula:
    // dst_factor_a = (dst_a * (255 - src_a)) / 255.
    const uint8_t dst_factor_a = (dst_a * (256 - src_a)) >> 8;
    assert(src_a + dst_factor_a < 256);
    const uint8_t blend_a = src_a + dst_factor_a;
    const uint32_t scale = (1UL << 24) / blend_a;

    const uint8_t blend_r =
        BlendChannelWebP(src, src_a, dst, dst_factor_a, scale, 0);
    const uint8_t blend_g =
        BlendChannelWebP(src, src_a, dst, dst_factor_a, scale, 8);
    const uint8_t blend_b =
        BlendChannelWebP(src, src_a, dst, dst_factor_a, scale, 16);

    return (blend_r << 0) | (blend_g << 8) | (blend_b << 16) | (blend_a << 24);
  }
}

// Returns two ranges (<left, width> pairs) at row 'canvas_y', that belong to
// 'src' but not 'dst'. A point range is empty if the corresponding width is 0.
static void FindBlendRangeAtRowWebP(const WebPIterator* const src,
                                    const WebPIterator* const dst, int canvas_y,
                                    int* const left1, int* const width1,
                                    int* const left2, int* const width2) {
  const int src_max_x = src->x_offset + src->width;
  const int dst_max_x = dst->x_offset + dst->width;
  const int dst_max_y = dst->y_offset + dst->height;
  assert(canvas_y >= src->y_offset && canvas_y < (src->y_offset + src->height));
  *left1 = -1;
  *width1 = 0;
  *left2 = -1;
  *width2 = 0;

  if (canvas_y < dst->y_offset || canvas_y >= dst_max_y ||
      src->x_offset >= dst_max_x || src_max_x <= dst->x_offset) {
    *left1 = src->x_offset;
    *width1 = src->width;
    return;
  }

  if (src->x_offset < dst->x_offset) {
    *left1 = src->x_offset;
    *width1 = dst->x_offset - src->x_offset;
  }

  if (src_max_x > dst_max_x) {
    *left2 = dst_max_x;
    *width2 = src_max_x - dst_max_x;
  }
}

// Blend 'num_pixels' in 'src' over 'dst'.
static void BlendPixelRowWebP(uint32_t* const src, const uint32_t* const dst,
                              int num_pixels) {
  for (int i = 0; i < num_pixels; ++i) {
    uint32_t* const src_pixel_ptr = &src[i];
    const uint8_t src_alpha = (*src_pixel_ptr >> 24) & 0xff;
    if (src_alpha != 0xff) {
      const uint32_t dst_pixel = dst[i];
      *src_pixel_ptr = BlendPixelWebP(*src_pixel_ptr, dst_pixel);
    }
  }
}

// Read animated WebP bitstream 'file_str' into 'AnimatedImage' struct.
static bool ReadAnimatedWebP(const char filename[], const std::string& file_str,
                             AnimatedImage* const image, bool dump_frames,
                             const char dump_folder[]) {
  bool ok = true;
  bool ok = false;
  bool dump_ok = true;
  uint32_t frame_index = 0;
  int prev_frame_timestamp = 0;

  const WebPData webp_data = {
    reinterpret_cast<const uint8_t*>(file_str.data()), file_str.size()
  };
  WebPDemuxer* const demux = WebPDemux(&webp_data);
  if (demux == NULL) return false;

  WebPAnimDecoder* dec = WebPAnimDecoderNew(&webp_data);
  if (dec == NULL) {
    fprintf(stderr, "Error parsing image: %s\n", filename);
    goto End;
  }

  WebPAnimInfo anim_info;
  if (!WebPAnimDecoderGetInfo(dec, &anim_info)) {
    fprintf(stderr, "Error getting global info about the animation\n");
    goto End;
  }

  // Animation properties.
  image->canvas_width = WebPDemuxGetI(demux, WEBP_FF_CANVAS_WIDTH);
  image->canvas_height = WebPDemuxGetI(demux, WEBP_FF_CANVAS_HEIGHT);
  image->loop_count = WebPDemuxGetI(demux, WEBP_FF_LOOP_COUNT);
  image->bgcolor = WebPDemuxGetI(demux, WEBP_FF_BACKGROUND_COLOR);

  const uint32_t frame_count = WebPDemuxGetI(demux, WEBP_FF_FRAME_COUNT);
  const uint32_t canvas_width = image->canvas_width;
  const uint32_t canvas_height = image->canvas_height;
  image->canvas_width = anim_info.canvas_width;
  image->canvas_height = anim_info.canvas_height;
  image->loop_count = anim_info.loop_count;
  image->bgcolor = anim_info.bgcolor;

  // Allocate frames.
  AllocateFrames(image, frame_count);
  AllocateFrames(image, anim_info.frame_count);

  // Decode and reconstruct frames.
  WebPIterator prev_iter = WebPIterator();
  WebPIterator curr_iter = WebPIterator();

  for (uint32_t i = 0; i < frame_count; ++i) {
    prev_iter = curr_iter;

    // Get frame.
    if (!WebPDemuxGetFrame(demux, i + 1, &curr_iter)) {
      fprintf(stderr, "Error retrieving frame #%u\n", i);
      return false;
  // Decode frames.
  while (WebPAnimDecoderHasMoreFrames(dec)) {
    uint8_t* frame_rgba;
    int timestamp;
    if (!WebPAnimDecoderGetNext(dec, &frame_rgba, &timestamp)) {
      fprintf(stderr, "Error decoding frame #%u\n", frame_index);
      goto End;
    }

    DecodedFrame* const prev_frame = (i > 0) ? &image->frames[i - 1] : NULL;
    uint8_t* const prev_rgba =
        (prev_frame != NULL) ? prev_frame->rgba.data() : NULL;
    DecodedFrame* const curr_frame = &image->frames[i];
    DecodedFrame* const curr_frame = &image->frames[frame_index];
    uint8_t* const curr_rgba = curr_frame->rgba.data();

    curr_frame->duration = curr_iter.duration;
    curr_frame->is_key_frame = IsKeyFrameWebP(curr_iter, prev_iter, prev_frame,
                                              canvas_width, canvas_height);

    // TODO(urvang): The logic of decoding and reconstructing the next animated
    // frame given the previous one should be a single library call (ideally a
    // user-facing API), which takes care of frame disposal, blending etc.

    // Initialize.
    if (curr_frame->is_key_frame) {
      ZeroFillCanvas(curr_rgba, canvas_width, canvas_height);
    } else {
      CopyCanvas(prev_rgba, curr_rgba, canvas_width, canvas_height);
      if (prev_iter.dispose_method == WEBP_MUX_DISPOSE_BACKGROUND) {
        ZeroFillFrameRect(curr_rgba, canvas_width * kNumChannels,
                          prev_iter.x_offset, prev_iter.y_offset,
                          prev_iter.width, prev_iter.height);
      }
    }

    // Decode.
    const uint8_t* input = curr_iter.fragment.bytes;
    const size_t input_size = curr_iter.fragment.size;
    const size_t output_offset =
        (curr_iter.y_offset * canvas_width + curr_iter.x_offset) * kNumChannels;
    uint8_t* output = curr_rgba + output_offset;
    const int output_stride = kNumChannels * canvas_width;
    const size_t output_size = output_stride * curr_iter.height;

    if (WebPDecodeRGBAInto(input, input_size, output, output_size,
                           output_stride) == NULL) {
      ok = false;
      break;
    }

    // During the decoding of current frame, we may have set some pixels to be
    // transparent (i.e. alpha < 255). However, the value of each of these
    // pixels should have been determined by blending it against the value of
    // that pixel in the previous frame if the blend method is WEBP_MUX_BLEND.
    if (i > 0 && curr_iter.blend_method == WEBP_MUX_BLEND &&
        !curr_frame->is_key_frame) {
      if (prev_iter.dispose_method == WEBP_MUX_DISPOSE_NONE) {
        // Blend transparent pixels with pixels in previous canvas.
        for (int y = 0; y < curr_iter.height; ++y) {
          const size_t offset =
              (curr_iter.y_offset + y) * canvas_width + curr_iter.x_offset;
          BlendPixelRowWebP(reinterpret_cast<uint32_t*>(curr_rgba) + offset,
                            reinterpret_cast<uint32_t*>(prev_rgba) + offset,
                            curr_iter.width);
        }
      } else {
        assert(prev_iter.dispose_method == WEBP_MUX_DISPOSE_BACKGROUND);
        // We need to blend a transparent pixel with its value just after
        // initialization. That is, blend it with:
        // * Fully transparent pixel if it belongs to prevRect <-- No-op.
        // * The pixel in the previous canvas otherwise <-- Need alpha-blending.
        for (int y = 0; y < curr_iter.height; ++y) {
          const int canvas_y = curr_iter.y_offset + y;
          int left1, width1, left2, width2;
          FindBlendRangeAtRowWebP(&curr_iter, &prev_iter, canvas_y, &left1,
                                  &width1, &left2, &width2);
          if (width1 > 0) {
            const size_t offset1 = canvas_y * canvas_width + left1;
            BlendPixelRowWebP(reinterpret_cast<uint32_t*>(curr_rgba) + offset1,
                              reinterpret_cast<uint32_t*>(prev_rgba) + offset1,
                              width1);
          }
          if (width2 > 0) {
            const size_t offset2 = canvas_y * canvas_width + left2;
            BlendPixelRowWebP(reinterpret_cast<uint32_t*>(curr_rgba) + offset2,
                              reinterpret_cast<uint32_t*>(prev_rgba) + offset2,
                              width2);
          }
        }
      }
    }
    curr_frame->duration = timestamp - prev_frame_timestamp;
    curr_frame->is_key_frame = false;  // Unused.
    memcpy(curr_rgba, frame_rgba,
           image->canvas_width * kNumChannels * image->canvas_height);

    // Needed only because we may want to compare with GIF later.
    CleanupTransparentPixels(reinterpret_cast<uint32_t*>(curr_rgba),
                             canvas_width, canvas_height);
                             image->canvas_width, image->canvas_height);

    if (dump_frames) {
      ok = ok && DumpFrame(filename, dump_folder, i, curr_rgba,
                           canvas_width, canvas_height);
    if (dump_frames && dump_ok) {
      dump_ok = DumpFrame(filename, dump_folder, frame_index, curr_rgba,
                          image->canvas_width, image->canvas_height);
      if (!dump_ok) {  // Print error once, but continue decode loop.
        fprintf(stderr, "Error dumping frames to %s\n", dump_folder);
      }
    }

    ++frame_index;
    prev_frame_timestamp = timestamp;
  }
  WebPDemuxReleaseIterator(&prev_iter);
  WebPDemuxReleaseIterator(&curr_iter);
  WebPDemuxDelete(demux);
  ok = dump_ok;

End:
  WebPAnimDecoderDelete(dec);
  return ok;
}

@@ -123,6 +123,7 @@ DEC_OBJS = \
    src/dec/webp.o \

DEMUX_OBJS = \
    src/demux/anim_decode.o \
    src/demux/demux.o \

DSP_DEC_OBJS = \
@@ -1,7 +1,7 @@
lib_LTLIBRARIES = libwebpdemux.la

libwebpdemux_la_SOURCES =
libwebpdemux_la_SOURCES += demux.c
libwebpdemux_la_SOURCES += anim_decode.c demux.c

libwebpdemuxinclude_HEADERS =
libwebpdemuxinclude_HEADERS += ../webp/demux.h
src/demux/anim_decode.c (new file)
@@ -0,0 +1,351 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// AnimDecoder implementation.
//

#ifdef HAVE_CONFIG_H
#include "../webp/config.h"
#endif

#include <assert.h>
#include <string.h>

#include "../utils/utils.h"
#include "../webp/decode.h"
#include "../webp/demux.h"

#define NUM_CHANNELS 4

struct WebPAnimDecoder {
  WebPDemuxer* demux_;             // Demuxer created from given WebP bitstream.
  WebPAnimInfo info_;              // Global info about the animation.
  uint8_t* curr_frame_;            // Current canvas (not disposed).
  uint8_t* prev_frame_disposed_;   // Previous canvas (properly disposed).
  int prev_frame_timestamp_;       // Previous frame timestamp (milliseconds).
  WebPIterator prev_iter_;         // Iterator object for previous frame.
  int prev_frame_was_keyframe_;    // True if previous frame was a keyframe.
  int next_frame_;                 // Index of the next frame to be decoded
                                   // (starting from 1).
};

WebPAnimDecoder* WebPAnimDecoderNewInternal(const WebPData* webp_data,
                                            int abi_version) {
  WebPAnimDecoder* dec = NULL;
  if (webp_data == NULL ||
      WEBP_ABI_IS_INCOMPATIBLE(abi_version, WEBP_DEMUX_ABI_VERSION)) {
    return NULL;
  }

  // Note: calloc() so that the pointer members are initialized to NULL.
  dec = (WebPAnimDecoder*)WebPSafeCalloc(1ULL, sizeof(*dec));
  if (dec == NULL) goto Error;

  dec->demux_ = WebPDemux(webp_data);
  if (dec->demux_ == NULL) goto Error;

  dec->info_.canvas_width = WebPDemuxGetI(dec->demux_, WEBP_FF_CANVAS_WIDTH);
  dec->info_.canvas_height = WebPDemuxGetI(dec->demux_, WEBP_FF_CANVAS_HEIGHT);
  dec->info_.loop_count = WebPDemuxGetI(dec->demux_, WEBP_FF_LOOP_COUNT);
  dec->info_.bgcolor = WebPDemuxGetI(dec->demux_, WEBP_FF_BACKGROUND_COLOR);
  dec->info_.frame_count = WebPDemuxGetI(dec->demux_, WEBP_FF_FRAME_COUNT);

  {
    const int canvas_bytes =
        dec->info_.canvas_width * NUM_CHANNELS * dec->info_.canvas_height;
    // Note: calloc() because we fill frame with zeroes as well.
    dec->curr_frame_ = WebPSafeCalloc(1ULL, canvas_bytes);
    if (dec->curr_frame_ == NULL) goto Error;
    dec->prev_frame_disposed_ = WebPSafeCalloc(1ULL, canvas_bytes);
    if (dec->prev_frame_disposed_ == NULL) goto Error;
  }

  WebPAnimDecoderReset(dec);

  return dec;

Error:
  WebPAnimDecoderDelete(dec);
  return NULL;
}

int WebPAnimDecoderGetInfo(const WebPAnimDecoder* dec, WebPAnimInfo* info) {
  if (dec == NULL || info == NULL) return 0;
  *info = dec->info_;
  return 1;
}

// Returns true if the frame covers the full canvas.
static int IsFullFrame(int width, int height, int canvas_width,
                       int canvas_height) {
  return (width == canvas_width && height == canvas_height);
}

// Clear the canvas to transparent.
static void ZeroFillCanvas(uint8_t* rgba, uint32_t canvas_width,
                           uint32_t canvas_height) {
  memset(rgba, 0, canvas_width * NUM_CHANNELS * canvas_height);
}

// Clear given frame rectangle to transparent.
static void ZeroFillFrameRect(uint8_t* rgba, int rgba_stride, int x_offset,
                              int y_offset, int width, int height) {
  int j;
  assert(width * NUM_CHANNELS <= rgba_stride);
  rgba += y_offset * rgba_stride + x_offset * NUM_CHANNELS;
  for (j = 0; j < height; ++j) {
    memset(rgba, 0, width * NUM_CHANNELS);
    rgba += rgba_stride;
  }
}

// Copy width * height pixels from 'src' to 'dst'.
static void CopyCanvas(const uint8_t* src, uint8_t* dst,
                       uint32_t width, uint32_t height) {
  assert(src != NULL && dst != NULL);
  memcpy(dst, src, width * NUM_CHANNELS * height);
}

// Returns true if the current frame is a key-frame.
static int IsKeyFrame(const WebPIterator* const curr,
                      const WebPIterator* const prev,
                      int prev_frame_was_key_frame,
                      int canvas_width, int canvas_height) {
  if (curr->frame_num == 1) {
    return 1;
  } else if ((!curr->has_alpha || curr->blend_method == WEBP_MUX_NO_BLEND) &&
             IsFullFrame(curr->width, curr->height,
                         canvas_width, canvas_height)) {
    return 1;
  } else {
    return (prev->dispose_method == WEBP_MUX_DISPOSE_BACKGROUND) &&
           (IsFullFrame(prev->width, prev->height, canvas_width,
                        canvas_height) ||
            prev_frame_was_key_frame);
  }
}

// Blend a single channel of 'src' over 'dst', given their alpha channel values.
static uint8_t BlendChannel(uint32_t src, uint8_t src_a, uint32_t dst,
                            uint8_t dst_a, uint32_t scale, int shift) {
  const uint8_t src_channel = (src >> shift) & 0xff;
  const uint8_t dst_channel = (dst >> shift) & 0xff;
  const uint32_t blend_unscaled = src_channel * src_a + dst_channel * dst_a;
  assert(blend_unscaled < (1ULL << 32) / scale);
  return (blend_unscaled * scale) >> 24;
}

// Blend 'src' over 'dst' assuming they are NOT pre-multiplied by alpha.
static uint32_t BlendPixel(uint32_t src, uint32_t dst) {
  const uint8_t src_a = (src >> 24) & 0xff;

  if (src_a == 0) {
    return dst;
  } else {
    const uint8_t dst_a = (dst >> 24) & 0xff;
    // This is the approximate integer arithmetic for the actual formula:
    // dst_factor_a = (dst_a * (255 - src_a)) / 255.
    const uint8_t dst_factor_a = (dst_a * (256 - src_a)) >> 8;
    const uint8_t blend_a = src_a + dst_factor_a;
    const uint32_t scale = (1UL << 24) / blend_a;

    const uint8_t blend_r =
        BlendChannel(src, src_a, dst, dst_factor_a, scale, 0);
    const uint8_t blend_g =
        BlendChannel(src, src_a, dst, dst_factor_a, scale, 8);
    const uint8_t blend_b =
        BlendChannel(src, src_a, dst, dst_factor_a, scale, 16);
    assert(src_a + dst_factor_a < 256);

    return (blend_r << 0) |
           (blend_g << 8) |
           (blend_b << 16) |
           ((uint32_t)blend_a << 24);
  }
}

// Returns two ranges (<left, width> pairs) at row 'canvas_y', that belong to
// 'src' but not 'dst'. A point range is empty if the corresponding width is 0.
static void FindBlendRangeAtRow(const WebPIterator* const src,
                                const WebPIterator* const dst, int canvas_y,
                                int* const left1, int* const width1,
                                int* const left2, int* const width2) {
  const int src_max_x = src->x_offset + src->width;
  const int dst_max_x = dst->x_offset + dst->width;
  const int dst_max_y = dst->y_offset + dst->height;
  assert(canvas_y >= src->y_offset && canvas_y < (src->y_offset + src->height));
  *left1 = -1;
  *width1 = 0;
  *left2 = -1;
  *width2 = 0;

  if (canvas_y < dst->y_offset || canvas_y >= dst_max_y ||
      src->x_offset >= dst_max_x || src_max_x <= dst->x_offset) {
    *left1 = src->x_offset;
    *width1 = src->width;
    return;
  }

  if (src->x_offset < dst->x_offset) {
    *left1 = src->x_offset;
    *width1 = dst->x_offset - src->x_offset;
  }

  if (src_max_x > dst_max_x) {
    *left2 = dst_max_x;
    *width2 = src_max_x - dst_max_x;
  }
}

// Blend 'num_pixels' in 'src' over 'dst'.
static void BlendPixelRow(uint32_t* const src, const uint32_t* const dst,
                          int num_pixels) {
  int i;
  for (i = 0; i < num_pixels; ++i) {
    uint32_t* const src_pixel_ptr = &src[i];
    const uint8_t src_alpha = (*src_pixel_ptr >> 24) & 0xff;
    if (src_alpha != 0xff) {
      const uint32_t dst_pixel = dst[i];
      *src_pixel_ptr = BlendPixel(*src_pixel_ptr, dst_pixel);
    }
  }
}

int WebPAnimDecoderGetNext(WebPAnimDecoder* dec,
                           uint8_t** rgba_ptr, int* timestamp_ptr) {
  WebPIterator iter;
  uint32_t width;
  uint32_t height;
  int is_key_frame;
  int timestamp;

  if (dec == NULL || rgba_ptr == NULL || timestamp_ptr == NULL) return 0;
  if (!WebPAnimDecoderHasMoreFrames(dec)) return 0;

  width = dec->info_.canvas_width;
  height = dec->info_.canvas_height;

  // Get compressed frame.
  if (!WebPDemuxGetFrame(dec->demux_, dec->next_frame_, &iter)) {
    return 0;
  }
  timestamp = dec->prev_frame_timestamp_ + iter.duration;

  // Initialize.
  is_key_frame = IsKeyFrame(&iter, &dec->prev_iter_,
                            dec->prev_frame_was_keyframe_, width, height);
  if (is_key_frame) {
    ZeroFillCanvas(dec->curr_frame_, width, height);
  } else {
    CopyCanvas(dec->prev_frame_disposed_, dec->curr_frame_, width, height);
  }

  // Decode.
  {
    const uint8_t* input = iter.fragment.bytes;
    const size_t input_size = iter.fragment.size;
    const size_t output_offset =
        (iter.y_offset * width + iter.x_offset) * NUM_CHANNELS;
    uint8_t* output = dec->curr_frame_ + output_offset;
    const int output_stride = NUM_CHANNELS * width;
    const size_t output_size = output_stride * iter.height;

    if (WebPDecodeRGBAInto(input, input_size, output, output_size,
                           output_stride) == NULL) {
      goto Error;
    }
  }

  // During the decoding of current frame, we may have set some pixels to be
  // transparent (i.e. alpha < 255). However, the value of each of these
  // pixels should have been determined by blending it against the value of
  // that pixel in the previous frame if the blend method is WEBP_MUX_BLEND.
  if (iter.frame_num > 1 && iter.blend_method == WEBP_MUX_BLEND &&
      !is_key_frame) {
    if (dec->prev_iter_.dispose_method == WEBP_MUX_DISPOSE_NONE) {
      int y;
      // Blend transparent pixels with pixels in previous canvas.
      for (y = 0; y < iter.height; ++y) {
        const size_t offset =
            (iter.y_offset + y) * width + iter.x_offset;
        BlendPixelRow((uint32_t*)dec->curr_frame_ + offset,
                      (uint32_t*)dec->prev_frame_disposed_ + offset,
                      iter.width);
      }
    } else {
      int y;
      assert(dec->prev_iter_.dispose_method == WEBP_MUX_DISPOSE_BACKGROUND);
      // We need to blend a transparent pixel with its value just after
      // initialization. That is, blend it with:
      // * Fully transparent pixel if it belongs to prevRect <-- No-op.
      // * The pixel in the previous canvas otherwise <-- Need alpha-blending.
      for (y = 0; y < iter.height; ++y) {
        const int canvas_y = iter.y_offset + y;
        int left1, width1, left2, width2;
        FindBlendRangeAtRow(&iter, &dec->prev_iter_, canvas_y, &left1, &width1,
                            &left2, &width2);
        if (width1 > 0) {
          const size_t offset1 = canvas_y * width + left1;
          BlendPixelRow((uint32_t*)dec->curr_frame_ + offset1,
                        (uint32_t*)dec->prev_frame_disposed_ + offset1, width1);
        }
        if (width2 > 0) {
          const size_t offset2 = canvas_y * width + left2;
          BlendPixelRow((uint32_t*)dec->curr_frame_ + offset2,
                        (uint32_t*)dec->prev_frame_disposed_ + offset2, width2);
        }
      }
    }
  }

  // Update info of the previous frame and dispose it for the next iteration.
  dec->prev_frame_timestamp_ = timestamp;
  dec->prev_iter_ = iter;
  dec->prev_frame_was_keyframe_ = is_key_frame;
  CopyCanvas(dec->curr_frame_, dec->prev_frame_disposed_, width, height);
  if (dec->prev_iter_.dispose_method == WEBP_MUX_DISPOSE_BACKGROUND) {
    ZeroFillFrameRect(dec->prev_frame_disposed_, width * NUM_CHANNELS,
                      dec->prev_iter_.x_offset, dec->prev_iter_.y_offset,
                      dec->prev_iter_.width, dec->prev_iter_.height);
  }
  ++dec->next_frame_;

  // All OK, fill in the values.
  *rgba_ptr = dec->curr_frame_;
  *timestamp_ptr = timestamp;
  return 1;

Error:
  WebPDemuxReleaseIterator(&iter);
  return 0;
}

int WebPAnimDecoderHasMoreFrames(const WebPAnimDecoder* dec) {
  if (dec == NULL) return 0;
  return (dec->next_frame_ <= (int)dec->info_.frame_count);
}

void WebPAnimDecoderReset(WebPAnimDecoder* dec) {
  if (dec != NULL) {
    dec->prev_frame_timestamp_ = 0;
    memset(&dec->prev_iter_, 0, sizeof(dec->prev_iter_));
    dec->prev_frame_was_keyframe_ = 0;
    dec->next_frame_ = 1;
  }
}

void WebPAnimDecoderDelete(WebPAnimDecoder* dec) {
  if (dec != NULL) {
    WebPDemuxDelete(dec->demux_);
    WebPSafeFree(dec->curr_frame_);
    WebPSafeFree(dec->prev_frame_disposed_);
    WebPSafeFree(dec);
  }
}
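A side note on the arithmetic in BlendChannel()/BlendPixel() above: the line 'dst_factor_a = (dst_a * (256 - src_a)) >> 8' approximates the exact '(dst_a * (255 - src_a)) / 255'. A small standalone check (not part of the patch, plain C) confirms the two never differ by more than one alpha level:

    #include <stdio.h>
    #include <stdlib.h>

    // Compares the exact and approximate dst_factor_a over all alpha pairs.
    // (src_a == 0 is excluded: BlendPixel() returns 'dst' early in that case.)
    int main(void) {
      int src_a, dst_a, max_diff = 0;
      for (src_a = 1; src_a < 256; ++src_a) {
        for (dst_a = 0; dst_a < 256; ++dst_a) {
          const int exact = (dst_a * (255 - src_a)) / 255;
          const int approx = (dst_a * (256 - src_a)) >> 8;
          const int diff = abs(exact - approx);
          if (diff > max_diff) max_diff = diff;
        }
      }
      printf("max |exact - approx| = %d\n", max_diff);   // Prints 1.
      return 0;
    }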
@@ -54,7 +54,7 @@
extern "C" {
#endif

#define WEBP_DEMUX_ABI_VERSION 0x0101    // MAJOR(8b) + MINOR(8b)
#define WEBP_DEMUX_ABI_VERSION 0x0102    // MAJOR(8b) + MINOR(8b)

// Note: forward declaring enumerations is not allowed in (strict) C and C++,
// the types are left here for reference.
@@ -63,6 +63,7 @@ extern "C" {
typedef struct WebPDemuxer WebPDemuxer;
typedef struct WebPIterator WebPIterator;
typedef struct WebPChunkIterator WebPChunkIterator;
typedef struct WebPAnimInfo WebPAnimInfo;

//------------------------------------------------------------------------------

@@ -216,6 +217,98 @@ WEBP_EXTERN(int) WebPDemuxPrevChunk(WebPChunkIterator* iter);
WEBP_EXTERN(void) WebPDemuxReleaseChunkIterator(WebPChunkIterator* iter);

//------------------------------------------------------------------------------
// WebPAnimDecoder API
//
// This API allows decoding (possibly) animated WebP images.
//
// Code Example:
/*
  WebPAnimDecoder* dec = WebPAnimDecoderNew(webp_data);
  WebPAnimInfo anim_info;
  WebPAnimDecoderGetInfo(dec, &anim_info);
  for (uint32_t i = 0; i < anim_info.loop_count; ++i) {
    while (WebPAnimDecoderHasMoreFrames(dec)) {
      uint8_t* frame_rgba;
      int timestamp;
      WebPAnimDecoderGetNext(dec, &frame_rgba, &timestamp);
      // ... (Render 'frame_rgba' based on 'timestamp').
    }
    WebPAnimDecoderReset(dec);
  }
  WebPAnimDecoderDelete(dec);
*/

typedef struct WebPAnimDecoder WebPAnimDecoder;  // Main opaque object.

// Internal, version-checked, entry point.
WEBP_EXTERN(WebPAnimDecoder*) WebPAnimDecoderNewInternal(const WebPData*, int);

// Creates and initializes a WebPAnimDecoder object.
// Parameters:
//   webp_data - (in) WebP bitstream. This should remain unchanged during the
//                    lifetime of the output WebPAnimDecoder object.
// Returns:
//   A pointer to the newly created WebPAnimDecoder object, or NULL in case of
//   parsing/memory error.
static WEBP_INLINE WebPAnimDecoder* WebPAnimDecoderNew(
    const WebPData* webp_data) {
  return WebPAnimDecoderNewInternal(webp_data, WEBP_DEMUX_ABI_VERSION);
}

// Global information about the animation.
struct WebPAnimInfo {
  uint32_t canvas_width;
  uint32_t canvas_height;
  uint32_t loop_count;
  uint32_t bgcolor;
  uint32_t frame_count;
  uint32_t pad[4];   // padding for later use
};

// Get global information about the animation.
// Parameters:
//   dec - (in) decoder instance to get information from.
//   info - (out) global information fetched from the animation.
// Returns:
//   True on success.
WEBP_EXTERN(int) WebPAnimDecoderGetInfo(const WebPAnimDecoder* dec,
                                        WebPAnimInfo* info);

// Fetch the next frame from 'dec' in RGBA format. This will be a fully
// reconstructed canvas of size 'canvas_width * 4 * canvas_height', and not just
// the frame sub-rectangle.
// The returned 'rgba' buffer is valid only until the next call to
// WebPAnimDecoderGetNext(), WebPAnimDecoderReset() or WebPAnimDecoderDelete().
// Parameters:
//   dec - (in/out) decoder instance from which the next frame is to be fetched.
//   rgba - (out) decoded frame in RGBA format.
//   timestamp - (out) timestamp of the frame in milliseconds.
// Returns:
//   False if any of the arguments are NULL, or if there is a parsing or
//   decoding error, or if there are no more frames. Otherwise, returns true.
WEBP_EXTERN(int) WebPAnimDecoderGetNext(WebPAnimDecoder* dec,
                                        uint8_t** rgba, int* timestamp);

// Check if there are more frames left to decode.
// Parameters:
//   dec - (in) decoder instance to be checked.
// Returns:
//   True if 'dec' is not NULL and some frames are yet to be decoded.
//   Otherwise, returns false.
WEBP_EXTERN(int) WebPAnimDecoderHasMoreFrames(const WebPAnimDecoder* dec);

// Resets the WebPAnimDecoder object, so that next call to
// WebPAnimDecoderGetNext() will restart decoding from 1st frame. This would be
// helpful when all frames need to be decoded multiple times (e.g.
// info.loop_count times) without destroying and recreating the 'dec' object.
// Parameters:
//   dec - (in/out) decoder instance to be reset
WEBP_EXTERN(void) WebPAnimDecoderReset(WebPAnimDecoder* dec);

// Deletes the WebPAnimDecoder object.
// Parameters:
//   dec - (in/out) decoder instance to be deleted
WEBP_EXTERN(void) WebPAnimDecoderDelete(WebPAnimDecoder* dec);

#ifdef __cplusplus
}    // extern "C"