mirror of https://github.com/webmproject/libwebp.git
gif2webp: Insert independent frames at regular intervals.
We use the 'do not blend' option for creating independent frames. We also mark the already independent frames as 'do not blend'. This bounds the maximum number of frames that need to be decoded to decode a given frame, leading to much better decoding performance.

Change-Id: I7cef98af2b53751ec36993fd2bd54f7f4c4aad2b
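In outline, the change keeps a running count of frames since the last independent frame and forces a key frame, stored with 'do not blend', once that count exceeds the configured interval (-kmax). A minimal C sketch of that decision follows; BlendMethod and PickBlendMethod are hypothetical names that only mirror the logic the patch adds to ReadSubImage() in the diff below, not the exact gif2webp code.

#include <stddef.h>  /* size_t */

typedef enum { DO_BLEND, DO_NOT_BLEND } BlendMethod;

/* Decide whether the frame about to be written should be stored as an
 * independent ('do not blend') frame. In the actual patch,
 * ConvertToKeyFrame() fills transparent pixels from the previous canvas so
 * the frame stops depending on it. */
static BlendMethod PickBlendMethod(int is_first_frame, int is_key_frame,
                                   size_t key_frame_interval,
                                   size_t* const count_since_key_frame) {
  if (is_first_frame || is_key_frame) {
    *count_since_key_frame = 0;       /* already independently decodable */
  } else {
    ++*count_since_key_frame;
    if (*count_since_key_frame > key_frame_interval) {
      /* here the patch converts the frame into a key frame */
      *count_since_key_frame = 0;
    }
  }
  /* Key frames are marked 'do not blend'; other frames blend as in the GIF. */
  return (*count_since_key_frame == 0) ? DO_NOT_BLEND : DO_BLEND;
}

With an interval of k, at most k dependent frames can follow a key frame, so decoding any single frame touches roughly k+1 frames at most, which is the bound the commit message refers to.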
parent ede3602e5d
commit f0d6a14b64
@@ -36,11 +36,60 @@
 
 static int transparent_index = -1;  // No transparency by default.
 
-static void ClearPicture(WebPPicture* const picture) {
-  int x, y;
-  for (y = 0; y < picture->height; ++y) {
-    uint32_t* const dst = picture->argb + y * picture->argb_stride;
-    for (x = 0; x < picture->width; ++x) dst[x] = TRANSPARENT_COLOR;
+static void ClearRectangle(WebPPicture* const picture,
+                           int left, int top, int width, int height) {
+  int j;
+  for (j = top; j < top + height; ++j) {
+    uint32_t* const dst = picture->argb + j * picture->argb_stride;
+    int i;
+    for (i = left; i < left + width; ++i) {
+      dst[i] = TRANSPARENT_COLOR;
+    }
+  }
+}
+
+typedef struct WebPFrameRect {
+  int x_offset, y_offset, width, height;
+} WebPFrameRect;
+
+// Clear pixels in 'picture' within given 'rect' to transparent color.
+static void ClearPicture(WebPPicture* const picture,
+                         const WebPFrameRect* const rect) {
+  if (rect != NULL) {
+    ClearRectangle(picture, rect->x_offset, rect->y_offset,
+                   rect->width, rect->height);
+  } else {
+    ClearRectangle(picture, 0, 0, picture->width, picture->height);
+  }
+}
+
+// TODO: Also used in picture.c. Move to a common location.
+static void CopyPlane(const uint8_t* src, int src_stride,
+                      uint8_t* dst, int dst_stride, int width, int height) {
+  while (height-- > 0) {
+    memcpy(dst, src, width);
+    src += src_stride;
+    dst += dst_stride;
+  }
+}
+
+// Given 'curr_frame' and its 'rect', blend it in the 'canvas'.
+static void BlendCurrentFrameInCanvas(const WebPPicture* const curr_frame,
+                                      const WebPFrameRect* const rect,
+                                      WebPPicture* const canvas) {
+  int j;
+  assert(canvas->width == curr_frame->width &&
+         canvas->height == curr_frame->height);
+  for (j = rect->y_offset; j < rect->y_offset + rect->height; ++j) {
+    int i;
+    for (i = rect->x_offset; i < rect->x_offset + rect->width; ++i) {
+      const uint32_t src =
+          curr_frame->argb[j * curr_frame->argb_stride + i];
+      const int src_alpha = src >> 24;
+      if (src_alpha != 0) {
+        canvas->argb[j * canvas->argb_stride + i] = src;
+      }
+    }
   }
 }
 
@@ -60,25 +109,104 @@ static void Remap(const uint8_t* const src, const GifFileType* const gif,
   }
 }
 
-static int ReadSubImage(GifFileType* gif, WebPPicture* pic, WebPPicture* view) {
+// Returns true if 'curr' frame is a key frame, that is, it can be decoded
+// independently of 'prev' canvas.
+static int IsKeyFrame(const WebPPicture* const curr,
+                      const WebPFrameRect* const curr_rect,
+                      const WebPPicture* const prev) {
+  int i, j;
+  int is_key_frame = 1;
+
+  // If previous canvas (with previous frame disposed) is all transparent,
+  // current frame is a key frame.
+  for (i = 0; i < prev->width; ++i) {
+    for (j = 0; j < prev->height; ++j) {
+      const uint32_t prev_alpha = (prev->argb[j * prev->argb_stride + i]) >> 24;
+      if (prev_alpha != 0) {
+        is_key_frame = 0;
+        break;
+      }
+    }
+    if (!is_key_frame) break;
+  }
+  if (is_key_frame) return 1;
+
+  // If current frame covers the whole canvas and does not contain any
+  // transparent pixels that depend on previous canvas, then current frame is
+  // a key frame.
+  if (curr_rect->width == curr->width && curr_rect->height == curr->height) {
+    assert(curr_rect->x_offset == 0 && curr_rect->y_offset == 0);
+    is_key_frame = 1;
+    for (j = 0; j < prev->height; ++j) {
+      for (i = 0; i < prev->width; ++i) {
+        const uint32_t prev_alpha =
+            (prev->argb[j * prev->argb_stride + i]) >> 24;
+        const uint32_t curr_alpha =
+            (curr->argb[j * curr->argb_stride + i]) >> 24;
+        if (curr_alpha != 0xff && prev_alpha != 0) {
+          is_key_frame = 0;
+          break;
+        }
+      }
+      if (!is_key_frame) break;
+    }
+    if (is_key_frame) return 1;
+  }
+
+  return 0;
+}
+
+// Convert 'curr' frame to a key frame.
+static void ConvertToKeyFrame(WebPPicture* const curr,
+                              const WebPPicture* const prev,
+                              WebPFrameRect* const rect) {
+  int j;
+  assert(curr->width == prev->width && curr->height == prev->height);
+
+  // Replace transparent pixels of current canvas with those from previous
+  // canvas (with previous frame disposed).
+  for (j = 0; j < curr->height; ++j) {
+    int i;
+    for (i = 0; i < curr->width; ++i) {
+      uint32_t* const curr_pixel = curr->argb + j * curr->argb_stride + i;
+      const int curr_alpha = *curr_pixel >> 24;
+      if (curr_alpha == 0) {
+        *curr_pixel = prev->argb[j * prev->argb_stride + i];
+      }
+    }
+  }
+
+  // Frame rectangle now covers the whole canvas.
+  rect->x_offset = 0;
+  rect->y_offset = 0;
+  rect->width = curr->width;
+  rect->height = curr->height;
+}
+
+static int ReadSubImage(GifFileType* gif, WebPPicture* const prev,
+                        int is_first_frame, size_t key_frame_interval,
+                        size_t* const count_since_key_frame,
+                        WebPFrameRect* const gif_rect, WebPPicture* sub_image,
+                        WebPMuxFrameInfo* const info, WebPPicture* const curr) {
   const GifImageDesc image_desc = gif->Image;
-  const int offset_x = image_desc.Left;
-  const int offset_y = image_desc.Top;
-  const int sub_w = image_desc.Width;
-  const int sub_h = image_desc.Height;
   uint32_t* dst = NULL;
   uint8_t* tmp = NULL;
   int ok = 0;
+  WebPFrameRect rect = {
+    image_desc.Left, image_desc.Top, image_desc.Width, image_desc.Height
+  };
+  *gif_rect = rect;
 
   // Use a view for the sub-picture:
-  if (!WebPPictureView(pic, offset_x, offset_y, sub_w, sub_h, view)) {
+  if (!WebPPictureView(curr, rect.x_offset, rect.y_offset,
+                       rect.width, rect.height, sub_image)) {
     fprintf(stderr, "Sub-image %dx%d at position %d,%d is invalid!\n",
-            sub_w, sub_h, offset_x, offset_y);
+            rect.width, rect.height, rect.x_offset, rect.y_offset);
     goto End;
   }
-  dst = view->argb;
+  dst = sub_image->argb;
 
-  tmp = (uint8_t*)malloc(sub_w * sizeof(*tmp));
+  tmp = (uint8_t*)malloc(rect.width * sizeof(*tmp));
   if (tmp == NULL) goto End;
 
   if (image_desc.Interlace) {  // Interlaced image.
@@ -88,21 +216,56 @@ static int ReadSubImage(GifFileType* gif, WebPPicture* pic, WebPPicture* view) {
     int pass;
     for (pass = 0; pass < 4; ++pass) {
       int y;
-      for (y = interlace_offsets[pass]; y < sub_h; y += interlace_jumps[pass]) {
-        if (DGifGetLine(gif, tmp, sub_w) == GIF_ERROR) goto End;
-        Remap(tmp, gif, dst + y * view->argb_stride, sub_w);
+      for (y = interlace_offsets[pass]; y < rect.height;
+           y += interlace_jumps[pass]) {
+        if (DGifGetLine(gif, tmp, rect.width) == GIF_ERROR) goto End;
+        Remap(tmp, gif, dst + y * sub_image->argb_stride, rect.width);
       }
     }
   } else {  // Non-interlaced image.
     int y;
-    for (y = 0; y < sub_h; ++y) {
-      if (DGifGetLine(gif, tmp, sub_w) == GIF_ERROR) goto End;
-      Remap(tmp, gif, dst + y * view->argb_stride, sub_w);
+    for (y = 0; y < rect.height; ++y) {
+      if (DGifGetLine(gif, tmp, rect.width) == GIF_ERROR) goto End;
+      Remap(tmp, gif, dst + y * sub_image->argb_stride, rect.width);
     }
   }
-  // re-align the view with even offset (and adjust dimensions if needed).
-  WebPPictureView(pic, offset_x & ~1, offset_y & ~1,
-                  sub_w + (offset_x & 1), sub_h + (offset_y & 1), view);
+
+  // Snap to even offsets (and adjust dimensions if needed).
+  rect.width += (rect.x_offset & 1);
+  rect.height += (rect.y_offset & 1);
+  rect.x_offset &= ~1;
+  rect.y_offset &= ~1;
+
+  // Make this a key frame if needed.
+  if (is_first_frame || IsKeyFrame(curr, &rect, prev)) {
+    *count_since_key_frame = 0;
+  } else {
+    ++*count_since_key_frame;
+    if (*count_since_key_frame > key_frame_interval) {
+      ConvertToKeyFrame(curr, prev, &rect);
+      *count_since_key_frame = 0;
+    }
+  }
+
+  if (*count_since_key_frame == 0) {
+    info->blend_method = WEBP_MUX_NO_BLEND;  // Key frame, so no need to blend.
+  } else {
+    info->blend_method = WEBP_MUX_BLEND;  // The blending method in GIF.
+  }
+
+  // Update the canvas so that it can be used to check for and/or create an
+  // key frame in the next iterations.
+  if (info->blend_method == WEBP_MUX_NO_BLEND) {
+    CopyPlane((uint8_t*)curr->argb, 4 * curr->argb_stride, (uint8_t*)prev->argb,
+              4 * prev->argb_stride, 4 * curr->width, curr->height);
+  } else {
+    BlendCurrentFrameInCanvas(curr, gif_rect, prev);
+  }
+
+  WebPPictureView(curr, rect.x_offset, rect.y_offset, rect.width, rect.height,
+                  sub_image);
+  info->x_offset = rect.x_offset;
+  info->y_offset = rect.y_offset;
   ok = 1;
 
  End:
@@ -171,6 +334,7 @@ static void Help(void) {
   printf("  -lossy ................. Encode image using lossy compression.\n");
   printf("  -q <float> ............. quality factor (0:small..100:big)\n");
   printf("  -m <int> ............... compression method (0=fast, 6=slowest)\n");
+  printf("  -kmax <int> ............ Max distance between key frames\n");
   printf("  -f <int> ............... filter strength (0=off..100)\n");
   printf("\n");
   printf("  -version ............... print version number and exit.\n");
@@ -189,11 +353,14 @@ int main(int argc, const char *argv[]) {
   const char *in_file = NULL, *out_file = NULL;
   FILE* out = NULL;
   GifFileType* gif = NULL;
-  WebPPicture picture;
+  WebPPicture current;
+  WebPPicture previous;
   WebPMuxFrameInfo frame;
   WebPMuxAnimParams anim = { WHITE_COLOR, 0 };
 
   int is_first_frame = 1;
+  size_t count_since_key_frame = 0;  // Frames seen since the last key frame.
+  size_t key_frame_interval = 9;     // Max distance between key frames.
   int done;
   int c;
   int quiet = 0;
@@ -206,8 +373,10 @@ int main(int argc, const char *argv[]) {
   memset(&frame, 0, sizeof(frame));
   frame.id = WEBP_CHUNK_ANMF;
   frame.dispose_method = WEBP_MUX_DISPOSE_BACKGROUND;
+  frame.blend_method = WEBP_MUX_BLEND;
 
-  if (!WebPConfigInit(&config) || !WebPPictureInit(&picture)) {
+  if (!WebPConfigInit(&config) ||
+      !WebPPictureInit(&current) || !WebPPictureInit(&previous)) {
     fprintf(stderr, "Error! Version mismatch!\n");
     return -1;
   }
@@ -230,6 +399,9 @@ int main(int argc, const char *argv[]) {
       config.quality = (float)strtod(argv[++c], NULL);
     } else if (!strcmp(argv[c], "-m") && c < argc - 1) {
       config.method = strtol(argv[++c], NULL, 0);
+    } else if (!strcmp(argv[c], "-kmax") && c < argc - 1) {
+      key_frame_interval = strtoul(argv[++c], NULL, 0);
+      if (key_frame_interval == 0) key_frame_interval = ~0;
     } else if (!strcmp(argv[c], "-f") && c < argc - 1) {
      config.filter_strength = strtol(argv[++c], NULL, 0);
     } else if (!strcmp(argv[c], "-version")) {
@@ -272,11 +444,12 @@ int main(int argc, const char *argv[]) {
 #endif
   if (gif == NULL) goto End;
 
-  // Allocate picture buffer
-  picture.width = gif->SWidth;
-  picture.height = gif->SHeight;
-  picture.use_argb = 1;
-  if (!WebPPictureAlloc(&picture)) goto End;
+  // Allocate current buffer
+  current.width = gif->SWidth;
+  current.height = gif->SHeight;
+  current.use_argb = 1;
+  if (!WebPPictureAlloc(&current)) goto End;
+  if (!WebPPictureCopy(&current, &previous)) goto End;
 
   mux = WebPMuxNew();
   if (mux == NULL) {
@@ -293,14 +466,15 @@ int main(int argc, const char *argv[]) {
     switch (type) {
       case IMAGE_DESC_RECORD_TYPE: {
         WebPPicture sub_image;
+        WebPFrameRect gif_rect;
         WebPMemoryWriter memory;
 
-        if (frame.dispose_method == WEBP_MUX_DISPOSE_BACKGROUND) {
-          ClearPicture(&picture);
-        }
-
         if (!DGifGetImageDesc(gif)) goto End;
-        if (!ReadSubImage(gif, &picture, &sub_image)) goto End;
+        if (!ReadSubImage(gif, &previous, is_first_frame, key_frame_interval,
+                          &count_since_key_frame, &gif_rect, &sub_image, &frame,
+                          &current)) {
+          goto End;
+        }
 
         if (!config.lossless) {
           // We need to call BGRA variant because of the way we do Remap(). Note
@@ -317,7 +491,7 @@ int main(int argc, const char *argv[]) {
         sub_image.custom_ptr = &memory;
         WebPMemoryWriterInit(&memory);
         if (!WebPEncode(&config, &sub_image)) {
-          fprintf(stderr, "Error! Cannot encode picture as WebP\n");
+          fprintf(stderr, "Error! Cannot encode current as WebP\n");
           fprintf(stderr, "Error code: %d\n", sub_image.error_code);
           goto End;
         }
@@ -325,8 +499,6 @@ int main(int argc, const char *argv[]) {
         // Now we have all the info about the frame, as a Graphic Control
         // Extension Block always appears before the Image Descriptor Block.
         // So add the frame to mux.
-        frame.x_offset = gif->Image.Left & ~1;
-        frame.y_offset = gif->Image.Top & ~1;
         frame.bitstream.bytes = memory.mem;
         frame.bitstream.size = memory.size;
         err = WebPMuxPushFrame(mux, &frame, 1);
@@ -340,11 +512,17 @@ int main(int argc, const char *argv[]) {
                  sub_image.width, sub_image.height,
                  frame.x_offset, frame.y_offset,
                  frame.duration);
-          printf("dispose:%d transparent index:%d\n",
-                 frame.dispose_method, transparent_index);
+          printf("dispose:%d blend:%d transparent index:%d\n",
+                 frame.dispose_method, frame.blend_method, transparent_index);
         }
         WebPDataClear(&frame.bitstream);
         WebPPictureFree(&sub_image);
+        is_first_frame = 0;
+
+        if (frame.dispose_method == WEBP_MUX_DISPOSE_BACKGROUND) {
+          ClearPicture(&current, NULL);
+          ClearPicture(&previous, &gif_rect);
+        }
         break;
       }
       case EXTENSION_RECORD_TYPE: {
@@ -380,8 +558,8 @@ int main(int argc, const char *argv[]) {
            fprintf(stderr, "GIF decode warning: invalid background color "
                    "index. Assuming white background.\n");
          }
-         ClearPicture(&picture);
-         is_first_frame = 0;
+         ClearPicture(&current, NULL);
+         ClearPicture(&previous, NULL);
        }
        break;
      }
@@ -520,7 +698,8 @@ int main(int argc, const char *argv[]) {
  End:
   WebPDataClear(&webp_data);
   WebPMuxDelete(mux);
-  WebPPictureFree(&picture);
+  WebPPictureFree(&current);
+  WebPPictureFree(&previous);
   if (out != NULL && out_file != NULL) fclose(out);
 
   if (gif_error != GIF_OK) {
@@ -49,6 +49,19 @@ additional encoding possibilities and decide on the quality gain.
 Lower value can result is faster processing time at the expense of
 larger file size and lower compression quality.
 .TP
+.BI \-kmax " int
+Relevant only for animated images with large number of frames (>50).
+Specify the maximum distance between consecutive key frames (independently
+decodable frames) in the output image. The tool will insert some key frames into
+the output image as needed so that this criteria is satisfied. A value of 0 will
+turn off insertion of key frames.
+Typical values are in the range 5 to 30. Default value is 9.
+When lower values are used, more frames will be converted to key frames. This
+may lead to smaller number of frames required to decode a frame on average,
+thereby improving the decoding performance. But this may lead to slightly bigger
+file sizes.
+Higher values may lead to worse decoding performance, but smaller file sizes.
+.TP
 .BI \-f " int
 For lossy encoding only (specified by the \-lossy option). Specify the strength
 of the deblocking filter, between 0 (no filtering) and 100 (maximum filtering).
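As a usage note (the invocation form is assumed here, not part of this diff): something like `gif2webp -kmax 10 input.gif -o output.webp` would cap runs of dependent frames at 10, so decoding any single frame needs at most about 11 frames' worth of work, at the cost of slightly larger output; omitting -kmax keeps the default interval of 9 shown in the diff above.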