Compare commits

..

No commits in common. "main" and "webp-rfc9649" have entirely different histories.

212 changed files with 5410 additions and 7481 deletions

View File

@ -17,4 +17,3 @@ Roberto Alanis <alanisbaez@google.com>
Brian Ledger <brianpl@google.com>
Maryla Ustarroz-Calonge <maryla@google.com>
Yannis Guyon <yguyon@google.com>
Henner Zeller <hzeller@google.com> <h.zeller@acm.org>

View File

@ -10,16 +10,12 @@ Contributors:
- Christian Duvivier (cduvivier at google dot com)
- Christopher Degawa (ccom at randomderp dot com)
- Clement Courbet (courbet at google dot com)
- devtools-clrobot at google dot com (devtools-clrobot@google dot com)
- Djordje Pesut (djordje dot pesut at imgtec dot com)
- Frank (1433351828 at qq dot com)
- Frank Barchard (fbarchard at google dot com)
- Henner Zeller (hzeller at google dot com)
- Hui Su (huisu at google dot com)
- H. Vetinari (h dot vetinari at gmx dot com)
- Ilya Kurdyukov (jpegqs at gmail dot com)
- Ingvar Stepanyan (rreverser at google dot com)
- Istvan Stefan (Istvan dot Stefan at arm dot com)
- James Zern (jzern at google dot com)
- Jan Engelhardt (jengelh at medozas dot de)
- Jehan (jehan at girinstud dot io)
@ -66,7 +62,6 @@ Contributors:
- Vincent Rabaud (vrabaud at google dot com)
- Vlad Tsyrklevich (vtsyrklevich at chromium dot org)
- Wan-Teh Chang (wtc at google dot com)
- wrv (wrv at utexas dot edu)
- Yang Zhang (yang dot zhang at arm dot com)
- Yannis Guyon (yguyon at google dot com)
- Zhi An Ng (zhin at chromium dot org)

View File

@ -9,7 +9,11 @@
if(APPLE)
cmake_minimum_required(VERSION 3.17)
else()
cmake_minimum_required(VERSION 3.16)
cmake_minimum_required(VERSION 3.7)
endif()
if(POLICY CMP0072)
cmake_policy(SET CMP0072 NEW)
endif()
project(WebP C)
@ -84,15 +88,6 @@ if(WEBP_BUILD_WEBP_JS)
message(NOTICE
"wasm2js does not support SIMD, disabling webp.js generation.")
endif()
if(NOT EMSCRIPTEN_VERSION)
message(
WARNING
"EMSCRIPTEN_VERSION not detected!\n"
"WEBP_BUILD_WEBP_JS is only supported with emcmake/emmake.\n"
"The build may fail if those tools are not used. See webp_js/README.md."
)
endif()
endif()
set(SHARPYUV_DEP_LIBRARIES)
@ -572,8 +567,7 @@ if(WEBP_BUILD_GIF2WEBP)
add_executable(gif2webp ${GIF2WEBP_SRCS})
target_link_libraries(gif2webp exampleutil imageioutil webp libwebpmux
${WEBP_DEP_GIF_LIBRARIES})
target_include_directories(gif2webp PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/src
${CMAKE_CURRENT_SOURCE_DIR})
target_include_directories(gif2webp PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/src)
install(TARGETS gif2webp RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR})
endif()

200
ChangeLog
View File

@ -1,203 +1,3 @@
370aa581 webp_js/README.md: add some more code formatting (``)
f83c6b32 CMakeLists: add warning for incorrect emscripten config
6a3e656b update ChangeLog (tag: v1.6.0-rc1)
bf0bf1e7 api.md: add WebPValidateDecoderConfig to pseudocode
e8ae210d update NEWS
ce53efd7 bump version to 1.6.0
1c333170 update AUTHORS
85e098e5 webpmux: fix heap overflow w/-get/-set
418340d8 Merge "Make histogram allocation and access more readable and type-safe." into main
23ce76fa Merge "VP8BitReaderSetBuffer: move NULL check to call site" into main
bbf3cbb1 VP8BitReaderSetBuffer: move NULL check to call site
f6b87e03 Fix const style guide
8852f89a Have lossless return the same results with/without -mt
e015dcc0 Make histogram allocation and access more readable and type-safe.
753ed11e enc_neon.c: fix aarch64 compilation w/gcc < 8.5.0
0cd0b7a7 enc_fuzzer.cc: remove duplicate <cstdlib> include
2209ffba swig,cosmetics: normalize includes
15e2e1ee analysis_enc.c: remove unused include
98c27801 IWYU: Include all headers for symbols used in files.
eb3ff781 Only use valid histograms in VP8LHistogramSet
57e324e2 Refactor VP8LHistogram histogram_enc.cc
7191a602 Merge "Generalize trivial histograms" into main
19696e0a Merge "alpha_processing_sse2: quiet signed conv warning" into main
89b01ecc Merge "cwebp: add `-resize_mode`" into main
52a430a7 Generalize trivial histograms
e53e2130 Cache all costs in the histograms
f8b360c4 alpha_processing_sse2: quiet signed conv warning
eb4f8137 cwebp: add `-resize_mode`
ad52d5fc dec/dsp/enc/utils,cosmetics: rm struct member '_' suffix
ed7cd6a7 utils.c,cosmetics: rm struct member '_' suffix
3a23b0f0 random_utils.[hc],cosmetics: rm struct member '_' suffix
a99d0e6f quant_levels_dec_utils.c,cosmetics: rm struct member '_' suffix
1ed4654d huffman_encode_utils.[hc],cosmetics: rm struct member '_' suffix
f0689e48 config_enc.c,cosmetics: rm struct member '_' suffix
24262266 mux,cosmetics: rm struct member '_' suffix
3f54b1aa demux,cosmetics: rm struct member '_' suffix
295804e4 examples,cosmetics: rm struct member '_' suffix
5225592f Refactor VP8LHistogram to hide initializations from the user.
00338240 Remove some computations in histogram clustering
44f91b0d Speed DispatchAlpha_SSE2 up
ee8e8c62 Fix member naming for VP8LHistogram
a1ad3f1e Merge "Remove now unused ExtraCostCombined" into main
321561b4 Remove now unused ExtraCostCombined
e0ae21d2 WebPMemoryWriterClear: use WebPMemoryWriterInit
a4183d94 Remove the computation of ExtraCost when comparing histograms
f2b3f527 Get AVX2 into WebP lossless
7c70ff7a Clean dsp/lossless includes
9dd5ae81 Use the full register in PredictorSub13_SSE2
613be8fc Makefile.vc: add /MP to CFLAGS
1d86819f Merge changes I1437390a,I10a20de5,I1ac777d1 into main
743a5f09 enc_neon: enable vld1q_u8_x4 for clang & msvc
565da148 pngdec.c: add support for 'eXIf' tag
319860e9 pngdec.c: support ImageMagick app1 exif text data
815fc1e1 pngdec.c: add missing #ifdef for png_get_iCCP
980b708e enc_neon: fix build w/aarch64 gcc < 9.4.0
73b728cb cmake: bump minimum version to 3.16
6a22b670 Add a function to validate a WebPDecoderConfig
7ed2b10e Use consistently signed stride types.
654bfb04 Avoid nullptr arithmetic in VP8BitReaderSetBuffer
f8f24107 Fix potential "divide by zero" in examples found by coverity
2af6c034 Merge tag 'v1.5.0'
a4d7a715 update ChangeLog (tag: v1.5.0, origin/1.5.0)
c3d85ce4 update NEWS
ad14e811 tests/fuzzer/*: add missing <string_view> include
74cd026e fuzz_utils.cc: fix build error w/WEBP_REDUCE_SIZE
a027aa93 mux_demux_api_fuzzer.cc: fix -Wshadow warning
25e17c68 update ChangeLog (tag: v1.5.0-rc1)
aa2684fc update NEWS
36923846 bump version to 1.5.0
ceea8ff6 update AUTHORS
e4f7a9f0 img2webp: add a warning for unused options
1b4c967f Merge "Properly check the data size against the end of the RIFF chunk" into main
9e5ecfaf Properly check the data size against the end of the RIFF chunk
da0d9c7d examples: exit w/failure w/no args
fcff86c7 {gif,img}2webp: sync -m help w/cwebp
b76c4a84 man/img2webp.1: sync -m text w/cwebp.1 & gif2webp.1
30633519 muxread: fix reading of buffers > riff size
4c85d860 yuv.h: update RGB<->YUV coefficients in comment
0ab789e0 Merge changes I6dfedfd5,I2376e2dc into main
03236450 {ios,xcframework}build.sh: fix compilation w/Xcode 16
61e2cfda rework AddVectorEq_SSE2
7bda3deb rework AddVector_SSE2
2ddaaf0a Fix variable names in SharpYuvComputeConversionMatrix
a3ba6f19 Makefile.vc: fix gif2webp link error
f999d94f gif2webp: add -sharp_yuv/-near_lossless
dfdcb7f9 Merge "lossless.h: fix function declaration mismatches" into main (tag: webp-rfc9649)
78ed6839 fix overread in Intra4Preds_NEON
d516a68e lossless.h: fix function declaration mismatches
87406904 Merge "Improve documentation of SharpYuvConversionMatrix." into main
fdb229ea Merge changes I07a7e36a,Ib29980f7,I2316122d,I2356e314,I32b53dd3, ... into main
0c3cd9cc Improve documentation of SharpYuvConversionMatrix.
169dfbf9 disable Intra4Preds_NEON
2dd5eb98 dsp/yuv*: use WEBP_RESTRICT qualifier
23bbafbe dsp/upsampling*: use WEBP_RESTRICT qualifier
35915b38 dsp/rescaler*: use WEBP_RESTRICT qualifier
a32b436b dsp/lossless*: use WEBP_RESTRICT qualifier
04d4b4f3 dsp/filters*: use WEBP_RESTRICT qualifier
b1cb37e6 dsp/enc*: use WEBP_RESTRICT qualifier
201894ef dsp/dec*: use WEBP_RESTRICT qualifier
02eac8a7 dsp/cost*: use WEBP_RESTRICT qualifier
84b118c9 Merge "webp-container-spec: normalize notes & unknown chunk link" into main
052cf42f webp-container-spec: normalize notes & unknown chunk link
220ee529 Search for best predictor transform bits
78619478 Try to reduce the sampling for the entropy image
14f09ab7 webp-container-spec: reorder chunk size - N text
a78c5356 Remove a useless malloc for entropy image
bc491763 Merge "Refactor predictor finding" into main
34f92238 man/{cwebp,img2webp}.1: rm 'if needed' from -sharp_yuv
367ca938 Refactor predictor finding
a582b53b webp-lossless-bitstream-spec: clarify some text
0fd25d84 Merge "anim_encode.c: fix function ref in comment" into main
f8882913 anim_encode.c: fix function ref in comment
40e4ca60 specs_generation.md: update kramdown command line
57883c78 img2webp: add -exact/-noexact per-frame options
1c8eba97 img2webp,cosmetics: add missing '.' spacers to help
2e81017c Convert predictor_enc.c to fixed point
94de6c7f Merge "Fix fuzztest link errors w/-DBUILD_SHARED_LIBS=1" into main
51d9832a Fix fuzztest link errors w/-DBUILD_SHARED_LIBS=1
7bcb36b8 Merge "Fix static overflow warning." into main
8e0cc14c Fix static overflow warning.
cea68462 README.md: add security report note
615e5874 Merge "make VP8LPredictor[01]_C() static" into main
233e86b9 Merge changes Ie43dc5ef,I94cd8bab into main
1a29fd2f make VP8LPredictor[01]_C() static
dd9d3770 Do*Filter_*: remove row & num_rows parameters
ab451a49 Do*Filter_C: remove dead 'inverse' code paths
f9a480f7 {TrueMotion,TM16}_NEON: remove zero extension
04834aca Merge changes I25c30a9e,I0a192fc6,I4cf89575 into main
39a602af webp-lossless-bitstream-spec: normalize predictor transform ref
f28c837d Merge "webp-container-spec: align anim pseudocode w/prose" into main
74be8e22 Fix implicit conversion issues
0c01db7c Merge "Increase the transform bits if possible." into main
f2d6dc1e Increase the transform bits if possible.
caa19e5b update link to issue tracker
c9dd9bd4 webp-container-spec: align anim pseudocode w/prose
8a7c8dc6 WASM: Enable VP8L_USE_FAST_LOAD
f0c53cd9 WASM: don't use USE_GENERIC_TREE
eef903d0 WASM: Enable 64-bit BITS caching
6296cc8d iterator_enc: make VP8IteratorReset() static
fbd93896 histogram_enc: make VP8LGetHistogramSize static
cc7ff545 cost_enc: make VP8CalculateLevelCosts[] static
4e2828ba vp8l_dec: make VP8LClear() static
d742b24a Intra16Preds_NEON: fix truemotion saturation
c7bb4cb5 Intra4Preds_NEON: fix truemotion saturation
952a989b Merge "Remove TODO now that log is using fixed point." into main
dde11574 Remove TODO now that log is using fixed point.
a1ca153d Fix hidden myerr in my_error_exit
3bd94202 Merge changes Iff6e47ed,I24c67cd5,Id781e761 into main
d27d246e Merge "Convert VP8LFastSLog2 to fixed point" into main
4838611f Disable msg_code use in fuzzing mode
314a142a Use QuantizeBlock_NEON for VP8EncQuantizeBlockWHT on Arm
3bfb05e3 Add AArch64 Neon implementation of Intra16Preds
baa93808 Add AArch64 Neon implementation of Intra4Preds
41a5e582 Fix errors when compiling code as C++
fb444b69 Convert VP8LFastSLog2 to fixed point
c1c89f51 Fix WEBP_NODISCARD comment and C++ version
66408c2c Switch the histogram_enc.h API to fixed point
ac1e410d Remove leftover tiff dep
b78d3957 Disable TIFF on fuzztest.
cff21a7d Do not build statically on oss-fuzz.
6853a8e5 Merge "Move more internal fuzzers to public." into main
9bc09db4 Merge "Convert VP8LFastLog2 to fixed point" into main
0a9f1c19 Convert VP8LFastLog2 to fixed point
db0cb9c2 Move more internal fuzzers to public.
ff2b5b15 Merge "advanced_api_fuzzer.cc: use crop dims in OOM check" into main
c4af79d0 Put 0 at the end of a palette and do not store it.
0ec80aef Delete last references to delta palettization
96d79f84 advanced_api_fuzzer.cc: use crop dims in OOM check
c35c7e02 Fix huffman fuzzer to not leak.
f2fe8dec Bump fuzztest dependency.
9ce982fd Fix fuzz tests to work on oss-fuzz
3ba8af1a Do not escape quotes anymore in build.sh
ea0e121b Allow centipede to be used as a fuzzing engine.
27731afd make VP8I4ModeOffsets & VP8MakeIntra4Preds static
ddd6245e oss-fuzz/build.sh: use heredoc for script creation
50074930 oss-fuzz/build.sh,cosmetics: fix indent
20e92f7d Limit the possible fuzz engines.
4f200de5 Switch public fuzz tests to fuzztest.
64186bb3 Add huffman_fuzzer to .gitignore
0905f61c Move build script from oss-fuzz repo to here.
e8678758 Fix link to Javascript documentation
5e5b8f0c Fix SSE2 Transform_AC3 function name
45129ee0 Revert "Check all the rows."
ee26766a Check all the rows.
7ec51c59 Increase the transform bits if possible.
3cd16fd3 Revert "Increase the transform bits if possible."
971a03d8 Increase the transform bits if possible.
1bf198a2 Allow transform_bits to be different during encoding.
1e462ca8 Define MAX_TRANSFORM_BITS according to the specification.
64d1ec23 Use (MIN/NUM)_(TRANSFORM/HUFFMAN)_BITS where appropriate
a90160e1 Refactor histograms in predictors.
a7aa7525 Fix some function declarations
68ff4e1e Merge "jpegdec: add a hint for EOF/READ errors" into main
79e7968a jpegdec: add a hint for EOF/READ errors
d33455cd man/*: s/BUGS/REPORTING BUGS/
a67ff735 normalize example exit status
edc28909 upsampling_{neon,sse41}: fix int sanitizer warning
3cada4ce ImgIoUtilReadFile: check ftell() return
dc950585 Merge tag 'v1.4.0'
845d5476 update ChangeLog (tag: v1.4.0, origin/1.4.0)
8a6a55bb update NEWS
cf7c5a5d provide a way to opt-out/override WEBP_NODISCARD
cc34288a update ChangeLog (tag: v1.4.0-rc1)

View File

@ -32,7 +32,7 @@ PLATFORM_LDFLAGS = /SAFESEH
NOLOGO = /nologo
CCNODBG = cl.exe $(NOLOGO) /O2 /DNDEBUG
CCDEBUG = cl.exe $(NOLOGO) /Od /Zi /D_DEBUG /RTC1
CFLAGS = /I. /Isrc $(NOLOGO) /MP /W3 /EHsc /c
CFLAGS = /I. /Isrc $(NOLOGO) /W3 /EHsc /c
CFLAGS = $(CFLAGS) /DWIN32 /D_CRT_SECURE_NO_WARNINGS /DWIN32_LEAN_AND_MEAN
LDFLAGS = /LARGEADDRESSAWARE /MANIFEST:EMBED /NXCOMPAT /DYNAMICBASE
LDFLAGS = $(LDFLAGS) $(PLATFORM_LDFLAGS)
@ -231,7 +231,6 @@ DSP_DEC_OBJS = \
$(DIROBJ)\dsp\lossless_neon.obj \
$(DIROBJ)\dsp\lossless_sse2.obj \
$(DIROBJ)\dsp\lossless_sse41.obj \
$(DIROBJ)\dsp\lossless_avx2.obj \
$(DIROBJ)\dsp\rescaler.obj \
$(DIROBJ)\dsp\rescaler_mips32.obj \
$(DIROBJ)\dsp\rescaler_mips_dsp_r2.obj \
@ -271,7 +270,6 @@ DSP_ENC_OBJS = \
$(DIROBJ)\dsp\lossless_enc_neon.obj \
$(DIROBJ)\dsp\lossless_enc_sse2.obj \
$(DIROBJ)\dsp\lossless_enc_sse41.obj \
$(DIROBJ)\dsp\lossless_enc_avx2.obj \
$(DIROBJ)\dsp\ssim.obj \
$(DIROBJ)\dsp\ssim_sse2.obj \
@ -395,7 +393,7 @@ $(DIRBIN)\dwebp.exe: $(IMAGEIO_UTIL_OBJS)
$(DIRBIN)\dwebp.exe: $(LIBWEBPDEMUX)
$(DIRBIN)\gif2webp.exe: $(DIROBJ)\examples\gif2webp.obj $(EX_GIF_DEC_OBJS)
$(DIRBIN)\gif2webp.exe: $(EX_UTIL_OBJS) $(IMAGEIO_UTIL_OBJS) $(LIBWEBPMUX)
$(DIRBIN)\gif2webp.exe: $(LIBWEBP) $(LIBSHARPYUV)
$(DIRBIN)\gif2webp.exe: $(LIBWEBP)
$(DIRBIN)\vwebp.exe: $(DIROBJ)\examples\vwebp.obj $(EX_UTIL_OBJS)
$(DIRBIN)\vwebp.exe: $(IMAGEIO_UTIL_OBJS) $(LIBWEBPDEMUX) $(LIBWEBP)
$(DIRBIN)\vwebp_sdl.exe: $(DIROBJ)\extras\vwebp_sdl.obj

35
NEWS
View File

@ -1,38 +1,3 @@
- 6/30/2025 version 1.6.0
This is a binary compatible release.
API changes:
- libwebp: WebPValidateDecoderConfig
* additional x86 (AVX2, SSE2), general optimizations and compression
improvements for lossless
* `-mt` returns same results as single-threaded lossless (regressed in
1.5.0, #426506716)
* miscellaneous warning, bug & build fixes (#393104377, #397130631,
#398288323, #398066379, #427503509)
Tool updates:
* cwebp can restrict the use of `-resize` with `-resize_mode` (#405437935)
- 12/19/2024 version 1.5.0
This is a binary compatible release.
API changes:
- `cross_color_transform_bits` added to WebPAuxStats
* minor lossless encoder speed and compression improvements
* lossless encoding does not use floats anymore
* additional Arm optimizations for lossy & lossless + general code generation
improvements
* improvements to WASM performance (#643)
* improvements and corrections in webp-container-spec.txt and
webp-lossless-bitstream-spec.txt (#646, #355607636)
* further security related hardening and increased fuzzing coverage w/fuzztest
(oss-fuzz: #382816119, #70112, #70102, #69873, #69825, #69508, #69208)
* miscellaneous warning, bug & build fixes (#499, #562, #381372617,
#381109771, #42340561, #375011696, #372109644, chromium: #334120888)
Tool updates:
* gif2webp: add -sharp_yuv & -near_lossless
* img2webp: add -exact & -noexact
* exit codes normalized; running an example program with no
arguments will output its help and exit with an error (#42340557,
#381372617)
- 4/12/2024: version 1.4.0
This is a binary compatible release.
* API changes:

View File

@ -7,7 +7,7 @@
\__\__/\____/\_____/__/ ____ ___
/ _/ / \ \ / _ \/ _/
/ \_/ / / \ \ __/ \__
\____/____/\_____/_____/____/v1.6.0
\____/____/\_____/_____/____/v1.4.0
```
WebP codec is a library to encode and decode images in WebP format. This package

View File

@ -94,9 +94,6 @@
/* Set to 1 if SSE4.1 is supported */
#cmakedefine WEBP_HAVE_SSE41 1
/* Set to 1 if AVX2 is supported */
#cmakedefine WEBP_HAVE_AVX2 1
/* Set to 1 if TIFF library is installed */
#cmakedefine WEBP_HAVE_TIFF 1

View File

@ -38,9 +38,9 @@ function(webp_check_compiler_flag WEBP_SIMD_FLAG ENABLE_SIMD)
endfunction()
# those are included in the names of WEBP_USE_* in c++ code.
set(WEBP_SIMD_FLAGS "AVX2;SSE41;SSE2;MIPS32;MIPS_DSP_R2;NEON;MSA")
set(WEBP_SIMD_FLAGS "SSE41;SSE2;MIPS32;MIPS_DSP_R2;NEON;MSA")
set(WEBP_SIMD_FILE_EXTENSIONS
"_avx2.c;_sse41.c;_sse2.c;_mips32.c;_mips_dsp_r2.c;_neon.c;_msa.c")
"_sse41.c;_sse2.c;_mips32.c;_mips_dsp_r2.c;_neon.c;_msa.c")
if(MSVC AND CMAKE_C_COMPILER_ID STREQUAL "MSVC")
# With at least Visual Studio 12 (2013)+ /arch is not necessary to build SSE2
# or SSE4 code unless a lesser /arch is forced. MSVC does not have a SSE4
@ -50,12 +50,12 @@ if(MSVC AND CMAKE_C_COMPILER_ID STREQUAL "MSVC")
if(MSVC_VERSION GREATER_EQUAL 1800 AND NOT CMAKE_C_FLAGS MATCHES "/arch:")
set(SIMD_ENABLE_FLAGS)
else()
set(SIMD_ENABLE_FLAGS "/arch:AVX2;/arch:AVX;/arch:SSE2;;;;")
set(SIMD_ENABLE_FLAGS "/arch:AVX;/arch:SSE2;;;;")
endif()
set(SIMD_DISABLE_FLAGS)
else()
set(SIMD_ENABLE_FLAGS "-mavx2;-msse4.1;-msse2;-mips32;-mdspr2;-mfpu=neon;-mmsa")
set(SIMD_DISABLE_FLAGS "-mno-avx2;-mno-sse4.1;-mno-sse2;;-mno-dspr2;;-mno-msa")
set(SIMD_ENABLE_FLAGS "-msse4.1;-msse2;-mips32;-mdspr2;-mfpu=neon;-mmsa")
set(SIMD_DISABLE_FLAGS "-mno-sse4.1;-mno-sse2;;-mno-dspr2;;-mno-msa")
endif()
set(WEBP_SIMD_FILES_TO_INCLUDE)

View File

@ -1,4 +1,4 @@
AC_INIT([libwebp], [1.6.0],
AC_INIT([libwebp], [1.4.0],
[https://issues.webmproject.org],,
[https://developers.google.com/speed/webp])
AC_CANONICAL_HOST
@ -161,25 +161,6 @@ AS_IF([test "$GCC" = "yes" ], [
AC_SUBST([AM_CFLAGS])
dnl === Check for machine specific flags
AC_ARG_ENABLE([avx2],
AS_HELP_STRING([--disable-avx2],
[Disable detection of AVX2 support
@<:@default=auto@:>@]))
AS_IF([test "x$enable_avx2" != "xno" -a "x$enable_sse4_1" != "xno"
-a "x$enable_sse2" != "xno"], [
AVX2_FLAGS="$INTRINSICS_CFLAGS $AVX2_FLAGS"
TEST_AND_ADD_CFLAGS([AVX2_FLAGS], [-mavx2])
AS_IF([test -n "$AVX2_FLAGS"], [
SAVED_CFLAGS=$CFLAGS
CFLAGS="$CFLAGS $AVX2_FLAGS"
AC_CHECK_HEADER([immintrin.h],
[AC_DEFINE(WEBP_HAVE_AVX2, [1],
[Set to 1 if AVX2 is supported])],
[AVX2_FLAGS=""])
CFLAGS=$SAVED_CFLAGS])
AC_SUBST([AVX2_FLAGS])])
AC_ARG_ENABLE([sse4.1],
AS_HELP_STRING([--disable-sse4.1],
[Disable detection of SSE4.1 support

View File

@ -202,7 +202,6 @@ config.output.u.RGBA.rgba = (uint8_t*) memory_buffer;
config.output.u.RGBA.stride = scanline_stride;
config.output.u.RGBA.size = total_size_of_the_memory_buffer;
config.output.is_external_memory = 1;
config_error = WebPValidateDecoderConfig(&config); // not mandatory, but useful
// E) Decode the WebP image. There are two variants w.r.t decoding image.
// The first one (E.1) decodes the full image and the second one (E.2) is

View File

@ -65,7 +65,6 @@ Options:
(default: 0 100)
-crop <x> <y> <w> <h> .. crop picture with the given rectangle
-resize <w> <h> ........ resize picture (*after* any cropping)
-resize_mode <string> .. one of: up_only, down_only, always (default)
-mt .................... use multi-threading if available
-low_memory ............ reduce memory usage (slower encoding)
-map <int> ............. print map of extra info
@ -325,7 +324,7 @@ Per-frame options (only used for subsequent images input):
-lossless ............ use lossless mode (default)
-lossy ............... use lossy mode
-q <float> ........... quality
-m <int> ............. compression method (0=fast, 6=slowest), default=4
-m <int> ............. method to use
-exact, -noexact ..... preserve or alter RGB values in transparent area
(default: -noexact, may cause artifacts
with lossy animations)
@ -355,12 +354,8 @@ Options:
-lossy ................. encode image using lossy compression
-mixed ................. for each frame in the image, pick lossy
or lossless compression heuristically
-near_lossless <int> ... use near-lossless image preprocessing
(0..100=off), default=100
-sharp_yuv ............. use sharper (and slower) RGB->YUV conversion
(lossy only)
-q <float> ............. quality factor (0:small..100:big)
-m <int> ............... compression method (0=fast, 6=slowest), default=4
-m <int> ............... compression method (0=fast, 6=slowest)
-min_size .............. minimize output size (default:off)
lossless compression by default; can be
combined with -q, -m, -lossy or -mixed

View File

@ -67,7 +67,7 @@ dwebp_LDADD += ../src/libwebp.la
dwebp_LDADD +=$(PNG_LIBS) $(JPEG_LIBS)
gif2webp_SOURCES = gif2webp.c gifdec.c gifdec.h
gif2webp_CPPFLAGS = $(AM_CPPFLAGS) $(GIF_INCLUDES) -I$(top_srcdir)
gif2webp_CPPFLAGS = $(AM_CPPFLAGS) $(GIF_INCLUDES)
gif2webp_LDADD =
gif2webp_LDADD += libexample_util.la
gif2webp_LDADD += ../imageio/libimageio_util.la

View File

@ -22,7 +22,6 @@
#include "./anim_util.h"
#include "./example_util.h"
#include "./unicode.h"
#include "webp/types.h"
#if defined(_MSC_VER) && _MSC_VER < 1900
#define snprintf _snprintf

View File

@ -15,11 +15,10 @@
#include <stdlib.h>
#include <string.h> // for 'strcmp'.
#include "../imageio/image_enc.h"
#include "./anim_util.h"
#include "./unicode.h"
#include "webp/decode.h"
#include "webp/types.h"
#include "../imageio/image_enc.h"
#include "./unicode.h"
#if defined(_MSC_VER) && _MSC_VER < 1900
#define snprintf _snprintf

View File

@ -19,16 +19,13 @@
#if defined(WEBP_HAVE_GIF)
#include <gif_lib.h>
#endif
#include "webp/format_constants.h"
#include "webp/decode.h"
#include "webp/demux.h"
#include "../imageio/imageio_util.h"
#include "./gifdec.h"
#include "./unicode.h"
#include "./unicode_gif.h"
#include "webp/decode.h"
#include "webp/demux.h"
#include "webp/format_constants.h"
#include "webp/mux_types.h"
#include "webp/types.h"
#if defined(_MSC_VER) && _MSC_VER < 1900
#define snprintf _snprintf
@ -774,7 +771,6 @@ void GetDiffAndPSNR(const uint8_t rgba1[], const uint8_t rgba2[],
*psnr = 99.; // PSNR when images are identical.
} else {
sse /= stride * height;
assert(sse != 0.0);
*psnr = 4.3429448 * log(255. * 255. / sse);
}
}

View File

@ -27,10 +27,8 @@
#include "../imageio/webpdec.h"
#include "./stopwatch.h"
#include "./unicode.h"
#include "imageio/metadata.h"
#include "sharpyuv/sharpyuv.h"
#include "webp/encode.h"
#include "webp/types.h"
#ifndef WEBP_DLL
#ifdef __cplusplus
@ -517,37 +515,6 @@ static int WriteWebPWithMetadata(FILE* const out,
return (fwrite(webp, webp_size, 1, out) == 1);
}
//------------------------------------------------------------------------------
// Resize
enum {
RESIZE_MODE_DOWN_ONLY,
RESIZE_MODE_UP_ONLY,
RESIZE_MODE_ALWAYS,
RESIZE_MODE_DEFAULT = RESIZE_MODE_ALWAYS
};
static void ApplyResizeMode(const int resize_mode,
const WebPPicture* const pic,
int* const resize_w, int* const resize_h) {
const int src_w = pic->width;
const int src_h = pic->height;
const int dst_w = *resize_w;
const int dst_h = *resize_h;
if (resize_mode == RESIZE_MODE_DOWN_ONLY) {
if ((dst_w == 0 && src_h <= dst_h) ||
(dst_h == 0 && src_w <= dst_w) ||
(src_w <= dst_w && src_h <= dst_h)) {
*resize_w = *resize_h = 0;
}
} else if (resize_mode == RESIZE_MODE_UP_ONLY) {
if (src_w >= dst_w && src_h >= dst_h) {
*resize_w = *resize_h = 0;
}
}
}
//------------------------------------------------------------------------------
static int ProgressReport(int percent, const WebPPicture* const picture) {
@ -616,8 +583,6 @@ static void HelpLong(void) {
" (default: 0 100)\n");
printf(" -crop <x> <y> <w> <h> .. crop picture with the given rectangle\n");
printf(" -resize <w> <h> ........ resize picture (*after* any cropping)\n");
printf(" -resize_mode <string> .. one of: up_only, down_only,"
" always (default)\n");
printf(" -mt .................... use multi-threading if available\n");
printf(" -low_memory ............ reduce memory usage (slower encoding)\n");
printf(" -map <int> ............. print map of extra info\n");
@ -705,7 +670,6 @@ int main(int argc, const char* argv[]) {
uint32_t background_color = 0xffffffu;
int crop = 0, crop_x = 0, crop_y = 0, crop_w = 0, crop_h = 0;
int resize_w = 0, resize_h = 0;
int resize_mode = RESIZE_MODE_DEFAULT;
int lossless_preset = 6;
int use_lossless_preset = -1; // -1=unset, 0=don't use, 1=use it
int show_progress = 0;
@ -734,7 +698,7 @@ int main(int argc, const char* argv[]) {
if (argc == 1) {
HelpShort();
FREE_WARGV_AND_RETURN(EXIT_FAILURE);
FREE_WARGV_AND_RETURN(EXIT_SUCCESS);
}
for (c = 1; c < argc; ++c) {
@ -873,18 +837,6 @@ int main(int argc, const char* argv[]) {
} else if (!strcmp(argv[c], "-resize") && c + 2 < argc) {
resize_w = ExUtilGetInt(argv[++c], 0, &parse_error);
resize_h = ExUtilGetInt(argv[++c], 0, &parse_error);
} else if (!strcmp(argv[c], "-resize_mode") && c + 1 < argc) {
++c;
if (!strcmp(argv[c], "down_only")) {
resize_mode = RESIZE_MODE_DOWN_ONLY;
} else if (!strcmp(argv[c], "up_only")) {
resize_mode = RESIZE_MODE_UP_ONLY;
} else if (!strcmp(argv[c], "always")) {
resize_mode = RESIZE_MODE_ALWAYS;
} else {
fprintf(stderr, "Error! Unrecognized resize mode: %s\n", argv[c]);
goto Error;
}
#ifndef WEBP_DLL
} else if (!strcmp(argv[c], "-noasm")) {
VP8GetCPUInfo = NULL;
@ -1105,7 +1057,6 @@ int main(int argc, const char* argv[]) {
goto Error;
}
}
ApplyResizeMode(resize_mode, &picture, &resize_w, &resize_h);
if ((resize_w | resize_h) > 0) {
WebPPicture picture_no_alpha;
if (config.exact) {

View File

@ -25,8 +25,6 @@
#include "../imageio/webpdec.h"
#include "./stopwatch.h"
#include "./unicode.h"
#include "webp/decode.h"
#include "webp/types.h"
static int verbose = 0;
static int quiet = 0;

View File

@ -17,9 +17,8 @@
#include <stdlib.h>
#include <string.h>
#include "../imageio/imageio_util.h"
#include "webp/mux_types.h"
#include "webp/types.h"
#include "../imageio/imageio_util.h"
//------------------------------------------------------------------------------
// String parsing
@ -67,17 +66,17 @@ float ExUtilGetFloat(const char* const v, int* const error) {
static void ResetCommandLineArguments(int argc, const char* argv[],
CommandLineArguments* const args) {
assert(args != NULL);
args->argc = argc;
args->argv = argv;
args->own_argv = 0;
WebPDataInit(&args->argv_data);
args->argc_ = argc;
args->argv_ = argv;
args->own_argv_ = 0;
WebPDataInit(&args->argv_data_);
}
void ExUtilDeleteCommandLineArguments(CommandLineArguments* const args) {
if (args != NULL) {
if (args->own_argv) {
WebPFree((void*)args->argv);
WebPDataClear(&args->argv_data);
if (args->own_argv_) {
WebPFree((void*)args->argv_);
WebPDataClear(&args->argv_data_);
}
ResetCommandLineArguments(0, NULL, args);
}
@ -99,18 +98,18 @@ int ExUtilInitCommandLineArguments(int argc, const char* argv[],
return 0;
#endif
if (!ExUtilReadFileToWebPData(argv[0], &args->argv_data)) {
if (!ExUtilReadFileToWebPData(argv[0], &args->argv_data_)) {
return 0;
}
args->own_argv = 1;
args->argv = (const char**)WebPMalloc(MAX_ARGC * sizeof(*args->argv));
if (args->argv == NULL) {
args->own_argv_ = 1;
args->argv_ = (const char**)WebPMalloc(MAX_ARGC * sizeof(*args->argv_));
if (args->argv_ == NULL) {
ExUtilDeleteCommandLineArguments(args);
return 0;
}
argc = 0;
for (cur = strtok((char*)args->argv_data.bytes, sep);
for (cur = strtok((char*)args->argv_data_.bytes, sep);
cur != NULL;
cur = strtok(NULL, sep)) {
if (argc == MAX_ARGC) {
@ -119,9 +118,9 @@ int ExUtilInitCommandLineArguments(int argc, const char* argv[],
return 0;
}
assert(strlen(cur) != 0);
args->argv[argc++] = cur;
args->argv_[argc++] = cur;
}
args->argc = argc;
args->argc_ = argc;
}
return 1;
}

View File

@ -45,10 +45,10 @@ int ExUtilReadFileToWebPData(const char* const filename,
// Command-line arguments
typedef struct {
int argc;
const char** argv;
WebPData argv_data;
int own_argv;
int argc_;
const char** argv_;
WebPData argv_data_;
int own_argv_;
} CommandLineArguments;
// Initializes the structure from the command-line parameters. If there is

View File

@ -28,7 +28,6 @@
#endif
#include <gif_lib.h>
#include "sharpyuv/sharpyuv.h"
#include "webp/encode.h"
#include "webp/mux.h"
#include "../examples/example_util.h"
@ -71,14 +70,8 @@ static void Help(void) {
printf(" -lossy ................. encode image using lossy compression\n");
printf(" -mixed ................. for each frame in the image, pick lossy\n"
" or lossless compression heuristically\n");
printf(" -near_lossless <int> ... use near-lossless image preprocessing\n"
" (0..100=off), default=100\n");
printf(" -sharp_yuv ............. use sharper (and slower) RGB->YUV "
"conversion\n"
" (lossy only)\n");
printf(" -q <float> ............. quality factor (0:small..100:big)\n");
printf(" -m <int> ............... compression method (0=fast, 6=slowest), "
"default=4\n");
printf(" -m <int> ............... compression method (0=fast, 6=slowest)\n");
printf(" -min_size .............. minimize output size (default:off)\n"
" lossless compression by default; can be\n"
" combined with -q, -m, -lossy or -mixed\n"
@ -158,7 +151,7 @@ int main(int argc, const char* argv[]) {
if (argc == 1) {
Help();
FREE_WARGV_AND_RETURN(EXIT_FAILURE);
FREE_WARGV_AND_RETURN(EXIT_SUCCESS);
}
for (c = 1; c < argc; ++c) {
@ -173,10 +166,6 @@ int main(int argc, const char* argv[]) {
} else if (!strcmp(argv[c], "-mixed")) {
enc_options.allow_mixed = 1;
config.lossless = 0;
} else if (!strcmp(argv[c], "-near_lossless") && c < argc - 1) {
config.near_lossless = ExUtilGetInt(argv[++c], 0, &parse_error);
} else if (!strcmp(argv[c], "-sharp_yuv")) {
config.use_sharp_yuv = 1;
} else if (!strcmp(argv[c], "-loop_compatibility")) {
loop_compatibility = 1;
} else if (!strcmp(argv[c], "-q") && c < argc - 1) {
@ -237,13 +226,10 @@ int main(int argc, const char* argv[]) {
} else if (!strcmp(argv[c], "-version")) {
const int enc_version = WebPGetEncoderVersion();
const int mux_version = WebPGetMuxVersion();
const int sharpyuv_version = SharpYuvGetVersion();
printf("WebP Encoder version: %d.%d.%d\nWebP Mux version: %d.%d.%d\n",
(enc_version >> 16) & 0xff, (enc_version >> 8) & 0xff,
enc_version & 0xff, (mux_version >> 16) & 0xff,
(mux_version >> 8) & 0xff, mux_version & 0xff);
printf("libsharpyuv: %d.%d.%d\n", (sharpyuv_version >> 24) & 0xff,
(sharpyuv_version >> 16) & 0xffff, sharpyuv_version & 0xff);
FREE_WARGV_AND_RETURN(EXIT_SUCCESS);
} else if (!strcmp(argv[c], "-quiet")) {
quiet = 1;

View File

@ -19,7 +19,6 @@
#include <string.h>
#include "webp/encode.h"
#include "webp/types.h"
#include "webp/mux_types.h"
#define GIF_TRANSPARENT_COLOR 0x00000000u

View File

@ -13,7 +13,6 @@
#define WEBP_EXAMPLES_GIFDEC_H_
#include <stdio.h>
#include "webp/types.h"
#ifdef HAVE_CONFIG_H

View File

@ -31,8 +31,6 @@
#include "sharpyuv/sharpyuv.h"
#include "webp/encode.h"
#include "webp/mux.h"
#include "webp/mux_types.h"
#include "webp/types.h"
//------------------------------------------------------------------------------
@ -64,8 +62,7 @@ static void Help(void) {
printf(" -lossless ............ use lossless mode (default)\n");
printf(" -lossy ............... use lossy mode\n");
printf(" -q <float> ........... quality\n");
printf(" -m <int> ............. compression method (0=fast, 6=slowest), "
"default=4\n");
printf(" -m <int> ............. method to use\n");
printf(" -exact, -noexact ..... preserve or alter RGB values in transparent "
"area\n"
" (default: -noexact, may cause artifacts\n"
@ -153,7 +150,6 @@ int main(int argc, const char* argv[]) {
WebPData webp_data;
int c;
int have_input = 0;
int last_input_index = 0;
CommandLineArguments cmd_args;
int ok;
@ -162,8 +158,8 @@ int main(int argc, const char* argv[]) {
ok = ExUtilInitCommandLineArguments(argc - 1, argv + 1, &cmd_args);
if (!ok) FREE_WARGV_AND_RETURN(EXIT_FAILURE);
argc = cmd_args.argc;
argv = cmd_args.argv;
argc = cmd_args.argc_;
argv = cmd_args.argv_;
WebPDataInit(&webp_data);
if (!WebPAnimEncoderOptionsInit(&anim_config) ||
@ -232,8 +228,6 @@ int main(int argc, const char* argv[]) {
}
if (!have_input) {
fprintf(stderr, "No input file(s) for generating animation!\n");
ok = 0;
Help();
goto End;
}
@ -282,7 +276,6 @@ int main(int argc, const char* argv[]) {
// read next input image
pic.use_argb = 1;
ok = ReadImage((const char*)GET_WARGV_SHIFTED(argv, c), &pic);
last_input_index = c;
if (!ok) goto End;
if (enc == NULL) {
@ -321,13 +314,6 @@ int main(int argc, const char* argv[]) {
++pic_num;
}
for (c = last_input_index + 1; c < argc; ++c) {
if (argv[c] != NULL) {
fprintf(stderr, "Warning: unused option [%s]!"
" Frame options go before the input frame.\n", argv[c]);
}
}
// add a last fake frame to signal the last duration
ok = ok && WebPAnimEncoderAdd(enc, NULL, timestamp_ms, NULL);
ok = ok && WebPAnimEncoderAssemble(enc, &webp_data);

View File

@ -568,7 +568,7 @@ int main(int argc, char* argv[]) {
if (kParams.file_name == NULL) {
printf("missing input file!!\n");
Help();
FREE_WARGV_AND_RETURN(EXIT_FAILURE);
FREE_WARGV_AND_RETURN(EXIT_SUCCESS);
}
if (!ImgIoUtilReadFile(kParams.file_name,

View File

@ -15,7 +15,6 @@
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifdef HAVE_CONFIG_H
#include "webp/config.h"
@ -26,7 +25,6 @@
#include "webp/decode.h"
#include "webp/format_constants.h"
#include "webp/mux_types.h"
#include "webp/types.h"
#if defined(_MSC_VER) && _MSC_VER < 1900
#define snprintf _snprintf
@ -34,17 +32,17 @@
#define LOG_ERROR(MESSAGE) \
do { \
if (webp_info->show_diagnosis) { \
if (webp_info->show_diagnosis_) { \
fprintf(stderr, "Error: %s\n", MESSAGE); \
} \
} while (0)
#define LOG_WARN(MESSAGE) \
do { \
if (webp_info->show_diagnosis) { \
if (webp_info->show_diagnosis_) { \
fprintf(stderr, "Warning: %s\n", MESSAGE); \
} \
++webp_info->num_warnings; \
++webp_info->num_warnings_; \
} while (0)
static const char* const kFormats[3] = {
@ -92,36 +90,36 @@ typedef enum ChunkID {
} ChunkID;
typedef struct {
size_t start;
size_t end;
const uint8_t* buf;
size_t start_;
size_t end_;
const uint8_t* buf_;
} MemBuffer;
typedef struct {
size_t offset;
size_t size;
const uint8_t* payload;
ChunkID id;
size_t offset_;
size_t size_;
const uint8_t* payload_;
ChunkID id_;
} ChunkData;
typedef struct WebPInfo {
int canvas_width;
int canvas_height;
int loop_count;
int num_frames;
int chunk_counts[CHUNK_TYPES];
int anmf_subchunk_counts[3]; // 0 VP8; 1 VP8L; 2 ALPH.
uint32_t bgcolor;
int feature_flags;
int has_alpha;
int canvas_width_;
int canvas_height_;
int loop_count_;
int num_frames_;
int chunk_counts_[CHUNK_TYPES];
int anmf_subchunk_counts_[3]; // 0 VP8; 1 VP8L; 2 ALPH.
uint32_t bgcolor_;
int feature_flags_;
int has_alpha_;
// Used for parsing ANMF chunks.
int frame_width, frame_height;
size_t anim_frame_data_size;
int is_processing_anim_frame, seen_alpha_subchunk, seen_image_subchunk;
int frame_width_, frame_height_;
size_t anim_frame_data_size_;
int is_processing_anim_frame_, seen_alpha_subchunk_, seen_image_subchunk_;
// Print output control.
int quiet, show_diagnosis, show_summary;
int num_warnings;
int parse_bitstream;
int quiet_, show_diagnosis_, show_summary_;
int num_warnings_;
int parse_bitstream_;
} WebPInfo;
static void WebPInfoInit(WebPInfo* const webp_info) {
@ -187,25 +185,25 @@ static int ReadFileToWebPData(const char* const filename,
// MemBuffer object.
static void InitMemBuffer(MemBuffer* const mem, const WebPData* webp_data) {
mem->buf = webp_data->bytes;
mem->start = 0;
mem->end = webp_data->size;
mem->buf_ = webp_data->bytes;
mem->start_ = 0;
mem->end_ = webp_data->size;
}
static size_t MemDataSize(const MemBuffer* const mem) {
return (mem->end - mem->start);
return (mem->end_ - mem->start_);
}
static const uint8_t* GetBuffer(MemBuffer* const mem) {
return mem->buf + mem->start;
return mem->buf_ + mem->start_;
}
static void Skip(MemBuffer* const mem, size_t size) {
mem->start += size;
mem->start_ += size;
}
static uint32_t ReadMemBufLE32(MemBuffer* const mem) {
const uint8_t* const data = mem->buf + mem->start;
const uint8_t* const data = mem->buf_ + mem->start_;
const uint32_t val = GetLE32(data);
assert(MemDataSize(mem) >= 4);
Skip(mem, 4);
@ -336,8 +334,8 @@ static WebPInfoStatus ParseLossyFilterHeader(const WebPInfo* const webp_info,
static WebPInfoStatus ParseLossyHeader(const ChunkData* const chunk_data,
const WebPInfo* const webp_info) {
const uint8_t* data = chunk_data->payload;
size_t data_size = chunk_data->size - CHUNK_HEADER_SIZE;
const uint8_t* data = chunk_data->payload_;
size_t data_size = chunk_data->size_ - CHUNK_HEADER_SIZE;
const uint32_t bits = (uint32_t)data[0] | (data[1] << 8) | (data[2] << 16);
const int key_frame = !(bits & 1);
const int profile = (bits >> 1) & 7;
@ -349,7 +347,7 @@ static WebPInfoStatus ParseLossyHeader(const ChunkData* const chunk_data,
int colorspace, clamp_type;
printf(" Parsing lossy bitstream...\n");
// Calling WebPGetFeatures() in ProcessImageChunk() should ensure this.
assert(chunk_data->size >= CHUNK_HEADER_SIZE + 10);
assert(chunk_data->size_ >= CHUNK_HEADER_SIZE + 10);
if (profile > 3) {
LOG_ERROR("Unknown profile.");
return WEBP_INFO_BITSTREAM_ERROR;
@ -507,8 +505,8 @@ static WebPInfoStatus ParseLosslessTransform(WebPInfo* const webp_info,
static WebPInfoStatus ParseLosslessHeader(const ChunkData* const chunk_data,
WebPInfo* const webp_info) {
const uint8_t* data = chunk_data->payload;
size_t data_size = chunk_data->size - CHUNK_HEADER_SIZE;
const uint8_t* data = chunk_data->payload_;
size_t data_size = chunk_data->size_ - CHUNK_HEADER_SIZE;
uint64_t bit_position = 0;
uint64_t* const bit_pos = &bit_position;
WebPInfoStatus status;
@ -543,8 +541,8 @@ static WebPInfoStatus ParseLosslessHeader(const ChunkData* const chunk_data,
static WebPInfoStatus ParseAlphaHeader(const ChunkData* const chunk_data,
WebPInfo* const webp_info) {
const uint8_t* data = chunk_data->payload;
size_t data_size = chunk_data->size - CHUNK_HEADER_SIZE;
const uint8_t* data = chunk_data->payload_;
size_t data_size = chunk_data->size_ - CHUNK_HEADER_SIZE;
if (data_size <= ALPHA_HEADER_LEN) {
LOG_ERROR("Truncated ALPH chunk.");
return WEBP_INFO_TRUNCATED_DATA;
@ -609,14 +607,14 @@ static WebPInfoStatus ParseRIFFHeader(WebPInfo* const webp_info,
return WEBP_INFO_PARSE_ERROR;
}
riff_size += CHUNK_HEADER_SIZE;
if (!webp_info->quiet) {
if (!webp_info->quiet_) {
printf("RIFF HEADER:\n");
printf(" File size: %6d\n", (int)riff_size);
}
if (riff_size < mem->end) {
if (riff_size < mem->end_) {
LOG_WARN("RIFF size is smaller than the file size.");
mem->end = riff_size;
} else if (riff_size > mem->end) {
mem->end_ = riff_size;
} else if (riff_size > mem->end_) {
LOG_ERROR("Truncated data detected when parsing RIFF payload.");
return WEBP_INFO_TRUNCATED_DATA;
}
@ -632,7 +630,7 @@ static WebPInfoStatus ParseChunk(const WebPInfo* const webp_info,
LOG_ERROR("Truncated data detected when parsing chunk header.");
return WEBP_INFO_TRUNCATED_DATA;
} else {
const size_t chunk_start_offset = mem->start;
const size_t chunk_start_offset = mem->start_;
const uint32_t fourcc = ReadMemBufLE32(mem);
const uint32_t payload_size = ReadMemBufLE32(mem);
const uint32_t payload_size_padded = payload_size + (payload_size & 1);
@ -649,11 +647,11 @@ static WebPInfoStatus ParseChunk(const WebPInfo* const webp_info,
for (i = 0; i < CHUNK_TYPES; ++i) {
if (kWebPChunkTags[i] == fourcc) break;
}
chunk_data->offset = chunk_start_offset;
chunk_data->size = chunk_size;
chunk_data->id = (ChunkID)i;
chunk_data->payload = GetBuffer(mem);
if (chunk_data->id == CHUNK_ANMF) {
chunk_data->offset_ = chunk_start_offset;
chunk_data->size_ = chunk_size;
chunk_data->id_ = (ChunkID)i;
chunk_data->payload_ = GetBuffer(mem);
if (chunk_data->id_ == CHUNK_ANMF) {
if (payload_size != payload_size_padded) {
LOG_ERROR("ANMF chunk size should always be even.");
return WEBP_INFO_PARSE_ERROR;
@ -672,39 +670,39 @@ static WebPInfoStatus ParseChunk(const WebPInfo* const webp_info,
static WebPInfoStatus ProcessVP8XChunk(const ChunkData* const chunk_data,
WebPInfo* const webp_info) {
const uint8_t* data = chunk_data->payload;
if (webp_info->chunk_counts[CHUNK_VP8] ||
webp_info->chunk_counts[CHUNK_VP8L] ||
webp_info->chunk_counts[CHUNK_VP8X]) {
const uint8_t* data = chunk_data->payload_;
if (webp_info->chunk_counts_[CHUNK_VP8] ||
webp_info->chunk_counts_[CHUNK_VP8L] ||
webp_info->chunk_counts_[CHUNK_VP8X]) {
LOG_ERROR("Already seen a VP8/VP8L/VP8X chunk when parsing VP8X chunk.");
return WEBP_INFO_PARSE_ERROR;
}
if (chunk_data->size != VP8X_CHUNK_SIZE + CHUNK_HEADER_SIZE) {
if (chunk_data->size_ != VP8X_CHUNK_SIZE + CHUNK_HEADER_SIZE) {
LOG_ERROR("Corrupted VP8X chunk.");
return WEBP_INFO_PARSE_ERROR;
}
++webp_info->chunk_counts[CHUNK_VP8X];
webp_info->feature_flags = *data;
++webp_info->chunk_counts_[CHUNK_VP8X];
webp_info->feature_flags_ = *data;
data += 4;
webp_info->canvas_width = 1 + ReadLE24(&data);
webp_info->canvas_height = 1 + ReadLE24(&data);
if (!webp_info->quiet) {
webp_info->canvas_width_ = 1 + ReadLE24(&data);
webp_info->canvas_height_ = 1 + ReadLE24(&data);
if (!webp_info->quiet_) {
printf(" ICCP: %d\n Alpha: %d\n EXIF: %d\n XMP: %d\n Animation: %d\n",
(webp_info->feature_flags & ICCP_FLAG) != 0,
(webp_info->feature_flags & ALPHA_FLAG) != 0,
(webp_info->feature_flags & EXIF_FLAG) != 0,
(webp_info->feature_flags & XMP_FLAG) != 0,
(webp_info->feature_flags & ANIMATION_FLAG) != 0);
(webp_info->feature_flags_ & ICCP_FLAG) != 0,
(webp_info->feature_flags_ & ALPHA_FLAG) != 0,
(webp_info->feature_flags_ & EXIF_FLAG) != 0,
(webp_info->feature_flags_ & XMP_FLAG) != 0,
(webp_info->feature_flags_ & ANIMATION_FLAG) != 0);
printf(" Canvas size %d x %d\n",
webp_info->canvas_width, webp_info->canvas_height);
webp_info->canvas_width_, webp_info->canvas_height_);
}
if (webp_info->canvas_width > MAX_CANVAS_SIZE) {
if (webp_info->canvas_width_ > MAX_CANVAS_SIZE) {
LOG_WARN("Canvas width is out of range in VP8X chunk.");
}
if (webp_info->canvas_height > MAX_CANVAS_SIZE) {
if (webp_info->canvas_height_ > MAX_CANVAS_SIZE) {
LOG_WARN("Canvas height is out of range in VP8X chunk.");
}
if ((uint64_t)webp_info->canvas_width * webp_info->canvas_height >
if ((uint64_t)webp_info->canvas_width_ * webp_info->canvas_height_ >
MAX_IMAGE_AREA) {
LOG_WARN("Canvas area is out of range in VP8X chunk.");
}
@ -713,27 +711,27 @@ static WebPInfoStatus ProcessVP8XChunk(const ChunkData* const chunk_data,
static WebPInfoStatus ProcessANIMChunk(const ChunkData* const chunk_data,
WebPInfo* const webp_info) {
const uint8_t* data = chunk_data->payload;
if (!webp_info->chunk_counts[CHUNK_VP8X]) {
const uint8_t* data = chunk_data->payload_;
if (!webp_info->chunk_counts_[CHUNK_VP8X]) {
LOG_ERROR("ANIM chunk detected before VP8X chunk.");
return WEBP_INFO_PARSE_ERROR;
}
if (chunk_data->size != ANIM_CHUNK_SIZE + CHUNK_HEADER_SIZE) {
if (chunk_data->size_ != ANIM_CHUNK_SIZE + CHUNK_HEADER_SIZE) {
LOG_ERROR("Corrupted ANIM chunk.");
return WEBP_INFO_PARSE_ERROR;
}
webp_info->bgcolor = ReadLE32(&data);
webp_info->loop_count = ReadLE16(&data);
++webp_info->chunk_counts[CHUNK_ANIM];
if (!webp_info->quiet) {
webp_info->bgcolor_ = ReadLE32(&data);
webp_info->loop_count_ = ReadLE16(&data);
++webp_info->chunk_counts_[CHUNK_ANIM];
if (!webp_info->quiet_) {
printf(" Background color:(ARGB) %02x %02x %02x %02x\n",
(webp_info->bgcolor >> 24) & 0xff,
(webp_info->bgcolor >> 16) & 0xff,
(webp_info->bgcolor >> 8) & 0xff,
webp_info->bgcolor & 0xff);
printf(" Loop count : %d\n", webp_info->loop_count);
(webp_info->bgcolor_ >> 24) & 0xff,
(webp_info->bgcolor_ >> 16) & 0xff,
(webp_info->bgcolor_ >> 8) & 0xff,
webp_info->bgcolor_ & 0xff);
printf(" Loop count : %d\n", webp_info->loop_count_);
}
if (webp_info->loop_count > MAX_LOOP_COUNT) {
if (webp_info->loop_count_ > MAX_LOOP_COUNT) {
LOG_WARN("Loop count is out of range in ANIM chunk.");
}
return WEBP_INFO_OK;
@ -741,17 +739,17 @@ static WebPInfoStatus ProcessANIMChunk(const ChunkData* const chunk_data,
static WebPInfoStatus ProcessANMFChunk(const ChunkData* const chunk_data,
WebPInfo* const webp_info) {
const uint8_t* data = chunk_data->payload;
const uint8_t* data = chunk_data->payload_;
int offset_x, offset_y, width, height, duration, blend, dispose, temp;
if (webp_info->is_processing_anim_frame) {
if (webp_info->is_processing_anim_frame_) {
LOG_ERROR("ANMF chunk detected within another ANMF chunk.");
return WEBP_INFO_PARSE_ERROR;
}
if (!webp_info->chunk_counts[CHUNK_ANIM]) {
if (!webp_info->chunk_counts_[CHUNK_ANIM]) {
LOG_ERROR("ANMF chunk detected before ANIM chunk.");
return WEBP_INFO_PARSE_ERROR;
}
if (chunk_data->size <= CHUNK_HEADER_SIZE + ANMF_CHUNK_SIZE) {
if (chunk_data->size_ <= CHUNK_HEADER_SIZE + ANMF_CHUNK_SIZE) {
LOG_ERROR("Truncated data detected when parsing ANMF chunk.");
return WEBP_INFO_TRUNCATED_DATA;
}
@ -763,8 +761,8 @@ static WebPInfoStatus ProcessANMFChunk(const ChunkData* const chunk_data,
temp = *data;
dispose = temp & 1;
blend = (temp >> 1) & 1;
++webp_info->chunk_counts[CHUNK_ANMF];
if (!webp_info->quiet) {
++webp_info->chunk_counts_[CHUNK_ANMF];
if (!webp_info->quiet_) {
printf(" Offset_X: %d\n Offset_Y: %d\n Width: %d\n Height: %d\n"
" Duration: %d\n Dispose: %d\n Blend: %d\n",
offset_x, offset_y, width, height, duration, dispose, blend);
@ -777,92 +775,92 @@ static WebPInfoStatus ProcessANMFChunk(const ChunkData* const chunk_data,
LOG_ERROR("Invalid offset parameters in ANMF chunk.");
return WEBP_INFO_INVALID_PARAM;
}
if ((uint64_t)offset_x + width > (uint64_t)webp_info->canvas_width ||
(uint64_t)offset_y + height > (uint64_t)webp_info->canvas_height) {
if ((uint64_t)offset_x + width > (uint64_t)webp_info->canvas_width_ ||
(uint64_t)offset_y + height > (uint64_t)webp_info->canvas_height_) {
LOG_ERROR("Frame exceeds canvas in ANMF chunk.");
return WEBP_INFO_INVALID_PARAM;
}
webp_info->is_processing_anim_frame = 1;
webp_info->seen_alpha_subchunk = 0;
webp_info->seen_image_subchunk = 0;
webp_info->frame_width = width;
webp_info->frame_height = height;
webp_info->anim_frame_data_size =
chunk_data->size - CHUNK_HEADER_SIZE - ANMF_CHUNK_SIZE;
webp_info->is_processing_anim_frame_ = 1;
webp_info->seen_alpha_subchunk_ = 0;
webp_info->seen_image_subchunk_ = 0;
webp_info->frame_width_ = width;
webp_info->frame_height_ = height;
webp_info->anim_frame_data_size_ =
chunk_data->size_ - CHUNK_HEADER_SIZE - ANMF_CHUNK_SIZE;
return WEBP_INFO_OK;
}
static WebPInfoStatus ProcessImageChunk(const ChunkData* const chunk_data,
WebPInfo* const webp_info) {
const uint8_t* data = chunk_data->payload - CHUNK_HEADER_SIZE;
const uint8_t* data = chunk_data->payload_ - CHUNK_HEADER_SIZE;
WebPBitstreamFeatures features;
const VP8StatusCode vp8_status =
WebPGetFeatures(data, chunk_data->size, &features);
WebPGetFeatures(data, chunk_data->size_, &features);
if (vp8_status != VP8_STATUS_OK) {
LOG_ERROR("VP8/VP8L bitstream error.");
return WEBP_INFO_BITSTREAM_ERROR;
}
if (!webp_info->quiet) {
if (!webp_info->quiet_) {
assert(features.format >= 0 && features.format <= 2);
printf(" Width: %d\n Height: %d\n Alpha: %d\n Animation: %d\n"
" Format: %s (%d)\n",
features.width, features.height, features.has_alpha,
features.has_animation, kFormats[features.format], features.format);
}
if (webp_info->is_processing_anim_frame) {
++webp_info->anmf_subchunk_counts[chunk_data->id == CHUNK_VP8 ? 0 : 1];
if (chunk_data->id == CHUNK_VP8L && webp_info->seen_alpha_subchunk) {
if (webp_info->is_processing_anim_frame_) {
++webp_info->anmf_subchunk_counts_[chunk_data->id_ == CHUNK_VP8 ? 0 : 1];
if (chunk_data->id_ == CHUNK_VP8L && webp_info->seen_alpha_subchunk_) {
LOG_ERROR("Both VP8L and ALPH sub-chunks are present in an ANMF chunk.");
return WEBP_INFO_PARSE_ERROR;
}
if (webp_info->frame_width != features.width ||
webp_info->frame_height != features.height) {
if (webp_info->frame_width_ != features.width ||
webp_info->frame_height_ != features.height) {
LOG_ERROR("Frame size in VP8/VP8L sub-chunk differs from ANMF header.");
return WEBP_INFO_PARSE_ERROR;
}
if (webp_info->seen_image_subchunk) {
if (webp_info->seen_image_subchunk_) {
LOG_ERROR("Consecutive VP8/VP8L sub-chunks in an ANMF chunk.");
return WEBP_INFO_PARSE_ERROR;
}
webp_info->seen_image_subchunk = 1;
webp_info->seen_image_subchunk_ = 1;
} else {
if (webp_info->chunk_counts[CHUNK_VP8] ||
webp_info->chunk_counts[CHUNK_VP8L]) {
if (webp_info->chunk_counts_[CHUNK_VP8] ||
webp_info->chunk_counts_[CHUNK_VP8L]) {
LOG_ERROR("Multiple VP8/VP8L chunks detected.");
return WEBP_INFO_PARSE_ERROR;
}
if (chunk_data->id == CHUNK_VP8L &&
webp_info->chunk_counts[CHUNK_ALPHA]) {
if (chunk_data->id_ == CHUNK_VP8L &&
webp_info->chunk_counts_[CHUNK_ALPHA]) {
LOG_WARN("Both VP8L and ALPH chunks are detected.");
}
if (webp_info->chunk_counts[CHUNK_ANIM] ||
webp_info->chunk_counts[CHUNK_ANMF]) {
if (webp_info->chunk_counts_[CHUNK_ANIM] ||
webp_info->chunk_counts_[CHUNK_ANMF]) {
LOG_ERROR("VP8/VP8L chunk and ANIM/ANMF chunk are both detected.");
return WEBP_INFO_PARSE_ERROR;
}
if (webp_info->chunk_counts[CHUNK_VP8X]) {
if (webp_info->canvas_width != features.width ||
webp_info->canvas_height != features.height) {
if (webp_info->chunk_counts_[CHUNK_VP8X]) {
if (webp_info->canvas_width_ != features.width ||
webp_info->canvas_height_ != features.height) {
LOG_ERROR("Image size in VP8/VP8L chunk differs from VP8X chunk.");
return WEBP_INFO_PARSE_ERROR;
}
} else {
webp_info->canvas_width = features.width;
webp_info->canvas_height = features.height;
if (webp_info->canvas_width < 1 || webp_info->canvas_height < 1 ||
webp_info->canvas_width > MAX_CANVAS_SIZE ||
webp_info->canvas_height > MAX_CANVAS_SIZE ||
(uint64_t)webp_info->canvas_width * webp_info->canvas_height >
webp_info->canvas_width_ = features.width;
webp_info->canvas_height_ = features.height;
if (webp_info->canvas_width_ < 1 || webp_info->canvas_height_ < 1 ||
webp_info->canvas_width_ > MAX_CANVAS_SIZE ||
webp_info->canvas_height_ > MAX_CANVAS_SIZE ||
(uint64_t)webp_info->canvas_width_ * webp_info->canvas_height_ >
MAX_IMAGE_AREA) {
LOG_WARN("Invalid parameters in VP8/VP8L chunk.");
}
}
++webp_info->chunk_counts[chunk_data->id];
++webp_info->chunk_counts_[chunk_data->id_];
}
++webp_info->num_frames;
webp_info->has_alpha |= features.has_alpha;
if (webp_info->parse_bitstream) {
const int is_lossy = (chunk_data->id == CHUNK_VP8);
++webp_info->num_frames_;
webp_info->has_alpha_ |= features.has_alpha;
if (webp_info->parse_bitstream_) {
const int is_lossy = (chunk_data->id_ == CHUNK_VP8);
const WebPInfoStatus status =
is_lossy ? ParseLossyHeader(chunk_data, webp_info)
: ParseLosslessHeader(chunk_data, webp_info);
@ -873,41 +871,41 @@ static WebPInfoStatus ProcessImageChunk(const ChunkData* const chunk_data,
static WebPInfoStatus ProcessALPHChunk(const ChunkData* const chunk_data,
WebPInfo* const webp_info) {
if (webp_info->is_processing_anim_frame) {
++webp_info->anmf_subchunk_counts[2];
if (webp_info->seen_alpha_subchunk) {
if (webp_info->is_processing_anim_frame_) {
++webp_info->anmf_subchunk_counts_[2];
if (webp_info->seen_alpha_subchunk_) {
LOG_ERROR("Consecutive ALPH sub-chunks in an ANMF chunk.");
return WEBP_INFO_PARSE_ERROR;
}
webp_info->seen_alpha_subchunk = 1;
webp_info->seen_alpha_subchunk_ = 1;
if (webp_info->seen_image_subchunk) {
if (webp_info->seen_image_subchunk_) {
LOG_ERROR("ALPHA sub-chunk detected after VP8 sub-chunk "
"in an ANMF chunk.");
return WEBP_INFO_PARSE_ERROR;
}
} else {
if (webp_info->chunk_counts[CHUNK_ANIM] ||
webp_info->chunk_counts[CHUNK_ANMF]) {
if (webp_info->chunk_counts_[CHUNK_ANIM] ||
webp_info->chunk_counts_[CHUNK_ANMF]) {
LOG_ERROR("ALPHA chunk and ANIM/ANMF chunk are both detected.");
return WEBP_INFO_PARSE_ERROR;
}
if (!webp_info->chunk_counts[CHUNK_VP8X]) {
if (!webp_info->chunk_counts_[CHUNK_VP8X]) {
LOG_ERROR("ALPHA chunk detected before VP8X chunk.");
return WEBP_INFO_PARSE_ERROR;
}
if (webp_info->chunk_counts[CHUNK_VP8]) {
if (webp_info->chunk_counts_[CHUNK_VP8]) {
LOG_ERROR("ALPHA chunk detected after VP8 chunk.");
return WEBP_INFO_PARSE_ERROR;
}
if (webp_info->chunk_counts[CHUNK_ALPHA]) {
if (webp_info->chunk_counts_[CHUNK_ALPHA]) {
LOG_ERROR("Multiple ALPHA chunks detected.");
return WEBP_INFO_PARSE_ERROR;
}
++webp_info->chunk_counts[CHUNK_ALPHA];
++webp_info->chunk_counts_[CHUNK_ALPHA];
}
webp_info->has_alpha = 1;
if (webp_info->parse_bitstream) {
webp_info->has_alpha_ = 1;
if (webp_info->parse_bitstream_) {
const WebPInfoStatus status = ParseAlphaHeader(chunk_data, webp_info);
if (status != WEBP_INFO_OK) return status;
}
@ -917,41 +915,41 @@ static WebPInfoStatus ProcessALPHChunk(const ChunkData* const chunk_data,
static WebPInfoStatus ProcessICCPChunk(const ChunkData* const chunk_data,
WebPInfo* const webp_info) {
(void)chunk_data;
if (!webp_info->chunk_counts[CHUNK_VP8X]) {
if (!webp_info->chunk_counts_[CHUNK_VP8X]) {
LOG_ERROR("ICCP chunk detected before VP8X chunk.");
return WEBP_INFO_PARSE_ERROR;
}
if (webp_info->chunk_counts[CHUNK_VP8] ||
webp_info->chunk_counts[CHUNK_VP8L] ||
webp_info->chunk_counts[CHUNK_ANIM]) {
if (webp_info->chunk_counts_[CHUNK_VP8] ||
webp_info->chunk_counts_[CHUNK_VP8L] ||
webp_info->chunk_counts_[CHUNK_ANIM]) {
LOG_ERROR("ICCP chunk detected after image data.");
return WEBP_INFO_PARSE_ERROR;
}
++webp_info->chunk_counts[CHUNK_ICCP];
++webp_info->chunk_counts_[CHUNK_ICCP];
return WEBP_INFO_OK;
}
static WebPInfoStatus ProcessChunk(const ChunkData* const chunk_data,
WebPInfo* const webp_info) {
WebPInfoStatus status = WEBP_INFO_OK;
ChunkID id = chunk_data->id;
if (chunk_data->id == CHUNK_UNKNOWN) {
ChunkID id = chunk_data->id_;
if (chunk_data->id_ == CHUNK_UNKNOWN) {
char error_message[50];
snprintf(error_message, 50, "Unknown chunk at offset %6d, length %6d",
(int)chunk_data->offset, (int)chunk_data->size);
(int)chunk_data->offset_, (int)chunk_data->size_);
LOG_WARN(error_message);
} else {
if (!webp_info->quiet) {
if (!webp_info->quiet_) {
char tag[4];
uint32_t fourcc = kWebPChunkTags[chunk_data->id];
uint32_t fourcc = kWebPChunkTags[chunk_data->id_];
#ifdef WORDS_BIGENDIAN
fourcc = (fourcc >> 24) | ((fourcc >> 8) & 0xff00) |
((fourcc << 8) & 0xff0000) | (fourcc << 24);
#endif
memcpy(tag, &fourcc, sizeof(tag));
printf("Chunk %c%c%c%c at offset %6d, length %6d\n",
tag[0], tag[1], tag[2], tag[3], (int)chunk_data->offset,
(int)chunk_data->size);
tag[0], tag[1], tag[2], tag[3], (int)chunk_data->offset_,
(int)chunk_data->size_);
}
}
switch (id) {
@ -976,21 +974,21 @@ static WebPInfoStatus ProcessChunk(const ChunkData* const chunk_data,
break;
case CHUNK_EXIF:
case CHUNK_XMP:
++webp_info->chunk_counts[id];
++webp_info->chunk_counts_[id];
break;
case CHUNK_UNKNOWN:
default:
break;
}
if (webp_info->is_processing_anim_frame && id != CHUNK_ANMF) {
if (webp_info->anim_frame_data_size == chunk_data->size) {
if (!webp_info->seen_image_subchunk) {
if (webp_info->is_processing_anim_frame_ && id != CHUNK_ANMF) {
if (webp_info->anim_frame_data_size_ == chunk_data->size_) {
if (!webp_info->seen_image_subchunk_) {
LOG_ERROR("No VP8/VP8L chunk detected in an ANMF chunk.");
return WEBP_INFO_PARSE_ERROR;
}
webp_info->is_processing_anim_frame = 0;
} else if (webp_info->anim_frame_data_size > chunk_data->size) {
webp_info->anim_frame_data_size -= chunk_data->size;
webp_info->is_processing_anim_frame_ = 0;
} else if (webp_info->anim_frame_data_size_ > chunk_data->size_) {
webp_info->anim_frame_data_size_ -= chunk_data->size_;
} else {
LOG_ERROR("Truncated data detected when parsing ANMF chunk.");
return WEBP_INFO_TRUNCATED_DATA;
@ -1000,55 +998,55 @@ static WebPInfoStatus ProcessChunk(const ChunkData* const chunk_data,
}
static WebPInfoStatus Validate(WebPInfo* const webp_info) {
if (webp_info->num_frames < 1) {
if (webp_info->num_frames_ < 1) {
LOG_ERROR("No image/frame detected.");
return WEBP_INFO_MISSING_DATA;
}
if (webp_info->chunk_counts[CHUNK_VP8X]) {
const int iccp = !!(webp_info->feature_flags & ICCP_FLAG);
const int exif = !!(webp_info->feature_flags & EXIF_FLAG);
const int xmp = !!(webp_info->feature_flags & XMP_FLAG);
const int animation = !!(webp_info->feature_flags & ANIMATION_FLAG);
const int alpha = !!(webp_info->feature_flags & ALPHA_FLAG);
if (!alpha && webp_info->has_alpha) {
if (webp_info->chunk_counts_[CHUNK_VP8X]) {
const int iccp = !!(webp_info->feature_flags_ & ICCP_FLAG);
const int exif = !!(webp_info->feature_flags_ & EXIF_FLAG);
const int xmp = !!(webp_info->feature_flags_ & XMP_FLAG);
const int animation = !!(webp_info->feature_flags_ & ANIMATION_FLAG);
const int alpha = !!(webp_info->feature_flags_ & ALPHA_FLAG);
if (!alpha && webp_info->has_alpha_) {
LOG_ERROR("Unexpected alpha data detected.");
return WEBP_INFO_PARSE_ERROR;
}
if (alpha && !webp_info->has_alpha) {
if (alpha && !webp_info->has_alpha_) {
LOG_WARN("Alpha flag is set with no alpha data present.");
}
if (iccp && !webp_info->chunk_counts[CHUNK_ICCP]) {
if (iccp && !webp_info->chunk_counts_[CHUNK_ICCP]) {
LOG_ERROR("Missing ICCP chunk.");
return WEBP_INFO_MISSING_DATA;
}
if (exif && !webp_info->chunk_counts[CHUNK_EXIF]) {
if (exif && !webp_info->chunk_counts_[CHUNK_EXIF]) {
LOG_ERROR("Missing EXIF chunk.");
return WEBP_INFO_MISSING_DATA;
}
if (xmp && !webp_info->chunk_counts[CHUNK_XMP]) {
if (xmp && !webp_info->chunk_counts_[CHUNK_XMP]) {
LOG_ERROR("Missing XMP chunk.");
return WEBP_INFO_MISSING_DATA;
}
if (!iccp && webp_info->chunk_counts[CHUNK_ICCP]) {
if (!iccp && webp_info->chunk_counts_[CHUNK_ICCP]) {
LOG_ERROR("Unexpected ICCP chunk detected.");
return WEBP_INFO_PARSE_ERROR;
}
if (!exif && webp_info->chunk_counts[CHUNK_EXIF]) {
if (!exif && webp_info->chunk_counts_[CHUNK_EXIF]) {
LOG_ERROR("Unexpected EXIF chunk detected.");
return WEBP_INFO_PARSE_ERROR;
}
if (!xmp && webp_info->chunk_counts[CHUNK_XMP]) {
if (!xmp && webp_info->chunk_counts_[CHUNK_XMP]) {
LOG_ERROR("Unexpected XMP chunk detected.");
return WEBP_INFO_PARSE_ERROR;
}
// Incomplete animation frame.
if (webp_info->is_processing_anim_frame) return WEBP_INFO_MISSING_DATA;
if (!animation && webp_info->num_frames > 1) {
if (webp_info->is_processing_anim_frame_) return WEBP_INFO_MISSING_DATA;
if (!animation && webp_info->num_frames_ > 1) {
LOG_ERROR("More than 1 frame detected in non-animation file.");
return WEBP_INFO_PARSE_ERROR;
}
if (animation && (!webp_info->chunk_counts[CHUNK_ANIM] ||
!webp_info->chunk_counts[CHUNK_ANMF])) {
if (animation && (!webp_info->chunk_counts_[CHUNK_ANIM] ||
!webp_info->chunk_counts_[CHUNK_ANMF])) {
LOG_ERROR("No ANIM/ANMF chunk detected in animation file.");
return WEBP_INFO_PARSE_ERROR;
}
@ -1059,17 +1057,17 @@ static WebPInfoStatus Validate(WebPInfo* const webp_info) {
static void ShowSummary(const WebPInfo* const webp_info) {
int i;
printf("Summary:\n");
printf("Number of frames: %d\n", webp_info->num_frames);
printf("Number of frames: %d\n", webp_info->num_frames_);
printf("Chunk type : VP8 VP8L VP8X ALPH ANIM ANMF(VP8 /VP8L/ALPH) ICCP "
"EXIF XMP\n");
printf("Chunk counts: ");
for (i = 0; i < CHUNK_TYPES; ++i) {
printf("%4d ", webp_info->chunk_counts[i]);
printf("%4d ", webp_info->chunk_counts_[i]);
if (i == CHUNK_ANMF) {
printf("%4d %4d %4d ",
webp_info->anmf_subchunk_counts[0],
webp_info->anmf_subchunk_counts[1],
webp_info->anmf_subchunk_counts[2]);
webp_info->anmf_subchunk_counts_[0],
webp_info->anmf_subchunk_counts_[1],
webp_info->anmf_subchunk_counts_[2]);
}
}
printf("\n");
@ -1092,20 +1090,20 @@ static WebPInfoStatus AnalyzeWebP(WebPInfo* const webp_info,
webp_info_status = ProcessChunk(&chunk_data, webp_info);
}
if (webp_info_status != WEBP_INFO_OK) goto Error;
if (webp_info->show_summary) ShowSummary(webp_info);
if (webp_info->show_summary_) ShowSummary(webp_info);
// Final check.
webp_info_status = Validate(webp_info);
Error:
if (!webp_info->quiet) {
if (!webp_info->quiet_) {
if (webp_info_status == WEBP_INFO_OK) {
printf("No error detected.\n");
} else {
printf("Errors detected.\n");
}
if (webp_info->num_warnings > 0) {
printf("There were %d warning(s).\n", webp_info->num_warnings);
if (webp_info->num_warnings_ > 0) {
printf("There were %d warning(s).\n", webp_info->num_warnings_);
}
}
return webp_info_status;
@ -1134,7 +1132,7 @@ int main(int argc, const char* argv[]) {
if (argc == 1) {
Help();
FREE_WARGV_AND_RETURN(EXIT_FAILURE);
FREE_WARGV_AND_RETURN(EXIT_SUCCESS);
}
// Parse command-line input.
@ -1171,10 +1169,10 @@ int main(int argc, const char* argv[]) {
WebPData webp_data;
const W_CHAR* in_file = NULL;
WebPInfoInit(&webp_info);
webp_info.quiet = quiet;
webp_info.show_diagnosis = show_diag;
webp_info.show_summary = show_summary;
webp_info.parse_bitstream = parse_bitstream;
webp_info.quiet_ = quiet;
webp_info.show_diagnosis_ = show_diag;
webp_info.show_summary_ = show_summary;
webp_info.parse_bitstream_ = parse_bitstream;
in_file = GET_WARGV(argv, c);
if (in_file == NULL ||
!ReadFileToWebPData((const char*)in_file, &webp_data)) {
@ -1182,7 +1180,7 @@ int main(int argc, const char* argv[]) {
WFPRINTF(stderr, "Failed to open input file %s.\n", in_file);
continue;
}
if (!webp_info.quiet) WPRINTF("File: %s\n", in_file);
if (!webp_info.quiet_) WPRINTF("File: %s\n", in_file);
webp_info_status = AnalyzeWebP(&webp_info, &webp_data);
WebPDataClear(&webp_data);
}

View File

@ -60,13 +60,11 @@
#include <stdlib.h>
#include <string.h>
#include "webp/decode.h"
#include "webp/mux.h"
#include "../examples/example_util.h"
#include "../imageio/imageio_util.h"
#include "./unicode.h"
#include "webp/decode.h"
#include "webp/mux.h"
#include "webp/mux_types.h"
#include "webp/types.h"
//------------------------------------------------------------------------------
// Config object to parse command-line arguments.
@ -89,9 +87,9 @@ typedef enum {
} FeatureSubType;
typedef struct {
FeatureSubType subtype;
const char* filename;
const char* params;
FeatureSubType subtype_;
const char* filename_;
const char* params_;
} FeatureArg;
typedef enum {
@ -116,14 +114,14 @@ static const char* const kDescriptions[LAST_FEATURE] = {
};
typedef struct {
CommandLineArguments cmd_args;
CommandLineArguments cmd_args_;
ActionType action_type;
const char* input;
const char* output;
FeatureType type;
FeatureArg* args;
int arg_count;
ActionType action_type_;
const char* input_;
const char* output_;
FeatureType type_;
FeatureArg* args_;
int arg_count_;
} Config;
//------------------------------------------------------------------------------
@ -134,8 +132,8 @@ static int CountOccurrences(const CommandLineArguments* const args,
int i;
int num_occurences = 0;
for (i = 0; i < args->argc; ++i) {
if (!strcmp(args->argv[i], arg)) {
for (i = 0; i < args->argc_; ++i) {
if (!strcmp(args->argv_[i], arg)) {
++num_occurences;
}
}
@ -529,8 +527,8 @@ static int ParseBgcolorArgs(const char* args, uint32_t* const bgcolor) {
static void DeleteConfig(Config* const config) {
if (config != NULL) {
free(config->args);
ExUtilDeleteCommandLineArguments(&config->cmd_args);
free(config->args_);
ExUtilDeleteCommandLineArguments(&config->cmd_args_);
memset(config, 0, sizeof(*config));
}
}
@ -607,9 +605,9 @@ static int ValidateCommandLine(const CommandLineArguments* const cmd_args,
return ok;
}
#define ACTION_IS_NIL (config->action_type == NIL_ACTION)
#define ACTION_IS_NIL (config->action_type_ == NIL_ACTION)
#define FEATURETYPE_IS_NIL (config->type == NIL_FEATURE)
#define FEATURETYPE_IS_NIL (config->type_ == NIL_FEATURE)
#define CHECK_NUM_ARGS_AT_LEAST(NUM, LABEL) \
do { \
@ -639,97 +637,98 @@ static int ParseCommandLine(Config* config, const W_CHAR** const unicode_argv) {
int i = 0;
int feature_arg_index = 0;
int ok = 1;
int argc = config->cmd_args.argc;
const char* const* argv = config->cmd_args.argv;
int argc = config->cmd_args_.argc_;
const char* const* argv = config->cmd_args_.argv_;
// Unicode file paths will be used if available.
const char* const* wargv =
(unicode_argv != NULL) ? (const char**)(unicode_argv + 1) : argv;
while (i < argc) {
FeatureArg* const arg = &config->args[feature_arg_index];
FeatureArg* const arg = &config->args_[feature_arg_index];
if (argv[i][0] == '-') { // One of the action types or output.
if (!strcmp(argv[i], "-set")) {
if (ACTION_IS_NIL) {
config->action_type = ACTION_SET;
config->action_type_ = ACTION_SET;
} else {
ERROR_GOTO1("ERROR: Multiple actions specified.\n", ErrParse);
}
++i;
} else if (!strcmp(argv[i], "-duration")) {
CHECK_NUM_ARGS_AT_LEAST(2, ErrParse);
if (ACTION_IS_NIL || config->action_type == ACTION_DURATION) {
config->action_type = ACTION_DURATION;
if (ACTION_IS_NIL || config->action_type_ == ACTION_DURATION) {
config->action_type_ = ACTION_DURATION;
} else {
ERROR_GOTO1("ERROR: Multiple actions specified.\n", ErrParse);
}
if (FEATURETYPE_IS_NIL || config->type == FEATURE_DURATION) {
config->type = FEATURE_DURATION;
if (FEATURETYPE_IS_NIL || config->type_ == FEATURE_DURATION) {
config->type_ = FEATURE_DURATION;
} else {
ERROR_GOTO1("ERROR: Multiple features specified.\n", ErrParse);
}
arg->params = argv[i + 1];
arg->params_ = argv[i + 1];
++feature_arg_index;
i += 2;
} else if (!strcmp(argv[i], "-get")) {
if (ACTION_IS_NIL) {
config->action_type = ACTION_GET;
config->action_type_ = ACTION_GET;
} else {
ERROR_GOTO1("ERROR: Multiple actions specified.\n", ErrParse);
}
++i;
} else if (!strcmp(argv[i], "-strip")) {
if (ACTION_IS_NIL) {
config->action_type = ACTION_STRIP;
config->action_type_ = ACTION_STRIP;
config->arg_count_ = 0;
} else {
ERROR_GOTO1("ERROR: Multiple actions specified.\n", ErrParse);
}
++i;
} else if (!strcmp(argv[i], "-frame")) {
CHECK_NUM_ARGS_AT_LEAST(3, ErrParse);
if (ACTION_IS_NIL || config->action_type == ACTION_SET) {
config->action_type = ACTION_SET;
if (ACTION_IS_NIL || config->action_type_ == ACTION_SET) {
config->action_type_ = ACTION_SET;
} else {
ERROR_GOTO1("ERROR: Multiple actions specified.\n", ErrParse);
}
if (FEATURETYPE_IS_NIL || config->type == FEATURE_ANMF) {
config->type = FEATURE_ANMF;
if (FEATURETYPE_IS_NIL || config->type_ == FEATURE_ANMF) {
config->type_ = FEATURE_ANMF;
} else {
ERROR_GOTO1("ERROR: Multiple features specified.\n", ErrParse);
}
arg->subtype = SUBTYPE_ANMF;
arg->filename = wargv[i + 1];
arg->params = argv[i + 2];
arg->subtype_ = SUBTYPE_ANMF;
arg->filename_ = wargv[i + 1];
arg->params_ = argv[i + 2];
++feature_arg_index;
i += 3;
} else if (!strcmp(argv[i], "-loop") || !strcmp(argv[i], "-bgcolor")) {
CHECK_NUM_ARGS_AT_LEAST(2, ErrParse);
if (ACTION_IS_NIL || config->action_type == ACTION_SET) {
config->action_type = ACTION_SET;
if (ACTION_IS_NIL || config->action_type_ == ACTION_SET) {
config->action_type_ = ACTION_SET;
} else {
ERROR_GOTO1("ERROR: Multiple actions specified.\n", ErrParse);
}
if (FEATURETYPE_IS_NIL || config->type == FEATURE_ANMF) {
config->type = FEATURE_ANMF;
if (FEATURETYPE_IS_NIL || config->type_ == FEATURE_ANMF) {
config->type_ = FEATURE_ANMF;
} else {
ERROR_GOTO1("ERROR: Multiple features specified.\n", ErrParse);
}
arg->subtype =
arg->subtype_ =
!strcmp(argv[i], "-loop") ? SUBTYPE_LOOP : SUBTYPE_BGCOLOR;
arg->params = argv[i + 1];
arg->params_ = argv[i + 1];
++feature_arg_index;
i += 2;
} else if (!strcmp(argv[i], "-o")) {
CHECK_NUM_ARGS_AT_LEAST(2, ErrParse);
config->output = wargv[i + 1];
config->output_ = wargv[i + 1];
i += 2;
} else if (!strcmp(argv[i], "-info")) {
CHECK_NUM_ARGS_EXACTLY(2, ErrParse);
if (config->action_type != NIL_ACTION) {
if (config->action_type_ != NIL_ACTION) {
ERROR_GOTO1("ERROR: Multiple actions specified.\n", ErrParse);
} else {
config->action_type = ACTION_INFO;
config->arg_count = 0;
config->input = wargv[i + 1];
config->action_type_ = ACTION_INFO;
config->arg_count_ = 0;
config->input_ = wargv[i + 1];
}
i += 2;
} else if (!strcmp(argv[i], "-h") || !strcmp(argv[i], "-help")) {
@ -747,8 +746,8 @@ static int ParseCommandLine(Config* config, const W_CHAR** const unicode_argv) {
} else if (!strcmp(argv[i], "--")) {
if (i < argc - 1) {
++i;
if (config->input == NULL) {
config->input = wargv[i];
if (config->input_ == NULL) {
config->input_ = wargv[i];
} else {
ERROR_GOTO2("ERROR at '%s': Multiple input files specified.\n",
argv[i], ErrParse);
@ -759,65 +758,50 @@ static int ParseCommandLine(Config* config, const W_CHAR** const unicode_argv) {
ERROR_GOTO2("ERROR: Unknown option: '%s'.\n", argv[i], ErrParse);
}
} else { // One of the feature types or input.
// After consuming the arguments to -get/-set/-strip, treat any remaining
// arguments as input. This allows files that are named the same as the
// keywords used with these options.
int is_input = feature_arg_index == config->arg_count;
if (ACTION_IS_NIL) {
ERROR_GOTO1("ERROR: Action must be specified before other arguments.\n",
ErrParse);
}
if (!is_input) {
if (!strcmp(argv[i], "icc") || !strcmp(argv[i], "exif") ||
!strcmp(argv[i], "xmp")) {
if (FEATURETYPE_IS_NIL) {
config->type = (!strcmp(argv[i], "icc")) ? FEATURE_ICCP :
(!strcmp(argv[i], "exif")) ? FEATURE_EXIF : FEATURE_XMP;
} else {
ERROR_GOTO1("ERROR: Multiple features specified.\n", ErrParse);
}
if (config->action_type == ACTION_SET) {
CHECK_NUM_ARGS_AT_LEAST(2, ErrParse);
arg->filename = wargv[i + 1];
++feature_arg_index;
i += 2;
} else {
// Note: 'arg->params' is not used in this case. 'arg_count' is
// used as a flag to indicate the -get/-strip feature has already
// been consumed, allowing input types to be named the same as the
// feature type.
config->arg_count = 0;
++i;
}
} else if (!strcmp(argv[i], "frame") &&
(config->action_type == ACTION_GET)) {
if (!strcmp(argv[i], "icc") || !strcmp(argv[i], "exif") ||
!strcmp(argv[i], "xmp")) {
if (FEATURETYPE_IS_NIL) {
config->type_ = (!strcmp(argv[i], "icc")) ? FEATURE_ICCP :
(!strcmp(argv[i], "exif")) ? FEATURE_EXIF : FEATURE_XMP;
} else {
ERROR_GOTO1("ERROR: Multiple features specified.\n", ErrParse);
}
if (config->action_type_ == ACTION_SET) {
CHECK_NUM_ARGS_AT_LEAST(2, ErrParse);
config->type = FEATURE_ANMF;
arg->params = argv[i + 1];
++feature_arg_index;
i += 2;
} else if (!strcmp(argv[i], "loop") &&
(config->action_type == ACTION_SET)) {
CHECK_NUM_ARGS_AT_LEAST(2, ErrParse);
config->type = FEATURE_LOOP;
arg->params = argv[i + 1];
++feature_arg_index;
i += 2;
} else if (!strcmp(argv[i], "bgcolor") &&
(config->action_type == ACTION_SET)) {
CHECK_NUM_ARGS_AT_LEAST(2, ErrParse);
config->type = FEATURE_BGCOLOR;
arg->params = argv[i + 1];
arg->filename_ = wargv[i + 1];
++feature_arg_index;
i += 2;
} else {
is_input = 1;
++i;
}
}
if (is_input) {
if (config->input == NULL) {
config->input = wargv[i];
} else if (!strcmp(argv[i], "frame") &&
(config->action_type_ == ACTION_GET)) {
CHECK_NUM_ARGS_AT_LEAST(2, ErrParse);
config->type_ = FEATURE_ANMF;
arg->params_ = argv[i + 1];
++feature_arg_index;
i += 2;
} else if (!strcmp(argv[i], "loop") &&
(config->action_type_ == ACTION_SET)) {
CHECK_NUM_ARGS_AT_LEAST(2, ErrParse);
config->type_ = FEATURE_LOOP;
arg->params_ = argv[i + 1];
++feature_arg_index;
i += 2;
} else if (!strcmp(argv[i], "bgcolor") &&
(config->action_type_ == ACTION_SET)) {
CHECK_NUM_ARGS_AT_LEAST(2, ErrParse);
config->type_ = FEATURE_BGCOLOR;
arg->params_ = argv[i + 1];
++feature_arg_index;
i += 2;
} else { // Assume input file.
if (config->input_ == NULL) {
config->input_ = wargv[i];
} else {
ERROR_GOTO2("ERROR at '%s': Multiple input files specified.\n",
argv[i], ErrParse);
@ -840,21 +824,21 @@ static int ValidateConfig(Config* const config) {
}
// Feature type.
if (FEATURETYPE_IS_NIL && config->action_type != ACTION_INFO) {
if (FEATURETYPE_IS_NIL && config->action_type_ != ACTION_INFO) {
ERROR_GOTO1("ERROR: No feature specified.\n", ErrValidate2);
}
// Input file.
if (config->input == NULL) {
if (config->action_type != ACTION_SET) {
if (config->input_ == NULL) {
if (config->action_type_ != ACTION_SET) {
ERROR_GOTO1("ERROR: No input file specified.\n", ErrValidate2);
} else if (config->type != FEATURE_ANMF) {
} else if (config->type_ != FEATURE_ANMF) {
ERROR_GOTO1("ERROR: No input file specified.\n", ErrValidate2);
}
}
// Output file.
if (config->output == NULL && config->action_type != ACTION_INFO) {
if (config->output_ == NULL && config->action_type_ != ACTION_INFO) {
ERROR_GOTO1("ERROR: No output file specified.\n", ErrValidate2);
}
@ -870,17 +854,17 @@ static int InitializeConfig(int argc, const char* argv[], Config* const config,
memset(config, 0, sizeof(*config));
ok = ExUtilInitCommandLineArguments(argc, argv, &config->cmd_args);
ok = ExUtilInitCommandLineArguments(argc, argv, &config->cmd_args_);
if (!ok) return 0;
// Validate command-line arguments.
if (!ValidateCommandLine(&config->cmd_args, &num_feature_args)) {
if (!ValidateCommandLine(&config->cmd_args_, &num_feature_args)) {
ERROR_GOTO1("Exiting due to command-line parsing error.\n", Err1);
}
config->arg_count = num_feature_args;
config->args = (FeatureArg*)calloc(num_feature_args, sizeof(*config->args));
if (config->args == NULL) {
config->arg_count_ = num_feature_args;
config->args_ = (FeatureArg*)calloc(num_feature_args, sizeof(*config->args_));
if (config->args_ == NULL) {
ERROR_GOTO1("ERROR: Memory allocation error.\n", Err1);
}
@ -912,7 +896,7 @@ static int GetFrame(const WebPMux* mux, const Config* config) {
WebPMuxFrameInfo info;
WebPDataInit(&info.bitstream);
num = ExUtilGetInt(config->args[0].params, 10, &parse_error);
num = ExUtilGetInt(config->args_[0].params_, 10, &parse_error);
if (num < 0) {
ERROR_GOTO1("ERROR: Frame/Fragment index must be non-negative.\n", ErrGet);
}
@ -937,7 +921,7 @@ static int GetFrame(const WebPMux* mux, const Config* config) {
ErrorString(err), ErrGet);
}
ok = WriteWebP(mux_single, config->output);
ok = WriteWebP(mux_single, config->output_);
ErrGet:
WebPDataClear(&info.bitstream);
@ -952,11 +936,11 @@ static int Process(const Config* config) {
WebPMuxError err = WEBP_MUX_OK;
int ok = 1;
switch (config->action_type) {
switch (config->action_type_) {
case ACTION_GET: {
ok = CreateMux(config->input, &mux);
ok = CreateMux(config->input_, &mux);
if (!ok) goto Err2;
switch (config->type) {
switch (config->type_) {
case FEATURE_ANMF:
ok = GetFrame(mux, config);
break;
@ -964,12 +948,12 @@ static int Process(const Config* config) {
case FEATURE_ICCP:
case FEATURE_EXIF:
case FEATURE_XMP:
err = WebPMuxGetChunk(mux, kFourccList[config->type], &chunk);
err = WebPMuxGetChunk(mux, kFourccList[config->type_], &chunk);
if (err != WEBP_MUX_OK) {
ERROR_GOTO3("ERROR (%s): Could not get the %s.\n",
ErrorString(err), kDescriptions[config->type], Err2);
ErrorString(err), kDescriptions[config->type_], Err2);
}
ok = WriteData(config->output, &chunk);
ok = WriteData(config->output_, &chunk);
break;
default:
@ -979,7 +963,7 @@ static int Process(const Config* config) {
break;
}
case ACTION_SET: {
switch (config->type) {
switch (config->type_) {
case FEATURE_ANMF: {
int i;
WebPMuxAnimParams params = { 0xFFFFFFFF, 0 };
@ -988,11 +972,11 @@ static int Process(const Config* config) {
ERROR_GOTO2("ERROR (%s): Could not allocate a mux object.\n",
ErrorString(WEBP_MUX_MEMORY_ERROR), Err2);
}
for (i = 0; i < config->arg_count; ++i) {
switch (config->args[i].subtype) {
for (i = 0; i < config->arg_count_; ++i) {
switch (config->args_[i].subtype_) {
case SUBTYPE_BGCOLOR: {
uint32_t bgcolor;
ok = ParseBgcolorArgs(config->args[i].params, &bgcolor);
ok = ParseBgcolorArgs(config->args_[i].params_, &bgcolor);
if (!ok) {
ERROR_GOTO1("ERROR: Could not parse the background color \n",
Err2);
@ -1003,7 +987,7 @@ static int Process(const Config* config) {
case SUBTYPE_LOOP: {
int parse_error = 0;
const int loop_count =
ExUtilGetInt(config->args[i].params, 10, &parse_error);
ExUtilGetInt(config->args_[i].params_, 10, &parse_error);
if (loop_count < 0 || loop_count > 65535) {
// Note: This is only a 'necessary' condition for loop_count
// to be valid. The 'sufficient' conditioned in checked in
@ -1019,10 +1003,10 @@ static int Process(const Config* config) {
case SUBTYPE_ANMF: {
WebPMuxFrameInfo frame;
frame.id = WEBP_CHUNK_ANMF;
ok = ExUtilReadFileToWebPData(config->args[i].filename,
ok = ExUtilReadFileToWebPData(config->args_[i].filename_,
&frame.bitstream);
if (!ok) goto Err2;
ok = ParseFrameArgs(config->args[i].params, &frame);
ok = ParseFrameArgs(config->args_[i].params_, &frame);
if (!ok) {
WebPDataClear(&frame.bitstream);
ERROR_GOTO1("ERROR: Could not parse frame properties.\n",
@ -1053,15 +1037,15 @@ static int Process(const Config* config) {
case FEATURE_ICCP:
case FEATURE_EXIF:
case FEATURE_XMP: {
ok = CreateMux(config->input, &mux);
ok = CreateMux(config->input_, &mux);
if (!ok) goto Err2;
ok = ExUtilReadFileToWebPData(config->args[0].filename, &chunk);
ok = ExUtilReadFileToWebPData(config->args_[0].filename_, &chunk);
if (!ok) goto Err2;
err = WebPMuxSetChunk(mux, kFourccList[config->type], &chunk, 1);
err = WebPMuxSetChunk(mux, kFourccList[config->type_], &chunk, 1);
WebPDataClear(&chunk);
if (err != WEBP_MUX_OK) {
ERROR_GOTO3("ERROR (%s): Could not set the %s.\n",
ErrorString(err), kDescriptions[config->type], Err2);
ErrorString(err), kDescriptions[config->type_], Err2);
}
break;
}
@ -1069,12 +1053,12 @@ static int Process(const Config* config) {
WebPMuxAnimParams params = { 0xFFFFFFFF, 0 };
int parse_error = 0;
const int loop_count =
ExUtilGetInt(config->args[0].params, 10, &parse_error);
ExUtilGetInt(config->args_[0].params_, 10, &parse_error);
if (loop_count < 0 || loop_count > 65535 || parse_error) {
ERROR_GOTO1("ERROR: Loop count must be in the range 0 to 65535.\n",
Err2);
}
ok = CreateMux(config->input, &mux);
ok = CreateMux(config->input_, &mux);
if (!ok) goto Err2;
ok = (WebPMuxGetAnimationParams(mux, &params) == WEBP_MUX_OK);
if (!ok) {
@ -1093,12 +1077,12 @@ static int Process(const Config* config) {
case FEATURE_BGCOLOR: {
WebPMuxAnimParams params = { 0xFFFFFFFF, 0 };
uint32_t bgcolor;
ok = ParseBgcolorArgs(config->args[0].params, &bgcolor);
ok = ParseBgcolorArgs(config->args_[0].params_, &bgcolor);
if (!ok) {
ERROR_GOTO1("ERROR: Could not parse the background color.\n",
Err2);
}
ok = CreateMux(config->input, &mux);
ok = CreateMux(config->input_, &mux);
if (!ok) goto Err2;
ok = (WebPMuxGetAnimationParams(mux, &params) == WEBP_MUX_OK);
if (!ok) {
@ -1119,12 +1103,12 @@ static int Process(const Config* config) {
break;
}
}
ok = WriteWebP(mux, config->output);
ok = WriteWebP(mux, config->output_);
break;
}
case ACTION_DURATION: {
int num_frames;
ok = CreateMux(config->input, &mux);
ok = CreateMux(config->input_, &mux);
if (!ok) goto Err2;
err = WebPMuxNumChunks(mux, WEBP_CHUNK_ANMF, &num_frames);
ok = (err == WEBP_MUX_OK);
@ -1134,7 +1118,7 @@ static int Process(const Config* config) {
if (num_frames == 0) {
fprintf(stderr, "Doesn't look like the source is animated. "
"Skipping duration setting.\n");
ok = WriteWebP(mux, config->output);
ok = WriteWebP(mux, config->output_);
if (!ok) goto Err2;
} else {
int i;
@ -1146,11 +1130,11 @@ static int Process(const Config* config) {
for (i = 0; i < num_frames; ++i) durations[i] = -1;
// Parse intervals to process.
for (i = 0; i < config->arg_count; ++i) {
for (i = 0; i < config->arg_count_; ++i) {
int k;
int args[3];
int duration, start, end;
const int nb_args = ExUtilGetInts(config->args[i].params,
const int nb_args = ExUtilGetInts(config->args_[i].params_,
10, 3, args);
ok = (nb_args >= 1);
if (!ok) goto Err3;
@ -1194,7 +1178,7 @@ static int Process(const Config* config) {
WebPDataClear(&frame.bitstream);
}
WebPMuxDelete(mux);
ok = WriteWebP(new_mux, config->output);
ok = WriteWebP(new_mux, config->output_);
mux = new_mux; // transfer for the WebPMuxDelete() call
new_mux = NULL;
@ -1206,24 +1190,24 @@ static int Process(const Config* config) {
break;
}
case ACTION_STRIP: {
ok = CreateMux(config->input, &mux);
ok = CreateMux(config->input_, &mux);
if (!ok) goto Err2;
if (config->type == FEATURE_ICCP || config->type == FEATURE_EXIF ||
config->type == FEATURE_XMP) {
err = WebPMuxDeleteChunk(mux, kFourccList[config->type]);
if (config->type_ == FEATURE_ICCP || config->type_ == FEATURE_EXIF ||
config->type_ == FEATURE_XMP) {
err = WebPMuxDeleteChunk(mux, kFourccList[config->type_]);
if (err != WEBP_MUX_OK) {
ERROR_GOTO3("ERROR (%s): Could not strip the %s.\n",
ErrorString(err), kDescriptions[config->type], Err2);
ErrorString(err), kDescriptions[config->type_], Err2);
}
} else {
ERROR_GOTO1("ERROR: Invalid feature for action 'strip'.\n", Err2);
break;
}
ok = WriteWebP(mux, config->output);
ok = WriteWebP(mux, config->output_);
break;
}
case ACTION_INFO: {
ok = CreateMux(config->input, &mux);
ok = CreateMux(config->input_, &mux);
if (!ok) goto Err2;
ok = (DisplayInfo(mux) == WEBP_MUX_OK);
break;

View File

@ -20,12 +20,11 @@
#include "sharpyuv/sharpyuv.h"
#include "src/dsp/dsp.h"
#include "src/utils/utils.h"
#include "src/webp/encode.h"
#include "webp/format_constants.h"
#include "webp/types.h"
#define XTRA_MAJ_VERSION 1
#define XTRA_MIN_VERSION 6
#define XTRA_MIN_VERSION 4
#define XTRA_REV_VERSION 0
//------------------------------------------------------------------------------

View File

@ -11,8 +11,6 @@
#ifndef WEBP_EXTRAS_EXTRAS_H_
#define WEBP_EXTRAS_EXTRAS_H_
#include <stddef.h>
#include "webp/types.h"
#ifdef __cplusplus

View File

@ -23,11 +23,10 @@
#include <stdlib.h>
#include <string.h>
#include "../examples/unicode.h"
#include "webp/encode.h"
#include "imageio/image_dec.h"
#include "imageio/imageio_util.h"
#include "src/webp/types.h"
#include "webp/encode.h"
#include "../examples/unicode.h"
static size_t ReadPicture(const char* const filename, WebPPicture* const pic,
int keep_alpha) {

View File

@ -11,13 +11,11 @@
//
// Author: Skal (pascal.massimino@gmail.com)
#include <math.h>
#include <stddef.h>
#include "extras/extras.h"
#include "src/webp/types.h"
#include "webp/decode.h"
#include <math.h>
//------------------------------------------------------------------------------
#define INVALID_BIT_POS (1ull << 63)
@ -78,7 +76,7 @@ int VP8EstimateQuality(const uint8_t* const data, size_t size) {
GET_BIT(2); // colorspace + clamp type
// Segment header
if (GET_BIT(1)) { // use_segment
if (GET_BIT(1)) { // use_segment_
int s;
const int update_map = GET_BIT(1);
if (GET_BIT(1)) { // update data

View File

@ -57,12 +57,6 @@ int main(int argc, char* argv[]) {
INIT_WARGV(argc, argv);
if (argc == 1) {
fprintf(stderr, "Usage: %s [-h] image.webp [more_files.webp...]\n",
argv[0]);
goto Error;
}
for (c = 1; c < argc; ++c) {
const char* file = NULL;
const uint8_t* webp = NULL;

View File

@ -11,10 +11,9 @@
#include <stdlib.h>
#include <string.h>
#include "../examples/unicode.h"
#include "src/webp/types.h"
#include "extras/extras.h"
#include "imageio/imageio_util.h"
#include "../examples/unicode.h"
// Returns EXIT_SUCCESS on success, EXIT_FAILURE on failure.
int main(int argc, const char* argv[]) {

View File

@ -9,12 +9,7 @@
//
// Generic image-type guessing.
#include <stddef.h>
#include "./image_dec.h"
#include "./metadata.h"
#include "webp/encode.h"
#include "webp/types.h"
const char* WebPGetEnabledInputFileFormats(void) {
return "WebP"

View File

@ -14,8 +14,6 @@
#ifndef WEBP_IMAGEIO_IMAGE_DEC_H_
#define WEBP_IMAGEIO_IMAGE_DEC_H_
#include <stddef.h>
#include "webp/types.h"
#ifdef HAVE_CONFIG_H

View File

@ -12,7 +12,6 @@
#include "./image_enc.h"
#include <assert.h>
#include <stdio.h>
#include <string.h>
#ifdef WEBP_HAVE_PNG
@ -35,10 +34,8 @@
#include <wincodec.h>
#endif
#include "../examples/unicode.h"
#include "./imageio_util.h"
#include "webp/decode.h"
#include "webp/types.h"
#include "../examples/unicode.h"
//------------------------------------------------------------------------------
// PNG

View File

@ -16,11 +16,8 @@
#include <fcntl.h> // for _O_BINARY
#include <io.h> // for _setmode()
#endif
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "webp/types.h"
#include "../examples/unicode.h"
// -----------------------------------------------------------------------------

View File

@ -14,7 +14,6 @@
#define WEBP_IMAGEIO_IMAGEIO_UTIL_H_
#include <stdio.h>
#include "webp/types.h"
#ifdef __cplusplus

View File

@ -24,10 +24,9 @@
#include <stdlib.h>
#include <string.h>
#include "webp/encode.h"
#include "./imageio_util.h"
#include "./metadata.h"
#include "webp/encode.h"
#include "webp/types.h"
// -----------------------------------------------------------------------------
// Metadata processing

View File

@ -12,8 +12,6 @@
#ifndef WEBP_IMAGEIO_JPEGDEC_H_
#define WEBP_IMAGEIO_JPEGDEC_H_
#include <stddef.h>
#include "webp/types.h"
#ifdef __cplusplus

View File

@ -13,8 +13,6 @@
#ifndef WEBP_IMAGEIO_METADATA_H_
#define WEBP_IMAGEIO_METADATA_H_
#include <stddef.h>
#include "webp/types.h"
#ifdef __cplusplus

View File

@ -22,15 +22,13 @@
#define PNG_USER_MEM_SUPPORTED // for png_create_read_struct_2
#endif
#include <png.h>
#include <setjmp.h> // note: this must be included *after* png.h
#include <stdlib.h>
#include <string.h>
#include "webp/encode.h"
#include "./imageio_util.h"
#include "./metadata.h"
#include "webp/encode.h"
#include "webp/types.h"
#define LOCAL_PNG_VERSION ((PNG_LIBPNG_VER_MAJOR << 8) | PNG_LIBPNG_VER_MINOR)
#define LOCAL_PNG_PREREQ(maj, min) \
@ -141,8 +139,6 @@ static const struct {
{ "Raw profile type xmp", ProcessRawProfile, METADATA_OFFSET(xmp) },
// Exiftool puts exif data in APP1 chunk, too.
{ "Raw profile type APP1", ProcessRawProfile, METADATA_OFFSET(exif) },
// ImageMagick uses lowercase app1.
{ "Raw profile type app1", ProcessRawProfile, METADATA_OFFSET(exif) },
// XMP Specification Part 3, Section 3 #PNG
{ "XML:com.adobe.xmp", MetadataCopy, METADATA_OFFSET(xmp) },
{ NULL, NULL, 0 },
@ -163,20 +159,6 @@ static int ExtractMetadataFromPNG(png_structp png,
png_textp text = NULL;
const png_uint_32 num = png_get_text(png, info, &text, NULL);
png_uint_32 i;
#ifdef PNG_eXIf_SUPPORTED
// Look for an 'eXIf' tag. Preference is given to this tag as it's newer
// than the TextualData tags.
{
png_bytep exif;
png_uint_32 len;
if (png_get_eXIf_1(png, info, &len, &exif) == PNG_INFO_eXIf) {
if (!MetadataCopy((const char*)exif, len, &metadata->exif)) return 0;
}
}
#endif // PNG_eXIf_SUPPORTED
// Look for EXIF / XMP metadata.
for (i = 0; i < num; ++i, ++text) {
int j;
@ -210,7 +192,6 @@ static int ExtractMetadataFromPNG(png_structp png,
}
}
}
#ifdef PNG_iCCP_SUPPORTED
// Look for an ICC profile.
{
png_charp name;
@ -227,7 +208,6 @@ static int ExtractMetadataFromPNG(png_structp png,
if (!MetadataCopy((const char*)profile, len, &metadata->iccp)) return 0;
}
}
#endif // PNG_iCCP_SUPPORTED
}
return 1;
}

View File

@ -12,8 +12,6 @@
#ifndef WEBP_IMAGEIO_PNGDEC_H_
#define WEBP_IMAGEIO_PNGDEC_H_
#include <stddef.h>
#include "webp/types.h"
#ifdef __cplusplus

View File

@ -17,9 +17,8 @@
#include <stdlib.h>
#include <string.h>
#include "./imageio_util.h"
#include "webp/encode.h"
#include "webp/types.h"
#include "./imageio_util.h"
#if defined(_MSC_VER) && _MSC_VER < 1900
#define snprintf _snprintf

View File

@ -12,8 +12,6 @@
#ifndef WEBP_IMAGEIO_PNMDEC_H_
#define WEBP_IMAGEIO_PNMDEC_H_
#include <stddef.h>
#include "webp/types.h"
#ifdef __cplusplus

View File

@ -22,10 +22,9 @@
#ifdef WEBP_HAVE_TIFF
#include <tiffio.h>
#include "webp/encode.h"
#include "./imageio_util.h"
#include "./metadata.h"
#include "webp/encode.h"
#include "webp/types.h"
static const struct {
ttag_t tag;

View File

@ -12,8 +12,6 @@
#ifndef WEBP_IMAGEIO_TIFFDEC_H_
#define WEBP_IMAGEIO_TIFFDEC_H_
#include <stddef.h>
#include "webp/types.h"
#ifdef __cplusplus

View File

@ -13,19 +13,18 @@
#include "webp/config.h"
#endif
#include "./webpdec.h"
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include "../examples/unicode.h"
#include "./imageio_util.h"
#include "./metadata.h"
#include "./webpdec.h"
#include "webp/decode.h"
#include "webp/demux.h"
#include "webp/encode.h"
#include "webp/mux_types.h"
#include "webp/types.h"
#include "../examples/unicode.h"
#include "./imageio_util.h"
#include "./metadata.h"
//------------------------------------------------------------------------------
// WebP decoding

View File

@ -12,10 +12,7 @@
#ifndef WEBP_IMAGEIO_WEBPDEC_H_
#define WEBP_IMAGEIO_WEBPDEC_H_
#include <stddef.h>
#include "webp/decode.h"
#include "webp/types.h"
#ifdef __cplusplus
extern "C" {

View File

@ -53,7 +53,7 @@ DEMUXLIBLIST=''
if [[ -z "${SDK}" ]]; then
echo "iOS SDK not available"
exit 1
elif [[ ${SDK%%.*} -gt 8 && "${XCODE%%.*}" -lt 16 ]]; then
elif [[ ${SDK%%.*} -gt 8 ]]; then
EXTRA_CFLAGS="-fembed-bitcode"
elif [[ ${SDK%%.*} -le 6 ]]; then
echo "You need iOS SDK version 6.0 or above"

View File

@ -1,5 +1,5 @@
.\" Hey, EMACS: -*- nroff -*-
.TH CWEBP 1 "April 10, 2025"
.TH CWEBP 1 "September 17, 2024"
.SH NAME
cwebp \- compress an image file to a WebP file
.SH SYNOPSIS
@ -102,14 +102,6 @@ If either (but not both) of the \fBwidth\fP or \fBheight\fP parameters is 0,
the value will be calculated preserving the aspect\-ratio. Note: scaling
is applied \fIafter\fP cropping.
.TP
.BI \-resize_mode " string
Specify the behavior of the \fB\-resize\fP option. Possible values are:
\fBdown_only\fP, \fBup_only\fP, \fBalways\fP (default). \fBdown_only\fP will
use the values specified by \fB\-resize\fP if \fIeither\fP the input width or
height are larger than the given dimensions. Similarly, \fBup_only\fP will only
resize if \fIeither\fP the input width or height are smaller than the given
dimensions.
.TP
.B \-mt
Use multi\-threading for encoding, if possible.
.TP

View File

@ -1,5 +1,5 @@
.\" Hey, EMACS: -*- nroff -*-
.TH GIF2WEBP 1 "November 4, 2024"
.TH GIF2WEBP 1 "July 18, 2024"
.SH NAME
gif2webp \- Convert a GIF image to WebP
.SH SYNOPSIS
@ -39,18 +39,6 @@ Encode the image using lossy compression.
Mixed compression mode: optimize compression of the image by picking either
lossy or lossless compression for each frame heuristically.
.TP
.BI \-near_lossless " int
Specify the level of near\-lossless image preprocessing. This option adjusts
pixel values to help compressibility, but has minimal impact on the visual
quality. It triggers lossless compression mode automatically. The range is 0
(maximum preprocessing) to 100 (no preprocessing, the default). The typical
value is around 60. Note that lossy with \fB\-q 100\fP can at times yield
better results.
.TP
.B \-sharp_yuv
Use more accurate and sharper RGB->YUV conversion. Note that this process is
slower than the default 'fast' RGB->YUV conversion.
.TP
.BI \-q " float
Specify the compression factor for RGB channels between 0 and 100. The default
is 75.

View File

@ -1,5 +1,5 @@
.\" Hey, EMACS: -*- nroff -*-
.TH IMG2WEBP 1 "November 26, 2024"
.TH IMG2WEBP 1 "September 17, 2024"
.SH NAME
img2webp \- create animated WebP file from a sequence of input images.
.SH SYNOPSIS
@ -88,10 +88,6 @@ Specify the compression factor between 0 and 100. The default is 75.
Specify the compression method to use. This parameter controls the
trade off between encoding speed and the compressed file size and quality.
Possible values range from 0 to 6. Default value is 4.
When higher values are used, the encoder will spend more time inspecting
additional encoding possibilities and decide on the quality gain.
Lower value can result in faster processing time at the expense of
larger file size and lower compression quality.
.TP
.B \-exact, \-noexact
Preserve or alter RGB values in transparent area. The default is

View File

@ -33,7 +33,7 @@ libsharpyuv_la_SOURCES += sharpyuv_gamma.c sharpyuv_gamma.h
libsharpyuv_la_SOURCES += sharpyuv.c sharpyuv.h
libsharpyuv_la_CPPFLAGS = $(AM_CPPFLAGS)
libsharpyuv_la_LDFLAGS = -no-undefined -version-info 1:2:1 -lm
libsharpyuv_la_LDFLAGS = -no-undefined -version-info 1:0:1 -lm
libsharpyuv_la_LIBADD =
libsharpyuv_la_LIBADD += libsharpyuv_sse2.la
libsharpyuv_la_LIBADD += libsharpyuv_neon.la

View File

@ -6,8 +6,8 @@
LANGUAGE LANG_ENGLISH, SUBLANG_ENGLISH_US
VS_VERSION_INFO VERSIONINFO
FILEVERSION 0,0,4,2
PRODUCTVERSION 0,0,4,2
FILEVERSION 0,0,4,0
PRODUCTVERSION 0,0,4,0
FILEFLAGSMASK 0x3fL
#ifdef _DEBUG
FILEFLAGS 0x1L
@ -24,12 +24,12 @@ BEGIN
BEGIN
VALUE "CompanyName", "Google, Inc."
VALUE "FileDescription", "libsharpyuv DLL"
VALUE "FileVersion", "0.4.2"
VALUE "FileVersion", "0.4.0"
VALUE "InternalName", "libsharpyuv.dll"
VALUE "LegalCopyright", "Copyright (C) 2025"
VALUE "LegalCopyright", "Copyright (C) 2024"
VALUE "OriginalFilename", "libsharpyuv.dll"
VALUE "ProductName", "SharpYuv Library"
VALUE "ProductVersion", "0.4.2"
VALUE "ProductVersion", "0.4.0"
END
END
BLOCK "VarFileInfo"

View File

@ -19,10 +19,10 @@
#include <stdlib.h>
#include <string.h>
#include "src/webp/types.h"
#include "sharpyuv/sharpyuv_cpu.h"
#include "sharpyuv/sharpyuv_dsp.h"
#include "sharpyuv/sharpyuv_gamma.h"
#include "src/webp/types.h"
//------------------------------------------------------------------------------

View File

@ -52,7 +52,7 @@ extern "C" {
// SharpYUV API version following the convention from semver.org
#define SHARPYUV_VERSION_MAJOR 0
#define SHARPYUV_VERSION_MINOR 4
#define SHARPYUV_VERSION_PATCH 2
#define SHARPYUV_VERSION_PATCH 0
// Version as a uint32_t. The major number is the high 8 bits.
// The minor number is the middle 8 bits. The patch number is the low 16 bits.
#define SHARPYUV_MAKE_VERSION(MAJOR, MINOR, PATCH) \

View File

@ -15,8 +15,6 @@
#include <math.h>
#include <stddef.h>
#include "sharpyuv/sharpyuv.h"
static int ToFixed16(float f) { return (int)floor(f * (1 << 16) + 0.5f); }
void SharpYuvComputeConversionMatrix(const SharpYuvColorSpace* yuv_color_space,
@ -24,16 +22,16 @@ void SharpYuvComputeConversionMatrix(const SharpYuvColorSpace* yuv_color_space,
const float kr = yuv_color_space->kr;
const float kb = yuv_color_space->kb;
const float kg = 1.0f - kr - kb;
const float cb = 0.5f / (1.0f - kb);
const float cr = 0.5f / (1.0f - kr);
const float cr = 0.5f / (1.0f - kb);
const float cb = 0.5f / (1.0f - kr);
const int shift = yuv_color_space->bit_depth - 8;
const float denom = (float)((1 << yuv_color_space->bit_depth) - 1);
float scale_y = 1.0f;
float add_y = 0.0f;
float scale_u = cb;
float scale_v = cr;
float scale_u = cr;
float scale_v = cb;
float add_uv = (float)(128 << shift);
assert(yuv_color_space->bit_depth >= 8);
@ -62,10 +60,6 @@ void SharpYuvComputeConversionMatrix(const SharpYuvColorSpace* yuv_color_space,
// Matrices are in YUV_FIX fixed point precision.
// WebP's matrix, similar but not identical to kRec601LimitedMatrix
// Derived using the following formulas:
// Y = 0.2569 * R + 0.5044 * G + 0.0979 * B + 16
// U = -0.1483 * R - 0.2911 * G + 0.4394 * B + 128
// V = 0.4394 * R - 0.3679 * G - 0.0715 * B + 128
static const SharpYuvConversionMatrix kWebpMatrix = {
{16839, 33059, 6420, 16 << 16},
{-9719, -19081, 28800, 128 << 16},

View File

@ -17,7 +17,6 @@
#include <stdlib.h>
#include "sharpyuv/sharpyuv_cpu.h"
#include "src/dsp/cpu.h"
#include "src/webp/types.h"
//-----------------------------------------------------------------------------

View File

@ -15,7 +15,6 @@
#include <float.h>
#include <math.h>
#include "sharpyuv/sharpyuv.h"
#include "src/webp/types.h"
// Gamma correction compensates loss of resolution during chroma subsampling.

View File

@ -14,12 +14,8 @@
#include "sharpyuv/sharpyuv_dsp.h"
#if defined(WEBP_USE_SSE2)
#include <emmintrin.h>
#include <stdlib.h>
#include "src/dsp/cpu.h"
#include "src/webp/types.h"
#include <emmintrin.h>
static uint16_t clip_SSE2(int v, int max) {
return (v < 0) ? 0 : (v > max) ? max : (uint16_t)v;

View File

@ -36,7 +36,7 @@ libwebp_la_LIBADD += utils/libwebputils.la
# other than the ones listed on the command line, i.e., after linking, it will
# not have unresolved symbols. Some platforms (Windows among them) require all
# symbols in shared libraries to be resolved at library creation.
libwebp_la_LDFLAGS = -no-undefined -version-info 9:0:2
libwebp_la_LDFLAGS = -no-undefined -version-info 8:9:1
libwebpincludedir = $(includedir)/webp
pkgconfig_DATA = libwebp.pc
@ -48,7 +48,7 @@ if BUILD_LIBWEBPDECODER
libwebpdecoder_la_LIBADD += dsp/libwebpdspdecode.la
libwebpdecoder_la_LIBADD += utils/libwebputilsdecode.la
libwebpdecoder_la_LDFLAGS = -no-undefined -version-info 5:0:2
libwebpdecoder_la_LDFLAGS = -no-undefined -version-info 4:9:1
pkgconfig_DATA += libwebpdecoder.pc
endif

View File

@ -11,18 +11,14 @@
//
// Author: Skal (pascal.massimino@gmail.com)
#include <assert.h>
#include <stdlib.h>
#include "src/dec/alphai_dec.h"
#include "src/dec/vp8_dec.h"
#include "src/dec/vp8i_dec.h"
#include "src/dec/vp8li_dec.h"
#include "src/dec/webpi_dec.h"
#include "src/dsp/dsp.h"
#include "src/utils/quant_levels_dec_utils.h"
#include "src/utils/utils.h"
#include "src/webp/decode.h"
#include "src/webp/format_constants.h"
#include "src/webp/types.h"
@ -38,8 +34,8 @@ WEBP_NODISCARD static ALPHDecoder* ALPHNew(void) {
// Clears and deallocates an alpha decoder instance.
static void ALPHDelete(ALPHDecoder* const dec) {
if (dec != NULL) {
VP8LDelete(dec->vp8l_dec);
dec->vp8l_dec = NULL;
VP8LDelete(dec->vp8l_dec_);
dec->vp8l_dec_ = NULL;
WebPSafeFree(dec);
}
}
@ -58,28 +54,28 @@ WEBP_NODISCARD static int ALPHInit(ALPHDecoder* const dec, const uint8_t* data,
const uint8_t* const alpha_data = data + ALPHA_HEADER_LEN;
const size_t alpha_data_size = data_size - ALPHA_HEADER_LEN;
int rsrv;
VP8Io* const io = &dec->io;
VP8Io* const io = &dec->io_;
assert(data != NULL && output != NULL && src_io != NULL);
VP8FiltersInit();
dec->output = output;
dec->width = src_io->width;
dec->height = src_io->height;
assert(dec->width > 0 && dec->height > 0);
dec->output_ = output;
dec->width_ = src_io->width;
dec->height_ = src_io->height;
assert(dec->width_ > 0 && dec->height_ > 0);
if (data_size <= ALPHA_HEADER_LEN) {
return 0;
}
dec->method = (data[0] >> 0) & 0x03;
dec->filter = (WEBP_FILTER_TYPE)((data[0] >> 2) & 0x03);
dec->pre_processing = (data[0] >> 4) & 0x03;
dec->method_ = (data[0] >> 0) & 0x03;
dec->filter_ = (WEBP_FILTER_TYPE)((data[0] >> 2) & 0x03);
dec->pre_processing_ = (data[0] >> 4) & 0x03;
rsrv = (data[0] >> 6) & 0x03;
if (dec->method < ALPHA_NO_COMPRESSION ||
dec->method > ALPHA_LOSSLESS_COMPRESSION ||
dec->filter >= WEBP_FILTER_LAST ||
dec->pre_processing > ALPHA_PREPROCESSED_LEVELS ||
if (dec->method_ < ALPHA_NO_COMPRESSION ||
dec->method_ > ALPHA_LOSSLESS_COMPRESSION ||
dec->filter_ >= WEBP_FILTER_LAST ||
dec->pre_processing_ > ALPHA_PREPROCESSED_LEVELS ||
rsrv != 0) {
return 0;
}
@ -100,11 +96,11 @@ WEBP_NODISCARD static int ALPHInit(ALPHDecoder* const dec, const uint8_t* data,
io->crop_bottom = src_io->crop_bottom;
// No need to copy the scaling parameters.
if (dec->method == ALPHA_NO_COMPRESSION) {
const size_t alpha_decoded_size = dec->width * dec->height;
if (dec->method_ == ALPHA_NO_COMPRESSION) {
const size_t alpha_decoded_size = dec->width_ * dec->height_;
ok = (alpha_data_size >= alpha_decoded_size);
} else {
assert(dec->method == ALPHA_LOSSLESS_COMPRESSION);
assert(dec->method_ == ALPHA_LOSSLESS_COMPRESSION);
ok = VP8LDecodeAlphaHeader(dec, alpha_data, alpha_data_size);
}
@ -117,32 +113,32 @@ WEBP_NODISCARD static int ALPHInit(ALPHDecoder* const dec, const uint8_t* data,
// Returns false in case of bitstream error.
WEBP_NODISCARD static int ALPHDecode(VP8Decoder* const dec, int row,
int num_rows) {
ALPHDecoder* const alph_dec = dec->alph_dec;
const int width = alph_dec->width;
const int height = alph_dec->io.crop_bottom;
if (alph_dec->method == ALPHA_NO_COMPRESSION) {
ALPHDecoder* const alph_dec = dec->alph_dec_;
const int width = alph_dec->width_;
const int height = alph_dec->io_.crop_bottom;
if (alph_dec->method_ == ALPHA_NO_COMPRESSION) {
int y;
const uint8_t* prev_line = dec->alpha_prev_line;
const uint8_t* deltas = dec->alpha_data + ALPHA_HEADER_LEN + row * width;
uint8_t* dst = dec->alpha_plane + row * width;
assert(deltas <= &dec->alpha_data[dec->alpha_data_size]);
assert(WebPUnfilters[alph_dec->filter] != NULL);
const uint8_t* prev_line = dec->alpha_prev_line_;
const uint8_t* deltas = dec->alpha_data_ + ALPHA_HEADER_LEN + row * width;
uint8_t* dst = dec->alpha_plane_ + row * width;
assert(deltas <= &dec->alpha_data_[dec->alpha_data_size_]);
assert(WebPUnfilters[alph_dec->filter_] != NULL);
for (y = 0; y < num_rows; ++y) {
WebPUnfilters[alph_dec->filter](prev_line, deltas, dst, width);
WebPUnfilters[alph_dec->filter_](prev_line, deltas, dst, width);
prev_line = dst;
dst += width;
deltas += width;
}
dec->alpha_prev_line = prev_line;
} else { // alph_dec->method == ALPHA_LOSSLESS_COMPRESSION
assert(alph_dec->vp8l_dec != NULL);
dec->alpha_prev_line_ = prev_line;
} else { // alph_dec->method_ == ALPHA_LOSSLESS_COMPRESSION
assert(alph_dec->vp8l_dec_ != NULL);
if (!VP8LDecodeAlphaImageStream(alph_dec, row + num_rows)) {
return 0;
}
}
if (row + num_rows >= height) {
dec->is_alpha_decoded = 1;
dec->is_alpha_decoded_ = 1;
}
return 1;
}
@ -152,25 +148,25 @@ WEBP_NODISCARD static int AllocateAlphaPlane(VP8Decoder* const dec,
const int stride = io->width;
const int height = io->crop_bottom;
const uint64_t alpha_size = (uint64_t)stride * height;
assert(dec->alpha_plane_mem == NULL);
dec->alpha_plane_mem =
(uint8_t*)WebPSafeMalloc(alpha_size, sizeof(*dec->alpha_plane));
if (dec->alpha_plane_mem == NULL) {
assert(dec->alpha_plane_mem_ == NULL);
dec->alpha_plane_mem_ =
(uint8_t*)WebPSafeMalloc(alpha_size, sizeof(*dec->alpha_plane_));
if (dec->alpha_plane_mem_ == NULL) {
return VP8SetError(dec, VP8_STATUS_OUT_OF_MEMORY,
"Alpha decoder initialization failed.");
}
dec->alpha_plane = dec->alpha_plane_mem;
dec->alpha_prev_line = NULL;
dec->alpha_plane_ = dec->alpha_plane_mem_;
dec->alpha_prev_line_ = NULL;
return 1;
}
void WebPDeallocateAlphaMemory(VP8Decoder* const dec) {
assert(dec != NULL);
WebPSafeFree(dec->alpha_plane_mem);
dec->alpha_plane_mem = NULL;
dec->alpha_plane = NULL;
ALPHDelete(dec->alph_dec);
dec->alph_dec = NULL;
WebPSafeFree(dec->alpha_plane_mem_);
dec->alpha_plane_mem_ = NULL;
dec->alpha_plane_ = NULL;
ALPHDelete(dec->alph_dec_);
dec->alph_dec_ = NULL;
}
//------------------------------------------------------------------------------
@ -188,46 +184,46 @@ WEBP_NODISCARD const uint8_t* VP8DecompressAlphaRows(VP8Decoder* const dec,
return NULL;
}
if (!dec->is_alpha_decoded) {
if (dec->alph_dec == NULL) { // Initialize decoder.
dec->alph_dec = ALPHNew();
if (dec->alph_dec == NULL) {
if (!dec->is_alpha_decoded_) {
if (dec->alph_dec_ == NULL) { // Initialize decoder.
dec->alph_dec_ = ALPHNew();
if (dec->alph_dec_ == NULL) {
VP8SetError(dec, VP8_STATUS_OUT_OF_MEMORY,
"Alpha decoder initialization failed.");
return NULL;
}
if (!AllocateAlphaPlane(dec, io)) goto Error;
if (!ALPHInit(dec->alph_dec, dec->alpha_data, dec->alpha_data_size,
io, dec->alpha_plane)) {
VP8LDecoder* const vp8l_dec = dec->alph_dec->vp8l_dec;
if (!ALPHInit(dec->alph_dec_, dec->alpha_data_, dec->alpha_data_size_,
io, dec->alpha_plane_)) {
VP8LDecoder* const vp8l_dec = dec->alph_dec_->vp8l_dec_;
VP8SetError(dec,
(vp8l_dec == NULL) ? VP8_STATUS_OUT_OF_MEMORY
: vp8l_dec->status,
: vp8l_dec->status_,
"Alpha decoder initialization failed.");
goto Error;
}
// if we allowed use of alpha dithering, check whether it's needed at all
if (dec->alph_dec->pre_processing != ALPHA_PREPROCESSED_LEVELS) {
dec->alpha_dithering = 0; // disable dithering
if (dec->alph_dec_->pre_processing_ != ALPHA_PREPROCESSED_LEVELS) {
dec->alpha_dithering_ = 0; // disable dithering
} else {
num_rows = height - row; // decode everything in one pass
}
}
assert(dec->alph_dec != NULL);
assert(dec->alph_dec_ != NULL);
assert(row + num_rows <= height);
if (!ALPHDecode(dec, row, num_rows)) goto Error;
if (dec->is_alpha_decoded) { // finished?
ALPHDelete(dec->alph_dec);
dec->alph_dec = NULL;
if (dec->alpha_dithering > 0) {
uint8_t* const alpha = dec->alpha_plane + io->crop_top * width
if (dec->is_alpha_decoded_) { // finished?
ALPHDelete(dec->alph_dec_);
dec->alph_dec_ = NULL;
if (dec->alpha_dithering_ > 0) {
uint8_t* const alpha = dec->alpha_plane_ + io->crop_top * width
+ io->crop_left;
if (!WebPDequantizeLevels(alpha,
io->crop_right - io->crop_left,
io->crop_bottom - io->crop_top,
width, dec->alpha_dithering)) {
width, dec->alpha_dithering_)) {
goto Error;
}
}
@ -235,7 +231,7 @@ WEBP_NODISCARD const uint8_t* VP8DecompressAlphaRows(VP8Decoder* const dec,
}
// Return a pointer to the current decoded row.
return dec->alpha_plane + row * width;
return dec->alpha_plane_ + row * width;
Error:
WebPDeallocateAlphaMemory(dec);

View File

@ -14,10 +14,7 @@
#ifndef WEBP_DEC_ALPHAI_DEC_H_
#define WEBP_DEC_ALPHAI_DEC_H_
#include "src/dec/vp8_dec.h"
#include "src/webp/types.h"
#include "src/dec/webpi_dec.h"
#include "src/dsp/dsp.h"
#include "src/utils/filters_utils.h"
#ifdef __cplusplus
@ -28,24 +25,24 @@ struct VP8LDecoder; // Defined in dec/vp8li.h.
typedef struct ALPHDecoder ALPHDecoder;
struct ALPHDecoder {
int width;
int height;
int method;
WEBP_FILTER_TYPE filter;
int pre_processing;
struct VP8LDecoder* vp8l_dec;
VP8Io io;
int use_8b_decode; // Although alpha channel requires only 1 byte per
int width_;
int height_;
int method_;
WEBP_FILTER_TYPE filter_;
int pre_processing_;
struct VP8LDecoder* vp8l_dec_;
VP8Io io_;
int use_8b_decode_; // Although alpha channel requires only 1 byte per
// pixel, sometimes VP8LDecoder may need to allocate
// 4 bytes per pixel internally during decode.
uint8_t* output;
const uint8_t* prev_line; // last output row (or NULL)
uint8_t* output_;
const uint8_t* prev_line_; // last output row (or NULL)
};
//------------------------------------------------------------------------------
// internal functions. Not public.
// Deallocate memory associated to dec->alpha_plane decoding
// Deallocate memory associated to dec->alpha_plane_ decoding
void WebPDeallocateAlphaMemory(VP8Decoder* const dec);
//------------------------------------------------------------------------------

View File

@ -11,16 +11,11 @@
//
// Author: Skal (pascal.massimino@gmail.com)
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include "src/dec/vp8i_dec.h"
#include "src/dec/webpi_dec.h"
#include "src/utils/rescaler_utils.h"
#include "src/utils/utils.h"
#include "src/webp/decode.h"
#include "src/webp/types.h"
//------------------------------------------------------------------------------
// WebPDecBuffer
@ -31,9 +26,10 @@ static const uint8_t kModeBpp[MODE_LAST] = {
4, 4, 4, 2, // pre-multiplied modes
1, 1 };
// Check that webp_csp_mode is within the bounds of WEBP_CSP_MODE.
// Convert to an integer to handle both the unsigned/signed enum cases
// without the need for casting to remove type limit warnings.
int IsValidColorspace(int webp_csp_mode) {
static int IsValidColorspace(int webp_csp_mode) {
return (webp_csp_mode >= MODE_RGB && webp_csp_mode < MODE_LAST);
}

View File

@ -51,7 +51,4 @@ enum { MB_FEATURE_TREE_PROBS = 3,
NUM_PROBAS = 11
};
// Check that webp_csp_mode is within the bounds of WEBP_CSP_MODE.
int IsValidColorspace(int webp_csp_mode);
#endif // WEBP_DEC_COMMON_DEC_H_

View File

@ -11,20 +11,9 @@
//
// Author: Skal (pascal.massimino@gmail.com)
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include "src/dec/common_dec.h"
#include "src/dec/vp8_dec.h"
#include "src/dec/vp8i_dec.h"
#include "src/dec/webpi_dec.h"
#include "src/dsp/dsp.h"
#include "src/utils/random_utils.h"
#include "src/utils/thread_utils.h"
#include "src/utils/utils.h"
#include "src/webp/decode.h"
#include "src/webp/types.h"
//------------------------------------------------------------------------------
// Main reconstruction function.
@ -83,11 +72,11 @@ static void ReconstructRow(const VP8Decoder* const dec,
const VP8ThreadContext* ctx) {
int j;
int mb_x;
const int mb_y = ctx->mb_y;
const int cache_id = ctx->id;
uint8_t* const y_dst = dec->yuv_b + Y_OFF;
uint8_t* const u_dst = dec->yuv_b + U_OFF;
uint8_t* const v_dst = dec->yuv_b + V_OFF;
const int mb_y = ctx->mb_y_;
const int cache_id = ctx->id_;
uint8_t* const y_dst = dec->yuv_b_ + Y_OFF;
uint8_t* const u_dst = dec->yuv_b_ + U_OFF;
uint8_t* const v_dst = dec->yuv_b_ + V_OFF;
// Initialize left-most block.
for (j = 0; j < 16; ++j) {
@ -110,8 +99,8 @@ static void ReconstructRow(const VP8Decoder* const dec,
}
// Reconstruct one row.
for (mb_x = 0; mb_x < dec->mb_w; ++mb_x) {
const VP8MBData* const block = ctx->mb_data + mb_x;
for (mb_x = 0; mb_x < dec->mb_w_; ++mb_x) {
const VP8MBData* const block = ctx->mb_data_ + mb_x;
// Rotate in the left samples from previously decoded block. We move four
// pixels at a time for alignment reason, and because of in-loop filter.
@ -126,9 +115,9 @@ static void ReconstructRow(const VP8Decoder* const dec,
}
{
// bring top samples into the cache
VP8TopSamples* const top_yuv = dec->yuv_t + mb_x;
const int16_t* const coeffs = block->coeffs;
uint32_t bits = block->non_zero_y;
VP8TopSamples* const top_yuv = dec->yuv_t_ + mb_x;
const int16_t* const coeffs = block->coeffs_;
uint32_t bits = block->non_zero_y_;
int n;
if (mb_y > 0) {
@ -138,11 +127,11 @@ static void ReconstructRow(const VP8Decoder* const dec,
}
// predict and add residuals
if (block->is_i4x4) { // 4x4
if (block->is_i4x4_) { // 4x4
uint32_t* const top_right = (uint32_t*)(y_dst - BPS + 16);
if (mb_y > 0) {
if (mb_x >= dec->mb_w - 1) { // on rightmost border
if (mb_x >= dec->mb_w_ - 1) { // on rightmost border
memset(top_right, top_yuv[0].y[15], sizeof(*top_right));
} else {
memcpy(top_right, top_yuv[1].y, sizeof(*top_right));
@ -154,11 +143,11 @@ static void ReconstructRow(const VP8Decoder* const dec,
// predict and add residuals for all 4x4 blocks in turn.
for (n = 0; n < 16; ++n, bits <<= 2) {
uint8_t* const dst = y_dst + kScan[n];
VP8PredLuma4[block->imodes[n]](dst);
VP8PredLuma4[block->imodes_[n]](dst);
DoTransform(bits, coeffs + n * 16, dst);
}
} else { // 16x16
const int pred_func = CheckMode(mb_x, mb_y, block->imodes[0]);
const int pred_func = CheckMode(mb_x, mb_y, block->imodes_[0]);
VP8PredLuma16[pred_func](y_dst);
if (bits != 0) {
for (n = 0; n < 16; ++n, bits <<= 2) {
@ -168,8 +157,8 @@ static void ReconstructRow(const VP8Decoder* const dec,
}
{
// Chroma
const uint32_t bits_uv = block->non_zero_uv;
const int pred_func = CheckMode(mb_x, mb_y, block->uvmode);
const uint32_t bits_uv = block->non_zero_uv_;
const int pred_func = CheckMode(mb_x, mb_y, block->uvmode_);
VP8PredChroma8[pred_func](u_dst);
VP8PredChroma8[pred_func](v_dst);
DoUVTransform(bits_uv >> 0, coeffs + 16 * 16, u_dst);
@ -177,25 +166,25 @@ static void ReconstructRow(const VP8Decoder* const dec,
}
// stash away top samples for next block
if (mb_y < dec->mb_h - 1) {
if (mb_y < dec->mb_h_ - 1) {
memcpy(top_yuv[0].y, y_dst + 15 * BPS, 16);
memcpy(top_yuv[0].u, u_dst + 7 * BPS, 8);
memcpy(top_yuv[0].v, v_dst + 7 * BPS, 8);
}
}
// Transfer reconstructed samples from yuv_b cache to final destination.
// Transfer reconstructed samples from yuv_b_ cache to final destination.
{
const int y_offset = cache_id * 16 * dec->cache_y_stride;
const int uv_offset = cache_id * 8 * dec->cache_uv_stride;
uint8_t* const y_out = dec->cache_y + mb_x * 16 + y_offset;
uint8_t* const u_out = dec->cache_u + mb_x * 8 + uv_offset;
uint8_t* const v_out = dec->cache_v + mb_x * 8 + uv_offset;
const int y_offset = cache_id * 16 * dec->cache_y_stride_;
const int uv_offset = cache_id * 8 * dec->cache_uv_stride_;
uint8_t* const y_out = dec->cache_y_ + mb_x * 16 + y_offset;
uint8_t* const u_out = dec->cache_u_ + mb_x * 8 + uv_offset;
uint8_t* const v_out = dec->cache_v_ + mb_x * 8 + uv_offset;
for (j = 0; j < 16; ++j) {
memcpy(y_out + j * dec->cache_y_stride, y_dst + j * BPS, 16);
memcpy(y_out + j * dec->cache_y_stride_, y_dst + j * BPS, 16);
}
for (j = 0; j < 8; ++j) {
memcpy(u_out + j * dec->cache_uv_stride, u_dst + j * BPS, 8);
memcpy(v_out + j * dec->cache_uv_stride, v_dst + j * BPS, 8);
memcpy(u_out + j * dec->cache_uv_stride_, u_dst + j * BPS, 8);
memcpy(v_out + j * dec->cache_uv_stride_, v_dst + j * BPS, 8);
}
}
}
@ -212,40 +201,40 @@ static void ReconstructRow(const VP8Decoder* const dec,
static const uint8_t kFilterExtraRows[3] = { 0, 2, 8 };
static void DoFilter(const VP8Decoder* const dec, int mb_x, int mb_y) {
const VP8ThreadContext* const ctx = &dec->thread_ctx;
const int cache_id = ctx->id;
const int y_bps = dec->cache_y_stride;
const VP8FInfo* const f_info = ctx->f_info + mb_x;
uint8_t* const y_dst = dec->cache_y + cache_id * 16 * y_bps + mb_x * 16;
const int ilevel = f_info->f_ilevel;
const int limit = f_info->f_limit;
const VP8ThreadContext* const ctx = &dec->thread_ctx_;
const int cache_id = ctx->id_;
const int y_bps = dec->cache_y_stride_;
const VP8FInfo* const f_info = ctx->f_info_ + mb_x;
uint8_t* const y_dst = dec->cache_y_ + cache_id * 16 * y_bps + mb_x * 16;
const int ilevel = f_info->f_ilevel_;
const int limit = f_info->f_limit_;
if (limit == 0) {
return;
}
assert(limit >= 3);
if (dec->filter_type == 1) { // simple
if (dec->filter_type_ == 1) { // simple
if (mb_x > 0) {
VP8SimpleHFilter16(y_dst, y_bps, limit + 4);
}
if (f_info->f_inner) {
if (f_info->f_inner_) {
VP8SimpleHFilter16i(y_dst, y_bps, limit);
}
if (mb_y > 0) {
VP8SimpleVFilter16(y_dst, y_bps, limit + 4);
}
if (f_info->f_inner) {
if (f_info->f_inner_) {
VP8SimpleVFilter16i(y_dst, y_bps, limit);
}
} else { // complex
const int uv_bps = dec->cache_uv_stride;
uint8_t* const u_dst = dec->cache_u + cache_id * 8 * uv_bps + mb_x * 8;
uint8_t* const v_dst = dec->cache_v + cache_id * 8 * uv_bps + mb_x * 8;
const int hev_thresh = f_info->hev_thresh;
const int uv_bps = dec->cache_uv_stride_;
uint8_t* const u_dst = dec->cache_u_ + cache_id * 8 * uv_bps + mb_x * 8;
uint8_t* const v_dst = dec->cache_v_ + cache_id * 8 * uv_bps + mb_x * 8;
const int hev_thresh = f_info->hev_thresh_;
if (mb_x > 0) {
VP8HFilter16(y_dst, y_bps, limit + 4, ilevel, hev_thresh);
VP8HFilter8(u_dst, v_dst, uv_bps, limit + 4, ilevel, hev_thresh);
}
if (f_info->f_inner) {
if (f_info->f_inner_) {
VP8HFilter16i(y_dst, y_bps, limit, ilevel, hev_thresh);
VP8HFilter8i(u_dst, v_dst, uv_bps, limit, ilevel, hev_thresh);
}
@ -253,7 +242,7 @@ static void DoFilter(const VP8Decoder* const dec, int mb_x, int mb_y) {
VP8VFilter16(y_dst, y_bps, limit + 4, ilevel, hev_thresh);
VP8VFilter8(u_dst, v_dst, uv_bps, limit + 4, ilevel, hev_thresh);
}
if (f_info->f_inner) {
if (f_info->f_inner_) {
VP8VFilter16i(y_dst, y_bps, limit, ilevel, hev_thresh);
VP8VFilter8i(u_dst, v_dst, uv_bps, limit, ilevel, hev_thresh);
}
@ -263,9 +252,9 @@ static void DoFilter(const VP8Decoder* const dec, int mb_x, int mb_y) {
// Filter the decoded macroblock row (if needed)
static void FilterRow(const VP8Decoder* const dec) {
int mb_x;
const int mb_y = dec->thread_ctx.mb_y;
assert(dec->thread_ctx.filter_row);
for (mb_x = dec->tl_mb_x; mb_x < dec->br_mb_x; ++mb_x) {
const int mb_y = dec->thread_ctx_.mb_y_;
assert(dec->thread_ctx_.filter_row_);
for (mb_x = dec->tl_mb_x_; mb_x < dec->br_mb_x_; ++mb_x) {
DoFilter(dec, mb_x, mb_y);
}
}
@ -274,51 +263,51 @@ static void FilterRow(const VP8Decoder* const dec) {
// Precompute the filtering strength for each segment and each i4x4/i16x16 mode.
static void PrecomputeFilterStrengths(VP8Decoder* const dec) {
if (dec->filter_type > 0) {
if (dec->filter_type_ > 0) {
int s;
const VP8FilterHeader* const hdr = &dec->filter_hdr;
const VP8FilterHeader* const hdr = &dec->filter_hdr_;
for (s = 0; s < NUM_MB_SEGMENTS; ++s) {
int i4x4;
// First, compute the initial level
int base_level;
if (dec->segment_hdr.use_segment) {
base_level = dec->segment_hdr.filter_strength[s];
if (!dec->segment_hdr.absolute_delta) {
base_level += hdr->level;
if (dec->segment_hdr_.use_segment_) {
base_level = dec->segment_hdr_.filter_strength_[s];
if (!dec->segment_hdr_.absolute_delta_) {
base_level += hdr->level_;
}
} else {
base_level = hdr->level;
base_level = hdr->level_;
}
for (i4x4 = 0; i4x4 <= 1; ++i4x4) {
VP8FInfo* const info = &dec->fstrengths[s][i4x4];
VP8FInfo* const info = &dec->fstrengths_[s][i4x4];
int level = base_level;
if (hdr->use_lf_delta) {
level += hdr->ref_lf_delta[0];
if (hdr->use_lf_delta_) {
level += hdr->ref_lf_delta_[0];
if (i4x4) {
level += hdr->mode_lf_delta[0];
level += hdr->mode_lf_delta_[0];
}
}
level = (level < 0) ? 0 : (level > 63) ? 63 : level;
if (level > 0) {
int ilevel = level;
if (hdr->sharpness > 0) {
if (hdr->sharpness > 4) {
if (hdr->sharpness_ > 0) {
if (hdr->sharpness_ > 4) {
ilevel >>= 2;
} else {
ilevel >>= 1;
}
if (ilevel > 9 - hdr->sharpness) {
ilevel = 9 - hdr->sharpness;
if (ilevel > 9 - hdr->sharpness_) {
ilevel = 9 - hdr->sharpness_;
}
}
if (ilevel < 1) ilevel = 1;
info->f_ilevel = ilevel;
info->f_limit = 2 * level + ilevel;
info->hev_thresh = (level >= 40) ? 2 : (level >= 15) ? 1 : 0;
info->f_ilevel_ = ilevel;
info->f_limit_ = 2 * level + ilevel;
info->hev_thresh_ = (level >= 40) ? 2 : (level >= 15) ? 1 : 0;
} else {
info->f_limit = 0; // no filtering
info->f_limit_ = 0; // no filtering
}
info->f_inner = i4x4;
info->f_inner_ = i4x4;
}
}
}
@ -332,7 +321,7 @@ static void PrecomputeFilterStrengths(VP8Decoder* const dec) {
#define DITHER_AMP_TAB_SIZE 12
static const uint8_t kQuantToDitherAmp[DITHER_AMP_TAB_SIZE] = {
// roughly, it's dqm->uv_mat[1]
// roughly, it's dqm->uv_mat_[1]
8, 7, 6, 4, 4, 2, 2, 2, 1, 1, 1, 1
};
@ -347,24 +336,24 @@ void VP8InitDithering(const WebPDecoderOptions* const options,
int s;
int all_amp = 0;
for (s = 0; s < NUM_MB_SEGMENTS; ++s) {
VP8QuantMatrix* const dqm = &dec->dqm[s];
if (dqm->uv_quant < DITHER_AMP_TAB_SIZE) {
const int idx = (dqm->uv_quant < 0) ? 0 : dqm->uv_quant;
dqm->dither = (f * kQuantToDitherAmp[idx]) >> 3;
VP8QuantMatrix* const dqm = &dec->dqm_[s];
if (dqm->uv_quant_ < DITHER_AMP_TAB_SIZE) {
const int idx = (dqm->uv_quant_ < 0) ? 0 : dqm->uv_quant_;
dqm->dither_ = (f * kQuantToDitherAmp[idx]) >> 3;
}
all_amp |= dqm->dither;
all_amp |= dqm->dither_;
}
if (all_amp != 0) {
VP8InitRandom(&dec->dithering_rg, 1.0f);
dec->dither = 1;
VP8InitRandom(&dec->dithering_rg_, 1.0f);
dec->dither_ = 1;
}
}
// potentially allow alpha dithering
dec->alpha_dithering = options->alpha_dithering_strength;
if (dec->alpha_dithering > 100) {
dec->alpha_dithering = 100;
} else if (dec->alpha_dithering < 0) {
dec->alpha_dithering = 0;
dec->alpha_dithering_ = options->alpha_dithering_strength;
if (dec->alpha_dithering_ > 100) {
dec->alpha_dithering_ = 100;
} else if (dec->alpha_dithering_ < 0) {
dec->alpha_dithering_ = 0;
}
}
}
@ -381,17 +370,17 @@ static void Dither8x8(VP8Random* const rg, uint8_t* dst, int bps, int amp) {
static void DitherRow(VP8Decoder* const dec) {
int mb_x;
assert(dec->dither);
for (mb_x = dec->tl_mb_x; mb_x < dec->br_mb_x; ++mb_x) {
const VP8ThreadContext* const ctx = &dec->thread_ctx;
const VP8MBData* const data = ctx->mb_data + mb_x;
const int cache_id = ctx->id;
const int uv_bps = dec->cache_uv_stride;
if (data->dither >= MIN_DITHER_AMP) {
uint8_t* const u_dst = dec->cache_u + cache_id * 8 * uv_bps + mb_x * 8;
uint8_t* const v_dst = dec->cache_v + cache_id * 8 * uv_bps + mb_x * 8;
Dither8x8(&dec->dithering_rg, u_dst, uv_bps, data->dither);
Dither8x8(&dec->dithering_rg, v_dst, uv_bps, data->dither);
assert(dec->dither_);
for (mb_x = dec->tl_mb_x_; mb_x < dec->br_mb_x_; ++mb_x) {
const VP8ThreadContext* const ctx = &dec->thread_ctx_;
const VP8MBData* const data = ctx->mb_data_ + mb_x;
const int cache_id = ctx->id_;
const int uv_bps = dec->cache_uv_stride_;
if (data->dither_ >= MIN_DITHER_AMP) {
uint8_t* const u_dst = dec->cache_u_ + cache_id * 8 * uv_bps + mb_x * 8;
uint8_t* const v_dst = dec->cache_v_ + cache_id * 8 * uv_bps + mb_x * 8;
Dither8x8(&dec->dithering_rg_, u_dst, uv_bps, data->dither_);
Dither8x8(&dec->dithering_rg_, v_dst, uv_bps, data->dither_);
}
}
}
@ -414,29 +403,29 @@ static int FinishRow(void* arg1, void* arg2) {
VP8Decoder* const dec = (VP8Decoder*)arg1;
VP8Io* const io = (VP8Io*)arg2;
int ok = 1;
const VP8ThreadContext* const ctx = &dec->thread_ctx;
const int cache_id = ctx->id;
const int extra_y_rows = kFilterExtraRows[dec->filter_type];
const int ysize = extra_y_rows * dec->cache_y_stride;
const int uvsize = (extra_y_rows / 2) * dec->cache_uv_stride;
const int y_offset = cache_id * 16 * dec->cache_y_stride;
const int uv_offset = cache_id * 8 * dec->cache_uv_stride;
uint8_t* const ydst = dec->cache_y - ysize + y_offset;
uint8_t* const udst = dec->cache_u - uvsize + uv_offset;
uint8_t* const vdst = dec->cache_v - uvsize + uv_offset;
const int mb_y = ctx->mb_y;
const VP8ThreadContext* const ctx = &dec->thread_ctx_;
const int cache_id = ctx->id_;
const int extra_y_rows = kFilterExtraRows[dec->filter_type_];
const int ysize = extra_y_rows * dec->cache_y_stride_;
const int uvsize = (extra_y_rows / 2) * dec->cache_uv_stride_;
const int y_offset = cache_id * 16 * dec->cache_y_stride_;
const int uv_offset = cache_id * 8 * dec->cache_uv_stride_;
uint8_t* const ydst = dec->cache_y_ - ysize + y_offset;
uint8_t* const udst = dec->cache_u_ - uvsize + uv_offset;
uint8_t* const vdst = dec->cache_v_ - uvsize + uv_offset;
const int mb_y = ctx->mb_y_;
const int is_first_row = (mb_y == 0);
const int is_last_row = (mb_y >= dec->br_mb_y - 1);
const int is_last_row = (mb_y >= dec->br_mb_y_ - 1);
if (dec->mt_method == 2) {
if (dec->mt_method_ == 2) {
ReconstructRow(dec, ctx);
}
if (ctx->filter_row) {
if (ctx->filter_row_) {
FilterRow(dec);
}
if (dec->dither) {
if (dec->dither_) {
DitherRow(dec);
}
@ -449,9 +438,9 @@ static int FinishRow(void* arg1, void* arg2) {
io->u = udst;
io->v = vdst;
} else {
io->y = dec->cache_y + y_offset;
io->u = dec->cache_u + uv_offset;
io->v = dec->cache_v + uv_offset;
io->y = dec->cache_y_ + y_offset;
io->u = dec->cache_u_ + uv_offset;
io->v = dec->cache_v_ + uv_offset;
}
if (!is_last_row) {
@ -460,9 +449,9 @@ static int FinishRow(void* arg1, void* arg2) {
if (y_end > io->crop_bottom) {
y_end = io->crop_bottom; // make sure we don't overflow on last row.
}
// If dec->alpha_data is not NULL, we have some alpha plane present.
// If dec->alpha_data_ is not NULL, we have some alpha plane present.
io->a = NULL;
if (dec->alpha_data != NULL && y_start < y_end) {
if (dec->alpha_data_ != NULL && y_start < y_end) {
io->a = VP8DecompressAlphaRows(dec, io, y_start, y_end - y_start);
if (io->a == NULL) {
return VP8SetError(dec, VP8_STATUS_BITSTREAM_ERROR,
@ -473,9 +462,9 @@ static int FinishRow(void* arg1, void* arg2) {
const int delta_y = io->crop_top - y_start;
y_start = io->crop_top;
assert(!(delta_y & 1));
io->y += dec->cache_y_stride * delta_y;
io->u += dec->cache_uv_stride * (delta_y >> 1);
io->v += dec->cache_uv_stride * (delta_y >> 1);
io->y += dec->cache_y_stride_ * delta_y;
io->u += dec->cache_uv_stride_ * (delta_y >> 1);
io->v += dec->cache_uv_stride_ * (delta_y >> 1);
if (io->a != NULL) {
io->a += io->width * delta_y;
}
@ -494,11 +483,11 @@ static int FinishRow(void* arg1, void* arg2) {
}
}
// rotate top samples if needed
if (cache_id + 1 == dec->num_caches) {
if (cache_id + 1 == dec->num_caches_) {
if (!is_last_row) {
memcpy(dec->cache_y - ysize, ydst + 16 * dec->cache_y_stride, ysize);
memcpy(dec->cache_u - uvsize, udst + 8 * dec->cache_uv_stride, uvsize);
memcpy(dec->cache_v - uvsize, vdst + 8 * dec->cache_uv_stride, uvsize);
memcpy(dec->cache_y_ - ysize, ydst + 16 * dec->cache_y_stride_, ysize);
memcpy(dec->cache_u_ - uvsize, udst + 8 * dec->cache_uv_stride_, uvsize);
memcpy(dec->cache_v_ - uvsize, vdst + 8 * dec->cache_uv_stride_, uvsize);
}
}
@ -511,43 +500,43 @@ static int FinishRow(void* arg1, void* arg2) {
int VP8ProcessRow(VP8Decoder* const dec, VP8Io* const io) {
int ok = 1;
VP8ThreadContext* const ctx = &dec->thread_ctx;
VP8ThreadContext* const ctx = &dec->thread_ctx_;
const int filter_row =
(dec->filter_type > 0) &&
(dec->mb_y >= dec->tl_mb_y) && (dec->mb_y <= dec->br_mb_y);
if (dec->mt_method == 0) {
// ctx->id and ctx->f_info are already set
ctx->mb_y = dec->mb_y;
ctx->filter_row = filter_row;
(dec->filter_type_ > 0) &&
(dec->mb_y_ >= dec->tl_mb_y_) && (dec->mb_y_ <= dec->br_mb_y_);
if (dec->mt_method_ == 0) {
// ctx->id_ and ctx->f_info_ are already set
ctx->mb_y_ = dec->mb_y_;
ctx->filter_row_ = filter_row;
ReconstructRow(dec, ctx);
ok = FinishRow(dec, io);
} else {
WebPWorker* const worker = &dec->worker;
WebPWorker* const worker = &dec->worker_;
// Finish previous job *before* updating context
ok &= WebPGetWorkerInterface()->Sync(worker);
assert(worker->status == OK);
assert(worker->status_ == OK);
if (ok) { // spawn a new deblocking/output job
ctx->io = *io;
ctx->id = dec->cache_id;
ctx->mb_y = dec->mb_y;
ctx->filter_row = filter_row;
if (dec->mt_method == 2) { // swap macroblock data
VP8MBData* const tmp = ctx->mb_data;
ctx->mb_data = dec->mb_data;
dec->mb_data = tmp;
ctx->io_ = *io;
ctx->id_ = dec->cache_id_;
ctx->mb_y_ = dec->mb_y_;
ctx->filter_row_ = filter_row;
if (dec->mt_method_ == 2) { // swap macroblock data
VP8MBData* const tmp = ctx->mb_data_;
ctx->mb_data_ = dec->mb_data_;
dec->mb_data_ = tmp;
} else {
// perform reconstruction directly in main thread
ReconstructRow(dec, ctx);
}
if (filter_row) { // swap filter info
VP8FInfo* const tmp = ctx->f_info;
ctx->f_info = dec->f_info;
dec->f_info = tmp;
VP8FInfo* const tmp = ctx->f_info_;
ctx->f_info_ = dec->f_info_;
dec->f_info_ = tmp;
}
// (reconstruct)+filter in parallel
WebPGetWorkerInterface()->Launch(worker);
if (++dec->cache_id == dec->num_caches) {
dec->cache_id = 0;
if (++dec->cache_id_ == dec->num_caches_) {
dec->cache_id_ = 0;
}
}
}
@ -562,12 +551,12 @@ VP8StatusCode VP8EnterCritical(VP8Decoder* const dec, VP8Io* const io) {
// Note: Afterward, we must call teardown() no matter what.
if (io->setup != NULL && !io->setup(io)) {
VP8SetError(dec, VP8_STATUS_USER_ABORT, "Frame setup failed");
return dec->status;
return dec->status_;
}
// Disable filtering per user request
if (io->bypass_filtering) {
dec->filter_type = 0;
dec->filter_type_ = 0;
}
// Define the area where we can skip in-loop filtering, in case of cropping.
@ -580,29 +569,29 @@ VP8StatusCode VP8EnterCritical(VP8Decoder* const dec, VP8Io* const io) {
// top-left corner of the picture (MB #0). We must filter all the previous
// macroblocks.
{
const int extra_pixels = kFilterExtraRows[dec->filter_type];
if (dec->filter_type == 2) {
const int extra_pixels = kFilterExtraRows[dec->filter_type_];
if (dec->filter_type_ == 2) {
// For complex filter, we need to preserve the dependency chain.
dec->tl_mb_x = 0;
dec->tl_mb_y = 0;
dec->tl_mb_x_ = 0;
dec->tl_mb_y_ = 0;
} else {
// For simple filter, we can filter only the cropped region.
// We include 'extra_pixels' on the other side of the boundary, since
// vertical or horizontal filtering of the previous macroblock can
// modify some abutting pixels.
dec->tl_mb_x = (io->crop_left - extra_pixels) >> 4;
dec->tl_mb_y = (io->crop_top - extra_pixels) >> 4;
if (dec->tl_mb_x < 0) dec->tl_mb_x = 0;
if (dec->tl_mb_y < 0) dec->tl_mb_y = 0;
dec->tl_mb_x_ = (io->crop_left - extra_pixels) >> 4;
dec->tl_mb_y_ = (io->crop_top - extra_pixels) >> 4;
if (dec->tl_mb_x_ < 0) dec->tl_mb_x_ = 0;
if (dec->tl_mb_y_ < 0) dec->tl_mb_y_ = 0;
}
// We need some 'extra' pixels on the right/bottom.
dec->br_mb_y = (io->crop_bottom + 15 + extra_pixels) >> 4;
dec->br_mb_x = (io->crop_right + 15 + extra_pixels) >> 4;
if (dec->br_mb_x > dec->mb_w) {
dec->br_mb_x = dec->mb_w;
dec->br_mb_y_ = (io->crop_bottom + 15 + extra_pixels) >> 4;
dec->br_mb_x_ = (io->crop_right + 15 + extra_pixels) >> 4;
if (dec->br_mb_x_ > dec->mb_w_) {
dec->br_mb_x_ = dec->mb_w_;
}
if (dec->br_mb_y > dec->mb_h) {
dec->br_mb_y = dec->mb_h;
if (dec->br_mb_y_ > dec->mb_h_) {
dec->br_mb_y_ = dec->mb_h_;
}
}
PrecomputeFilterStrengths(dec);
@ -611,8 +600,8 @@ VP8StatusCode VP8EnterCritical(VP8Decoder* const dec, VP8Io* const io) {
int VP8ExitCritical(VP8Decoder* const dec, VP8Io* const io) {
int ok = 1;
if (dec->mt_method > 0) {
ok = WebPGetWorkerInterface()->Sync(&dec->worker);
if (dec->mt_method_ > 0) {
ok = WebPGetWorkerInterface()->Sync(&dec->worker_);
}
if (io->teardown != NULL) {
@ -650,20 +639,20 @@ int VP8ExitCritical(VP8Decoder* const dec, VP8Io* const io) {
// Initialize multi/single-thread worker
static int InitThreadContext(VP8Decoder* const dec) {
dec->cache_id = 0;
if (dec->mt_method > 0) {
WebPWorker* const worker = &dec->worker;
dec->cache_id_ = 0;
if (dec->mt_method_ > 0) {
WebPWorker* const worker = &dec->worker_;
if (!WebPGetWorkerInterface()->Reset(worker)) {
return VP8SetError(dec, VP8_STATUS_OUT_OF_MEMORY,
"thread initialization failed.");
}
worker->data1 = dec;
worker->data2 = (void*)&dec->thread_ctx.io;
worker->data2 = (void*)&dec->thread_ctx_.io_;
worker->hook = FinishRow;
dec->num_caches =
(dec->filter_type > 0) ? MT_CACHE_LINES : MT_CACHE_LINES - 1;
dec->num_caches_ =
(dec->filter_type_ > 0) ? MT_CACHE_LINES : MT_CACHE_LINES - 1;
} else {
dec->num_caches = ST_CACHE_LINES;
dec->num_caches_ = ST_CACHE_LINES;
}
return 1;
}
@ -691,25 +680,25 @@ int VP8GetThreadMethod(const WebPDecoderOptions* const options,
// Memory setup
static int AllocateMemory(VP8Decoder* const dec) {
const int num_caches = dec->num_caches;
const int mb_w = dec->mb_w;
const int num_caches = dec->num_caches_;
const int mb_w = dec->mb_w_;
// Note: we use 'size_t' when there's no overflow risk, uint64_t otherwise.
const size_t intra_pred_mode_size = 4 * mb_w * sizeof(uint8_t);
const size_t top_size = sizeof(VP8TopSamples) * mb_w;
const size_t mb_info_size = (mb_w + 1) * sizeof(VP8MB);
const size_t f_info_size =
(dec->filter_type > 0) ?
mb_w * (dec->mt_method > 0 ? 2 : 1) * sizeof(VP8FInfo)
(dec->filter_type_ > 0) ?
mb_w * (dec->mt_method_ > 0 ? 2 : 1) * sizeof(VP8FInfo)
: 0;
const size_t yuv_size = YUV_SIZE * sizeof(*dec->yuv_b);
const size_t yuv_size = YUV_SIZE * sizeof(*dec->yuv_b_);
const size_t mb_data_size =
(dec->mt_method == 2 ? 2 : 1) * mb_w * sizeof(*dec->mb_data);
(dec->mt_method_ == 2 ? 2 : 1) * mb_w * sizeof(*dec->mb_data_);
const size_t cache_height = (16 * num_caches
+ kFilterExtraRows[dec->filter_type]) * 3 / 2;
+ kFilterExtraRows[dec->filter_type_]) * 3 / 2;
const size_t cache_size = top_size * cache_height;
// alpha_size is the only one that scales as width x height.
const uint64_t alpha_size = (dec->alpha_data != NULL) ?
(uint64_t)dec->pic_hdr.width * dec->pic_hdr.height : 0ULL;
const uint64_t alpha_size = (dec->alpha_data_ != NULL) ?
(uint64_t)dec->pic_hdr_.width_ * dec->pic_hdr_.height_ : 0ULL;
const uint64_t needed = (uint64_t)intra_pred_mode_size
+ top_size + mb_info_size + f_info_size
+ yuv_size + mb_data_size
@ -717,77 +706,77 @@ static int AllocateMemory(VP8Decoder* const dec) {
uint8_t* mem;
if (!CheckSizeOverflow(needed)) return 0; // check for overflow
if (needed > dec->mem_size) {
WebPSafeFree(dec->mem);
dec->mem_size = 0;
dec->mem = WebPSafeMalloc(needed, sizeof(uint8_t));
if (dec->mem == NULL) {
if (needed > dec->mem_size_) {
WebPSafeFree(dec->mem_);
dec->mem_size_ = 0;
dec->mem_ = WebPSafeMalloc(needed, sizeof(uint8_t));
if (dec->mem_ == NULL) {
return VP8SetError(dec, VP8_STATUS_OUT_OF_MEMORY,
"no memory during frame initialization.");
}
// down-cast is ok, thanks to WebPSafeMalloc() above.
dec->mem_size = (size_t)needed;
dec->mem_size_ = (size_t)needed;
}
mem = (uint8_t*)dec->mem;
dec->intra_t = mem;
mem = (uint8_t*)dec->mem_;
dec->intra_t_ = mem;
mem += intra_pred_mode_size;
dec->yuv_t = (VP8TopSamples*)mem;
dec->yuv_t_ = (VP8TopSamples*)mem;
mem += top_size;
dec->mb_info = ((VP8MB*)mem) + 1;
dec->mb_info_ = ((VP8MB*)mem) + 1;
mem += mb_info_size;
dec->f_info = f_info_size ? (VP8FInfo*)mem : NULL;
dec->f_info_ = f_info_size ? (VP8FInfo*)mem : NULL;
mem += f_info_size;
dec->thread_ctx.id = 0;
dec->thread_ctx.f_info = dec->f_info;
if (dec->filter_type > 0 && dec->mt_method > 0) {
dec->thread_ctx_.id_ = 0;
dec->thread_ctx_.f_info_ = dec->f_info_;
if (dec->filter_type_ > 0 && dec->mt_method_ > 0) {
// secondary cache line. The deblocking process need to make use of the
// filtering strength from previous macroblock row, while the new ones
// are being decoded in parallel. We'll just swap the pointers.
dec->thread_ctx.f_info += mb_w;
dec->thread_ctx_.f_info_ += mb_w;
}
mem = (uint8_t*)WEBP_ALIGN(mem);
assert((yuv_size & WEBP_ALIGN_CST) == 0);
dec->yuv_b = mem;
dec->yuv_b_ = mem;
mem += yuv_size;
dec->mb_data = (VP8MBData*)mem;
dec->thread_ctx.mb_data = (VP8MBData*)mem;
if (dec->mt_method == 2) {
dec->thread_ctx.mb_data += mb_w;
dec->mb_data_ = (VP8MBData*)mem;
dec->thread_ctx_.mb_data_ = (VP8MBData*)mem;
if (dec->mt_method_ == 2) {
dec->thread_ctx_.mb_data_ += mb_w;
}
mem += mb_data_size;
dec->cache_y_stride = 16 * mb_w;
dec->cache_uv_stride = 8 * mb_w;
dec->cache_y_stride_ = 16 * mb_w;
dec->cache_uv_stride_ = 8 * mb_w;
{
const int extra_rows = kFilterExtraRows[dec->filter_type];
const int extra_y = extra_rows * dec->cache_y_stride;
const int extra_uv = (extra_rows / 2) * dec->cache_uv_stride;
dec->cache_y = mem + extra_y;
dec->cache_u = dec->cache_y
+ 16 * num_caches * dec->cache_y_stride + extra_uv;
dec->cache_v = dec->cache_u
+ 8 * num_caches * dec->cache_uv_stride + extra_uv;
dec->cache_id = 0;
const int extra_rows = kFilterExtraRows[dec->filter_type_];
const int extra_y = extra_rows * dec->cache_y_stride_;
const int extra_uv = (extra_rows / 2) * dec->cache_uv_stride_;
dec->cache_y_ = mem + extra_y;
dec->cache_u_ = dec->cache_y_
+ 16 * num_caches * dec->cache_y_stride_ + extra_uv;
dec->cache_v_ = dec->cache_u_
+ 8 * num_caches * dec->cache_uv_stride_ + extra_uv;
dec->cache_id_ = 0;
}
mem += cache_size;
// alpha plane
dec->alpha_plane = alpha_size ? mem : NULL;
dec->alpha_plane_ = alpha_size ? mem : NULL;
mem += alpha_size;
assert(mem <= (uint8_t*)dec->mem + dec->mem_size);
assert(mem <= (uint8_t*)dec->mem_ + dec->mem_size_);
// note: left/top-info is initialized once for all.
memset(dec->mb_info - 1, 0, mb_info_size);
memset(dec->mb_info_ - 1, 0, mb_info_size);
VP8InitScanline(dec); // initialize left too.
// initialize top
memset(dec->intra_t, B_DC_PRED, intra_pred_mode_size);
memset(dec->intra_t_, B_DC_PRED, intra_pred_mode_size);
return 1;
}
@ -795,16 +784,16 @@ static int AllocateMemory(VP8Decoder* const dec) {
static void InitIo(VP8Decoder* const dec, VP8Io* io) {
// prepare 'io'
io->mb_y = 0;
io->y = dec->cache_y;
io->u = dec->cache_u;
io->v = dec->cache_v;
io->y_stride = dec->cache_y_stride;
io->uv_stride = dec->cache_uv_stride;
io->y = dec->cache_y_;
io->u = dec->cache_u_;
io->v = dec->cache_v_;
io->y_stride = dec->cache_y_stride_;
io->uv_stride = dec->cache_uv_stride_;
io->a = NULL;
}
int VP8InitFrame(VP8Decoder* const dec, VP8Io* const io) {
if (!InitThreadContext(dec)) return 0; // call first. Sets dec->num_caches.
if (!InitThreadContext(dec)) return 0; // call first. Sets dec->num_caches_.
if (!AllocateMemory(dec)) return 0;
InitIo(dec, io);
VP8DspInit(); // Init critical function pointers and look-up tables.

View File

@ -12,20 +12,15 @@
// Author: somnath@google.com (Somnath Banerjee)
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <stdlib.h>
#include "src/dec/alphai_dec.h"
#include "src/dec/webpi_dec.h"
#include "src/dec/vp8_dec.h"
#include "src/dec/vp8i_dec.h"
#include "src/dec/vp8li_dec.h"
#include "src/dec/webpi_dec.h"
#include "src/utils/bit_reader_utils.h"
#include "src/utils/thread_utils.h"
#include "src/utils/utils.h"
#include "src/webp/decode.h"
#include "src/webp/format_constants.h"
#include "src/webp/types.h"
// In append mode, buffer allocations increase as multiples of this value.
// Needs to be a power of 2.
@ -59,140 +54,134 @@ typedef enum {
// storage for partition #0 and partial data (in a rolling fashion)
typedef struct {
MemBufferMode mode; // Operation mode
size_t start; // start location of the data to be decoded
size_t end; // end location
size_t buf_size; // size of the allocated buffer
uint8_t* buf; // We don't own this buffer in case WebPIUpdate()
MemBufferMode mode_; // Operation mode
size_t start_; // start location of the data to be decoded
size_t end_; // end location
size_t buf_size_; // size of the allocated buffer
uint8_t* buf_; // We don't own this buffer in case WebPIUpdate()
size_t part0_size; // size of partition #0
const uint8_t* part0_buf; // buffer to store partition #0
size_t part0_size_; // size of partition #0
const uint8_t* part0_buf_; // buffer to store partition #0
} MemBuffer;
struct WebPIDecoder {
DecState state; // current decoding state
WebPDecParams params; // Params to store output info
int is_lossless; // for down-casting 'dec'.
void* dec; // either a VP8Decoder or a VP8LDecoder instance
VP8Io io;
DecState state_; // current decoding state
WebPDecParams params_; // Params to store output info
int is_lossless_; // for down-casting 'dec_'.
void* dec_; // either a VP8Decoder or a VP8LDecoder instance
VP8Io io_;
MemBuffer mem; // input memory buffer.
WebPDecBuffer output; // output buffer (when no external one is supplied,
// or if the external one has slow-memory)
WebPDecBuffer* final_output; // Slow-memory output to copy to eventually.
size_t chunk_size; // Compressed VP8/VP8L size extracted from Header.
MemBuffer mem_; // input memory buffer.
WebPDecBuffer output_; // output buffer (when no external one is supplied,
// or if the external one has slow-memory)
WebPDecBuffer* final_output_; // Slow-memory output to copy to eventually.
size_t chunk_size_; // Compressed VP8/VP8L size extracted from Header.
int last_mb_y; // last row reached for intra-mode decoding
int last_mb_y_; // last row reached for intra-mode decoding
};
// MB context to restore in case VP8DecodeMB() fails
typedef struct {
VP8MB left;
VP8MB info;
VP8BitReader token_br;
VP8MB left_;
VP8MB info_;
VP8BitReader token_br_;
} MBContext;
//------------------------------------------------------------------------------
// MemBuffer: incoming data handling
static WEBP_INLINE size_t MemDataSize(const MemBuffer* mem) {
return (mem->end - mem->start);
return (mem->end_ - mem->start_);
}
// Check if we need to preserve the compressed alpha data, as it may not have
// been decoded yet.
static int NeedCompressedAlpha(const WebPIDecoder* const idec) {
if (idec->state == STATE_WEBP_HEADER) {
if (idec->state_ == STATE_WEBP_HEADER) {
// We haven't parsed the headers yet, so we don't know whether the image is
// lossy or lossless. This also means that we haven't parsed the ALPH chunk.
return 0;
}
if (idec->is_lossless) {
if (idec->is_lossless_) {
return 0; // ALPH chunk is not present for lossless images.
} else {
const VP8Decoder* const dec = (VP8Decoder*)idec->dec;
assert(dec != NULL); // Must be true as idec->state != STATE_WEBP_HEADER.
return (dec->alpha_data != NULL) && !dec->is_alpha_decoded;
const VP8Decoder* const dec = (VP8Decoder*)idec->dec_;
assert(dec != NULL); // Must be true as idec->state_ != STATE_WEBP_HEADER.
return (dec->alpha_data_ != NULL) && !dec->is_alpha_decoded_;
}
}
static void DoRemap(WebPIDecoder* const idec, ptrdiff_t offset) {
MemBuffer* const mem = &idec->mem;
const uint8_t* const new_base = mem->buf + mem->start;
// note: for VP8, setting up idec->io is only really needed at the beginning
MemBuffer* const mem = &idec->mem_;
const uint8_t* const new_base = mem->buf_ + mem->start_;
// note: for VP8, setting up idec->io_ is only really needed at the beginning
// of the decoding, till partition #0 is complete.
idec->io.data = new_base;
idec->io.data_size = MemDataSize(mem);
idec->io_.data = new_base;
idec->io_.data_size = MemDataSize(mem);
if (idec->dec != NULL) {
if (!idec->is_lossless) {
VP8Decoder* const dec = (VP8Decoder*)idec->dec;
const uint32_t last_part = dec->num_parts_minus_one;
if (idec->dec_ != NULL) {
if (!idec->is_lossless_) {
VP8Decoder* const dec = (VP8Decoder*)idec->dec_;
const uint32_t last_part = dec->num_parts_minus_one_;
if (offset != 0) {
uint32_t p;
for (p = 0; p <= last_part; ++p) {
VP8RemapBitReader(dec->parts + p, offset);
VP8RemapBitReader(dec->parts_ + p, offset);
}
// Remap partition #0 data pointer to new offset, but only in MAP
// mode (in APPEND mode, partition #0 is copied into a fixed memory).
if (mem->mode == MEM_MODE_MAP) {
VP8RemapBitReader(&dec->br, offset);
if (mem->mode_ == MEM_MODE_MAP) {
VP8RemapBitReader(&dec->br_, offset);
}
}
{
const uint8_t* const last_start = dec->parts[last_part].buf;
// 'last_start' will be NULL when 'idec->state' is < STATE_VP8_PARTS0
// and through a portion of that state (when there isn't enough data to
// parse the partitions). The bitreader is only used meaningfully when
// there is enough data to begin parsing partition 0.
if (last_start != NULL) {
VP8BitReaderSetBuffer(&dec->parts[last_part], last_start,
mem->buf + mem->end - last_start);
}
const uint8_t* const last_start = dec->parts_[last_part].buf_;
VP8BitReaderSetBuffer(&dec->parts_[last_part], last_start,
mem->buf_ + mem->end_ - last_start);
}
if (NeedCompressedAlpha(idec)) {
ALPHDecoder* const alph_dec = dec->alph_dec;
dec->alpha_data += offset;
if (alph_dec != NULL && alph_dec->vp8l_dec != NULL) {
if (alph_dec->method == ALPHA_LOSSLESS_COMPRESSION) {
VP8LDecoder* const alph_vp8l_dec = alph_dec->vp8l_dec;
assert(dec->alpha_data_size >= ALPHA_HEADER_LEN);
VP8LBitReaderSetBuffer(&alph_vp8l_dec->br,
dec->alpha_data + ALPHA_HEADER_LEN,
dec->alpha_data_size - ALPHA_HEADER_LEN);
} else { // alph_dec->method == ALPHA_NO_COMPRESSION
ALPHDecoder* const alph_dec = dec->alph_dec_;
dec->alpha_data_ += offset;
if (alph_dec != NULL && alph_dec->vp8l_dec_ != NULL) {
if (alph_dec->method_ == ALPHA_LOSSLESS_COMPRESSION) {
VP8LDecoder* const alph_vp8l_dec = alph_dec->vp8l_dec_;
assert(dec->alpha_data_size_ >= ALPHA_HEADER_LEN);
VP8LBitReaderSetBuffer(&alph_vp8l_dec->br_,
dec->alpha_data_ + ALPHA_HEADER_LEN,
dec->alpha_data_size_ - ALPHA_HEADER_LEN);
} else { // alph_dec->method_ == ALPHA_NO_COMPRESSION
// Nothing special to do in this case.
}
}
}
} else { // Resize lossless bitreader
VP8LDecoder* const dec = (VP8LDecoder*)idec->dec;
VP8LBitReaderSetBuffer(&dec->br, new_base, MemDataSize(mem));
VP8LDecoder* const dec = (VP8LDecoder*)idec->dec_;
VP8LBitReaderSetBuffer(&dec->br_, new_base, MemDataSize(mem));
}
}
}
// Appends data to the end of MemBuffer->buf. It expands the allocated memory
// Appends data to the end of MemBuffer->buf_. It expands the allocated memory
// size if required and also updates VP8BitReader's if new memory is allocated.
WEBP_NODISCARD static int AppendToMemBuffer(WebPIDecoder* const idec,
const uint8_t* const data,
size_t data_size) {
VP8Decoder* const dec = (VP8Decoder*)idec->dec;
MemBuffer* const mem = &idec->mem;
VP8Decoder* const dec = (VP8Decoder*)idec->dec_;
MemBuffer* const mem = &idec->mem_;
const int need_compressed_alpha = NeedCompressedAlpha(idec);
const uint8_t* const old_start =
(mem->buf == NULL) ? NULL : mem->buf + mem->start;
(mem->buf_ == NULL) ? NULL : mem->buf_ + mem->start_;
const uint8_t* const old_base =
need_compressed_alpha ? dec->alpha_data : old_start;
assert(mem->buf != NULL || mem->start == 0);
assert(mem->mode == MEM_MODE_APPEND);
need_compressed_alpha ? dec->alpha_data_ : old_start;
assert(mem->buf_ != NULL || mem->start_ == 0);
assert(mem->mode_ == MEM_MODE_APPEND);
if (data_size > MAX_CHUNK_PAYLOAD) {
// security safeguard: trying to allocate more than what the format
// allows for a chunk should be considered a smoke smell.
return 0;
}
if (mem->end + data_size > mem->buf_size) { // Need some free memory
if (mem->end_ + data_size > mem->buf_size_) { // Need some free memory
const size_t new_mem_start = old_start - old_base;
const size_t current_size = MemDataSize(mem) + new_mem_start;
const uint64_t new_size = (uint64_t)current_size + data_size;
@ -201,85 +190,85 @@ WEBP_NODISCARD static int AppendToMemBuffer(WebPIDecoder* const idec,
(uint8_t*)WebPSafeMalloc(extra_size, sizeof(*new_buf));
if (new_buf == NULL) return 0;
if (old_base != NULL) memcpy(new_buf, old_base, current_size);
WebPSafeFree(mem->buf);
mem->buf = new_buf;
mem->buf_size = (size_t)extra_size;
mem->start = new_mem_start;
mem->end = current_size;
WebPSafeFree(mem->buf_);
mem->buf_ = new_buf;
mem->buf_size_ = (size_t)extra_size;
mem->start_ = new_mem_start;
mem->end_ = current_size;
}
assert(mem->buf != NULL);
memcpy(mem->buf + mem->end, data, data_size);
mem->end += data_size;
assert(mem->end <= mem->buf_size);
assert(mem->buf_ != NULL);
memcpy(mem->buf_ + mem->end_, data, data_size);
mem->end_ += data_size;
assert(mem->end_ <= mem->buf_size_);
DoRemap(idec, mem->buf + mem->start - old_start);
DoRemap(idec, mem->buf_ + mem->start_ - old_start);
return 1;
}
WEBP_NODISCARD static int RemapMemBuffer(WebPIDecoder* const idec,
const uint8_t* const data,
size_t data_size) {
MemBuffer* const mem = &idec->mem;
const uint8_t* const old_buf = mem->buf;
MemBuffer* const mem = &idec->mem_;
const uint8_t* const old_buf = mem->buf_;
const uint8_t* const old_start =
(old_buf == NULL) ? NULL : old_buf + mem->start;
assert(old_buf != NULL || mem->start == 0);
assert(mem->mode == MEM_MODE_MAP);
(old_buf == NULL) ? NULL : old_buf + mem->start_;
assert(old_buf != NULL || mem->start_ == 0);
assert(mem->mode_ == MEM_MODE_MAP);
if (data_size < mem->buf_size) return 0; // can't remap to a shorter buffer!
if (data_size < mem->buf_size_) return 0; // can't remap to a shorter buffer!
mem->buf = (uint8_t*)data;
mem->end = mem->buf_size = data_size;
mem->buf_ = (uint8_t*)data;
mem->end_ = mem->buf_size_ = data_size;
DoRemap(idec, mem->buf + mem->start - old_start);
DoRemap(idec, mem->buf_ + mem->start_ - old_start);
return 1;
}
static void InitMemBuffer(MemBuffer* const mem) {
mem->mode = MEM_MODE_NONE;
mem->buf = NULL;
mem->buf_size = 0;
mem->part0_buf = NULL;
mem->part0_size = 0;
mem->mode_ = MEM_MODE_NONE;
mem->buf_ = NULL;
mem->buf_size_ = 0;
mem->part0_buf_ = NULL;
mem->part0_size_ = 0;
}
static void ClearMemBuffer(MemBuffer* const mem) {
assert(mem);
if (mem->mode == MEM_MODE_APPEND) {
WebPSafeFree(mem->buf);
WebPSafeFree((void*)mem->part0_buf);
if (mem->mode_ == MEM_MODE_APPEND) {
WebPSafeFree(mem->buf_);
WebPSafeFree((void*)mem->part0_buf_);
}
}
WEBP_NODISCARD static int CheckMemBufferMode(MemBuffer* const mem,
MemBufferMode expected) {
if (mem->mode == MEM_MODE_NONE) {
mem->mode = expected; // switch to the expected mode
} else if (mem->mode != expected) {
if (mem->mode_ == MEM_MODE_NONE) {
mem->mode_ = expected; // switch to the expected mode
} else if (mem->mode_ != expected) {
return 0; // we mixed the modes => error
}
assert(mem->mode == expected); // mode is ok
assert(mem->mode_ == expected); // mode is ok
return 1;
}
// To be called last.
WEBP_NODISCARD static VP8StatusCode FinishDecoding(WebPIDecoder* const idec) {
const WebPDecoderOptions* const options = idec->params.options;
WebPDecBuffer* const output = idec->params.output;
const WebPDecoderOptions* const options = idec->params_.options;
WebPDecBuffer* const output = idec->params_.output;
idec->state = STATE_DONE;
idec->state_ = STATE_DONE;
if (options != NULL && options->flip) {
const VP8StatusCode status = WebPFlipBuffer(output);
if (status != VP8_STATUS_OK) return status;
}
if (idec->final_output != NULL) {
if (idec->final_output_ != NULL) {
const VP8StatusCode status = WebPCopyDecBufferPixels(
output, idec->final_output); // do the slow-copy
WebPFreeDecBuffer(&idec->output);
output, idec->final_output_); // do the slow-copy
WebPFreeDecBuffer(&idec->output_);
if (status != VP8_STATUS_OK) return status;
*output = *idec->final_output;
idec->final_output = NULL;
*output = *idec->final_output_;
idec->final_output_ = NULL;
}
return VP8_STATUS_OK;
}
@ -289,43 +278,43 @@ WEBP_NODISCARD static VP8StatusCode FinishDecoding(WebPIDecoder* const idec) {
static void SaveContext(const VP8Decoder* dec, const VP8BitReader* token_br,
MBContext* const context) {
context->left = dec->mb_info[-1];
context->info = dec->mb_info[dec->mb_x];
context->token_br = *token_br;
context->left_ = dec->mb_info_[-1];
context->info_ = dec->mb_info_[dec->mb_x_];
context->token_br_ = *token_br;
}
static void RestoreContext(const MBContext* context, VP8Decoder* const dec,
VP8BitReader* const token_br) {
dec->mb_info[-1] = context->left;
dec->mb_info[dec->mb_x] = context->info;
*token_br = context->token_br;
dec->mb_info_[-1] = context->left_;
dec->mb_info_[dec->mb_x_] = context->info_;
*token_br = context->token_br_;
}
//------------------------------------------------------------------------------
static VP8StatusCode IDecError(WebPIDecoder* const idec, VP8StatusCode error) {
if (idec->state == STATE_VP8_DATA) {
if (idec->state_ == STATE_VP8_DATA) {
// Synchronize the thread, clean-up and check for errors.
(void)VP8ExitCritical((VP8Decoder*)idec->dec, &idec->io);
(void)VP8ExitCritical((VP8Decoder*)idec->dec_, &idec->io_);
}
idec->state = STATE_ERROR;
idec->state_ = STATE_ERROR;
return error;
}
static void ChangeState(WebPIDecoder* const idec, DecState new_state,
size_t consumed_bytes) {
MemBuffer* const mem = &idec->mem;
idec->state = new_state;
mem->start += consumed_bytes;
assert(mem->start <= mem->end);
idec->io.data = mem->buf + mem->start;
idec->io.data_size = MemDataSize(mem);
MemBuffer* const mem = &idec->mem_;
idec->state_ = new_state;
mem->start_ += consumed_bytes;
assert(mem->start_ <= mem->end_);
idec->io_.data = mem->buf_ + mem->start_;
idec->io_.data_size = MemDataSize(mem);
}
// Headers
static VP8StatusCode DecodeWebPHeaders(WebPIDecoder* const idec) {
MemBuffer* const mem = &idec->mem;
const uint8_t* data = mem->buf + mem->start;
MemBuffer* const mem = &idec->mem_;
const uint8_t* data = mem->buf_ + mem->start_;
size_t curr_size = MemDataSize(mem);
VP8StatusCode status;
WebPHeaderStructure headers;
@ -340,32 +329,32 @@ static VP8StatusCode DecodeWebPHeaders(WebPIDecoder* const idec) {
return IDecError(idec, status);
}
idec->chunk_size = headers.compressed_size;
idec->is_lossless = headers.is_lossless;
if (!idec->is_lossless) {
idec->chunk_size_ = headers.compressed_size;
idec->is_lossless_ = headers.is_lossless;
if (!idec->is_lossless_) {
VP8Decoder* const dec = VP8New();
if (dec == NULL) {
return VP8_STATUS_OUT_OF_MEMORY;
}
dec->incremental = 1;
idec->dec = dec;
dec->alpha_data = headers.alpha_data;
dec->alpha_data_size = headers.alpha_data_size;
dec->incremental_ = 1;
idec->dec_ = dec;
dec->alpha_data_ = headers.alpha_data;
dec->alpha_data_size_ = headers.alpha_data_size;
ChangeState(idec, STATE_VP8_HEADER, headers.offset);
} else {
VP8LDecoder* const dec = VP8LNew();
if (dec == NULL) {
return VP8_STATUS_OUT_OF_MEMORY;
}
idec->dec = dec;
idec->dec_ = dec;
ChangeState(idec, STATE_VP8L_HEADER, headers.offset);
}
return VP8_STATUS_OK;
}
static VP8StatusCode DecodeVP8FrameHeader(WebPIDecoder* const idec) {
const uint8_t* data = idec->mem.buf + idec->mem.start;
const size_t curr_size = MemDataSize(&idec->mem);
const uint8_t* data = idec->mem_.buf_ + idec->mem_.start_;
const size_t curr_size = MemDataSize(&idec->mem_);
int width, height;
uint32_t bits;
@ -373,61 +362,61 @@ static VP8StatusCode DecodeVP8FrameHeader(WebPIDecoder* const idec) {
// Not enough data bytes to extract VP8 Frame Header.
return VP8_STATUS_SUSPENDED;
}
if (!VP8GetInfo(data, curr_size, idec->chunk_size, &width, &height)) {
if (!VP8GetInfo(data, curr_size, idec->chunk_size_, &width, &height)) {
return IDecError(idec, VP8_STATUS_BITSTREAM_ERROR);
}
bits = data[0] | (data[1] << 8) | (data[2] << 16);
idec->mem.part0_size = (bits >> 5) + VP8_FRAME_HEADER_SIZE;
idec->mem_.part0_size_ = (bits >> 5) + VP8_FRAME_HEADER_SIZE;
idec->io.data = data;
idec->io.data_size = curr_size;
idec->state = STATE_VP8_PARTS0;
idec->io_.data = data;
idec->io_.data_size = curr_size;
idec->state_ = STATE_VP8_PARTS0;
return VP8_STATUS_OK;
}
// Partition #0
static VP8StatusCode CopyParts0Data(WebPIDecoder* const idec) {
VP8Decoder* const dec = (VP8Decoder*)idec->dec;
VP8BitReader* const br = &dec->br;
const size_t part_size = br->buf_end - br->buf;
MemBuffer* const mem = &idec->mem;
assert(!idec->is_lossless);
assert(mem->part0_buf == NULL);
VP8Decoder* const dec = (VP8Decoder*)idec->dec_;
VP8BitReader* const br = &dec->br_;
const size_t part_size = br->buf_end_ - br->buf_;
MemBuffer* const mem = &idec->mem_;
assert(!idec->is_lossless_);
assert(mem->part0_buf_ == NULL);
// the following is a format limitation, no need for runtime check:
assert(part_size <= mem->part0_size);
assert(part_size <= mem->part0_size_);
if (part_size == 0) { // can't have zero-size partition #0
return VP8_STATUS_BITSTREAM_ERROR;
}
if (mem->mode == MEM_MODE_APPEND) {
if (mem->mode_ == MEM_MODE_APPEND) {
// We copy and grab ownership of the partition #0 data.
uint8_t* const part0_buf = (uint8_t*)WebPSafeMalloc(1ULL, part_size);
if (part0_buf == NULL) {
return VP8_STATUS_OUT_OF_MEMORY;
}
memcpy(part0_buf, br->buf, part_size);
mem->part0_buf = part0_buf;
memcpy(part0_buf, br->buf_, part_size);
mem->part0_buf_ = part0_buf;
VP8BitReaderSetBuffer(br, part0_buf, part_size);
} else {
// Else: just keep pointers to the partition #0's data in dec->br.
// Else: just keep pointers to the partition #0's data in dec_->br_.
}
mem->start += part_size;
mem->start_ += part_size;
return VP8_STATUS_OK;
}
static VP8StatusCode DecodePartition0(WebPIDecoder* const idec) {
VP8Decoder* const dec = (VP8Decoder*)idec->dec;
VP8Io* const io = &idec->io;
const WebPDecParams* const params = &idec->params;
VP8Decoder* const dec = (VP8Decoder*)idec->dec_;
VP8Io* const io = &idec->io_;
const WebPDecParams* const params = &idec->params_;
WebPDecBuffer* const output = params->output;
// Wait till we have enough data for the whole partition #0
if (MemDataSize(&idec->mem) < idec->mem.part0_size) {
if (MemDataSize(&idec->mem_) < idec->mem_.part0_size_) {
return VP8_STATUS_SUSPENDED;
}
if (!VP8GetHeaders(dec, io)) {
const VP8StatusCode status = dec->status;
const VP8StatusCode status = dec->status_;
if (status == VP8_STATUS_SUSPENDED ||
status == VP8_STATUS_NOT_ENOUGH_DATA) {
// treating NOT_ENOUGH_DATA as SUSPENDED state
@ -437,69 +426,69 @@ static VP8StatusCode DecodePartition0(WebPIDecoder* const idec) {
}
// Allocate/Verify output buffer now
dec->status = WebPAllocateDecBuffer(io->width, io->height, params->options,
output);
if (dec->status != VP8_STATUS_OK) {
return IDecError(idec, dec->status);
dec->status_ = WebPAllocateDecBuffer(io->width, io->height, params->options,
output);
if (dec->status_ != VP8_STATUS_OK) {
return IDecError(idec, dec->status_);
}
// This change must be done before calling VP8InitFrame()
dec->mt_method = VP8GetThreadMethod(params->options, NULL,
io->width, io->height);
dec->mt_method_ = VP8GetThreadMethod(params->options, NULL,
io->width, io->height);
VP8InitDithering(params->options, dec);
dec->status = CopyParts0Data(idec);
if (dec->status != VP8_STATUS_OK) {
return IDecError(idec, dec->status);
dec->status_ = CopyParts0Data(idec);
if (dec->status_ != VP8_STATUS_OK) {
return IDecError(idec, dec->status_);
}
// Finish setting up the decoding parameters. Will call io->setup().
if (VP8EnterCritical(dec, io) != VP8_STATUS_OK) {
return IDecError(idec, dec->status);
return IDecError(idec, dec->status_);
}
// Note: past this point, teardown() must always be called
// in case of error.
idec->state = STATE_VP8_DATA;
idec->state_ = STATE_VP8_DATA;
// Allocate memory and prepare everything.
if (!VP8InitFrame(dec, io)) {
return IDecError(idec, dec->status);
return IDecError(idec, dec->status_);
}
return VP8_STATUS_OK;
}
// Remaining partitions
static VP8StatusCode DecodeRemaining(WebPIDecoder* const idec) {
VP8Decoder* const dec = (VP8Decoder*)idec->dec;
VP8Io* const io = &idec->io;
VP8Decoder* const dec = (VP8Decoder*)idec->dec_;
VP8Io* const io = &idec->io_;
// Make sure partition #0 has been read before, to set dec to ready.
if (!dec->ready) {
// Make sure partition #0 has been read before, to set dec to ready_.
if (!dec->ready_) {
return IDecError(idec, VP8_STATUS_BITSTREAM_ERROR);
}
for (; dec->mb_y < dec->mb_h; ++dec->mb_y) {
if (idec->last_mb_y != dec->mb_y) {
if (!VP8ParseIntraModeRow(&dec->br, dec)) {
for (; dec->mb_y_ < dec->mb_h_; ++dec->mb_y_) {
if (idec->last_mb_y_ != dec->mb_y_) {
if (!VP8ParseIntraModeRow(&dec->br_, dec)) {
// note: normally, error shouldn't occur since we already have the whole
// partition0 available here in DecodeRemaining(). Reaching EOF while
// reading intra modes really means a BITSTREAM_ERROR.
return IDecError(idec, VP8_STATUS_BITSTREAM_ERROR);
}
idec->last_mb_y = dec->mb_y;
idec->last_mb_y_ = dec->mb_y_;
}
for (; dec->mb_x < dec->mb_w; ++dec->mb_x) {
for (; dec->mb_x_ < dec->mb_w_; ++dec->mb_x_) {
VP8BitReader* const token_br =
&dec->parts[dec->mb_y & dec->num_parts_minus_one];
&dec->parts_[dec->mb_y_ & dec->num_parts_minus_one_];
MBContext context;
SaveContext(dec, token_br, &context);
if (!VP8DecodeMB(dec, token_br)) {
// We shouldn't fail when MAX_MB data was available
if (dec->num_parts_minus_one == 0 &&
MemDataSize(&idec->mem) > MAX_MB_SIZE) {
if (dec->num_parts_minus_one_ == 0 &&
MemDataSize(&idec->mem_) > MAX_MB_SIZE) {
return IDecError(idec, VP8_STATUS_BITSTREAM_ERROR);
}
// Synchronize the threads.
if (dec->mt_method > 0) {
if (!WebPGetWorkerInterface()->Sync(&dec->worker)) {
if (dec->mt_method_ > 0) {
if (!WebPGetWorkerInterface()->Sync(&dec->worker_)) {
return IDecError(idec, VP8_STATUS_BITSTREAM_ERROR);
}
}
@ -507,9 +496,9 @@ static VP8StatusCode DecodeRemaining(WebPIDecoder* const idec) {
return VP8_STATUS_SUSPENDED;
}
// Release buffer only if there is only one partition
if (dec->num_parts_minus_one == 0) {
idec->mem.start = token_br->buf - idec->mem.buf;
assert(idec->mem.start <= idec->mem.end);
if (dec->num_parts_minus_one_ == 0) {
idec->mem_.start_ = token_br->buf_ - idec->mem_.buf_;
assert(idec->mem_.start_ <= idec->mem_.end_);
}
}
VP8InitScanline(dec); // Prepare for next scanline
@ -521,10 +510,10 @@ static VP8StatusCode DecodeRemaining(WebPIDecoder* const idec) {
}
// Synchronize the thread and check for errors.
if (!VP8ExitCritical(dec, io)) {
idec->state = STATE_ERROR; // prevent re-entry in IDecError
idec->state_ = STATE_ERROR; // prevent re-entry in IDecError
return IDecError(idec, VP8_STATUS_USER_ABORT);
}
dec->ready = 0;
dec->ready_ = 0;
return FinishDecoding(idec);
}
@ -537,81 +526,81 @@ static VP8StatusCode ErrorStatusLossless(WebPIDecoder* const idec,
}
static VP8StatusCode DecodeVP8LHeader(WebPIDecoder* const idec) {
VP8Io* const io = &idec->io;
VP8LDecoder* const dec = (VP8LDecoder*)idec->dec;
const WebPDecParams* const params = &idec->params;
VP8Io* const io = &idec->io_;
VP8LDecoder* const dec = (VP8LDecoder*)idec->dec_;
const WebPDecParams* const params = &idec->params_;
WebPDecBuffer* const output = params->output;
size_t curr_size = MemDataSize(&idec->mem);
assert(idec->is_lossless);
size_t curr_size = MemDataSize(&idec->mem_);
assert(idec->is_lossless_);
// Wait until there's enough data for decoding header.
if (curr_size < (idec->chunk_size >> 3)) {
dec->status = VP8_STATUS_SUSPENDED;
return ErrorStatusLossless(idec, dec->status);
if (curr_size < (idec->chunk_size_ >> 3)) {
dec->status_ = VP8_STATUS_SUSPENDED;
return ErrorStatusLossless(idec, dec->status_);
}
if (!VP8LDecodeHeader(dec, io)) {
if (dec->status == VP8_STATUS_BITSTREAM_ERROR &&
curr_size < idec->chunk_size) {
dec->status = VP8_STATUS_SUSPENDED;
if (dec->status_ == VP8_STATUS_BITSTREAM_ERROR &&
curr_size < idec->chunk_size_) {
dec->status_ = VP8_STATUS_SUSPENDED;
}
return ErrorStatusLossless(idec, dec->status);
return ErrorStatusLossless(idec, dec->status_);
}
// Allocate/verify output buffer now.
dec->status = WebPAllocateDecBuffer(io->width, io->height, params->options,
output);
if (dec->status != VP8_STATUS_OK) {
return IDecError(idec, dec->status);
dec->status_ = WebPAllocateDecBuffer(io->width, io->height, params->options,
output);
if (dec->status_ != VP8_STATUS_OK) {
return IDecError(idec, dec->status_);
}
idec->state = STATE_VP8L_DATA;
idec->state_ = STATE_VP8L_DATA;
return VP8_STATUS_OK;
}
static VP8StatusCode DecodeVP8LData(WebPIDecoder* const idec) {
VP8LDecoder* const dec = (VP8LDecoder*)idec->dec;
const size_t curr_size = MemDataSize(&idec->mem);
assert(idec->is_lossless);
VP8LDecoder* const dec = (VP8LDecoder*)idec->dec_;
const size_t curr_size = MemDataSize(&idec->mem_);
assert(idec->is_lossless_);
// Switch to incremental decoding if we don't have all the bytes available.
dec->incremental = (curr_size < idec->chunk_size);
dec->incremental_ = (curr_size < idec->chunk_size_);
if (!VP8LDecodeImage(dec)) {
return ErrorStatusLossless(idec, dec->status);
return ErrorStatusLossless(idec, dec->status_);
}
assert(dec->status == VP8_STATUS_OK || dec->status == VP8_STATUS_SUSPENDED);
return (dec->status == VP8_STATUS_SUSPENDED) ? dec->status
: FinishDecoding(idec);
assert(dec->status_ == VP8_STATUS_OK || dec->status_ == VP8_STATUS_SUSPENDED);
return (dec->status_ == VP8_STATUS_SUSPENDED) ? dec->status_
: FinishDecoding(idec);
}
// Main decoding loop
static VP8StatusCode IDecode(WebPIDecoder* idec) {
VP8StatusCode status = VP8_STATUS_SUSPENDED;
if (idec->state == STATE_WEBP_HEADER) {
if (idec->state_ == STATE_WEBP_HEADER) {
status = DecodeWebPHeaders(idec);
} else {
if (idec->dec == NULL) {
if (idec->dec_ == NULL) {
return VP8_STATUS_SUSPENDED; // can't continue if we have no decoder.
}
}
if (idec->state == STATE_VP8_HEADER) {
if (idec->state_ == STATE_VP8_HEADER) {
status = DecodeVP8FrameHeader(idec);
}
if (idec->state == STATE_VP8_PARTS0) {
if (idec->state_ == STATE_VP8_PARTS0) {
status = DecodePartition0(idec);
}
if (idec->state == STATE_VP8_DATA) {
const VP8Decoder* const dec = (VP8Decoder*)idec->dec;
if (idec->state_ == STATE_VP8_DATA) {
const VP8Decoder* const dec = (VP8Decoder*)idec->dec_;
if (dec == NULL) {
return VP8_STATUS_SUSPENDED; // can't continue if we have no decoder.
}
status = DecodeRemaining(idec);
}
if (idec->state == STATE_VP8L_HEADER) {
if (idec->state_ == STATE_VP8L_HEADER) {
status = DecodeVP8LHeader(idec);
}
if (idec->state == STATE_VP8L_DATA) {
if (idec->state_ == STATE_VP8L_DATA) {
status = DecodeVP8LData(idec);
}
return status;
@ -628,29 +617,29 @@ WEBP_NODISCARD static WebPIDecoder* NewDecoder(
return NULL;
}
idec->state = STATE_WEBP_HEADER;
idec->chunk_size = 0;
idec->state_ = STATE_WEBP_HEADER;
idec->chunk_size_ = 0;
idec->last_mb_y = -1;
idec->last_mb_y_ = -1;
InitMemBuffer(&idec->mem);
if (!WebPInitDecBuffer(&idec->output) || !VP8InitIo(&idec->io)) {
InitMemBuffer(&idec->mem_);
if (!WebPInitDecBuffer(&idec->output_) || !VP8InitIo(&idec->io_)) {
WebPSafeFree(idec);
return NULL;
}
WebPResetDecParams(&idec->params);
WebPResetDecParams(&idec->params_);
if (output_buffer == NULL || WebPAvoidSlowMemory(output_buffer, features)) {
idec->params.output = &idec->output;
idec->final_output = output_buffer;
idec->params_.output = &idec->output_;
idec->final_output_ = output_buffer;
if (output_buffer != NULL) {
idec->params.output->colorspace = output_buffer->colorspace;
idec->params_.output->colorspace = output_buffer->colorspace;
}
} else {
idec->params.output = output_buffer;
idec->final_output = NULL;
idec->params_.output = output_buffer;
idec->final_output_ = NULL;
}
WebPInitCustomIo(&idec->params, &idec->io); // Plug the I/O functions.
WebPInitCustomIo(&idec->params_, &idec->io_); // Plug the I/O functions.
return idec;
}
@ -685,27 +674,27 @@ WebPIDecoder* WebPIDecode(const uint8_t* data, size_t data_size,
}
// Finish initialization
if (config != NULL) {
idec->params.options = &config->options;
idec->params_.options = &config->options;
}
return idec;
}
void WebPIDelete(WebPIDecoder* idec) {
if (idec == NULL) return;
if (idec->dec != NULL) {
if (!idec->is_lossless) {
if (idec->state == STATE_VP8_DATA) {
if (idec->dec_ != NULL) {
if (!idec->is_lossless_) {
if (idec->state_ == STATE_VP8_DATA) {
// Synchronize the thread, clean-up and check for errors.
// TODO(vrabaud) do we care about the return result?
(void)VP8ExitCritical((VP8Decoder*)idec->dec, &idec->io);
(void)VP8ExitCritical((VP8Decoder*)idec->dec_, &idec->io_);
}
VP8Delete((VP8Decoder*)idec->dec);
VP8Delete((VP8Decoder*)idec->dec_);
} else {
VP8LDelete((VP8LDecoder*)idec->dec);
VP8LDelete((VP8LDecoder*)idec->dec_);
}
}
ClearMemBuffer(&idec->mem);
WebPFreeDecBuffer(&idec->output);
ClearMemBuffer(&idec->mem_);
WebPFreeDecBuffer(&idec->output_);
WebPSafeFree(idec);
}
@ -728,11 +717,11 @@ WebPIDecoder* WebPINewRGB(WEBP_CSP_MODE csp, uint8_t* output_buffer,
}
idec = WebPINewDecoder(NULL);
if (idec == NULL) return NULL;
idec->output.colorspace = csp;
idec->output.is_external_memory = is_external_memory;
idec->output.u.RGBA.rgba = output_buffer;
idec->output.u.RGBA.stride = output_stride;
idec->output.u.RGBA.size = output_buffer_size;
idec->output_.colorspace = csp;
idec->output_.is_external_memory = is_external_memory;
idec->output_.u.RGBA.rgba = output_buffer;
idec->output_.u.RGBA.stride = output_stride;
idec->output_.u.RGBA.size = output_buffer_size;
return idec;
}
@ -762,20 +751,20 @@ WebPIDecoder* WebPINewYUVA(uint8_t* luma, size_t luma_size, int luma_stride,
idec = WebPINewDecoder(NULL);
if (idec == NULL) return NULL;
idec->output.colorspace = colorspace;
idec->output.is_external_memory = is_external_memory;
idec->output.u.YUVA.y = luma;
idec->output.u.YUVA.y_stride = luma_stride;
idec->output.u.YUVA.y_size = luma_size;
idec->output.u.YUVA.u = u;
idec->output.u.YUVA.u_stride = u_stride;
idec->output.u.YUVA.u_size = u_size;
idec->output.u.YUVA.v = v;
idec->output.u.YUVA.v_stride = v_stride;
idec->output.u.YUVA.v_size = v_size;
idec->output.u.YUVA.a = a;
idec->output.u.YUVA.a_stride = a_stride;
idec->output.u.YUVA.a_size = a_size;
idec->output_.colorspace = colorspace;
idec->output_.is_external_memory = is_external_memory;
idec->output_.u.YUVA.y = luma;
idec->output_.u.YUVA.y_stride = luma_stride;
idec->output_.u.YUVA.y_size = luma_size;
idec->output_.u.YUVA.u = u;
idec->output_.u.YUVA.u_stride = u_stride;
idec->output_.u.YUVA.u_size = u_size;
idec->output_.u.YUVA.v = v;
idec->output_.u.YUVA.v_stride = v_stride;
idec->output_.u.YUVA.v_size = v_size;
idec->output_.u.YUVA.a = a;
idec->output_.u.YUVA.a_stride = a_stride;
idec->output_.u.YUVA.a_size = a_size;
return idec;
}
@ -792,10 +781,10 @@ WebPIDecoder* WebPINewYUV(uint8_t* luma, size_t luma_size, int luma_stride,
static VP8StatusCode IDecCheckStatus(const WebPIDecoder* const idec) {
assert(idec);
if (idec->state == STATE_ERROR) {
if (idec->state_ == STATE_ERROR) {
return VP8_STATUS_BITSTREAM_ERROR;
}
if (idec->state == STATE_DONE) {
if (idec->state_ == STATE_DONE) {
return VP8_STATUS_OK;
}
return VP8_STATUS_SUSPENDED;
@ -812,7 +801,7 @@ VP8StatusCode WebPIAppend(WebPIDecoder* idec,
return status;
}
// Check mixed calls between RemapMemBuffer and AppendToMemBuffer.
if (!CheckMemBufferMode(&idec->mem, MEM_MODE_APPEND)) {
if (!CheckMemBufferMode(&idec->mem_, MEM_MODE_APPEND)) {
return VP8_STATUS_INVALID_PARAM;
}
// Append data to memory buffer
@ -833,7 +822,7 @@ VP8StatusCode WebPIUpdate(WebPIDecoder* idec,
return status;
}
// Check mixed calls between RemapMemBuffer and AppendToMemBuffer.
if (!CheckMemBufferMode(&idec->mem, MEM_MODE_MAP)) {
if (!CheckMemBufferMode(&idec->mem_, MEM_MODE_MAP)) {
return VP8_STATUS_INVALID_PARAM;
}
// Make the memory buffer point to the new buffer
@ -846,16 +835,16 @@ VP8StatusCode WebPIUpdate(WebPIDecoder* idec,
//------------------------------------------------------------------------------
static const WebPDecBuffer* GetOutputBuffer(const WebPIDecoder* const idec) {
if (idec == NULL || idec->dec == NULL) {
if (idec == NULL || idec->dec_ == NULL) {
return NULL;
}
if (idec->state <= STATE_VP8_PARTS0) {
if (idec->state_ <= STATE_VP8_PARTS0) {
return NULL;
}
if (idec->final_output != NULL) {
if (idec->final_output_ != NULL) {
return NULL; // not yet slow-copied
}
return idec->params.output;
return idec->params_.output;
}
const WebPDecBuffer* WebPIDecodedArea(const WebPIDecoder* idec,
@ -866,7 +855,7 @@ const WebPDecBuffer* WebPIDecodedArea(const WebPIDecoder* idec,
if (top != NULL) *top = 0;
if (src != NULL) {
if (width != NULL) *width = src->width;
if (height != NULL) *height = idec->params.last_y;
if (height != NULL) *height = idec->params_.last_y;
} else {
if (width != NULL) *width = 0;
if (height != NULL) *height = 0;
@ -882,7 +871,7 @@ WEBP_NODISCARD uint8_t* WebPIDecGetRGB(const WebPIDecoder* idec, int* last_y,
return NULL;
}
if (last_y != NULL) *last_y = idec->params.last_y;
if (last_y != NULL) *last_y = idec->params_.last_y;
if (width != NULL) *width = src->width;
if (height != NULL) *height = src->height;
if (stride != NULL) *stride = src->u.RGBA.stride;
@ -900,7 +889,7 @@ WEBP_NODISCARD uint8_t* WebPIDecGetYUVA(const WebPIDecoder* idec, int* last_y,
return NULL;
}
if (last_y != NULL) *last_y = idec->params.last_y;
if (last_y != NULL) *last_y = idec->params_.last_y;
if (u != NULL) *u = src->u.YUVA.u;
if (v != NULL) *v = src->u.YUVA.v;
if (a != NULL) *a = src->u.YUVA.a;
@ -918,14 +907,14 @@ int WebPISetIOHooks(WebPIDecoder* const idec,
VP8IoSetupHook setup,
VP8IoTeardownHook teardown,
void* user_data) {
if (idec == NULL || idec->state > STATE_WEBP_HEADER) {
if (idec == NULL || idec->state_ > STATE_WEBP_HEADER) {
return 0;
}
idec->io.put = put;
idec->io.setup = setup;
idec->io.teardown = teardown;
idec->io.opaque = user_data;
idec->io_.put = put;
idec->io_.setup = setup;
idec->io_.teardown = teardown;
idec->io_.opaque = user_data;
return 1;
}

View File

@ -12,20 +12,12 @@
// Author: Skal (pascal.massimino@gmail.com)
#include <assert.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include "src/dec/vp8_dec.h"
#include "src/webp/types.h"
#include "src/dec/vp8i_dec.h"
#include "src/dec/webpi_dec.h"
#include "src/dsp/cpu.h"
#include "src/dsp/dsp.h"
#include "src/dsp/yuv.h"
#include "src/utils/rescaler_utils.h"
#include "src/utils/utils.h"
#include "src/webp/decode.h"
//------------------------------------------------------------------------------
// Main YUV<->RGB conversion functions
@ -33,9 +25,9 @@
static int EmitYUV(const VP8Io* const io, WebPDecParams* const p) {
WebPDecBuffer* output = p->output;
const WebPYUVABuffer* const buf = &output->u.YUVA;
uint8_t* const y_dst = buf->y + (ptrdiff_t)io->mb_y * buf->y_stride;
uint8_t* const u_dst = buf->u + (ptrdiff_t)(io->mb_y >> 1) * buf->u_stride;
uint8_t* const v_dst = buf->v + (ptrdiff_t)(io->mb_y >> 1) * buf->v_stride;
uint8_t* const y_dst = buf->y + (size_t)io->mb_y * buf->y_stride;
uint8_t* const u_dst = buf->u + (size_t)(io->mb_y >> 1) * buf->u_stride;
uint8_t* const v_dst = buf->v + (size_t)(io->mb_y >> 1) * buf->v_stride;
const int mb_w = io->mb_w;
const int mb_h = io->mb_h;
const int uv_w = (mb_w + 1) / 2;
@ -50,7 +42,7 @@ static int EmitYUV(const VP8Io* const io, WebPDecParams* const p) {
static int EmitSampledRGB(const VP8Io* const io, WebPDecParams* const p) {
WebPDecBuffer* const output = p->output;
WebPRGBABuffer* const buf = &output->u.RGBA;
uint8_t* const dst = buf->rgba + (ptrdiff_t)io->mb_y * buf->stride;
uint8_t* const dst = buf->rgba + (size_t)io->mb_y * buf->stride;
WebPSamplerProcessPlane(io->y, io->y_stride,
io->u, io->v, io->uv_stride,
dst, buf->stride, io->mb_w, io->mb_h,
@ -65,7 +57,7 @@ static int EmitSampledRGB(const VP8Io* const io, WebPDecParams* const p) {
static int EmitFancyRGB(const VP8Io* const io, WebPDecParams* const p) {
int num_lines_out = io->mb_h; // a priori guess
const WebPRGBABuffer* const buf = &p->output->u.RGBA;
uint8_t* dst = buf->rgba + (ptrdiff_t)io->mb_y * buf->stride;
uint8_t* dst = buf->rgba + (size_t)io->mb_y * buf->stride;
WebPUpsampleLinePairFunc upsample = WebPUpsamplers[p->output->colorspace];
const uint8_t* cur_y = io->y;
const uint8_t* cur_u = io->u;
@ -136,7 +128,7 @@ static int EmitAlphaYUV(const VP8Io* const io, WebPDecParams* const p,
const WebPYUVABuffer* const buf = &p->output->u.YUVA;
const int mb_w = io->mb_w;
const int mb_h = io->mb_h;
uint8_t* dst = buf->a + (ptrdiff_t)io->mb_y * buf->a_stride;
uint8_t* dst = buf->a + (size_t)io->mb_y * buf->a_stride;
int j;
(void)expected_num_lines_out;
assert(expected_num_lines_out == mb_h);
@ -189,8 +181,8 @@ static int EmitAlphaRGB(const VP8Io* const io, WebPDecParams* const p,
(colorspace == MODE_ARGB || colorspace == MODE_Argb);
const WebPRGBABuffer* const buf = &p->output->u.RGBA;
int num_rows;
const int start_y = GetAlphaSourceRow(io, &alpha, &num_rows);
uint8_t* const base_rgba = buf->rgba + (ptrdiff_t)start_y * buf->stride;
const size_t start_y = GetAlphaSourceRow(io, &alpha, &num_rows);
uint8_t* const base_rgba = buf->rgba + start_y * buf->stride;
uint8_t* const dst = base_rgba + (alpha_first ? 0 : 3);
const int has_alpha = WebPDispatchAlpha(alpha, io->width, mb_w,
num_rows, dst, buf->stride);
@ -213,8 +205,8 @@ static int EmitAlphaRGBA4444(const VP8Io* const io, WebPDecParams* const p,
const WEBP_CSP_MODE colorspace = p->output->colorspace;
const WebPRGBABuffer* const buf = &p->output->u.RGBA;
int num_rows;
const int start_y = GetAlphaSourceRow(io, &alpha, &num_rows);
uint8_t* const base_rgba = buf->rgba + (ptrdiff_t)start_y * buf->stride;
const size_t start_y = GetAlphaSourceRow(io, &alpha, &num_rows);
uint8_t* const base_rgba = buf->rgba + start_y * buf->stride;
#if (WEBP_SWAP_16BIT_CSP == 1)
uint8_t* alpha_dst = base_rgba;
#else
@ -265,7 +257,7 @@ static int EmitRescaledYUV(const VP8Io* const io, WebPDecParams* const p) {
if (WebPIsAlphaMode(p->output->colorspace) && io->a != NULL) {
// Before rescaling, we premultiply the luma directly into the io->y
// internal buffer. This is OK since these samples are not used for
// intra-prediction (the top samples are saved in cache_y/u/v).
// intra-prediction (the top samples are saved in cache_y_/u_/v_).
// But we need to cast the const away, though.
WebPMultRows((uint8_t*)io->y, io->y_stride,
io->a, io->width, io->mb_w, mb_h, 0);
@ -279,9 +271,9 @@ static int EmitRescaledYUV(const VP8Io* const io, WebPDecParams* const p) {
static int EmitRescaledAlphaYUV(const VP8Io* const io, WebPDecParams* const p,
int expected_num_lines_out) {
const WebPYUVABuffer* const buf = &p->output->u.YUVA;
uint8_t* const dst_a = buf->a + (ptrdiff_t)p->last_y * buf->a_stride;
uint8_t* const dst_a = buf->a + (size_t)p->last_y * buf->a_stride;
if (io->a != NULL) {
uint8_t* const dst_y = buf->y + (ptrdiff_t)p->last_y * buf->y_stride;
uint8_t* const dst_y = buf->y + (size_t)p->last_y * buf->y_stride;
const int num_lines_out = Rescale(io->a, io->width, io->mb_h, p->scaler_a);
assert(expected_num_lines_out == num_lines_out);
if (num_lines_out > 0) { // unmultiply the Y
@ -370,7 +362,7 @@ static int ExportRGB(WebPDecParams* const p, int y_pos) {
const WebPYUV444Converter convert =
WebPYUV444Converters[p->output->colorspace];
const WebPRGBABuffer* const buf = &p->output->u.RGBA;
uint8_t* dst = buf->rgba + (ptrdiff_t)y_pos * buf->stride;
uint8_t* dst = buf->rgba + (size_t)y_pos * buf->stride;
int num_lines_out = 0;
// For RGB rescaling, because of the YUV420, current scan position
// U/V can be +1/-1 line from the Y one. Hence the double test.
@ -397,14 +389,14 @@ static int EmitRescaledRGB(const VP8Io* const io, WebPDecParams* const p) {
while (j < mb_h) {
const int y_lines_in =
WebPRescalerImport(p->scaler_y, mb_h - j,
io->y + (ptrdiff_t)j * io->y_stride, io->y_stride);
io->y + (size_t)j * io->y_stride, io->y_stride);
j += y_lines_in;
if (WebPRescaleNeededLines(p->scaler_u, uv_mb_h - uv_j)) {
const int u_lines_in = WebPRescalerImport(
p->scaler_u, uv_mb_h - uv_j, io->u + (ptrdiff_t)uv_j * io->uv_stride,
p->scaler_u, uv_mb_h - uv_j, io->u + (size_t)uv_j * io->uv_stride,
io->uv_stride);
const int v_lines_in = WebPRescalerImport(
p->scaler_v, uv_mb_h - uv_j, io->v + (ptrdiff_t)uv_j * io->uv_stride,
p->scaler_v, uv_mb_h - uv_j, io->v + (size_t)uv_j * io->uv_stride,
io->uv_stride);
(void)v_lines_in; // remove a gcc warning
assert(u_lines_in == v_lines_in);
@ -417,7 +409,7 @@ static int EmitRescaledRGB(const VP8Io* const io, WebPDecParams* const p) {
static int ExportAlpha(WebPDecParams* const p, int y_pos, int max_lines_out) {
const WebPRGBABuffer* const buf = &p->output->u.RGBA;
uint8_t* const base_rgba = buf->rgba + (ptrdiff_t)y_pos * buf->stride;
uint8_t* const base_rgba = buf->rgba + (size_t)y_pos * buf->stride;
const WEBP_CSP_MODE colorspace = p->output->colorspace;
const int alpha_first =
(colorspace == MODE_ARGB || colorspace == MODE_Argb);
@ -445,7 +437,7 @@ static int ExportAlpha(WebPDecParams* const p, int y_pos, int max_lines_out) {
static int ExportAlphaRGBA4444(WebPDecParams* const p, int y_pos,
int max_lines_out) {
const WebPRGBABuffer* const buf = &p->output->u.RGBA;
uint8_t* const base_rgba = buf->rgba + (ptrdiff_t)y_pos * buf->stride;
uint8_t* const base_rgba = buf->rgba + (size_t)y_pos * buf->stride;
#if (WEBP_SWAP_16BIT_CSP == 1)
uint8_t* alpha_dst = base_rgba;
#else
@ -484,7 +476,7 @@ static int EmitRescaledAlphaRGB(const VP8Io* const io, WebPDecParams* const p,
int lines_left = expected_num_out_lines;
const int y_end = p->last_y + lines_left;
while (lines_left > 0) {
const int64_t row_offset = (ptrdiff_t)scaler->src_y - io->mb_y;
const int64_t row_offset = (int64_t)scaler->src_y - io->mb_y;
WebPRescalerImport(scaler, io->mb_h + io->mb_y - scaler->src_y,
io->a + row_offset * io->width, io->width);
lines_left -= p->emit_alpha_row(p, y_end - lines_left, lines_left);

View File

@ -11,11 +11,7 @@
//
// Author: Skal (pascal.massimino@gmail.com)
#include "src/dec/common_dec.h"
#include "src/dec/vp8_dec.h"
#include "src/dec/vp8i_dec.h"
#include "src/utils/bit_reader_utils.h"
#include "src/webp/types.h"
static WEBP_INLINE int clip(int v, int M) {
return v < 0 ? 0 : v > M ? M : v;
@ -64,7 +60,7 @@ static const uint16_t kAcTable[128] = {
// Paragraph 9.6
void VP8ParseQuant(VP8Decoder* const dec) {
VP8BitReader* const br = &dec->br;
VP8BitReader* const br = &dec->br_;
const int base_q0 = VP8GetValue(br, 7, "global-header");
const int dqy1_dc = VP8Get(br, "global-header") ?
VP8GetSignedValue(br, 4, "global-header") : 0;
@ -77,42 +73,43 @@ void VP8ParseQuant(VP8Decoder* const dec) {
const int dquv_ac = VP8Get(br, "global-header") ?
VP8GetSignedValue(br, 4, "global-header") : 0;
const VP8SegmentHeader* const hdr = &dec->segment_hdr;
const VP8SegmentHeader* const hdr = &dec->segment_hdr_;
int i;
for (i = 0; i < NUM_MB_SEGMENTS; ++i) {
int q;
if (hdr->use_segment) {
q = hdr->quantizer[i];
if (!hdr->absolute_delta) {
if (hdr->use_segment_) {
q = hdr->quantizer_[i];
if (!hdr->absolute_delta_) {
q += base_q0;
}
} else {
if (i > 0) {
dec->dqm[i] = dec->dqm[0];
dec->dqm_[i] = dec->dqm_[0];
continue;
} else {
q = base_q0;
}
}
{
VP8QuantMatrix* const m = &dec->dqm[i];
m->y1_mat[0] = kDcTable[clip(q + dqy1_dc, 127)];
m->y1_mat[1] = kAcTable[clip(q + 0, 127)];
VP8QuantMatrix* const m = &dec->dqm_[i];
m->y1_mat_[0] = kDcTable[clip(q + dqy1_dc, 127)];
m->y1_mat_[1] = kAcTable[clip(q + 0, 127)];
m->y2_mat[0] = kDcTable[clip(q + dqy2_dc, 127)] * 2;
m->y2_mat_[0] = kDcTable[clip(q + dqy2_dc, 127)] * 2;
// For all x in [0..284], x*155/100 is bitwise equal to (x*101581) >> 16.
// The smallest precision for that is '(x*6349) >> 12' but 16 is a good
// word size.
m->y2_mat[1] = (kAcTable[clip(q + dqy2_ac, 127)] * 101581) >> 16;
if (m->y2_mat[1] < 8) m->y2_mat[1] = 8;
m->y2_mat_[1] = (kAcTable[clip(q + dqy2_ac, 127)] * 101581) >> 16;
if (m->y2_mat_[1] < 8) m->y2_mat_[1] = 8;
m->uv_mat[0] = kDcTable[clip(q + dquv_dc, 117)];
m->uv_mat[1] = kAcTable[clip(q + dquv_ac, 127)];
m->uv_mat_[0] = kDcTable[clip(q + dquv_dc, 117)];
m->uv_mat_[1] = kAcTable[clip(q + dquv_ac, 127)];
m->uv_quant = q + dquv_ac; // for dithering strength evaluation
m->uv_quant_ = q + dquv_ac; // for dithering strength evaluation
}
}
}
//------------------------------------------------------------------------------

View File

@ -11,15 +11,9 @@
//
// Author: Skal (pascal.massimino@gmail.com)
#include <string.h>
#include "src/dec/common_dec.h"
#include "src/webp/types.h"
#include "src/dec/vp8_dec.h"
#include "src/dec/vp8i_dec.h"
#include "src/dsp/cpu.h"
#include "src/utils/bit_reader_inl_utils.h"
#include "src/utils/bit_reader_utils.h"
#if !defined(USE_GENERIC_TREE)
#if !defined(__arm__) && !defined(_M_ARM) && !WEBP_AARCH64 && \
@ -290,40 +284,40 @@ static const uint8_t kBModesProba[NUM_BMODES][NUM_BMODES][NUM_BMODES - 1] = {
};
void VP8ResetProba(VP8Proba* const proba) {
memset(proba->segments, 255u, sizeof(proba->segments));
// proba->bands[][] is initialized later
memset(proba->segments_, 255u, sizeof(proba->segments_));
// proba->bands_[][] is initialized later
}
static void ParseIntraMode(VP8BitReader* const br,
VP8Decoder* const dec, int mb_x) {
uint8_t* const top = dec->intra_t + 4 * mb_x;
uint8_t* const left = dec->intra_l;
VP8MBData* const block = dec->mb_data + mb_x;
uint8_t* const top = dec->intra_t_ + 4 * mb_x;
uint8_t* const left = dec->intra_l_;
VP8MBData* const block = dec->mb_data_ + mb_x;
// Note: we don't save segment map (yet), as we don't expect
// to decode more than 1 keyframe.
if (dec->segment_hdr.update_map) {
if (dec->segment_hdr_.update_map_) {
// Hardcoded tree parsing
block->segment = !VP8GetBit(br, dec->proba.segments[0], "segments")
? VP8GetBit(br, dec->proba.segments[1], "segments")
: VP8GetBit(br, dec->proba.segments[2], "segments") + 2;
block->segment_ = !VP8GetBit(br, dec->proba_.segments_[0], "segments")
? VP8GetBit(br, dec->proba_.segments_[1], "segments")
: VP8GetBit(br, dec->proba_.segments_[2], "segments") + 2;
} else {
block->segment = 0; // default for intra
block->segment_ = 0; // default for intra
}
if (dec->use_skip_proba) block->skip = VP8GetBit(br, dec->skip_p, "skip");
if (dec->use_skip_proba_) block->skip_ = VP8GetBit(br, dec->skip_p_, "skip");
block->is_i4x4 = !VP8GetBit(br, 145, "block-size");
if (!block->is_i4x4) {
block->is_i4x4_ = !VP8GetBit(br, 145, "block-size");
if (!block->is_i4x4_) {
// Hardcoded 16x16 intra-mode decision tree.
const int ymode =
VP8GetBit(br, 156, "pred-modes") ?
(VP8GetBit(br, 128, "pred-modes") ? TM_PRED : H_PRED) :
(VP8GetBit(br, 163, "pred-modes") ? V_PRED : DC_PRED);
block->imodes[0] = ymode;
block->imodes_[0] = ymode;
memset(top, ymode, 4 * sizeof(*top));
memset(left, ymode, 4 * sizeof(*left));
} else {
uint8_t* modes = block->imodes;
uint8_t* modes = block->imodes_;
int y;
for (y = 0; y < 4; ++y) {
int ymode = left[y];
@ -360,17 +354,17 @@ static void ParseIntraMode(VP8BitReader* const br,
}
}
// Hardcoded UVMode decision tree
block->uvmode = !VP8GetBit(br, 142, "pred-modes-uv") ? DC_PRED
: !VP8GetBit(br, 114, "pred-modes-uv") ? V_PRED
: VP8GetBit(br, 183, "pred-modes-uv") ? TM_PRED : H_PRED;
block->uvmode_ = !VP8GetBit(br, 142, "pred-modes-uv") ? DC_PRED
: !VP8GetBit(br, 114, "pred-modes-uv") ? V_PRED
: VP8GetBit(br, 183, "pred-modes-uv") ? TM_PRED : H_PRED;
}
int VP8ParseIntraModeRow(VP8BitReader* const br, VP8Decoder* const dec) {
int mb_x;
for (mb_x = 0; mb_x < dec->mb_w; ++mb_x) {
for (mb_x = 0; mb_x < dec->mb_w_; ++mb_x) {
ParseIntraMode(br, dec, mb_x);
}
return !dec->br.eof;
return !dec->br_.eof_;
}
//------------------------------------------------------------------------------
@ -520,7 +514,7 @@ static const uint8_t kBands[16 + 1] = {
};
void VP8ParseProba(VP8BitReader* const br, VP8Decoder* const dec) {
VP8Proba* const proba = &dec->proba;
VP8Proba* const proba = &dec->proba_;
int t, b, c, p;
for (t = 0; t < NUM_TYPES; ++t) {
for (b = 0; b < NUM_BANDS; ++b) {
@ -530,16 +524,16 @@ void VP8ParseProba(VP8BitReader* const br, VP8Decoder* const dec) {
VP8GetBit(br, CoeffsUpdateProba[t][b][c][p], "global-header") ?
VP8GetValue(br, 8, "global-header") :
CoeffsProba0[t][b][c][p];
proba->bands[t][b].probas[c][p] = v;
proba->bands_[t][b].probas_[c][p] = v;
}
}
}
for (b = 0; b < 16 + 1; ++b) {
proba->bands_ptr[t][b] = &proba->bands[t][kBands[b]];
proba->bands_ptr_[t][b] = &proba->bands_[t][kBands[b]];
}
}
dec->use_skip_proba = VP8Get(br, "global-header");
if (dec->use_skip_proba) {
dec->skip_p = VP8GetValue(br, 8, "global-header");
dec->use_skip_proba_ = VP8Get(br, "global-header");
if (dec->use_skip_proba_) {
dec->skip_p_ = VP8GetValue(br, 8, "global-header");
}
}

View File

@ -11,25 +11,14 @@
//
// Author: Skal (pascal.massimino@gmail.com)
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include "src/dec/alphai_dec.h"
#include "src/dec/common_dec.h"
#include "src/dec/vp8_dec.h"
#include "src/dec/vp8i_dec.h"
#include "src/dec/vp8li_dec.h"
#include "src/dec/webpi_dec.h"
#include "src/dsp/cpu.h"
#include "src/dsp/dsp.h"
#include "src/utils/bit_reader_inl_utils.h"
#include "src/utils/bit_reader_utils.h"
#include "src/utils/thread_utils.h"
#include "src/utils/utils.h"
#include "src/webp/decode.h"
#include "src/webp/format_constants.h"
#include "src/webp/types.h"
//------------------------------------------------------------------------------
@ -51,8 +40,8 @@ static void InitGetCoeffs(void);
// VP8Decoder
static void SetOk(VP8Decoder* const dec) {
dec->status = VP8_STATUS_OK;
dec->error_msg = "OK";
dec->status_ = VP8_STATUS_OK;
dec->error_msg_ = "OK";
}
int VP8InitIoInternal(VP8Io* const io, int version) {
@ -69,9 +58,9 @@ VP8Decoder* VP8New(void) {
VP8Decoder* const dec = (VP8Decoder*)WebPSafeCalloc(1ULL, sizeof(*dec));
if (dec != NULL) {
SetOk(dec);
WebPGetWorkerInterface()->Init(&dec->worker);
dec->ready = 0;
dec->num_parts_minus_one = 0;
WebPGetWorkerInterface()->Init(&dec->worker_);
dec->ready_ = 0;
dec->num_parts_minus_one_ = 0;
InitGetCoeffs();
}
return dec;
@ -79,13 +68,13 @@ VP8Decoder* VP8New(void) {
VP8StatusCode VP8Status(VP8Decoder* const dec) {
if (!dec) return VP8_STATUS_INVALID_PARAM;
return dec->status;
return dec->status_;
}
const char* VP8StatusMessage(VP8Decoder* const dec) {
if (dec == NULL) return "no object";
if (!dec->error_msg) return "OK";
return dec->error_msg;
if (!dec->error_msg_) return "OK";
return dec->error_msg_;
}
void VP8Delete(VP8Decoder* const dec) {
@ -98,12 +87,12 @@ void VP8Delete(VP8Decoder* const dec) {
int VP8SetError(VP8Decoder* const dec,
VP8StatusCode error, const char* const msg) {
// VP8_STATUS_SUSPENDED is only meaningful in incremental decoding.
assert(dec->incremental || error != VP8_STATUS_SUSPENDED);
assert(dec->incremental_ || error != VP8_STATUS_SUSPENDED);
// The oldest error reported takes precedence over the new one.
if (dec->status == VP8_STATUS_OK) {
dec->status = error;
dec->error_msg = msg;
dec->ready = 0;
if (dec->status_ == VP8_STATUS_OK) {
dec->status_ = error;
dec->error_msg_ = msg;
dec->ready_ = 0;
}
return 0;
}
@ -162,11 +151,11 @@ int VP8GetInfo(const uint8_t* data, size_t data_size, size_t chunk_size,
static void ResetSegmentHeader(VP8SegmentHeader* const hdr) {
assert(hdr != NULL);
hdr->use_segment = 0;
hdr->update_map = 0;
hdr->absolute_delta = 1;
memset(hdr->quantizer, 0, sizeof(hdr->quantizer));
memset(hdr->filter_strength, 0, sizeof(hdr->filter_strength));
hdr->use_segment_ = 0;
hdr->update_map_ = 0;
hdr->absolute_delta_ = 1;
memset(hdr->quantizer_, 0, sizeof(hdr->quantizer_));
memset(hdr->filter_strength_, 0, sizeof(hdr->filter_strength_));
}
// Paragraph 9.3
@ -174,32 +163,32 @@ static int ParseSegmentHeader(VP8BitReader* br,
VP8SegmentHeader* hdr, VP8Proba* proba) {
assert(br != NULL);
assert(hdr != NULL);
hdr->use_segment = VP8Get(br, "global-header");
if (hdr->use_segment) {
hdr->update_map = VP8Get(br, "global-header");
hdr->use_segment_ = VP8Get(br, "global-header");
if (hdr->use_segment_) {
hdr->update_map_ = VP8Get(br, "global-header");
if (VP8Get(br, "global-header")) { // update data
int s;
hdr->absolute_delta = VP8Get(br, "global-header");
hdr->absolute_delta_ = VP8Get(br, "global-header");
for (s = 0; s < NUM_MB_SEGMENTS; ++s) {
hdr->quantizer[s] = VP8Get(br, "global-header") ?
hdr->quantizer_[s] = VP8Get(br, "global-header") ?
VP8GetSignedValue(br, 7, "global-header") : 0;
}
for (s = 0; s < NUM_MB_SEGMENTS; ++s) {
hdr->filter_strength[s] = VP8Get(br, "global-header") ?
hdr->filter_strength_[s] = VP8Get(br, "global-header") ?
VP8GetSignedValue(br, 6, "global-header") : 0;
}
}
if (hdr->update_map) {
if (hdr->update_map_) {
int s;
for (s = 0; s < MB_FEATURE_TREE_PROBS; ++s) {
proba->segments[s] = VP8Get(br, "global-header") ?
proba->segments_[s] = VP8Get(br, "global-header") ?
VP8GetValue(br, 8, "global-header") : 255u;
}
}
} else {
hdr->update_map = 0;
hdr->update_map_ = 0;
}
return !br->eof;
return !br->eof_;
}
// Paragraph 9.5
@ -213,7 +202,7 @@ static int ParseSegmentHeader(VP8BitReader* br,
// If the partitions were positioned ok, VP8_STATUS_OK is returned.
static VP8StatusCode ParsePartitions(VP8Decoder* const dec,
const uint8_t* buf, size_t size) {
VP8BitReader* const br = &dec->br;
VP8BitReader* const br = &dec->br_;
const uint8_t* sz = buf;
const uint8_t* buf_end = buf + size;
const uint8_t* part_start;
@ -221,8 +210,8 @@ static VP8StatusCode ParsePartitions(VP8Decoder* const dec,
size_t last_part;
size_t p;
dec->num_parts_minus_one = (1 << VP8GetValue(br, 2, "global-header")) - 1;
last_part = dec->num_parts_minus_one;
dec->num_parts_minus_one_ = (1 << VP8GetValue(br, 2, "global-header")) - 1;
last_part = dec->num_parts_minus_one_;
if (size < 3 * last_part) {
// we can't even read the sizes with sz[]! That's a failure.
return VP8_STATUS_NOT_ENOUGH_DATA;
@ -232,42 +221,42 @@ static VP8StatusCode ParsePartitions(VP8Decoder* const dec,
for (p = 0; p < last_part; ++p) {
size_t psize = sz[0] | (sz[1] << 8) | (sz[2] << 16);
if (psize > size_left) psize = size_left;
VP8InitBitReader(dec->parts + p, part_start, psize);
VP8InitBitReader(dec->parts_ + p, part_start, psize);
part_start += psize;
size_left -= psize;
sz += 3;
}
VP8InitBitReader(dec->parts + last_part, part_start, size_left);
VP8InitBitReader(dec->parts_ + last_part, part_start, size_left);
if (part_start < buf_end) return VP8_STATUS_OK;
return dec->incremental
return dec->incremental_
? VP8_STATUS_SUSPENDED // Init is ok, but there's not enough data
: VP8_STATUS_NOT_ENOUGH_DATA;
}
// Paragraph 9.4
static int ParseFilterHeader(VP8BitReader* br, VP8Decoder* const dec) {
VP8FilterHeader* const hdr = &dec->filter_hdr;
hdr->simple = VP8Get(br, "global-header");
hdr->level = VP8GetValue(br, 6, "global-header");
hdr->sharpness = VP8GetValue(br, 3, "global-header");
hdr->use_lf_delta = VP8Get(br, "global-header");
if (hdr->use_lf_delta) {
VP8FilterHeader* const hdr = &dec->filter_hdr_;
hdr->simple_ = VP8Get(br, "global-header");
hdr->level_ = VP8GetValue(br, 6, "global-header");
hdr->sharpness_ = VP8GetValue(br, 3, "global-header");
hdr->use_lf_delta_ = VP8Get(br, "global-header");
if (hdr->use_lf_delta_) {
if (VP8Get(br, "global-header")) { // update lf-delta?
int i;
for (i = 0; i < NUM_REF_LF_DELTAS; ++i) {
if (VP8Get(br, "global-header")) {
hdr->ref_lf_delta[i] = VP8GetSignedValue(br, 6, "global-header");
hdr->ref_lf_delta_[i] = VP8GetSignedValue(br, 6, "global-header");
}
}
for (i = 0; i < NUM_MODE_LF_DELTAS; ++i) {
if (VP8Get(br, "global-header")) {
hdr->mode_lf_delta[i] = VP8GetSignedValue(br, 6, "global-header");
hdr->mode_lf_delta_[i] = VP8GetSignedValue(br, 6, "global-header");
}
}
}
}
dec->filter_type = (hdr->level == 0) ? 0 : hdr->simple ? 1 : 2;
return !br->eof;
dec->filter_type_ = (hdr->level_ == 0) ? 0 : hdr->simple_ ? 1 : 2;
return !br->eof_;
}
// Topmost call
@ -297,16 +286,16 @@ int VP8GetHeaders(VP8Decoder* const dec, VP8Io* const io) {
// Paragraph 9.1
{
const uint32_t bits = buf[0] | (buf[1] << 8) | (buf[2] << 16);
frm_hdr = &dec->frm_hdr;
frm_hdr->key_frame = !(bits & 1);
frm_hdr->profile = (bits >> 1) & 7;
frm_hdr->show = (bits >> 4) & 1;
frm_hdr->partition_length = (bits >> 5);
if (frm_hdr->profile > 3) {
frm_hdr = &dec->frm_hdr_;
frm_hdr->key_frame_ = !(bits & 1);
frm_hdr->profile_ = (bits >> 1) & 7;
frm_hdr->show_ = (bits >> 4) & 1;
frm_hdr->partition_length_ = (bits >> 5);
if (frm_hdr->profile_ > 3) {
return VP8SetError(dec, VP8_STATUS_BITSTREAM_ERROR,
"Incorrect keyframe parameters.");
}
if (!frm_hdr->show) {
if (!frm_hdr->show_) {
return VP8SetError(dec, VP8_STATUS_UNSUPPORTED_FEATURE,
"Frame not displayable.");
}
@ -314,8 +303,8 @@ int VP8GetHeaders(VP8Decoder* const dec, VP8Io* const io) {
buf_size -= 3;
}
pic_hdr = &dec->pic_hdr;
if (frm_hdr->key_frame) {
pic_hdr = &dec->pic_hdr_;
if (frm_hdr->key_frame_) {
// Paragraph 9.2
if (buf_size < 7) {
return VP8SetError(dec, VP8_STATUS_NOT_ENOUGH_DATA,
@ -325,20 +314,20 @@ int VP8GetHeaders(VP8Decoder* const dec, VP8Io* const io) {
return VP8SetError(dec, VP8_STATUS_BITSTREAM_ERROR,
"Bad code word");
}
pic_hdr->width = ((buf[4] << 8) | buf[3]) & 0x3fff;
pic_hdr->xscale = buf[4] >> 6; // ratio: 1, 5/4 5/3 or 2
pic_hdr->height = ((buf[6] << 8) | buf[5]) & 0x3fff;
pic_hdr->yscale = buf[6] >> 6;
pic_hdr->width_ = ((buf[4] << 8) | buf[3]) & 0x3fff;
pic_hdr->xscale_ = buf[4] >> 6; // ratio: 1, 5/4 5/3 or 2
pic_hdr->height_ = ((buf[6] << 8) | buf[5]) & 0x3fff;
pic_hdr->yscale_ = buf[6] >> 6;
buf += 7;
buf_size -= 7;
dec->mb_w = (pic_hdr->width + 15) >> 4;
dec->mb_h = (pic_hdr->height + 15) >> 4;
dec->mb_w_ = (pic_hdr->width_ + 15) >> 4;
dec->mb_h_ = (pic_hdr->height_ + 15) >> 4;
// Setup default output area (can be later modified during io->setup())
io->width = pic_hdr->width;
io->height = pic_hdr->height;
// IMPORTANT! use some sane dimensions in crop* and scaled* fields.
io->width = pic_hdr->width_;
io->height = pic_hdr->height_;
// IMPORTANT! use some sane dimensions in crop_* and scaled_* fields.
// So they can be used interchangeably without always testing for
// 'use_cropping'.
io->use_cropping = 0;
@ -353,27 +342,27 @@ int VP8GetHeaders(VP8Decoder* const dec, VP8Io* const io) {
io->mb_w = io->width; // for soundness
io->mb_h = io->height; // ditto
VP8ResetProba(&dec->proba);
ResetSegmentHeader(&dec->segment_hdr);
VP8ResetProba(&dec->proba_);
ResetSegmentHeader(&dec->segment_hdr_);
}
// Check if we have all the partition #0 available, and initialize dec->br
// Check if we have all the partition #0 available, and initialize dec->br_
// to read this partition (and this partition only).
if (frm_hdr->partition_length > buf_size) {
if (frm_hdr->partition_length_ > buf_size) {
return VP8SetError(dec, VP8_STATUS_NOT_ENOUGH_DATA,
"bad partition length");
}
br = &dec->br;
VP8InitBitReader(br, buf, frm_hdr->partition_length);
buf += frm_hdr->partition_length;
buf_size -= frm_hdr->partition_length;
br = &dec->br_;
VP8InitBitReader(br, buf, frm_hdr->partition_length_);
buf += frm_hdr->partition_length_;
buf_size -= frm_hdr->partition_length_;
if (frm_hdr->key_frame) {
pic_hdr->colorspace = VP8Get(br, "global-header");
pic_hdr->clamp_type = VP8Get(br, "global-header");
if (frm_hdr->key_frame_) {
pic_hdr->colorspace_ = VP8Get(br, "global-header");
pic_hdr->clamp_type_ = VP8Get(br, "global-header");
}
if (!ParseSegmentHeader(br, &dec->segment_hdr, &dec->proba)) {
if (!ParseSegmentHeader(br, &dec->segment_hdr_, &dec->proba_)) {
return VP8SetError(dec, VP8_STATUS_BITSTREAM_ERROR,
"cannot parse segment header");
}
@ -391,17 +380,17 @@ int VP8GetHeaders(VP8Decoder* const dec, VP8Io* const io) {
VP8ParseQuant(dec);
// Frame buffer marking
if (!frm_hdr->key_frame) {
if (!frm_hdr->key_frame_) {
return VP8SetError(dec, VP8_STATUS_UNSUPPORTED_FEATURE,
"Not a key frame.");
}
VP8Get(br, "global-header"); // ignore the value of 'update_proba'
VP8Get(br, "global-header"); // ignore the value of update_proba_
VP8ParseProba(br, dec);
// sanitized state
dec->ready = 1;
dec->ready_ = 1;
return 1;
}
@ -454,17 +443,17 @@ static int GetLargeValue(VP8BitReader* const br, const uint8_t* const p) {
static int GetCoeffsFast(VP8BitReader* const br,
const VP8BandProbas* const prob[],
int ctx, const quant_t dq, int n, int16_t* out) {
const uint8_t* p = prob[n]->probas[ctx];
const uint8_t* p = prob[n]->probas_[ctx];
for (; n < 16; ++n) {
if (!VP8GetBit(br, p[0], "coeffs")) {
return n; // previous coeff was last non-zero coeff
}
while (!VP8GetBit(br, p[1], "coeffs")) { // sequence of zero coeffs
p = prob[++n]->probas[0];
p = prob[++n]->probas_[0];
if (n == 16) return 16;
}
{ // non zero coeff
const VP8ProbaArray* const p_ctx = &prob[n + 1]->probas[0];
const VP8ProbaArray* const p_ctx = &prob[n + 1]->probas_[0];
int v;
if (!VP8GetBit(br, p[2], "coeffs")) {
v = 1;
@ -484,17 +473,17 @@ static int GetCoeffsFast(VP8BitReader* const br,
static int GetCoeffsAlt(VP8BitReader* const br,
const VP8BandProbas* const prob[],
int ctx, const quant_t dq, int n, int16_t* out) {
const uint8_t* p = prob[n]->probas[ctx];
const uint8_t* p = prob[n]->probas_[ctx];
for (; n < 16; ++n) {
if (!VP8GetBitAlt(br, p[0], "coeffs")) {
return n; // previous coeff was last non-zero coeff
}
while (!VP8GetBitAlt(br, p[1], "coeffs")) { // sequence of zero coeffs
p = prob[++n]->probas[0];
p = prob[++n]->probas_[0];
if (n == 16) return 16;
}
{ // non zero coeff
const VP8ProbaArray* const p_ctx = &prob[n + 1]->probas[0];
const VP8ProbaArray* const p_ctx = &prob[n + 1]->probas_[0];
int v;
if (!VP8GetBitAlt(br, p[2], "coeffs")) {
v = 1;
@ -527,12 +516,12 @@ static WEBP_INLINE uint32_t NzCodeBits(uint32_t nz_coeffs, int nz, int dc_nz) {
static int ParseResiduals(VP8Decoder* const dec,
VP8MB* const mb, VP8BitReader* const token_br) {
const VP8BandProbas* (* const bands)[16 + 1] = dec->proba.bands_ptr;
const VP8BandProbas* (* const bands)[16 + 1] = dec->proba_.bands_ptr_;
const VP8BandProbas* const * ac_proba;
VP8MBData* const block = dec->mb_data + dec->mb_x;
const VP8QuantMatrix* const q = &dec->dqm[block->segment];
int16_t* dst = block->coeffs;
VP8MB* const left_mb = dec->mb_info - 1;
VP8MBData* const block = dec->mb_data_ + dec->mb_x_;
const VP8QuantMatrix* const q = &dec->dqm_[block->segment_];
int16_t* dst = block->coeffs_;
VP8MB* const left_mb = dec->mb_info_ - 1;
uint8_t tnz, lnz;
uint32_t non_zero_y = 0;
uint32_t non_zero_uv = 0;
@ -541,11 +530,11 @@ static int ParseResiduals(VP8Decoder* const dec,
int first;
memset(dst, 0, 384 * sizeof(*dst));
if (!block->is_i4x4) { // parse DC
if (!block->is_i4x4_) { // parse DC
int16_t dc[16] = { 0 };
const int ctx = mb->nz_dc + left_mb->nz_dc;
const int nz = GetCoeffs(token_br, bands[1], ctx, q->y2_mat, 0, dc);
mb->nz_dc = left_mb->nz_dc = (nz > 0);
const int ctx = mb->nz_dc_ + left_mb->nz_dc_;
const int nz = GetCoeffs(token_br, bands[1], ctx, q->y2_mat_, 0, dc);
mb->nz_dc_ = left_mb->nz_dc_ = (nz > 0);
if (nz > 1) { // more than just the DC -> perform the full transform
VP8TransformWHT(dc, dst);
} else { // only DC is non-zero -> inlined simplified transform
@ -560,14 +549,14 @@ static int ParseResiduals(VP8Decoder* const dec,
ac_proba = bands[3];
}
tnz = mb->nz & 0x0f;
lnz = left_mb->nz & 0x0f;
tnz = mb->nz_ & 0x0f;
lnz = left_mb->nz_ & 0x0f;
for (y = 0; y < 4; ++y) {
int l = lnz & 1;
uint32_t nz_coeffs = 0;
for (x = 0; x < 4; ++x) {
const int ctx = l + (tnz & 1);
const int nz = GetCoeffs(token_br, ac_proba, ctx, q->y1_mat, first, dst);
const int nz = GetCoeffs(token_br, ac_proba, ctx, q->y1_mat_, first, dst);
l = (nz > first);
tnz = (tnz >> 1) | (l << 7);
nz_coeffs = NzCodeBits(nz_coeffs, nz, dst[0] != 0);
@ -582,13 +571,13 @@ static int ParseResiduals(VP8Decoder* const dec,
for (ch = 0; ch < 4; ch += 2) {
uint32_t nz_coeffs = 0;
tnz = mb->nz >> (4 + ch);
lnz = left_mb->nz >> (4 + ch);
tnz = mb->nz_ >> (4 + ch);
lnz = left_mb->nz_ >> (4 + ch);
for (y = 0; y < 2; ++y) {
int l = lnz & 1;
for (x = 0; x < 2; ++x) {
const int ctx = l + (tnz & 1);
const int nz = GetCoeffs(token_br, bands[2], ctx, q->uv_mat, 0, dst);
const int nz = GetCoeffs(token_br, bands[2], ctx, q->uv_mat_, 0, dst);
l = (nz > 0);
tnz = (tnz >> 1) | (l << 3);
nz_coeffs = NzCodeBits(nz_coeffs, nz, dst[0] != 0);
@ -602,16 +591,16 @@ static int ParseResiduals(VP8Decoder* const dec,
out_t_nz |= (tnz << 4) << ch;
out_l_nz |= (lnz & 0xf0) << ch;
}
mb->nz = out_t_nz;
left_mb->nz = out_l_nz;
mb->nz_ = out_t_nz;
left_mb->nz_ = out_l_nz;
block->non_zero_y = non_zero_y;
block->non_zero_uv = non_zero_uv;
block->non_zero_y_ = non_zero_y;
block->non_zero_uv_ = non_zero_uv;
// We look at the mode-code of each block and check if some blocks have less
// than three non-zero coeffs (code < 2). This is to avoid dithering flat and
// empty blocks.
block->dither = (non_zero_uv & 0xaaaa) ? 0 : q->dither;
block->dither_ = (non_zero_uv & 0xaaaa) ? 0 : q->dither_;
return !(non_zero_y | non_zero_uv); // will be used for further optimization
}
@ -620,50 +609,50 @@ static int ParseResiduals(VP8Decoder* const dec,
// Main loop
int VP8DecodeMB(VP8Decoder* const dec, VP8BitReader* const token_br) {
VP8MB* const left = dec->mb_info - 1;
VP8MB* const mb = dec->mb_info + dec->mb_x;
VP8MBData* const block = dec->mb_data + dec->mb_x;
int skip = dec->use_skip_proba ? block->skip : 0;
VP8MB* const left = dec->mb_info_ - 1;
VP8MB* const mb = dec->mb_info_ + dec->mb_x_;
VP8MBData* const block = dec->mb_data_ + dec->mb_x_;
int skip = dec->use_skip_proba_ ? block->skip_ : 0;
if (!skip) {
skip = ParseResiduals(dec, mb, token_br);
} else {
left->nz = mb->nz = 0;
if (!block->is_i4x4) {
left->nz_dc = mb->nz_dc = 0;
left->nz_ = mb->nz_ = 0;
if (!block->is_i4x4_) {
left->nz_dc_ = mb->nz_dc_ = 0;
}
block->non_zero_y = 0;
block->non_zero_uv = 0;
block->dither = 0;
block->non_zero_y_ = 0;
block->non_zero_uv_ = 0;
block->dither_ = 0;
}
if (dec->filter_type > 0) { // store filter info
VP8FInfo* const finfo = dec->f_info + dec->mb_x;
*finfo = dec->fstrengths[block->segment][block->is_i4x4];
finfo->f_inner |= !skip;
if (dec->filter_type_ > 0) { // store filter info
VP8FInfo* const finfo = dec->f_info_ + dec->mb_x_;
*finfo = dec->fstrengths_[block->segment_][block->is_i4x4_];
finfo->f_inner_ |= !skip;
}
return !token_br->eof;
return !token_br->eof_;
}
void VP8InitScanline(VP8Decoder* const dec) {
VP8MB* const left = dec->mb_info - 1;
left->nz = 0;
left->nz_dc = 0;
memset(dec->intra_l, B_DC_PRED, sizeof(dec->intra_l));
dec->mb_x = 0;
VP8MB* const left = dec->mb_info_ - 1;
left->nz_ = 0;
left->nz_dc_ = 0;
memset(dec->intra_l_, B_DC_PRED, sizeof(dec->intra_l_));
dec->mb_x_ = 0;
}
static int ParseFrame(VP8Decoder* const dec, VP8Io* io) {
for (dec->mb_y = 0; dec->mb_y < dec->br_mb_y; ++dec->mb_y) {
for (dec->mb_y_ = 0; dec->mb_y_ < dec->br_mb_y_; ++dec->mb_y_) {
// Parse bitstream for this row.
VP8BitReader* const token_br =
&dec->parts[dec->mb_y & dec->num_parts_minus_one];
if (!VP8ParseIntraModeRow(&dec->br, dec)) {
&dec->parts_[dec->mb_y_ & dec->num_parts_minus_one_];
if (!VP8ParseIntraModeRow(&dec->br_, dec)) {
return VP8SetError(dec, VP8_STATUS_NOT_ENOUGH_DATA,
"Premature end-of-partition0 encountered.");
}
for (; dec->mb_x < dec->mb_w; ++dec->mb_x) {
for (; dec->mb_x_ < dec->mb_w_; ++dec->mb_x_) {
if (!VP8DecodeMB(dec, token_br)) {
return VP8SetError(dec, VP8_STATUS_NOT_ENOUGH_DATA,
"Premature end-of-file encountered.");
@ -676,8 +665,8 @@ static int ParseFrame(VP8Decoder* const dec, VP8Io* io) {
return VP8SetError(dec, VP8_STATUS_USER_ABORT, "Output aborted.");
}
}
if (dec->mt_method > 0) {
if (!WebPGetWorkerInterface()->Sync(&dec->worker)) return 0;
if (dec->mt_method_ > 0) {
if (!WebPGetWorkerInterface()->Sync(&dec->worker_)) return 0;
}
return 1;
@ -694,12 +683,12 @@ int VP8Decode(VP8Decoder* const dec, VP8Io* const io) {
"NULL VP8Io parameter in VP8Decode().");
}
if (!dec->ready) {
if (!dec->ready_) {
if (!VP8GetHeaders(dec, io)) {
return 0;
}
}
assert(dec->ready);
assert(dec->ready_);
// Finish setting up the decoding parameter. Will call io->setup().
ok = (VP8EnterCritical(dec, io) == VP8_STATUS_OK);
@ -719,7 +708,7 @@ int VP8Decode(VP8Decoder* const dec, VP8Io* const io) {
return 0;
}
dec->ready = 0;
dec->ready_ = 0;
return ok;
}
@ -727,13 +716,13 @@ void VP8Clear(VP8Decoder* const dec) {
if (dec == NULL) {
return;
}
WebPGetWorkerInterface()->End(&dec->worker);
WebPGetWorkerInterface()->End(&dec->worker_);
WebPDeallocateAlphaMemory(dec);
WebPSafeFree(dec->mem);
dec->mem = NULL;
dec->mem_size = 0;
memset(&dec->br, 0, sizeof(dec->br));
dec->ready = 0;
WebPSafeFree(dec->mem_);
dec->mem_ = NULL;
dec->mem_size_ = 0;
memset(&dec->br_, 0, sizeof(dec->br_));
dec->ready_ = 0;
}
//------------------------------------------------------------------------------

View File

@ -14,8 +14,6 @@
#ifndef WEBP_DEC_VP8_DEC_H_
#define WEBP_DEC_VP8_DEC_H_
#include <stddef.h>
#include "src/webp/decode.h"
#include "src/webp/types.h"

View File

@ -15,16 +15,12 @@
#define WEBP_DEC_VP8I_DEC_H_
#include <string.h> // for memcpy()
#include "src/dec/common_dec.h"
#include "src/dec/vp8_dec.h"
#include "src/dec/vp8li_dec.h"
#include "src/dec/webpi_dec.h"
#include "src/dsp/dsp.h"
#include "src/utils/bit_reader_utils.h"
#include "src/utils/random_utils.h"
#include "src/utils/thread_utils.h"
#include "src/webp/decode.h"
#include "src/dsp/dsp.h"
#include "src/webp/types.h"
#ifdef __cplusplus
@ -36,7 +32,7 @@ extern "C" {
// version numbers
#define DEC_MAJ_VERSION 1
#define DEC_MIN_VERSION 6
#define DEC_MIN_VERSION 4
#define DEC_REV_VERSION 0
// YUV-cache parameters. Cache is 32-bytes wide (= one cacheline).
@ -73,85 +69,85 @@ extern "C" {
// Headers
typedef struct {
uint8_t key_frame;
uint8_t profile;
uint8_t show;
uint32_t partition_length;
uint8_t key_frame_;
uint8_t profile_;
uint8_t show_;
uint32_t partition_length_;
} VP8FrameHeader;
typedef struct {
uint16_t width;
uint16_t height;
uint8_t xscale;
uint8_t yscale;
uint8_t colorspace; // 0 = YCbCr
uint8_t clamp_type;
uint16_t width_;
uint16_t height_;
uint8_t xscale_;
uint8_t yscale_;
uint8_t colorspace_; // 0 = YCbCr
uint8_t clamp_type_;
} VP8PictureHeader;
// segment features
typedef struct {
int use_segment;
int update_map; // whether to update the segment map or not
int absolute_delta; // absolute or delta values for quantizer and filter
int8_t quantizer[NUM_MB_SEGMENTS]; // quantization changes
int8_t filter_strength[NUM_MB_SEGMENTS]; // filter strength for segments
int use_segment_;
int update_map_; // whether to update the segment map or not
int absolute_delta_; // absolute or delta values for quantizer and filter
int8_t quantizer_[NUM_MB_SEGMENTS]; // quantization changes
int8_t filter_strength_[NUM_MB_SEGMENTS]; // filter strength for segments
} VP8SegmentHeader;
// probas associated to one of the contexts
typedef uint8_t VP8ProbaArray[NUM_PROBAS];
typedef struct { // all the probas associated to one band
VP8ProbaArray probas[NUM_CTX];
VP8ProbaArray probas_[NUM_CTX];
} VP8BandProbas;
// Struct collecting all frame-persistent probabilities.
typedef struct {
uint8_t segments[MB_FEATURE_TREE_PROBS];
uint8_t segments_[MB_FEATURE_TREE_PROBS];
// Type: 0:Intra16-AC 1:Intra16-DC 2:Chroma 3:Intra4
VP8BandProbas bands[NUM_TYPES][NUM_BANDS];
const VP8BandProbas* bands_ptr[NUM_TYPES][16 + 1];
VP8BandProbas bands_[NUM_TYPES][NUM_BANDS];
const VP8BandProbas* bands_ptr_[NUM_TYPES][16 + 1];
} VP8Proba;
// Filter parameters
typedef struct {
int simple; // 0=complex, 1=simple
int level; // [0..63]
int sharpness; // [0..7]
int use_lf_delta;
int ref_lf_delta[NUM_REF_LF_DELTAS];
int mode_lf_delta[NUM_MODE_LF_DELTAS];
int simple_; // 0=complex, 1=simple
int level_; // [0..63]
int sharpness_; // [0..7]
int use_lf_delta_;
int ref_lf_delta_[NUM_REF_LF_DELTAS];
int mode_lf_delta_[NUM_MODE_LF_DELTAS];
} VP8FilterHeader;
//------------------------------------------------------------------------------
// Informations about the macroblocks.
typedef struct { // filter specs
uint8_t f_limit; // filter limit in [3..189], or 0 if no filtering
uint8_t f_ilevel; // inner limit in [1..63]
uint8_t f_inner; // do inner filtering?
uint8_t hev_thresh; // high edge variance threshold in [0..2]
uint8_t f_limit_; // filter limit in [3..189], or 0 if no filtering
uint8_t f_ilevel_; // inner limit in [1..63]
uint8_t f_inner_; // do inner filtering?
uint8_t hev_thresh_; // high edge variance threshold in [0..2]
} VP8FInfo;
typedef struct { // Top/Left Contexts used for syntax-parsing
uint8_t nz; // non-zero AC/DC coeffs (4bit for luma + 4bit for chroma)
uint8_t nz_dc; // non-zero DC coeff (1bit)
uint8_t nz_; // non-zero AC/DC coeffs (4bit for luma + 4bit for chroma)
uint8_t nz_dc_; // non-zero DC coeff (1bit)
} VP8MB;
// Dequantization matrices
typedef int quant_t[2]; // [DC / AC]. Can be 'uint16_t[2]' too (~slower).
typedef struct {
quant_t y1_mat, y2_mat, uv_mat;
quant_t y1_mat_, y2_mat_, uv_mat_;
int uv_quant; // U/V quantizer value
int dither; // dithering amplitude (0 = off, max=255)
int uv_quant_; // U/V quantizer value
int dither_; // dithering amplitude (0 = off, max=255)
} VP8QuantMatrix;
// Data needed to reconstruct a macroblock
typedef struct {
int16_t coeffs[384]; // 384 coeffs = (16+4+4) * 4*4
uint8_t is_i4x4; // true if intra4x4
uint8_t imodes[16]; // one 16x16 mode (#0) or sixteen 4x4 modes
uint8_t uvmode; // chroma prediction mode
int16_t coeffs_[384]; // 384 coeffs = (16+4+4) * 4*4
uint8_t is_i4x4_; // true if intra4x4
uint8_t imodes_[16]; // one 16x16 mode (#0) or sixteen 4x4 modes
uint8_t uvmode_; // chroma prediction mode
// bit-wise info about the content of each sub-4x4 blocks (in decoding order).
// Each of the 4x4 blocks for y/u/v is associated with a 2b code according to:
// code=0 -> no coefficient
@ -159,21 +155,21 @@ typedef struct {
// code=2 -> first three coefficients are non-zero
// code=3 -> more than three coefficients are non-zero
// This allows to call specialized transform functions.
uint32_t non_zero_y;
uint32_t non_zero_uv;
uint8_t dither; // local dithering strength (deduced from non_zero*)
uint8_t skip;
uint8_t segment;
uint32_t non_zero_y_;
uint32_t non_zero_uv_;
uint8_t dither_; // local dithering strength (deduced from non_zero_*)
uint8_t skip_;
uint8_t segment_;
} VP8MBData;
// Persistent information needed by the parallel processing
typedef struct {
int id; // cache row to process (in [0..2])
int mb_y; // macroblock position of the row
int filter_row; // true if row-filtering is needed
VP8FInfo* f_info; // filter strengths (swapped with dec->f_info)
VP8MBData* mb_data; // reconstruction data (swapped with dec->mb_data)
VP8Io io; // copy of the VP8Io to pass to put()
int id_; // cache row to process (in [0..2])
int mb_y_; // macroblock position of the row
int filter_row_; // true if row-filtering is needed
VP8FInfo* f_info_; // filter strengths (swapped with dec->f_info_)
VP8MBData* mb_data_; // reconstruction data (swapped with dec->mb_data_)
VP8Io io_; // copy of the VP8Io to pass to put()
} VP8ThreadContext;
// Saved top samples, per macroblock. Fits into a cache-line.
@ -185,89 +181,89 @@ typedef struct {
// VP8Decoder: the main opaque structure handed over to user
struct VP8Decoder {
VP8StatusCode status;
int ready; // true if ready to decode a picture with VP8Decode()
const char* error_msg; // set when status is not OK.
VP8StatusCode status_;
int ready_; // true if ready to decode a picture with VP8Decode()
const char* error_msg_; // set when status_ is not OK.
// Main data source
VP8BitReader br;
int incremental; // if true, incremental decoding is expected
VP8BitReader br_;
int incremental_; // if true, incremental decoding is expected
// headers
VP8FrameHeader frm_hdr;
VP8PictureHeader pic_hdr;
VP8FilterHeader filter_hdr;
VP8SegmentHeader segment_hdr;
VP8FrameHeader frm_hdr_;
VP8PictureHeader pic_hdr_;
VP8FilterHeader filter_hdr_;
VP8SegmentHeader segment_hdr_;
// Worker
WebPWorker worker;
int mt_method; // multi-thread method: 0=off, 1=[parse+recon][filter]
// 2=[parse][recon+filter]
int cache_id; // current cache row
int num_caches; // number of cached rows of 16 pixels (1, 2 or 3)
VP8ThreadContext thread_ctx; // Thread context
WebPWorker worker_;
int mt_method_; // multi-thread method: 0=off, 1=[parse+recon][filter]
// 2=[parse][recon+filter]
int cache_id_; // current cache row
int num_caches_; // number of cached rows of 16 pixels (1, 2 or 3)
VP8ThreadContext thread_ctx_; // Thread context
// dimension, in macroblock units.
int mb_w, mb_h;
int mb_w_, mb_h_;
// Macroblock to process/filter, depending on cropping and filter_type.
int tl_mb_x, tl_mb_y; // top-left MB that must be in-loop filtered
int br_mb_x, br_mb_y; // last bottom-right MB that must be decoded
int tl_mb_x_, tl_mb_y_; // top-left MB that must be in-loop filtered
int br_mb_x_, br_mb_y_; // last bottom-right MB that must be decoded
// number of partitions minus one.
uint32_t num_parts_minus_one;
uint32_t num_parts_minus_one_;
// per-partition boolean decoders.
VP8BitReader parts[MAX_NUM_PARTITIONS];
VP8BitReader parts_[MAX_NUM_PARTITIONS];
// Dithering strength, deduced from decoding options
int dither; // whether to use dithering or not
VP8Random dithering_rg; // random generator for dithering
int dither_; // whether to use dithering or not
VP8Random dithering_rg_; // random generator for dithering
// dequantization (one set of DC/AC dequant factor per segment)
VP8QuantMatrix dqm[NUM_MB_SEGMENTS];
VP8QuantMatrix dqm_[NUM_MB_SEGMENTS];
// probabilities
VP8Proba proba;
int use_skip_proba;
uint8_t skip_p;
VP8Proba proba_;
int use_skip_proba_;
uint8_t skip_p_;
// Boundary data cache and persistent buffers.
uint8_t* intra_t; // top intra modes values: 4 * mb_w
uint8_t intra_l[4]; // left intra modes values
uint8_t* intra_t_; // top intra modes values: 4 * mb_w_
uint8_t intra_l_[4]; // left intra modes values
VP8TopSamples* yuv_t; // top y/u/v samples
VP8TopSamples* yuv_t_; // top y/u/v samples
VP8MB* mb_info; // contextual macroblock info (mb_w + 1)
VP8FInfo* f_info; // filter strength info
uint8_t* yuv_b; // main block for Y/U/V (size = YUV_SIZE)
VP8MB* mb_info_; // contextual macroblock info (mb_w_ + 1)
VP8FInfo* f_info_; // filter strength info
uint8_t* yuv_b_; // main block for Y/U/V (size = YUV_SIZE)
uint8_t* cache_y; // macroblock row for storing unfiltered samples
uint8_t* cache_u;
uint8_t* cache_v;
int cache_y_stride;
int cache_uv_stride;
uint8_t* cache_y_; // macroblock row for storing unfiltered samples
uint8_t* cache_u_;
uint8_t* cache_v_;
int cache_y_stride_;
int cache_uv_stride_;
// main memory chunk for the above data. Persistent.
void* mem;
size_t mem_size;
void* mem_;
size_t mem_size_;
// Per macroblock non-persistent infos.
int mb_x, mb_y; // current position, in macroblock units
VP8MBData* mb_data; // parsed reconstruction data
int mb_x_, mb_y_; // current position, in macroblock units
VP8MBData* mb_data_; // parsed reconstruction data
// Filtering side-info
int filter_type; // 0=off, 1=simple, 2=complex
VP8FInfo fstrengths[NUM_MB_SEGMENTS][2]; // precalculated per-segment/type
int filter_type_; // 0=off, 1=simple, 2=complex
VP8FInfo fstrengths_[NUM_MB_SEGMENTS][2]; // precalculated per-segment/type
// Alpha
struct ALPHDecoder* alph_dec; // alpha-plane decoder object
const uint8_t* alpha_data; // compressed alpha data (if present)
size_t alpha_data_size;
int is_alpha_decoded; // true if alpha_data is decoded in alpha_plane
uint8_t* alpha_plane_mem; // memory allocated for alpha_plane
uint8_t* alpha_plane; // output. Persistent, contains the whole data.
const uint8_t* alpha_prev_line; // last decoded alpha row (or NULL)
int alpha_dithering; // derived from decoding options (0=off, 100=full)
struct ALPHDecoder* alph_dec_; // alpha-plane decoder object
const uint8_t* alpha_data_; // compressed alpha data (if present)
size_t alpha_data_size_;
int is_alpha_decoded_; // true if alpha_data_ is decoded in alpha_plane_
uint8_t* alpha_plane_mem_; // memory allocated for alpha_plane_
uint8_t* alpha_plane_; // output. Persistent, contains the whole data.
const uint8_t* alpha_prev_line_; // last decoded alpha row (or NULL)
int alpha_dithering_; // derived from decoding options (0=off, 100=full)
};
//------------------------------------------------------------------------------

File diff suppressed because it is too large Load Diff

View File

@ -16,15 +16,10 @@
#define WEBP_DEC_VP8LI_DEC_H_
#include <string.h> // for memcpy()
#include "src/dec/vp8_dec.h"
#include "src/dec/webpi_dec.h"
#include "src/utils/bit_reader_utils.h"
#include "src/utils/color_cache_utils.h"
#include "src/utils/huffman_utils.h"
#include "src/utils/rescaler_utils.h"
#include "src/webp/decode.h"
#include "src/webp/format_constants.h"
#include "src/webp/types.h"
#ifdef __cplusplus
@ -39,58 +34,58 @@ typedef enum {
typedef struct VP8LTransform VP8LTransform;
struct VP8LTransform {
VP8LImageTransformType type; // transform type.
int bits; // subsampling bits defining transform window.
int xsize; // transform window X index.
int ysize; // transform window Y index.
uint32_t* data; // transform data.
VP8LImageTransformType type_; // transform type.
int bits_; // subsampling bits defining transform window.
int xsize_; // transform window X index.
int ysize_; // transform window Y index.
uint32_t* data_; // transform data.
};
typedef struct {
int color_cache_size;
VP8LColorCache color_cache;
VP8LColorCache saved_color_cache; // for incremental
int color_cache_size_;
VP8LColorCache color_cache_;
VP8LColorCache saved_color_cache_; // for incremental
int huffman_mask;
int huffman_subsample_bits;
int huffman_xsize;
uint32_t* huffman_image;
int num_htree_groups;
HTreeGroup* htree_groups;
HuffmanTables huffman_tables;
int huffman_mask_;
int huffman_subsample_bits_;
int huffman_xsize_;
uint32_t* huffman_image_;
int num_htree_groups_;
HTreeGroup* htree_groups_;
HuffmanTables huffman_tables_;
} VP8LMetadata;
typedef struct VP8LDecoder VP8LDecoder;
struct VP8LDecoder {
VP8StatusCode status;
VP8LDecodeState state;
VP8Io* io;
VP8StatusCode status_;
VP8LDecodeState state_;
VP8Io* io_;
const WebPDecBuffer* output; // shortcut to io->opaque->output
const WebPDecBuffer* output_; // shortcut to io->opaque->output
uint32_t* pixels; // Internal data: either uint8_t* for alpha
// or uint32_t* for BGRA.
uint32_t* argb_cache; // Scratch buffer for temporary BGRA storage.
uint32_t* pixels_; // Internal data: either uint8_t* for alpha
// or uint32_t* for BGRA.
uint32_t* argb_cache_; // Scratch buffer for temporary BGRA storage.
VP8LBitReader br;
int incremental; // if true, incremental decoding is expected
VP8LBitReader saved_br; // note: could be local variables too
int saved_last_pixel;
VP8LBitReader br_;
int incremental_; // if true, incremental decoding is expected
VP8LBitReader saved_br_; // note: could be local variables too
int saved_last_pixel_;
int width;
int height;
int last_row; // last input row decoded so far.
int last_pixel; // last pixel decoded so far. However, it may
// not be transformed, scaled and
// color-converted yet.
int last_out_row; // last row output so far.
int width_;
int height_;
int last_row_; // last input row decoded so far.
int last_pixel_; // last pixel decoded so far. However, it may
// not be transformed, scaled and
// color-converted yet.
int last_out_row_; // last row output so far.
VP8LMetadata hdr;
VP8LMetadata hdr_;
int next_transform;
VP8LTransform transforms[NUM_TRANSFORMS];
int next_transform_;
VP8LTransform transforms_[NUM_TRANSFORMS];
// or'd bitset storing the transforms types.
uint32_t transforms_seen;
uint32_t transforms_seen_;
uint8_t* rescaler_memory; // Working memory for rescaling work.
WebPRescaler* rescaler; // Common rescaler for all channels.
@ -123,7 +118,7 @@ WEBP_NODISCARD VP8LDecoder* VP8LNew(void);
WEBP_NODISCARD int VP8LDecodeHeader(VP8LDecoder* const dec, VP8Io* const io);
// Decodes an image. It's required to decode the lossless header before calling
// this function. Returns false in case of error, with updated dec->status.
// this function. Returns false in case of error, with updated dec->status_.
WEBP_NODISCARD int VP8LDecodeImage(VP8LDecoder* const dec);
// Clears and deallocate a lossless decoder instance.

View File

@ -11,20 +11,15 @@
//
// Author: Skal (pascal.massimino@gmail.com)
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include "src/dec/common_dec.h"
#include "src/dec/vp8_dec.h"
#include "src/dec/vp8i_dec.h"
#include "src/dec/vp8li_dec.h"
#include "src/dec/webpi_dec.h"
#include "src/utils/rescaler_utils.h"
#include "src/utils/utils.h"
#include "src/webp/decode.h"
#include "src/webp/format_constants.h"
#include "src/webp/mux_types.h" // ALPHA_FLAG
#include "src/webp/decode.h"
#include "src/webp/types.h"
//------------------------------------------------------------------------------
@ -480,23 +475,23 @@ WEBP_NODISCARD static VP8StatusCode DecodeInto(const uint8_t* const data,
if (dec == NULL) {
return VP8_STATUS_OUT_OF_MEMORY;
}
dec->alpha_data = headers.alpha_data;
dec->alpha_data_size = headers.alpha_data_size;
dec->alpha_data_ = headers.alpha_data;
dec->alpha_data_size_ = headers.alpha_data_size;
// Decode bitstream header, update io->width/io->height.
if (!VP8GetHeaders(dec, &io)) {
status = dec->status; // An error occurred. Grab error status.
status = dec->status_; // An error occurred. Grab error status.
} else {
// Allocate/check output buffers.
status = WebPAllocateDecBuffer(io.width, io.height, params->options,
params->output);
if (status == VP8_STATUS_OK) { // Decode
// This change must be done before calling VP8Decode()
dec->mt_method = VP8GetThreadMethod(params->options, &headers,
io.width, io.height);
dec->mt_method_ = VP8GetThreadMethod(params->options, &headers,
io.width, io.height);
VP8InitDithering(params->options, dec);
if (!VP8Decode(dec, &io)) {
status = dec->status;
status = dec->status_;
}
}
}
@ -507,14 +502,14 @@ WEBP_NODISCARD static VP8StatusCode DecodeInto(const uint8_t* const data,
return VP8_STATUS_OUT_OF_MEMORY;
}
if (!VP8LDecodeHeader(dec, &io)) {
status = dec->status; // An error occurred. Grab error status.
status = dec->status_; // An error occurred. Grab error status.
} else {
// Allocate/check output buffers.
status = WebPAllocateDecBuffer(io.width, io.height, params->options,
params->output);
if (status == VP8_STATUS_OK) { // Decode
if (!VP8LDecodeImage(dec)) {
status = dec->status;
status = dec->status_;
}
}
}
@ -752,61 +747,6 @@ int WebPInitDecoderConfigInternal(WebPDecoderConfig* config,
return 1;
}
// Returns 1 iff the crop rectangle has a non-negative origin (x, y) and a
// strictly positive size (w, h); returns 0 otherwise. Image bounds are not
// checked here (see WebPCheckCropDimensions for the full check).
static int WebPCheckCropDimensionsBasic(int x, int y, int w, int h) {
  return (x >= 0) && (y >= 0) && (w > 0) && (h > 0);
}
int WebPValidateDecoderConfig(const WebPDecoderConfig* config) {
const WebPDecoderOptions* options;
if (config == NULL) return 0;
if (!IsValidColorspace(config->output.colorspace)) {
return 0;
}
options = &config->options;
// bypass_filtering, no_fancy_upsampling, use_cropping, use_scaling,
// use_threads, flip can be any integer and are interpreted as boolean.
// Check for cropping.
if (options->use_cropping && !WebPCheckCropDimensionsBasic(
options->crop_left, options->crop_top,
options->crop_width, options->crop_height)) {
return 0;
}
// Check for scaling.
if (options->use_scaling &&
(options->scaled_width < 0 || options->scaled_height < 0 ||
(options->scaled_width == 0 && options->scaled_height == 0))) {
return 0;
}
// In case the WebPBitstreamFeatures has been filled in, check further.
if (config->input.width > 0 || config->input.height > 0) {
int scaled_width = options->scaled_width;
int scaled_height = options->scaled_height;
if (options->use_cropping &&
!WebPCheckCropDimensions(config->input.width, config->input.height,
options->crop_left, options->crop_top,
options->crop_width, options->crop_height)) {
return 0;
}
if (options->use_scaling && !WebPRescalerGetScaledDimensions(
config->input.width, config->input.height,
&scaled_width, &scaled_height)) {
return 0;
}
}
// Check for dithering.
if (options->dithering_strength < 0 || options->dithering_strength > 100 ||
options->alpha_dithering_strength < 0 ||
options->alpha_dithering_strength > 100) {
return 0;
}
return 1;
}
VP8StatusCode WebPGetFeaturesInternal(const uint8_t* data, size_t data_size,
WebPBitstreamFeatures* features,
int version) {
@ -866,8 +806,8 @@ VP8StatusCode WebPDecode(const uint8_t* data, size_t data_size,
int WebPCheckCropDimensions(int image_width, int image_height,
int x, int y, int w, int h) {
return WebPCheckCropDimensionsBasic(x, y, w, h) &&
!(x >= image_width || w > image_width || w > image_width - x ||
return !(x < 0 || y < 0 || w <= 0 || h <= 0 ||
x >= image_width || w > image_width || w > image_width - x ||
y >= image_height || h > image_height || h > image_height - y);
}

View File

@ -18,12 +18,9 @@
extern "C" {
#endif
#include <stddef.h>
#include "src/dec/vp8_dec.h"
#include "src/utils/rescaler_utils.h"
#include "src/dec/vp8_dec.h"
#include "src/webp/decode.h"
#include "src/webp/types.h"
//------------------------------------------------------------------------------
// WebPDecParams: Decoding output parameters. Transient internal object.

View File

@ -13,6 +13,6 @@ noinst_HEADERS =
noinst_HEADERS += ../webp/format_constants.h
libwebpdemux_la_LIBADD = ../libwebp.la
libwebpdemux_la_LDFLAGS = -no-undefined -version-info 2:17:0
libwebpdemux_la_LDFLAGS = -no-undefined -version-info 2:15:0
libwebpdemuxincludedir = $(includedir)/webp
pkgconfig_DATA = libwebpdemux.pc

View File

@ -20,8 +20,6 @@
#include "src/utils/utils.h"
#include "src/webp/decode.h"
#include "src/webp/demux.h"
#include "src/webp/mux.h"
#include "src/webp/mux_types.h"
#include "src/webp/types.h"
#define NUM_CHANNELS 4
@ -41,18 +39,18 @@ static void BlendPixelRowPremult(uint32_t* const src, const uint32_t* const dst,
int num_pixels);
struct WebPAnimDecoder {
WebPDemuxer* demux; // Demuxer created from given WebP bitstream.
WebPDecoderConfig config; // Decoder config.
WebPDemuxer* demux_; // Demuxer created from given WebP bitstream.
WebPDecoderConfig config_; // Decoder config.
// Note: we use a pointer to a function blending multiple pixels at a time to
// allow possible inlining of per-pixel blending function.
BlendRowFunc blend_func; // Pointer to the chose blend row function.
WebPAnimInfo info; // Global info about the animation.
uint8_t* curr_frame; // Current canvas (not disposed).
uint8_t* prev_frame_disposed; // Previous canvas (properly disposed).
int prev_frame_timestamp; // Previous frame timestamp (milliseconds).
WebPIterator prev_iter; // Iterator object for previous frame.
int prev_frame_was_keyframe; // True if previous frame was a keyframe.
int next_frame; // Index of the next frame to be decoded
BlendRowFunc blend_func_; // Pointer to the chose blend row function.
WebPAnimInfo info_; // Global info about the animation.
uint8_t* curr_frame_; // Current canvas (not disposed).
uint8_t* prev_frame_disposed_; // Previous canvas (properly disposed).
int prev_frame_timestamp_; // Previous frame timestamp (milliseconds).
WebPIterator prev_iter_; // Iterator object for previous frame.
int prev_frame_was_keyframe_; // True if previous frame was a keyframe.
int next_frame_; // Index of the next frame to be decoded
// (starting from 1).
};
@ -75,7 +73,7 @@ WEBP_NODISCARD static int ApplyDecoderOptions(
const WebPAnimDecoderOptions* const dec_options,
WebPAnimDecoder* const dec) {
WEBP_CSP_MODE mode;
WebPDecoderConfig* config = &dec->config;
WebPDecoderConfig* config = &dec->config_;
assert(dec_options != NULL);
mode = dec_options->color_mode;
@ -83,9 +81,9 @@ WEBP_NODISCARD static int ApplyDecoderOptions(
mode != MODE_rgbA && mode != MODE_bgrA) {
return 0;
}
dec->blend_func = (mode == MODE_RGBA || mode == MODE_BGRA)
? &BlendPixelRowNonPremult
: &BlendPixelRowPremult;
dec->blend_func_ = (mode == MODE_RGBA || mode == MODE_BGRA)
? &BlendPixelRowNonPremult
: &BlendPixelRowPremult;
if (!WebPInitDecoderConfig(config)) {
return 0;
}
@ -125,22 +123,22 @@ WebPAnimDecoder* WebPAnimDecoderNewInternal(
}
if (!ApplyDecoderOptions(&options, dec)) goto Error;
dec->demux = WebPDemux(webp_data);
if (dec->demux == NULL) goto Error;
dec->demux_ = WebPDemux(webp_data);
if (dec->demux_ == NULL) goto Error;
dec->info.canvas_width = WebPDemuxGetI(dec->demux, WEBP_FF_CANVAS_WIDTH);
dec->info.canvas_height = WebPDemuxGetI(dec->demux, WEBP_FF_CANVAS_HEIGHT);
dec->info.loop_count = WebPDemuxGetI(dec->demux, WEBP_FF_LOOP_COUNT);
dec->info.bgcolor = WebPDemuxGetI(dec->demux, WEBP_FF_BACKGROUND_COLOR);
dec->info.frame_count = WebPDemuxGetI(dec->demux, WEBP_FF_FRAME_COUNT);
dec->info_.canvas_width = WebPDemuxGetI(dec->demux_, WEBP_FF_CANVAS_WIDTH);
dec->info_.canvas_height = WebPDemuxGetI(dec->demux_, WEBP_FF_CANVAS_HEIGHT);
dec->info_.loop_count = WebPDemuxGetI(dec->demux_, WEBP_FF_LOOP_COUNT);
dec->info_.bgcolor = WebPDemuxGetI(dec->demux_, WEBP_FF_BACKGROUND_COLOR);
dec->info_.frame_count = WebPDemuxGetI(dec->demux_, WEBP_FF_FRAME_COUNT);
// Note: calloc() because we fill frame with zeroes as well.
dec->curr_frame = (uint8_t*)WebPSafeCalloc(
dec->info.canvas_width * NUM_CHANNELS, dec->info.canvas_height);
if (dec->curr_frame == NULL) goto Error;
dec->prev_frame_disposed = (uint8_t*)WebPSafeCalloc(
dec->info.canvas_width * NUM_CHANNELS, dec->info.canvas_height);
if (dec->prev_frame_disposed == NULL) goto Error;
dec->curr_frame_ = (uint8_t*)WebPSafeCalloc(
dec->info_.canvas_width * NUM_CHANNELS, dec->info_.canvas_height);
if (dec->curr_frame_ == NULL) goto Error;
dec->prev_frame_disposed_ = (uint8_t*)WebPSafeCalloc(
dec->info_.canvas_width * NUM_CHANNELS, dec->info_.canvas_height);
if (dec->prev_frame_disposed_ == NULL) goto Error;
WebPAnimDecoderReset(dec);
return dec;
@ -152,7 +150,7 @@ WebPAnimDecoder* WebPAnimDecoderNewInternal(
int WebPAnimDecoderGetInfo(const WebPAnimDecoder* dec, WebPAnimInfo* info) {
if (dec == NULL || info == NULL) return 0;
*info = dec->info;
*info = dec->info_;
return 1;
}
@ -340,25 +338,25 @@ int WebPAnimDecoderGetNext(WebPAnimDecoder* dec,
if (dec == NULL || buf_ptr == NULL || timestamp_ptr == NULL) return 0;
if (!WebPAnimDecoderHasMoreFrames(dec)) return 0;
width = dec->info.canvas_width;
height = dec->info.canvas_height;
blend_row = dec->blend_func;
width = dec->info_.canvas_width;
height = dec->info_.canvas_height;
blend_row = dec->blend_func_;
// Get compressed frame.
if (!WebPDemuxGetFrame(dec->demux, dec->next_frame, &iter)) {
if (!WebPDemuxGetFrame(dec->demux_, dec->next_frame_, &iter)) {
return 0;
}
timestamp = dec->prev_frame_timestamp + iter.duration;
timestamp = dec->prev_frame_timestamp_ + iter.duration;
// Initialize.
is_key_frame = IsKeyFrame(&iter, &dec->prev_iter,
dec->prev_frame_was_keyframe, width, height);
is_key_frame = IsKeyFrame(&iter, &dec->prev_iter_,
dec->prev_frame_was_keyframe_, width, height);
if (is_key_frame) {
if (!ZeroFillCanvas(dec->curr_frame, width, height)) {
if (!ZeroFillCanvas(dec->curr_frame_, width, height)) {
goto Error;
}
} else {
if (!CopyCanvas(dec->prev_frame_disposed, dec->curr_frame,
if (!CopyCanvas(dec->prev_frame_disposed_, dec->curr_frame_,
width, height)) {
goto Error;
}
@ -372,12 +370,12 @@ int WebPAnimDecoderGetNext(WebPAnimDecoder* dec,
const uint64_t out_offset = (uint64_t)iter.y_offset * stride +
(uint64_t)iter.x_offset * NUM_CHANNELS; // 53b
const uint64_t size = (uint64_t)iter.height * stride; // at most 25 + 27b
WebPDecoderConfig* const config = &dec->config;
WebPDecoderConfig* const config = &dec->config_;
WebPRGBABuffer* const buf = &config->output.u.RGBA;
if ((size_t)size != size) goto Error;
buf->stride = (int)stride;
buf->size = (size_t)size;
buf->rgba = dec->curr_frame + out_offset;
buf->rgba = dec->curr_frame_ + out_offset;
if (WebPDecode(in, in_size, config) != VP8_STATUS_OK) {
goto Error;
@ -390,18 +388,18 @@ int WebPAnimDecoderGetNext(WebPAnimDecoder* dec,
// that pixel in the previous frame if blending method of is WEBP_MUX_BLEND.
if (iter.frame_num > 1 && iter.blend_method == WEBP_MUX_BLEND &&
!is_key_frame) {
if (dec->prev_iter.dispose_method == WEBP_MUX_DISPOSE_NONE) {
if (dec->prev_iter_.dispose_method == WEBP_MUX_DISPOSE_NONE) {
int y;
// Blend transparent pixels with pixels in previous canvas.
for (y = 0; y < iter.height; ++y) {
const size_t offset =
(iter.y_offset + y) * width + iter.x_offset;
blend_row((uint32_t*)dec->curr_frame + offset,
(uint32_t*)dec->prev_frame_disposed + offset, iter.width);
blend_row((uint32_t*)dec->curr_frame_ + offset,
(uint32_t*)dec->prev_frame_disposed_ + offset, iter.width);
}
} else {
int y;
assert(dec->prev_iter.dispose_method == WEBP_MUX_DISPOSE_BACKGROUND);
assert(dec->prev_iter_.dispose_method == WEBP_MUX_DISPOSE_BACKGROUND);
// We need to blend a transparent pixel with its value just after
// initialization. That is, blend it with:
// * Fully transparent pixel if it belongs to prevRect <-- No-op.
@ -409,39 +407,39 @@ int WebPAnimDecoderGetNext(WebPAnimDecoder* dec,
for (y = 0; y < iter.height; ++y) {
const int canvas_y = iter.y_offset + y;
int left1, width1, left2, width2;
FindBlendRangeAtRow(&iter, &dec->prev_iter, canvas_y, &left1, &width1,
FindBlendRangeAtRow(&iter, &dec->prev_iter_, canvas_y, &left1, &width1,
&left2, &width2);
if (width1 > 0) {
const size_t offset1 = canvas_y * width + left1;
blend_row((uint32_t*)dec->curr_frame + offset1,
(uint32_t*)dec->prev_frame_disposed + offset1, width1);
blend_row((uint32_t*)dec->curr_frame_ + offset1,
(uint32_t*)dec->prev_frame_disposed_ + offset1, width1);
}
if (width2 > 0) {
const size_t offset2 = canvas_y * width + left2;
blend_row((uint32_t*)dec->curr_frame + offset2,
(uint32_t*)dec->prev_frame_disposed + offset2, width2);
blend_row((uint32_t*)dec->curr_frame_ + offset2,
(uint32_t*)dec->prev_frame_disposed_ + offset2, width2);
}
}
}
}
// Update info of the previous frame and dispose it for the next iteration.
dec->prev_frame_timestamp = timestamp;
WebPDemuxReleaseIterator(&dec->prev_iter);
dec->prev_iter = iter;
dec->prev_frame_was_keyframe = is_key_frame;
if (!CopyCanvas(dec->curr_frame, dec->prev_frame_disposed, width, height)) {
dec->prev_frame_timestamp_ = timestamp;
WebPDemuxReleaseIterator(&dec->prev_iter_);
dec->prev_iter_ = iter;
dec->prev_frame_was_keyframe_ = is_key_frame;
if (!CopyCanvas(dec->curr_frame_, dec->prev_frame_disposed_, width, height)) {
goto Error;
}
if (dec->prev_iter.dispose_method == WEBP_MUX_DISPOSE_BACKGROUND) {
ZeroFillFrameRect(dec->prev_frame_disposed, width * NUM_CHANNELS,
dec->prev_iter.x_offset, dec->prev_iter.y_offset,
dec->prev_iter.width, dec->prev_iter.height);
if (dec->prev_iter_.dispose_method == WEBP_MUX_DISPOSE_BACKGROUND) {
ZeroFillFrameRect(dec->prev_frame_disposed_, width * NUM_CHANNELS,
dec->prev_iter_.x_offset, dec->prev_iter_.y_offset,
dec->prev_iter_.width, dec->prev_iter_.height);
}
++dec->next_frame;
++dec->next_frame_;
// All OK, fill in the values.
*buf_ptr = dec->curr_frame;
*buf_ptr = dec->curr_frame_;
*timestamp_ptr = timestamp;
return 1;
@ -452,30 +450,30 @@ int WebPAnimDecoderGetNext(WebPAnimDecoder* dec,
int WebPAnimDecoderHasMoreFrames(const WebPAnimDecoder* dec) {
if (dec == NULL) return 0;
return (dec->next_frame <= (int)dec->info.frame_count);
return (dec->next_frame_ <= (int)dec->info_.frame_count);
}
void WebPAnimDecoderReset(WebPAnimDecoder* dec) {
if (dec != NULL) {
dec->prev_frame_timestamp = 0;
WebPDemuxReleaseIterator(&dec->prev_iter);
memset(&dec->prev_iter, 0, sizeof(dec->prev_iter));
dec->prev_frame_was_keyframe = 0;
dec->next_frame = 1;
dec->prev_frame_timestamp_ = 0;
WebPDemuxReleaseIterator(&dec->prev_iter_);
memset(&dec->prev_iter_, 0, sizeof(dec->prev_iter_));
dec->prev_frame_was_keyframe_ = 0;
dec->next_frame_ = 1;
}
}
const WebPDemuxer* WebPAnimDecoderGetDemuxer(const WebPAnimDecoder* dec) {
if (dec == NULL) return NULL;
return dec->demux;
return dec->demux_;
}
void WebPAnimDecoderDelete(WebPAnimDecoder* dec) {
if (dec != NULL) {
WebPDemuxReleaseIterator(&dec->prev_iter);
WebPDemuxDelete(dec->demux);
WebPSafeFree(dec->curr_frame);
WebPSafeFree(dec->prev_frame_disposed);
WebPDemuxReleaseIterator(&dec->prev_iter_);
WebPDemuxDelete(dec->demux_);
WebPSafeFree(dec->curr_frame_);
WebPSafeFree(dec->prev_frame_disposed_);
WebPSafeFree(dec);
}
}

View File

@ -22,58 +22,55 @@
#include "src/webp/decode.h" // WebPGetFeatures
#include "src/webp/demux.h"
#include "src/webp/format_constants.h"
#include "src/webp/mux.h"
#include "src/webp/mux_types.h"
#include "src/webp/types.h"
#define DMUX_MAJ_VERSION 1
#define DMUX_MIN_VERSION 6
#define DMUX_MIN_VERSION 4
#define DMUX_REV_VERSION 0
typedef struct {
size_t start; // start location of the data
size_t end; // end location
size_t riff_end; // riff chunk end location, can be > end.
size_t buf_size; // size of the buffer
const uint8_t* buf;
size_t start_; // start location of the data
size_t end_; // end location
size_t riff_end_; // riff chunk end location, can be > end_.
size_t buf_size_; // size of the buffer
const uint8_t* buf_;
} MemBuffer;
typedef struct {
size_t offset;
size_t size;
size_t offset_;
size_t size_;
} ChunkData;
typedef struct Frame {
int x_offset, y_offset;
int width, height;
int has_alpha;
int duration;
WebPMuxAnimDispose dispose_method;
WebPMuxAnimBlend blend_method;
int frame_num;
int complete; // img_components contains a full image.
ChunkData img_components[2]; // 0=VP8{,L} 1=ALPH
struct Frame* next;
int x_offset_, y_offset_;
int width_, height_;
int has_alpha_;
int duration_;
WebPMuxAnimDispose dispose_method_;
WebPMuxAnimBlend blend_method_;
int frame_num_;
int complete_; // img_components_ contains a full image.
ChunkData img_components_[2]; // 0=VP8{,L} 1=ALPH
struct Frame* next_;
} Frame;
typedef struct Chunk {
ChunkData data;
struct Chunk* next;
ChunkData data_;
struct Chunk* next_;
} Chunk;
struct WebPDemuxer {
MemBuffer mem;
WebPDemuxState state;
int is_ext_format;
uint32_t feature_flags;
int canvas_width, canvas_height;
int loop_count;
uint32_t bgcolor;
int num_frames;
Frame* frames;
Frame** frames_tail;
Chunk* chunks; // non-image chunks
Chunk** chunks_tail;
MemBuffer mem_;
WebPDemuxState state_;
int is_ext_format_;
uint32_t feature_flags_;
int canvas_width_, canvas_height_;
int loop_count_;
uint32_t bgcolor_;
int num_frames_;
Frame* frames_;
Frame** frames_tail_;
Chunk* chunks_; // non-image chunks
Chunk** chunks_tail_;
};
typedef enum {
@ -111,10 +108,10 @@ int WebPGetDemuxVersion(void) {
static int RemapMemBuffer(MemBuffer* const mem,
const uint8_t* data, size_t size) {
if (size < mem->buf_size) return 0; // can't remap to a shorter buffer!
if (size < mem->buf_size_) return 0; // can't remap to a shorter buffer!
mem->buf = data;
mem->end = mem->buf_size = size;
mem->buf_ = data;
mem->end_ = mem->buf_size_ = size;
return 1;
}
@ -126,49 +123,49 @@ static int InitMemBuffer(MemBuffer* const mem,
// Return the remaining data size available in 'mem'.
static WEBP_INLINE size_t MemDataSize(const MemBuffer* const mem) {
return (mem->end - mem->start);
return (mem->end_ - mem->start_);
}
// Return true if 'size' exceeds the end of the RIFF chunk.
static WEBP_INLINE int SizeIsInvalid(const MemBuffer* const mem, size_t size) {
return (size > mem->riff_end - mem->start);
return (size > mem->riff_end_ - mem->start_);
}
static WEBP_INLINE void Skip(MemBuffer* const mem, size_t size) {
mem->start += size;
mem->start_ += size;
}
static WEBP_INLINE void Rewind(MemBuffer* const mem, size_t size) {
mem->start -= size;
mem->start_ -= size;
}
static WEBP_INLINE const uint8_t* GetBuffer(MemBuffer* const mem) {
return mem->buf + mem->start;
return mem->buf_ + mem->start_;
}
// Read from 'mem' and skip the read bytes.
static WEBP_INLINE uint8_t ReadByte(MemBuffer* const mem) {
const uint8_t byte = mem->buf[mem->start];
const uint8_t byte = mem->buf_[mem->start_];
Skip(mem, 1);
return byte;
}
static WEBP_INLINE int ReadLE16s(MemBuffer* const mem) {
const uint8_t* const data = mem->buf + mem->start;
const uint8_t* const data = mem->buf_ + mem->start_;
const int val = GetLE16(data);
Skip(mem, 2);
return val;
}
static WEBP_INLINE int ReadLE24s(MemBuffer* const mem) {
const uint8_t* const data = mem->buf + mem->start;
const uint8_t* const data = mem->buf_ + mem->start_;
const int val = GetLE24(data);
Skip(mem, 3);
return val;
}
static WEBP_INLINE uint32_t ReadLE32(MemBuffer* const mem) {
const uint8_t* const data = mem->buf + mem->start;
const uint8_t* const data = mem->buf_ + mem->start_;
const uint32_t val = GetLE32(data);
Skip(mem, 4);
return val;
@ -178,20 +175,20 @@ static WEBP_INLINE uint32_t ReadLE32(MemBuffer* const mem) {
// Secondary chunk parsing
static void AddChunk(WebPDemuxer* const dmux, Chunk* const chunk) {
*dmux->chunks_tail = chunk;
chunk->next = NULL;
dmux->chunks_tail = &chunk->next;
*dmux->chunks_tail_ = chunk;
chunk->next_ = NULL;
dmux->chunks_tail_ = &chunk->next_;
}
// Add a frame to the end of the list, ensuring the last frame is complete.
// Returns true on success, false otherwise.
static int AddFrame(WebPDemuxer* const dmux, Frame* const frame) {
const Frame* const last_frame = *dmux->frames_tail;
if (last_frame != NULL && !last_frame->complete) return 0;
const Frame* const last_frame = *dmux->frames_tail_;
if (last_frame != NULL && !last_frame->complete_) return 0;
*dmux->frames_tail = frame;
frame->next = NULL;
dmux->frames_tail = &frame->next;
*dmux->frames_tail_ = frame;
frame->next_ = NULL;
dmux->frames_tail_ = &frame->next_;
return 1;
}
@ -199,13 +196,13 @@ static void SetFrameInfo(size_t start_offset, size_t size,
int frame_num, int complete,
const WebPBitstreamFeatures* const features,
Frame* const frame) {
frame->img_components[0].offset = start_offset;
frame->img_components[0].size = size;
frame->width = features->width;
frame->height = features->height;
frame->has_alpha |= features->has_alpha;
frame->frame_num = frame_num;
frame->complete = complete;
frame->img_components_[0].offset_ = start_offset;
frame->img_components_[0].size_ = size;
frame->width_ = features->width;
frame->height_ = features->height;
frame->has_alpha_ |= features->has_alpha;
frame->frame_num_ = frame_num;
frame->complete_ = complete;
}
// Store image bearing chunks to 'frame'. 'min_size' is an optional size
@ -221,7 +218,7 @@ static ParseStatus StoreFrame(int frame_num, uint32_t min_size,
if (done) return PARSE_NEED_MORE_DATA;
do {
const size_t chunk_start_offset = mem->start;
const size_t chunk_start_offset = mem->start_;
const uint32_t fourcc = ReadLE32(mem);
const uint32_t payload_size = ReadLE32(mem);
uint32_t payload_size_padded;
@ -241,10 +238,10 @@ static ParseStatus StoreFrame(int frame_num, uint32_t min_size,
case MKFOURCC('A', 'L', 'P', 'H'):
if (alpha_chunks == 0) {
++alpha_chunks;
frame->img_components[1].offset = chunk_start_offset;
frame->img_components[1].size = chunk_size;
frame->has_alpha = 1;
frame->frame_num = frame_num;
frame->img_components_[1].offset_ = chunk_start_offset;
frame->img_components_[1].size_ = chunk_size;
frame->has_alpha_ = 1;
frame->frame_num_ = frame_num;
Skip(mem, payload_available);
} else {
goto Done;
@ -259,7 +256,7 @@ static ParseStatus StoreFrame(int frame_num, uint32_t min_size,
// is incomplete.
WebPBitstreamFeatures features;
const VP8StatusCode vp8_status =
WebPGetFeatures(mem->buf + chunk_start_offset, chunk_size,
WebPGetFeatures(mem->buf_ + chunk_start_offset, chunk_size,
&features);
if (status == PARSE_NEED_MORE_DATA &&
vp8_status == VP8_STATUS_NOT_ENOUGH_DATA) {
@ -284,7 +281,7 @@ static ParseStatus StoreFrame(int frame_num, uint32_t min_size,
break;
}
if (mem->start == mem->riff_end) {
if (mem->start_ == mem->riff_end_) {
done = 1;
} else if (MemDataSize(mem) < CHUNK_HEADER_SIZE) {
status = PARSE_NEED_MORE_DATA;
@ -313,42 +310,42 @@ static ParseStatus NewFrame(const MemBuffer* const mem,
// 'frame_chunk_size' is the previously validated, padded chunk size.
static ParseStatus ParseAnimationFrame(
WebPDemuxer* const dmux, uint32_t frame_chunk_size) {
const int is_animation = !!(dmux->feature_flags & ANIMATION_FLAG);
const int is_animation = !!(dmux->feature_flags_ & ANIMATION_FLAG);
const uint32_t anmf_payload_size = frame_chunk_size - ANMF_CHUNK_SIZE;
int added_frame = 0;
int bits;
MemBuffer* const mem = &dmux->mem;
MemBuffer* const mem = &dmux->mem_;
Frame* frame;
size_t start_offset;
ParseStatus status =
NewFrame(mem, ANMF_CHUNK_SIZE, frame_chunk_size, &frame);
if (status != PARSE_OK) return status;
frame->x_offset = 2 * ReadLE24s(mem);
frame->y_offset = 2 * ReadLE24s(mem);
frame->width = 1 + ReadLE24s(mem);
frame->height = 1 + ReadLE24s(mem);
frame->duration = ReadLE24s(mem);
frame->x_offset_ = 2 * ReadLE24s(mem);
frame->y_offset_ = 2 * ReadLE24s(mem);
frame->width_ = 1 + ReadLE24s(mem);
frame->height_ = 1 + ReadLE24s(mem);
frame->duration_ = ReadLE24s(mem);
bits = ReadByte(mem);
frame->dispose_method =
frame->dispose_method_ =
(bits & 1) ? WEBP_MUX_DISPOSE_BACKGROUND : WEBP_MUX_DISPOSE_NONE;
frame->blend_method = (bits & 2) ? WEBP_MUX_NO_BLEND : WEBP_MUX_BLEND;
if (frame->width * (uint64_t)frame->height >= MAX_IMAGE_AREA) {
frame->blend_method_ = (bits & 2) ? WEBP_MUX_NO_BLEND : WEBP_MUX_BLEND;
if (frame->width_ * (uint64_t)frame->height_ >= MAX_IMAGE_AREA) {
WebPSafeFree(frame);
return PARSE_ERROR;
}
// Store a frame only if the animation flag is set there is some data for
// this frame is available.
start_offset = mem->start;
status = StoreFrame(dmux->num_frames + 1, anmf_payload_size, mem, frame);
if (status != PARSE_ERROR && mem->start - start_offset > anmf_payload_size) {
start_offset = mem->start_;
status = StoreFrame(dmux->num_frames_ + 1, anmf_payload_size, mem, frame);
if (status != PARSE_ERROR && mem->start_ - start_offset > anmf_payload_size) {
status = PARSE_ERROR;
}
if (status != PARSE_ERROR && is_animation && frame->frame_num > 0) {
if (status != PARSE_ERROR && is_animation && frame->frame_num_ > 0) {
added_frame = AddFrame(dmux, frame);
if (added_frame) {
++dmux->num_frames;
++dmux->num_frames_;
} else {
status = PARSE_ERROR;
}
@ -367,8 +364,8 @@ static int StoreChunk(WebPDemuxer* const dmux,
Chunk* const chunk = (Chunk*)WebPSafeCalloc(1ULL, sizeof(*chunk));
if (chunk == NULL) return 0;
chunk->data.offset = start_offset;
chunk->data.size = size;
chunk->data_.offset_ = start_offset;
chunk->data_.size_ = size;
AddChunk(dmux, chunk);
return 1;
}
@ -392,9 +389,9 @@ static ParseStatus ReadHeader(MemBuffer* const mem) {
if (riff_size > MAX_CHUNK_PAYLOAD) return PARSE_ERROR;
// There's no point in reading past the end of the RIFF chunk
mem->riff_end = riff_size + CHUNK_HEADER_SIZE;
if (mem->buf_size > mem->riff_end) {
mem->buf_size = mem->end = mem->riff_end;
mem->riff_end_ = riff_size + CHUNK_HEADER_SIZE;
if (mem->buf_size_ > mem->riff_end_) {
mem->buf_size_ = mem->end_ = mem->riff_end_;
}
Skip(mem, RIFF_HEADER_SIZE);
@ -403,12 +400,12 @@ static ParseStatus ReadHeader(MemBuffer* const mem) {
static ParseStatus ParseSingleImage(WebPDemuxer* const dmux) {
const size_t min_size = CHUNK_HEADER_SIZE;
MemBuffer* const mem = &dmux->mem;
MemBuffer* const mem = &dmux->mem_;
Frame* frame;
ParseStatus status;
int image_added = 0;
if (dmux->frames != NULL) return PARSE_ERROR;
if (dmux->frames_ != NULL) return PARSE_ERROR;
if (SizeIsInvalid(mem, min_size)) return PARSE_ERROR;
if (MemDataSize(mem) < min_size) return PARSE_NEED_MORE_DATA;
@ -417,29 +414,29 @@ static ParseStatus ParseSingleImage(WebPDemuxer* const dmux) {
// For the single image case we allow parsing of a partial frame, so no
// minimum size is imposed here.
status = StoreFrame(1, 0, &dmux->mem, frame);
status = StoreFrame(1, 0, &dmux->mem_, frame);
if (status != PARSE_ERROR) {
const int has_alpha = !!(dmux->feature_flags & ALPHA_FLAG);
const int has_alpha = !!(dmux->feature_flags_ & ALPHA_FLAG);
// Clear any alpha when the alpha flag is missing.
if (!has_alpha && frame->img_components[1].size > 0) {
frame->img_components[1].offset = 0;
frame->img_components[1].size = 0;
frame->has_alpha = 0;
if (!has_alpha && frame->img_components_[1].size_ > 0) {
frame->img_components_[1].offset_ = 0;
frame->img_components_[1].size_ = 0;
frame->has_alpha_ = 0;
}
// Use the frame width/height as the canvas values for non-vp8x files.
// Also, set ALPHA_FLAG if this is a lossless image with alpha.
if (!dmux->is_ext_format && frame->width > 0 && frame->height > 0) {
dmux->state = WEBP_DEMUX_PARSED_HEADER;
dmux->canvas_width = frame->width;
dmux->canvas_height = frame->height;
dmux->feature_flags |= frame->has_alpha ? ALPHA_FLAG : 0;
if (!dmux->is_ext_format_ && frame->width_ > 0 && frame->height_ > 0) {
dmux->state_ = WEBP_DEMUX_PARSED_HEADER;
dmux->canvas_width_ = frame->width_;
dmux->canvas_height_ = frame->height_;
dmux->feature_flags_ |= frame->has_alpha_ ? ALPHA_FLAG : 0;
}
if (!AddFrame(dmux, frame)) {
status = PARSE_ERROR; // last frame was left incomplete
} else {
image_added = 1;
dmux->num_frames = 1;
dmux->num_frames_ = 1;
}
}
@ -448,14 +445,14 @@ static ParseStatus ParseSingleImage(WebPDemuxer* const dmux) {
}
static ParseStatus ParseVP8XChunks(WebPDemuxer* const dmux) {
const int is_animation = !!(dmux->feature_flags & ANIMATION_FLAG);
MemBuffer* const mem = &dmux->mem;
const int is_animation = !!(dmux->feature_flags_ & ANIMATION_FLAG);
MemBuffer* const mem = &dmux->mem_;
int anim_chunks = 0;
ParseStatus status = PARSE_OK;
do {
int store_chunk = 1;
const size_t chunk_start_offset = mem->start;
const size_t chunk_start_offset = mem->start_;
const uint32_t fourcc = ReadLE32(mem);
const uint32_t chunk_size = ReadLE32(mem);
uint32_t chunk_size_padded;
@ -486,8 +483,8 @@ static ParseStatus ParseVP8XChunks(WebPDemuxer* const dmux) {
status = PARSE_NEED_MORE_DATA;
} else if (anim_chunks == 0) {
++anim_chunks;
dmux->bgcolor = ReadLE32(mem);
dmux->loop_count = ReadLE16s(mem);
dmux->bgcolor_ = ReadLE32(mem);
dmux->loop_count_ = ReadLE16s(mem);
Skip(mem, chunk_size_padded - ANIM_CHUNK_SIZE);
} else {
store_chunk = 0;
@ -501,15 +498,15 @@ static ParseStatus ParseVP8XChunks(WebPDemuxer* const dmux) {
break;
}
case MKFOURCC('I', 'C', 'C', 'P'): {
store_chunk = !!(dmux->feature_flags & ICCP_FLAG);
store_chunk = !!(dmux->feature_flags_ & ICCP_FLAG);
goto Skip;
}
case MKFOURCC('E', 'X', 'I', 'F'): {
store_chunk = !!(dmux->feature_flags & EXIF_FLAG);
store_chunk = !!(dmux->feature_flags_ & EXIF_FLAG);
goto Skip;
}
case MKFOURCC('X', 'M', 'P', ' '): {
store_chunk = !!(dmux->feature_flags & XMP_FLAG);
store_chunk = !!(dmux->feature_flags_ & XMP_FLAG);
goto Skip;
}
Skip:
@ -530,7 +527,7 @@ static ParseStatus ParseVP8XChunks(WebPDemuxer* const dmux) {
}
}
if (mem->start == mem->riff_end) {
if (mem->start_ == mem->riff_end_) {
break;
} else if (MemDataSize(mem) < CHUNK_HEADER_SIZE) {
status = PARSE_NEED_MORE_DATA;
@ -541,12 +538,12 @@ static ParseStatus ParseVP8XChunks(WebPDemuxer* const dmux) {
}
static ParseStatus ParseVP8X(WebPDemuxer* const dmux) {
MemBuffer* const mem = &dmux->mem;
MemBuffer* const mem = &dmux->mem_;
uint32_t vp8x_size;
if (MemDataSize(mem) < CHUNK_HEADER_SIZE) return PARSE_NEED_MORE_DATA;
dmux->is_ext_format = 1;
dmux->is_ext_format_ = 1;
Skip(mem, TAG_SIZE); // VP8X
vp8x_size = ReadLE32(mem);
if (vp8x_size > MAX_CHUNK_PAYLOAD) return PARSE_ERROR;
@ -555,15 +552,15 @@ static ParseStatus ParseVP8X(WebPDemuxer* const dmux) {
if (SizeIsInvalid(mem, vp8x_size)) return PARSE_ERROR;
if (MemDataSize(mem) < vp8x_size) return PARSE_NEED_MORE_DATA;
dmux->feature_flags = ReadByte(mem);
dmux->feature_flags_ = ReadByte(mem);
Skip(mem, 3); // Reserved.
dmux->canvas_width = 1 + ReadLE24s(mem);
dmux->canvas_height = 1 + ReadLE24s(mem);
if (dmux->canvas_width * (uint64_t)dmux->canvas_height >= MAX_IMAGE_AREA) {
dmux->canvas_width_ = 1 + ReadLE24s(mem);
dmux->canvas_height_ = 1 + ReadLE24s(mem);
if (dmux->canvas_width_ * (uint64_t)dmux->canvas_height_ >= MAX_IMAGE_AREA) {
return PARSE_ERROR; // image final dimension is too large
}
Skip(mem, vp8x_size - VP8X_CHUNK_SIZE); // skip any trailing data.
dmux->state = WEBP_DEMUX_PARSED_HEADER;
dmux->state_ = WEBP_DEMUX_PARSED_HEADER;
if (SizeIsInvalid(mem, CHUNK_HEADER_SIZE)) return PARSE_ERROR;
if (MemDataSize(mem) < CHUNK_HEADER_SIZE) return PARSE_NEED_MORE_DATA;
@ -575,13 +572,13 @@ static ParseStatus ParseVP8X(WebPDemuxer* const dmux) {
// Format validation
static int IsValidSimpleFormat(const WebPDemuxer* const dmux) {
const Frame* const frame = dmux->frames;
if (dmux->state == WEBP_DEMUX_PARSING_HEADER) return 1;
const Frame* const frame = dmux->frames_;
if (dmux->state_ == WEBP_DEMUX_PARSING_HEADER) return 1;
if (dmux->canvas_width <= 0 || dmux->canvas_height <= 0) return 0;
if (dmux->state == WEBP_DEMUX_DONE && frame == NULL) return 0;
if (dmux->canvas_width_ <= 0 || dmux->canvas_height_ <= 0) return 0;
if (dmux->state_ == WEBP_DEMUX_DONE && frame == NULL) return 0;
if (frame->width <= 0 || frame->height <= 0) return 0;
if (frame->width_ <= 0 || frame->height_ <= 0) return 0;
return 1;
}
@ -590,65 +587,65 @@ static int IsValidSimpleFormat(const WebPDemuxer* const dmux) {
static int CheckFrameBounds(const Frame* const frame, int exact,
int canvas_width, int canvas_height) {
if (exact) {
if (frame->x_offset != 0 || frame->y_offset != 0) {
if (frame->x_offset_ != 0 || frame->y_offset_ != 0) {
return 0;
}
if (frame->width != canvas_width || frame->height != canvas_height) {
if (frame->width_ != canvas_width || frame->height_ != canvas_height) {
return 0;
}
} else {
if (frame->x_offset < 0 || frame->y_offset < 0) return 0;
if (frame->width + frame->x_offset > canvas_width) return 0;
if (frame->height + frame->y_offset > canvas_height) return 0;
if (frame->x_offset_ < 0 || frame->y_offset_ < 0) return 0;
if (frame->width_ + frame->x_offset_ > canvas_width) return 0;
if (frame->height_ + frame->y_offset_ > canvas_height) return 0;
}
return 1;
}
static int IsValidExtendedFormat(const WebPDemuxer* const dmux) {
const int is_animation = !!(dmux->feature_flags & ANIMATION_FLAG);
const Frame* f = dmux->frames;
const int is_animation = !!(dmux->feature_flags_ & ANIMATION_FLAG);
const Frame* f = dmux->frames_;
if (dmux->state == WEBP_DEMUX_PARSING_HEADER) return 1;
if (dmux->state_ == WEBP_DEMUX_PARSING_HEADER) return 1;
if (dmux->canvas_width <= 0 || dmux->canvas_height <= 0) return 0;
if (dmux->loop_count < 0) return 0;
if (dmux->state == WEBP_DEMUX_DONE && dmux->frames == NULL) return 0;
if (dmux->feature_flags & ~ALL_VALID_FLAGS) return 0; // invalid bitstream
if (dmux->canvas_width_ <= 0 || dmux->canvas_height_ <= 0) return 0;
if (dmux->loop_count_ < 0) return 0;
if (dmux->state_ == WEBP_DEMUX_DONE && dmux->frames_ == NULL) return 0;
if (dmux->feature_flags_ & ~ALL_VALID_FLAGS) return 0; // invalid bitstream
while (f != NULL) {
const int cur_frame_set = f->frame_num;
const int cur_frame_set = f->frame_num_;
// Check frame properties.
for (; f != NULL && f->frame_num == cur_frame_set; f = f->next) {
const ChunkData* const image = f->img_components;
const ChunkData* const alpha = f->img_components + 1;
for (; f != NULL && f->frame_num_ == cur_frame_set; f = f->next_) {
const ChunkData* const image = f->img_components_;
const ChunkData* const alpha = f->img_components_ + 1;
if (!is_animation && f->frame_num > 1) return 0;
if (!is_animation && f->frame_num_ > 1) return 0;
if (f->complete) {
if (alpha->size == 0 && image->size == 0) return 0;
if (f->complete_) {
if (alpha->size_ == 0 && image->size_ == 0) return 0;
// Ensure alpha precedes image bitstream.
if (alpha->size > 0 && alpha->offset > image->offset) {
if (alpha->size_ > 0 && alpha->offset_ > image->offset_) {
return 0;
}
if (f->width <= 0 || f->height <= 0) return 0;
if (f->width_ <= 0 || f->height_ <= 0) return 0;
} else {
// There shouldn't be a partial frame in a complete file.
if (dmux->state == WEBP_DEMUX_DONE) return 0;
if (dmux->state_ == WEBP_DEMUX_DONE) return 0;
// Ensure alpha precedes image bitstream.
if (alpha->size > 0 && image->size > 0 &&
alpha->offset > image->offset) {
if (alpha->size_ > 0 && image->size_ > 0 &&
alpha->offset_ > image->offset_) {
return 0;
}
// There shouldn't be any frames after an incomplete one.
if (f->next != NULL) return 0;
if (f->next_ != NULL) return 0;
}
if (f->width > 0 && f->height > 0 &&
if (f->width_ > 0 && f->height_ > 0 &&
!CheckFrameBounds(f, !is_animation,
dmux->canvas_width, dmux->canvas_height)) {
dmux->canvas_width_, dmux->canvas_height_)) {
return 0;
}
}
@ -660,21 +657,21 @@ static int IsValidExtendedFormat(const WebPDemuxer* const dmux) {
// WebPDemuxer object
static void InitDemux(WebPDemuxer* const dmux, const MemBuffer* const mem) {
dmux->state = WEBP_DEMUX_PARSING_HEADER;
dmux->loop_count = 1;
dmux->bgcolor = 0xFFFFFFFF; // White background by default.
dmux->canvas_width = -1;
dmux->canvas_height = -1;
dmux->frames_tail = &dmux->frames;
dmux->chunks_tail = &dmux->chunks;
dmux->mem = *mem;
dmux->state_ = WEBP_DEMUX_PARSING_HEADER;
dmux->loop_count_ = 1;
dmux->bgcolor_ = 0xFFFFFFFF; // White background by default.
dmux->canvas_width_ = -1;
dmux->canvas_height_ = -1;
dmux->frames_tail_ = &dmux->frames_;
dmux->chunks_tail_ = &dmux->chunks_;
dmux->mem_ = *mem;
}
static ParseStatus CreateRawImageDemuxer(MemBuffer* const mem,
WebPDemuxer** demuxer) {
WebPBitstreamFeatures features;
const VP8StatusCode status =
WebPGetFeatures(mem->buf, mem->buf_size, &features);
WebPGetFeatures(mem->buf_, mem->buf_size_, &features);
*demuxer = NULL;
if (status != VP8_STATUS_OK) {
return (status == VP8_STATUS_NOT_ENOUGH_DATA) ? PARSE_NEED_MORE_DATA
@ -686,14 +683,14 @@ static ParseStatus CreateRawImageDemuxer(MemBuffer* const mem,
Frame* const frame = (Frame*)WebPSafeCalloc(1ULL, sizeof(*frame));
if (dmux == NULL || frame == NULL) goto Error;
InitDemux(dmux, mem);
SetFrameInfo(0, mem->buf_size, 1 /*frame_num*/, 1 /*complete*/, &features,
SetFrameInfo(0, mem->buf_size_, 1 /*frame_num*/, 1 /*complete*/, &features,
frame);
if (!AddFrame(dmux, frame)) goto Error;
dmux->state = WEBP_DEMUX_DONE;
dmux->canvas_width = frame->width;
dmux->canvas_height = frame->height;
dmux->feature_flags |= frame->has_alpha ? ALPHA_FLAG : 0;
dmux->num_frames = 1;
dmux->state_ = WEBP_DEMUX_DONE;
dmux->canvas_width_ = frame->width_;
dmux->canvas_height_ = frame->height_;
dmux->feature_flags_ |= frame->has_alpha_ ? ALPHA_FLAG : 0;
dmux->num_frames_ = 1;
assert(IsValidSimpleFormat(dmux));
*demuxer = dmux;
return PARSE_OK;
@ -737,7 +734,7 @@ WebPDemuxer* WebPDemuxInternal(const WebPData* data, int allow_partial,
return NULL;
}
partial = (mem.buf_size < mem.riff_end);
partial = (mem.buf_size_ < mem.riff_end_);
if (!allow_partial && partial) return NULL;
dmux = (WebPDemuxer*)WebPSafeCalloc(1ULL, sizeof(*dmux));
@ -746,16 +743,16 @@ WebPDemuxer* WebPDemuxInternal(const WebPData* data, int allow_partial,
status = PARSE_ERROR;
for (parser = kMasterChunks; parser->parse != NULL; ++parser) {
if (!memcmp(parser->id, GetBuffer(&dmux->mem), TAG_SIZE)) {
if (!memcmp(parser->id, GetBuffer(&dmux->mem_), TAG_SIZE)) {
status = parser->parse(dmux);
if (status == PARSE_OK) dmux->state = WEBP_DEMUX_DONE;
if (status == PARSE_OK) dmux->state_ = WEBP_DEMUX_DONE;
if (status == PARSE_NEED_MORE_DATA && !partial) status = PARSE_ERROR;
if (status != PARSE_ERROR && !parser->valid(dmux)) status = PARSE_ERROR;
if (status == PARSE_ERROR) dmux->state = WEBP_DEMUX_PARSE_ERROR;
if (status == PARSE_ERROR) dmux->state_ = WEBP_DEMUX_PARSE_ERROR;
break;
}
}
if (state != NULL) *state = dmux->state;
if (state != NULL) *state = dmux->state_;
if (status == PARSE_ERROR) {
WebPDemuxDelete(dmux);
@ -769,14 +766,14 @@ void WebPDemuxDelete(WebPDemuxer* dmux) {
Frame* f;
if (dmux == NULL) return;
for (f = dmux->frames; f != NULL;) {
for (f = dmux->frames_; f != NULL;) {
Frame* const cur_frame = f;
f = f->next;
f = f->next_;
WebPSafeFree(cur_frame);
}
for (c = dmux->chunks; c != NULL;) {
for (c = dmux->chunks_; c != NULL;) {
Chunk* const cur_chunk = c;
c = c->next;
c = c->next_;
WebPSafeFree(cur_chunk);
}
WebPSafeFree(dmux);
@ -788,12 +785,12 @@ uint32_t WebPDemuxGetI(const WebPDemuxer* dmux, WebPFormatFeature feature) {
if (dmux == NULL) return 0;
switch (feature) {
case WEBP_FF_FORMAT_FLAGS: return dmux->feature_flags;
case WEBP_FF_CANVAS_WIDTH: return (uint32_t)dmux->canvas_width;
case WEBP_FF_CANVAS_HEIGHT: return (uint32_t)dmux->canvas_height;
case WEBP_FF_LOOP_COUNT: return (uint32_t)dmux->loop_count;
case WEBP_FF_BACKGROUND_COLOR: return dmux->bgcolor;
case WEBP_FF_FRAME_COUNT: return (uint32_t)dmux->num_frames;
case WEBP_FF_FORMAT_FLAGS: return dmux->feature_flags_;
case WEBP_FF_CANVAS_WIDTH: return (uint32_t)dmux->canvas_width_;
case WEBP_FF_CANVAS_HEIGHT: return (uint32_t)dmux->canvas_height_;
case WEBP_FF_LOOP_COUNT: return (uint32_t)dmux->loop_count_;
case WEBP_FF_BACKGROUND_COLOR: return dmux->bgcolor_;
case WEBP_FF_FRAME_COUNT: return (uint32_t)dmux->num_frames_;
}
return 0;
}
@ -803,8 +800,8 @@ uint32_t WebPDemuxGetI(const WebPDemuxer* dmux, WebPFormatFeature feature) {
static const Frame* GetFrame(const WebPDemuxer* const dmux, int frame_num) {
const Frame* f;
for (f = dmux->frames; f != NULL; f = f->next) {
if (frame_num == f->frame_num) break;
for (f = dmux->frames_; f != NULL; f = f->next_) {
if (frame_num == f->frame_num_) break;
}
return f;
}
@ -814,19 +811,19 @@ static const uint8_t* GetFramePayload(const uint8_t* const mem_buf,
size_t* const data_size) {
*data_size = 0;
if (frame != NULL) {
const ChunkData* const image = frame->img_components;
const ChunkData* const alpha = frame->img_components + 1;
size_t start_offset = image->offset;
*data_size = image->size;
const ChunkData* const image = frame->img_components_;
const ChunkData* const alpha = frame->img_components_ + 1;
size_t start_offset = image->offset_;
*data_size = image->size_;
// if alpha exists it precedes image, update the size allowing for
// intervening chunks.
if (alpha->size > 0) {
const size_t inter_size = (image->offset > 0)
? image->offset - (alpha->offset + alpha->size)
if (alpha->size_ > 0) {
const size_t inter_size = (image->offset_ > 0)
? image->offset_ - (alpha->offset_ + alpha->size_)
: 0;
start_offset = alpha->offset;
*data_size += alpha->size + inter_size;
start_offset = alpha->offset_;
*data_size += alpha->size_ + inter_size;
}
return mem_buf + start_offset;
}
@ -837,23 +834,23 @@ static const uint8_t* GetFramePayload(const uint8_t* const mem_buf,
static int SynthesizeFrame(const WebPDemuxer* const dmux,
const Frame* const frame,
WebPIterator* const iter) {
const uint8_t* const mem_buf = dmux->mem.buf;
const uint8_t* const mem_buf = dmux->mem_.buf_;
size_t payload_size = 0;
const uint8_t* const payload = GetFramePayload(mem_buf, frame, &payload_size);
if (payload == NULL) return 0;
assert(frame != NULL);
iter->frame_num = frame->frame_num;
iter->num_frames = dmux->num_frames;
iter->x_offset = frame->x_offset;
iter->y_offset = frame->y_offset;
iter->width = frame->width;
iter->height = frame->height;
iter->has_alpha = frame->has_alpha;
iter->duration = frame->duration;
iter->dispose_method = frame->dispose_method;
iter->blend_method = frame->blend_method;
iter->complete = frame->complete;
iter->frame_num = frame->frame_num_;
iter->num_frames = dmux->num_frames_;
iter->x_offset = frame->x_offset_;
iter->y_offset = frame->y_offset_;
iter->width = frame->width_;
iter->height = frame->height_;
iter->has_alpha = frame->has_alpha_;
iter->duration = frame->duration_;
iter->dispose_method = frame->dispose_method_;
iter->blend_method = frame->blend_method_;
iter->complete = frame->complete_;
iter->fragment.bytes = payload;
iter->fragment.size = payload_size;
return 1;
@ -863,8 +860,8 @@ static int SetFrame(int frame_num, WebPIterator* const iter) {
const Frame* frame;
const WebPDemuxer* const dmux = (WebPDemuxer*)iter->private_;
if (dmux == NULL || frame_num < 0) return 0;
if (frame_num > dmux->num_frames) return 0;
if (frame_num == 0) frame_num = dmux->num_frames;
if (frame_num > dmux->num_frames_) return 0;
if (frame_num == 0) frame_num = dmux->num_frames_;
frame = GetFrame(dmux, frame_num);
if (frame == NULL) return 0;
@ -899,11 +896,11 @@ void WebPDemuxReleaseIterator(WebPIterator* iter) {
// Chunk iteration
static int ChunkCount(const WebPDemuxer* const dmux, const char fourcc[4]) {
const uint8_t* const mem_buf = dmux->mem.buf;
const uint8_t* const mem_buf = dmux->mem_.buf_;
const Chunk* c;
int count = 0;
for (c = dmux->chunks; c != NULL; c = c->next) {
const uint8_t* const header = mem_buf + c->data.offset;
for (c = dmux->chunks_; c != NULL; c = c->next_) {
const uint8_t* const header = mem_buf + c->data_.offset_;
if (!memcmp(header, fourcc, TAG_SIZE)) ++count;
}
return count;
@ -911,11 +908,11 @@ static int ChunkCount(const WebPDemuxer* const dmux, const char fourcc[4]) {
static const Chunk* GetChunk(const WebPDemuxer* const dmux,
const char fourcc[4], int chunk_num) {
const uint8_t* const mem_buf = dmux->mem.buf;
const uint8_t* const mem_buf = dmux->mem_.buf_;
const Chunk* c;
int count = 0;
for (c = dmux->chunks; c != NULL; c = c->next) {
const uint8_t* const header = mem_buf + c->data.offset;
for (c = dmux->chunks_; c != NULL; c = c->next_) {
const uint8_t* const header = mem_buf + c->data_.offset_;
if (!memcmp(header, fourcc, TAG_SIZE)) ++count;
if (count == chunk_num) break;
}
@ -933,10 +930,10 @@ static int SetChunk(const char fourcc[4], int chunk_num,
if (chunk_num == 0) chunk_num = count;
if (chunk_num <= count) {
const uint8_t* const mem_buf = dmux->mem.buf;
const uint8_t* const mem_buf = dmux->mem_.buf_;
const Chunk* const chunk = GetChunk(dmux, fourcc, chunk_num);
iter->chunk.bytes = mem_buf + chunk->data.offset + CHUNK_HEADER_SIZE;
iter->chunk.size = chunk->data.size - CHUNK_HEADER_SIZE;
iter->chunk.bytes = mem_buf + chunk->data_.offset_ + CHUNK_HEADER_SIZE;
iter->chunk.size = chunk->data_.size_ - CHUNK_HEADER_SIZE;
iter->num_chunks = count;
iter->chunk_num = chunk_num;
return 1;
@ -975,3 +972,4 @@ int WebPDemuxPrevChunk(WebPChunkIterator* iter) {
void WebPDemuxReleaseChunkIterator(WebPChunkIterator* iter) {
(void)iter;
}

View File

@ -6,8 +6,8 @@
LANGUAGE LANG_ENGLISH, SUBLANG_ENGLISH_US
VS_VERSION_INFO VERSIONINFO
FILEVERSION 1,0,6,0
PRODUCTVERSION 1,0,6,0
FILEVERSION 1,0,4,0
PRODUCTVERSION 1,0,4,0
FILEFLAGSMASK 0x3fL
#ifdef _DEBUG
FILEFLAGS 0x1L
@ -24,12 +24,12 @@ BEGIN
BEGIN
VALUE "CompanyName", "Google, Inc."
VALUE "FileDescription", "libwebpdemux DLL"
VALUE "FileVersion", "1.6.0"
VALUE "FileVersion", "1.4.0"
VALUE "InternalName", "libwebpdemux.dll"
VALUE "LegalCopyright", "Copyright (C) 2025"
VALUE "LegalCopyright", "Copyright (C) 2024"
VALUE "OriginalFilename", "libwebpdemux.dll"
VALUE "ProductName", "WebP Image Demuxer"
VALUE "ProductVersion", "1.6.0"
VALUE "ProductVersion", "1.4.0"
END
END
BLOCK "VarFileInfo"

View File

@ -5,8 +5,6 @@ noinst_LTLIBRARIES += libwebpdsp_sse2.la
noinst_LTLIBRARIES += libwebpdspdecode_sse2.la
noinst_LTLIBRARIES += libwebpdsp_sse41.la
noinst_LTLIBRARIES += libwebpdspdecode_sse41.la
noinst_LTLIBRARIES += libwebpdsp_avx2.la
noinst_LTLIBRARIES += libwebpdspdecode_avx2.la
noinst_LTLIBRARIES += libwebpdsp_neon.la
noinst_LTLIBRARIES += libwebpdspdecode_neon.la
noinst_LTLIBRARIES += libwebpdsp_msa.la
@ -46,11 +44,6 @@ ENC_SOURCES += lossless_enc.c
ENC_SOURCES += quant.h
ENC_SOURCES += ssim.c
libwebpdspdecode_avx2_la_SOURCES =
libwebpdspdecode_avx2_la_SOURCES += lossless_avx2.c
libwebpdspdecode_avx2_la_CPPFLAGS = $(libwebpdsp_la_CPPFLAGS)
libwebpdspdecode_avx2_la_CFLAGS = $(AM_CFLAGS) $(AVX2_FLAGS)
libwebpdspdecode_sse41_la_SOURCES =
libwebpdspdecode_sse41_la_SOURCES += alpha_processing_sse41.c
libwebpdspdecode_sse41_la_SOURCES += dec_sse41.c
@ -130,12 +123,6 @@ libwebpdsp_sse41_la_CPPFLAGS = $(libwebpdsp_la_CPPFLAGS)
libwebpdsp_sse41_la_CFLAGS = $(AM_CFLAGS) $(SSE41_FLAGS)
libwebpdsp_sse41_la_LIBADD = libwebpdspdecode_sse41.la
libwebpdsp_avx2_la_SOURCES =
libwebpdsp_avx2_la_SOURCES += lossless_enc_avx2.c
libwebpdsp_avx2_la_CPPFLAGS = $(libwebpdsp_la_CPPFLAGS)
libwebpdsp_avx2_la_CFLAGS = $(AM_CFLAGS) $(AVX2_FLAGS)
libwebpdsp_avx2_la_LIBADD = libwebpdspdecode_avx2.la
libwebpdsp_neon_la_SOURCES =
libwebpdsp_neon_la_SOURCES += cost_neon.c
libwebpdsp_neon_la_SOURCES += enc_neon.c
@ -180,7 +167,6 @@ libwebpdsp_la_LDFLAGS = -lm
libwebpdsp_la_LIBADD =
libwebpdsp_la_LIBADD += libwebpdsp_sse2.la
libwebpdsp_la_LIBADD += libwebpdsp_sse41.la
libwebpdsp_la_LIBADD += libwebpdsp_avx2.la
libwebpdsp_la_LIBADD += libwebpdsp_neon.la
libwebpdsp_la_LIBADD += libwebpdsp_msa.la
libwebpdsp_la_LIBADD += libwebpdsp_mips32.la
@ -194,7 +180,6 @@ if BUILD_LIBWEBPDECODER
libwebpdspdecode_la_LIBADD =
libwebpdspdecode_la_LIBADD += libwebpdspdecode_sse2.la
libwebpdspdecode_la_LIBADD += libwebpdspdecode_sse41.la
libwebpdspdecode_la_LIBADD += libwebpdspdecode_avx2.la
libwebpdspdecode_la_LIBADD += libwebpdspdecode_neon.la
libwebpdspdecode_la_LIBADD += libwebpdspdecode_msa.la
libwebpdspdecode_la_LIBADD += libwebpdspdecode_mips32.la

View File

@ -12,11 +12,7 @@
// Author: Skal (pascal.massimino@gmail.com)
#include <assert.h>
#include <stddef.h>
#include "src/dsp/cpu.h"
#include "src/dsp/dsp.h"
#include "src/webp/types.h"
// Tables can be faster on some platform but incur some extra binary size (~2k).
#if !defined(USE_TABLES_FOR_ALPHA_MULT)

View File

@ -16,9 +16,6 @@
#if defined(WEBP_USE_SSE2)
#include <emmintrin.h>
#include "src/webp/types.h"
#include "src/dsp/cpu.h"
//------------------------------------------------------------------------------
static int DispatchAlpha_SSE2(const uint8_t* WEBP_RESTRICT alpha,
@ -29,44 +26,38 @@ static int DispatchAlpha_SSE2(const uint8_t* WEBP_RESTRICT alpha,
uint32_t alpha_and = 0xff;
int i, j;
const __m128i zero = _mm_setzero_si128();
const __m128i alpha_mask = _mm_set1_epi32((int)0xff); // to preserve A
const __m128i all_0xff = _mm_set1_epi8((char)0xff);
__m128i all_alphas16 = all_0xff;
__m128i all_alphas8 = all_0xff;
const __m128i rgb_mask = _mm_set1_epi32((int)0xffffff00); // to preserve RGB
const __m128i all_0xff = _mm_set_epi32(0, 0, ~0, ~0);
__m128i all_alphas = all_0xff;
// We must be able to access 3 extra bytes after the last written byte
// 'dst[4 * width - 4]', because we don't know if alpha is the first or the
// last byte of the quadruplet.
const int limit = (width - 1) & ~7;
for (j = 0; j < height; ++j) {
char* ptr = (char*)dst;
for (i = 0; i + 16 <= width - 1; i += 16) {
// load 16 alpha bytes
const __m128i a0 = _mm_loadu_si128((const __m128i*)&alpha[i]);
const __m128i a1_lo = _mm_unpacklo_epi8(a0, zero);
const __m128i a1_hi = _mm_unpackhi_epi8(a0, zero);
const __m128i a2_lo_lo = _mm_unpacklo_epi16(a1_lo, zero);
const __m128i a2_lo_hi = _mm_unpackhi_epi16(a1_lo, zero);
const __m128i a2_hi_lo = _mm_unpacklo_epi16(a1_hi, zero);
const __m128i a2_hi_hi = _mm_unpackhi_epi16(a1_hi, zero);
_mm_maskmoveu_si128(a2_lo_lo, alpha_mask, ptr + 0);
_mm_maskmoveu_si128(a2_lo_hi, alpha_mask, ptr + 16);
_mm_maskmoveu_si128(a2_hi_lo, alpha_mask, ptr + 32);
_mm_maskmoveu_si128(a2_hi_hi, alpha_mask, ptr + 48);
// accumulate 16 alpha 'and' in parallel
all_alphas16 = _mm_and_si128(all_alphas16, a0);
ptr += 64;
}
if (i + 8 <= width - 1) {
__m128i* out = (__m128i*)dst;
for (i = 0; i < limit; i += 8) {
// load 8 alpha bytes
const __m128i a0 = _mm_loadl_epi64((const __m128i*)&alpha[i]);
const __m128i a1 = _mm_unpacklo_epi8(a0, zero);
const __m128i a2_lo = _mm_unpacklo_epi16(a1, zero);
const __m128i a2_hi = _mm_unpackhi_epi16(a1, zero);
_mm_maskmoveu_si128(a2_lo, alpha_mask, ptr);
_mm_maskmoveu_si128(a2_hi, alpha_mask, ptr + 16);
// accumulate 8 alpha 'and' in parallel
all_alphas8 = _mm_and_si128(all_alphas8, a0);
i += 8;
// load 8 dst pixels (32 bytes)
const __m128i b0_lo = _mm_loadu_si128(out + 0);
const __m128i b0_hi = _mm_loadu_si128(out + 1);
// mask dst alpha values
const __m128i b1_lo = _mm_and_si128(b0_lo, rgb_mask);
const __m128i b1_hi = _mm_and_si128(b0_hi, rgb_mask);
// combine
const __m128i b2_lo = _mm_or_si128(b1_lo, a2_lo);
const __m128i b2_hi = _mm_or_si128(b1_hi, a2_hi);
// store
_mm_storeu_si128(out + 0, b2_lo);
_mm_storeu_si128(out + 1, b2_hi);
// accumulate eight alpha 'and' in parallel
all_alphas = _mm_and_si128(all_alphas, a0);
out += 2;
}
for (; i < width; ++i) {
const uint32_t alpha_value = alpha[i];
@ -77,9 +68,8 @@ static int DispatchAlpha_SSE2(const uint8_t* WEBP_RESTRICT alpha,
dst += dst_stride;
}
// Combine the eight alpha 'and' into a 8-bit mask.
alpha_and &= _mm_movemask_epi8(_mm_cmpeq_epi8(all_alphas8, all_0xff)) & 0xff;
return (alpha_and != 0xff ||
_mm_movemask_epi8(_mm_cmpeq_epi8(all_alphas16, all_0xff)) != 0xffff);
alpha_and &= _mm_movemask_epi8(_mm_cmpeq_epi8(all_alphas, all_0xff));
return (alpha_and != 0xff);
}
static void DispatchAlphaToGreen_SSE2(const uint8_t* WEBP_RESTRICT alpha,

View File

@ -11,12 +11,10 @@
//
// Author: Skal (pascal.massimino@gmail.com)
#include "src/dsp/cpu.h"
#include "src/webp/types.h"
#include "src/dsp/dsp.h"
#if defined(WEBP_USE_SSE41)
#include <emmintrin.h>
#include <smmintrin.h>
//------------------------------------------------------------------------------

View File

@ -9,15 +9,8 @@
//
// Author: Skal (pascal.massimino@gmail.com)
#include <assert.h>
#include <stddef.h>
#include <stdlib.h>
#include "src/dsp/cpu.h"
#include "src/webp/types.h"
#include "src/dsp/dsp.h"
#include "src/enc/cost_enc.h"
#include "src/enc/vp8i_enc.h"
//------------------------------------------------------------------------------
// Boolean-cost cost table

View File

@ -16,10 +16,6 @@
#if defined(WEBP_USE_SSE2)
#include <emmintrin.h>
#include <assert.h>
#include "src/webp/types.h"
#include "src/dsp/cpu.h"
#include "src/enc/cost_enc.h"
#include "src/enc/vp8i_enc.h"
#include "src/utils/utils.h"

View File

@ -22,10 +22,6 @@
#include <cpu-features.h>
#endif
#include <stddef.h>
#include "src/webp/types.h"
//------------------------------------------------------------------------------
// SSE2 detection.
//

View File

@ -56,11 +56,6 @@
(defined(_M_X64) || defined(_M_IX86))
#define WEBP_MSC_SSE41 // Visual C++ SSE4.1 targets
#endif
#if defined(_MSC_VER) && _MSC_VER >= 1700 && \
(defined(_M_X64) || defined(_M_IX86))
#define WEBP_MSC_AVX2 // Visual C++ AVX2 targets
#endif
#endif
// WEBP_HAVE_* are used to indicate the presence of the instruction set in dsp
@ -85,16 +80,6 @@
#define WEBP_HAVE_SSE41
#endif
#if (defined(__AVX2__) || defined(WEBP_MSC_AVX2)) && \
(!defined(HAVE_CONFIG_H) || defined(WEBP_HAVE_AVX2))
#define WEBP_USE_AVX2
#endif
#if defined(WEBP_USE_AVX2) && !defined(WEBP_HAVE_AVX2)
#define WEBP_HAVE_AVX2
#endif
#undef WEBP_MSC_AVX2
#undef WEBP_MSC_SSE41
#undef WEBP_MSC_SSE2

View File

@ -12,15 +12,10 @@
// Author: Skal (pascal.massimino@gmail.com)
#include <assert.h>
#include <stddef.h>
#include <string.h>
#include "src/dec/common_dec.h"
#include "src/dec/vp8i_dec.h"
#include "src/dsp/cpu.h"
#include "src/dsp/dsp.h"
#include "src/dec/vp8i_dec.h"
#include "src/utils/utils.h"
#include "src/webp/types.h"
//------------------------------------------------------------------------------

View File

@ -11,8 +11,6 @@
//
// Author: Skal (pascal.massimino@gmail.com)
#include "src/dsp/cpu.h"
#include "src/webp/types.h"
#include "src/dsp/dsp.h"
// define to 0 to have run-time table initialization

View File

@ -23,12 +23,9 @@
#endif
#include <emmintrin.h>
#include "src/dec/vp8i_dec.h"
#include "src/dsp/common_sse2.h"
#include "src/dsp/cpu.h"
#include "src/dec/vp8i_dec.h"
#include "src/utils/utils.h"
#include "src/webp/types.h"
//------------------------------------------------------------------------------
// Transforms (Paragraph 14.4)

View File

@ -14,12 +14,9 @@
#include "src/dsp/dsp.h"
#if defined(WEBP_USE_SSE41)
#include <emmintrin.h>
#include <smmintrin.h>
#include "src/webp/types.h"
#include <smmintrin.h>
#include "src/dec/vp8i_dec.h"
#include "src/dsp/cpu.h"
#include "src/utils/utils.h"
static void HE16_SSE41(uint8_t* dst) { // horizontal

View File

@ -13,13 +13,9 @@
#include <assert.h>
#include <stdlib.h> // for abs()
#include <string.h>
#include "src/dsp/cpu.h"
#include "src/dsp/dsp.h"
#include "src/enc/vp8i_enc.h"
#include "src/utils/utils.h"
#include "src/webp/types.h"
static WEBP_INLINE uint8_t clip_8b(int v) {
return (!(v & ~0xff)) ? v : (v < 0) ? 0 : 255;
@ -692,11 +688,11 @@ static int QuantizeBlock_C(int16_t in[16], int16_t out[16],
for (n = 0; n < 16; ++n) {
const int j = kZigzag[n];
const int sign = (in[j] < 0);
const uint32_t coeff = (sign ? -in[j] : in[j]) + mtx->sharpen[j];
if (coeff > mtx->zthresh[j]) {
const uint32_t Q = mtx->q[j];
const uint32_t iQ = mtx->iq[j];
const uint32_t B = mtx->bias[j];
const uint32_t coeff = (sign ? -in[j] : in[j]) + mtx->sharpen_[j];
if (coeff > mtx->zthresh_[j]) {
const uint32_t Q = mtx->q_[j];
const uint32_t iQ = mtx->iq_[j];
const uint32_t B = mtx->bias_[j];
int level = QUANTDIV(coeff, iQ, B);
if (level > MAX_LEVEL) level = MAX_LEVEL;
if (sign) level = -level;

View File

@ -193,11 +193,11 @@ static int QuantizeBlock_MIPS32(int16_t in[16], int16_t out[16],
int16_t* ppin = &in[0];
int16_t* pout = &out[0];
const uint16_t* ppsharpen = &mtx->sharpen[0];
const uint32_t* ppzthresh = &mtx->zthresh[0];
const uint16_t* ppq = &mtx->q[0];
const uint16_t* ppiq = &mtx->iq[0];
const uint32_t* ppbias = &mtx->bias[0];
const uint16_t* ppsharpen = &mtx->sharpen_[0];
const uint32_t* ppzthresh = &mtx->zthresh_[0];
const uint16_t* ppq = &mtx->q_[0];
const uint16_t* ppiq = &mtx->iq_[0];
const uint32_t* ppbias = &mtx->bias_[0];
__asm__ volatile(
QUANTIZE_ONE( 0, 0, 0)

View File

@ -1296,11 +1296,11 @@ static int QuantizeBlock_MIPSdspR2(int16_t in[16], int16_t out[16],
int16_t* ppin = &in[0];
int16_t* pout = &out[0];
const uint16_t* ppsharpen = &mtx->sharpen[0];
const uint32_t* ppzthresh = &mtx->zthresh[0];
const uint16_t* ppq = &mtx->q[0];
const uint16_t* ppiq = &mtx->iq[0];
const uint32_t* ppbias = &mtx->bias[0];
const uint16_t* ppsharpen = &mtx->sharpen_[0];
const uint32_t* ppzthresh = &mtx->zthresh_[0];
const uint16_t* ppq = &mtx->q_[0];
const uint16_t* ppiq = &mtx->iq_[0];
const uint32_t* ppbias = &mtx->bias_[0];
__asm__ volatile (
QUANTIZE_ONE( 0, 0, 0, 2)

View File

@ -845,7 +845,7 @@ static int QuantizeBlock_MSA(int16_t in[16], int16_t out[16],
const v8i16 maxlevel = __msa_fill_h(MAX_LEVEL);
LD_SH2(&in[0], 8, in0, in1);
LD_SH2(&mtx->sharpen[0], 8, sh0, sh1);
LD_SH2(&mtx->sharpen_[0], 8, sh0, sh1);
tmp4 = __msa_add_a_h(in0, zero);
tmp5 = __msa_add_a_h(in1, zero);
ILVRL_H2_SH(sh0, tmp4, tmp0, tmp1);
@ -853,10 +853,10 @@ static int QuantizeBlock_MSA(int16_t in[16], int16_t out[16],
HADD_SH4_SW(tmp0, tmp1, tmp2, tmp3, s0, s1, s2, s3);
sign0 = (in0 < zero);
sign1 = (in1 < zero); // sign
LD_SH2(&mtx->iq[0], 8, tmp0, tmp1); // iq
LD_SH2(&mtx->iq_[0], 8, tmp0, tmp1); // iq
ILVRL_H2_SW(zero, tmp0, t0, t1);
ILVRL_H2_SW(zero, tmp1, t2, t3);
LD_SW4(&mtx->bias[0], 4, b0, b1, b2, b3); // bias
LD_SW4(&mtx->bias_[0], 4, b0, b1, b2, b3); // bias
MUL4(t0, s0, t1, s1, t2, s2, t3, s3, t0, t1, t2, t3);
ADD4(b0, t0, b1, t1, b2, t2, b3, t3, b0, b1, b2, b3);
SRAI_W4_SW(b0, b1, b2, b3, 17);
@ -868,7 +868,7 @@ static int QuantizeBlock_MSA(int16_t in[16], int16_t out[16],
SUB2(zero, tmp2, zero, tmp3, tmp0, tmp1);
tmp2 = (v8i16)__msa_bmnz_v((v16u8)tmp2, (v16u8)tmp0, (v16u8)sign0);
tmp3 = (v8i16)__msa_bmnz_v((v16u8)tmp3, (v16u8)tmp1, (v16u8)sign1);
LD_SW4(&mtx->zthresh[0], 4, t0, t1, t2, t3); // zthresh
LD_SW4(&mtx->zthresh_[0], 4, t0, t1, t2, t3); // zthresh
t0 = (s0 > t0);
t1 = (s1 > t1);
t2 = (s2 > t2);
@ -876,7 +876,7 @@ static int QuantizeBlock_MSA(int16_t in[16], int16_t out[16],
PCKEV_H2_SH(t1, t0, t3, t2, tmp0, tmp1);
tmp4 = (v8i16)__msa_bmnz_v((v16u8)zero, (v16u8)tmp2, (v16u8)tmp0);
tmp5 = (v8i16)__msa_bmnz_v((v16u8)zero, (v16u8)tmp3, (v16u8)tmp1);
LD_SH2(&mtx->q[0], 8, tmp0, tmp1);
LD_SH2(&mtx->q_[0], 8, tmp0, tmp1);
MUL2(tmp4, tmp0, tmp5, tmp1, in0, in1);
VSHF_H2_SH(tmp4, tmp5, tmp4, tmp5, zigzag0, zigzag1, out0, out1);
ST_SH2(in0, in1, &in[0], 8);

View File

@ -841,11 +841,11 @@ static int SSE4x4_NEON(const uint8_t* WEBP_RESTRICT a,
static int16x8_t Quantize_NEON(int16_t* WEBP_RESTRICT const in,
const VP8Matrix* WEBP_RESTRICT const mtx,
int offset) {
const uint16x8_t sharp = vld1q_u16(&mtx->sharpen[offset]);
const uint16x8_t q = vld1q_u16(&mtx->q[offset]);
const uint16x8_t iq = vld1q_u16(&mtx->iq[offset]);
const uint32x4_t bias0 = vld1q_u32(&mtx->bias[offset + 0]);
const uint32x4_t bias1 = vld1q_u32(&mtx->bias[offset + 4]);
const uint16x8_t sharp = vld1q_u16(&mtx->sharpen_[offset]);
const uint16x8_t q = vld1q_u16(&mtx->q_[offset]);
const uint16x8_t iq = vld1q_u16(&mtx->iq_[offset]);
const uint32x4_t bias0 = vld1q_u32(&mtx->bias_[offset + 0]);
const uint32x4_t bias1 = vld1q_u32(&mtx->bias_[offset + 4]);
const int16x8_t a = vld1q_s16(in + offset); // in
const uint16x8_t b = vreinterpretq_u16_s16(vabsq_s16(a)); // coeff = abs(in)
@ -945,28 +945,6 @@ static int Quantize2Blocks_NEON(int16_t in[32], int16_t out[32],
vst1q_u8(dst, r); \
} while (0)
static WEBP_INLINE uint8x8x2_t Vld1U8x2(const uint8_t* ptr) {
#if LOCAL_CLANG_PREREQ(3, 4) || LOCAL_GCC_PREREQ(8, 5) || defined(_MSC_VER)
return vld1_u8_x2(ptr);
#else
uint8x8x2_t res;
INIT_VECTOR2(res, vld1_u8(ptr + 0 * 8), vld1_u8(ptr + 1 * 8));
return res;
#endif
}
static WEBP_INLINE uint8x16x4_t Vld1qU8x4(const uint8_t* ptr) {
#if LOCAL_CLANG_PREREQ(3, 4) || LOCAL_GCC_PREREQ(9, 4) || defined(_MSC_VER)
return vld1q_u8_x4(ptr);
#else
uint8x16x4_t res;
INIT_VECTOR4(res,
vld1q_u8(ptr + 0 * 16), vld1q_u8(ptr + 1 * 16),
vld1q_u8(ptr + 2 * 16), vld1q_u8(ptr + 3 * 16));
return res;
#endif
}
static void Intra4Preds_NEON(uint8_t* WEBP_RESTRICT dst,
const uint8_t* WEBP_RESTRICT top) {
// 0 1 2 3 4 5 6 7 8 9 10 11 12 13
@ -993,9 +971,9 @@ static void Intra4Preds_NEON(uint8_t* WEBP_RESTRICT dst,
30, 30, 30, 30, 0, 0, 0, 0, 21, 22, 23, 24, 16, 16, 16, 16
};
const uint8x16x4_t lookup_avgs1 = Vld1qU8x4(kLookupTbl1);
const uint8x16x4_t lookup_avgs2 = Vld1qU8x4(kLookupTbl2);
const uint8x16x4_t lookup_avgs3 = Vld1qU8x4(kLookupTbl3);
const uint8x16x4_t lookup_avgs1 = vld1q_u8_x4(kLookupTbl1);
const uint8x16x4_t lookup_avgs2 = vld1q_u8_x4(kLookupTbl2);
const uint8x16x4_t lookup_avgs3 = vld1q_u8_x4(kLookupTbl3);
const uint8x16_t preload = vld1q_u8(top - 5);
uint8x16x2_t qcombined;
@ -1189,7 +1167,7 @@ static WEBP_INLINE void TrueMotion_NEON(uint8_t* dst, const uint8_t* left,
// Neither left nor top are NULL.
a = vdupq_n_u16(left[-1]);
inner = Vld1U8x2(top);
inner = vld1_u8_x2(top);
for (i = 0; i < 4; i++) {
const uint8x8x4_t outer = vld4_dup_u8(&left[i * 4]);

View File

@ -14,18 +14,13 @@
#include "src/dsp/dsp.h"
#if defined(WEBP_USE_SSE2)
#include <emmintrin.h>
#include <assert.h>
#include <stdlib.h> // for abs()
#include <string.h>
#include <emmintrin.h>
#include "src/dsp/common_sse2.h"
#include "src/dsp/cpu.h"
#include "src/enc/cost_enc.h"
#include "src/enc/vp8i_enc.h"
#include "src/utils/utils.h"
#include "src/webp/types.h"
//------------------------------------------------------------------------------
// Transforms (Paragraph 14.4)
@ -1415,10 +1410,10 @@ static WEBP_INLINE int DoQuantizeBlock_SSE2(
// Load all inputs.
__m128i in0 = _mm_loadu_si128((__m128i*)&in[0]);
__m128i in8 = _mm_loadu_si128((__m128i*)&in[8]);
const __m128i iq0 = _mm_loadu_si128((const __m128i*)&mtx->iq[0]);
const __m128i iq8 = _mm_loadu_si128((const __m128i*)&mtx->iq[8]);
const __m128i q0 = _mm_loadu_si128((const __m128i*)&mtx->q[0]);
const __m128i q8 = _mm_loadu_si128((const __m128i*)&mtx->q[8]);
const __m128i iq0 = _mm_loadu_si128((const __m128i*)&mtx->iq_[0]);
const __m128i iq8 = _mm_loadu_si128((const __m128i*)&mtx->iq_[8]);
const __m128i q0 = _mm_loadu_si128((const __m128i*)&mtx->q_[0]);
const __m128i q8 = _mm_loadu_si128((const __m128i*)&mtx->q_[8]);
// extract sign(in) (0x0000 if positive, 0xffff if negative)
const __m128i sign0 = _mm_cmpgt_epi16(zero, in0);
@ -1451,10 +1446,10 @@ static WEBP_INLINE int DoQuantizeBlock_SSE2(
__m128i out_08 = _mm_unpacklo_epi16(coeff_iQ8L, coeff_iQ8H);
__m128i out_12 = _mm_unpackhi_epi16(coeff_iQ8L, coeff_iQ8H);
// out = (coeff * iQ + B)
const __m128i bias_00 = _mm_loadu_si128((const __m128i*)&mtx->bias[0]);
const __m128i bias_04 = _mm_loadu_si128((const __m128i*)&mtx->bias[4]);
const __m128i bias_08 = _mm_loadu_si128((const __m128i*)&mtx->bias[8]);
const __m128i bias_12 = _mm_loadu_si128((const __m128i*)&mtx->bias[12]);
const __m128i bias_00 = _mm_loadu_si128((const __m128i*)&mtx->bias_[0]);
const __m128i bias_04 = _mm_loadu_si128((const __m128i*)&mtx->bias_[4]);
const __m128i bias_08 = _mm_loadu_si128((const __m128i*)&mtx->bias_[8]);
const __m128i bias_12 = _mm_loadu_si128((const __m128i*)&mtx->bias_[12]);
out_00 = _mm_add_epi32(out_00, bias_00);
out_04 = _mm_add_epi32(out_04, bias_04);
out_08 = _mm_add_epi32(out_08, bias_08);
@ -1517,7 +1512,7 @@ static WEBP_INLINE int DoQuantizeBlock_SSE2(
static int QuantizeBlock_SSE2(int16_t in[16], int16_t out[16],
const VP8Matrix* WEBP_RESTRICT const mtx) {
return DoQuantizeBlock_SSE2(in, out, &mtx->sharpen[0], mtx);
return DoQuantizeBlock_SSE2(in, out, &mtx->sharpen_[0], mtx);
}
static int QuantizeBlockWHT_SSE2(int16_t in[16], int16_t out[16],
@ -1528,7 +1523,7 @@ static int QuantizeBlockWHT_SSE2(int16_t in[16], int16_t out[16],
static int Quantize2Blocks_SSE2(int16_t in[32], int16_t out[32],
const VP8Matrix* WEBP_RESTRICT const mtx) {
int nz;
const uint16_t* const sharpen = &mtx->sharpen[0];
const uint16_t* const sharpen = &mtx->sharpen_[0];
nz = DoQuantizeBlock_SSE2(in + 0 * 16, out + 0 * 16, sharpen, mtx) << 0;
nz |= DoQuantizeBlock_SSE2(in + 1 * 16, out + 1 * 16, sharpen, mtx) << 1;
return nz;

Some files were not shown because too many files have changed in this diff Show More