Compare commits


No commits in common. "main" and "v1.3.1-rc2" have entirely different histories.

234 changed files with 9114 additions and 20226 deletions

.gitignore

@@ -52,6 +52,5 @@ tests/fuzzer/animdecoder_fuzzer
 tests/fuzzer/animencoder_fuzzer
 tests/fuzzer/demux_api_fuzzer
 tests/fuzzer/enc_dec_fuzzer
-tests/fuzzer/huffman_fuzzer
 tests/fuzzer/mux_demux_api_fuzzer
 tests/fuzzer/simple_api_fuzzer

AUTHORS

@@ -2,8 +2,6 @@ Contributors:
 - Aidan O'Loan (aidanol at gmail dot com)
 - Alan Browning (browning at google dot com)
 - Alexandru Ardelean (ardeleanalex at gmail dot com)
-- Anuraag Agrawal (anuraaga at gmail dot com)
-- Arthur Eubanks (aeubanks at google dot com)
 - Brian Ledger (brianpl at google dot com)
 - Charles Munger (clm at google dot com)
 - Cheng Yi (cyi at google dot com)
@@ -11,20 +9,16 @@ Contributors:
 - Christopher Degawa (ccom at randomderp dot com)
 - Clement Courbet (courbet at google dot com)
 - Djordje Pesut (djordje dot pesut at imgtec dot com)
-- Frank (1433351828 at qq dot com)
 - Frank Barchard (fbarchard at google dot com)
 - Hui Su (huisu at google dot com)
 - H. Vetinari (h dot vetinari at gmx dot com)
 - Ilya Kurdyukov (jpegqs at gmail dot com)
 - Ingvar Stepanyan (rreverser at google dot com)
-- Istvan Stefan (Istvan dot Stefan at arm dot com)
 - James Zern (jzern at google dot com)
 - Jan Engelhardt (jengelh at medozas dot de)
 - Jehan (jehan at girinstud dot io)
 - Jeremy Maitin-Shepard (jbms at google dot com)
 - Johann Koenig (johann dot koenig at duck dot com)
-- Jonathan Grant (jgrantinfotech at gmail dot com)
-- Jonliu1993 (13720414433 at 163 dot com)
 - Jovan Zelincevic (jovan dot zelincevic at imgtec dot com)
 - Jyrki Alakuijala (jyrki at google dot com)
 - Konstantin Ivlev (tomskside at gmail dot com)
@@ -34,16 +28,13 @@ Contributors:
 - Marcin Kowalczyk (qrczak at google dot com)
 - Martin Olsson (mnemo at minimum dot se)
 - Maryla Ustarroz-Calonge (maryla at google dot com)
-- Masahiro Hanada (hanada at atmark-techno dot com)
 - Mikołaj Zalewski (mikolajz at google dot com)
 - Mislav Bradac (mislavm at google dot com)
-- natewood (natewood at fb dot com)
 - Nico Weber (thakis at chromium dot org)
 - Noel Chromium (noel at chromium dot org)
 - Nozomi Isozaki (nontan at pixiv dot co dot jp)
 - Oliver Wolff (oliver dot wolff at qt dot io)
 - Owen Rodley (orodley at google dot com)
-- Ozkan Sezer (sezeroz at gmail dot com)
 - Parag Salasakar (img dot mips1 at gmail dot com)
 - Pascal Massimino (pascal dot massimino at gmail dot com)
 - Paweł Hajdan, Jr (phajdan dot jr at chromium dot org)
@@ -64,7 +55,6 @@ Contributors:
 - Vincent Rabaud (vrabaud at google dot com)
 - Vlad Tsyrklevich (vtsyrklevich at chromium dot org)
 - Wan-Teh Chang (wtc at google dot com)
-- wrv (wrv at utexas dot edu)
 - Yang Zhang (yang dot zhang at arm dot com)
 - Yannis Guyon (yguyon at google dot com)
 - Zhi An Ng (zhin at chromium dot org)


@@ -164,7 +164,6 @@ utils_dec_srcs := \
     src/utils/color_cache_utils.c \
     src/utils/filters_utils.c \
     src/utils/huffman_utils.c \
-    src/utils/palette.c \
     src/utils/quant_levels_dec_utils.c \
     src/utils/random_utils.c \
     src/utils/rescaler_utils.c \


@@ -9,7 +9,11 @@
 if(APPLE)
   cmake_minimum_required(VERSION 3.17)
 else()
-  cmake_minimum_required(VERSION 3.16)
+  cmake_minimum_required(VERSION 3.7)
+endif()
+if(POLICY CMP0072)
+  cmake_policy(SET CMP0072 NEW)
 endif()
 project(WebP C)
@@ -41,15 +45,12 @@ option(WEBP_BUILD_LIBWEBPMUX "Build the libwebpmux library." ON)
 option(WEBP_BUILD_WEBPMUX "Build the webpmux command line tool." ON)
 option(WEBP_BUILD_EXTRAS "Build extras." ON)
 option(WEBP_BUILD_WEBP_JS "Emscripten build of webp.js." OFF)
-option(WEBP_BUILD_FUZZTEST "Build the fuzztest tests." OFF)
 option(WEBP_USE_THREAD "Enable threading support" ON)
 option(WEBP_NEAR_LOSSLESS "Enable near-lossless encoding" ON)
 option(WEBP_ENABLE_SWAP_16BIT_CSP "Enable byte swap for 16 bit colorspaces."
        OFF)
 set(WEBP_BITTRACE "0" CACHE STRING "Bit trace mode (0=none, 1=bit, 2=bytes)")
 set_property(CACHE WEBP_BITTRACE PROPERTY STRINGS 0 1 2)
-option(WEBP_ENABLE_WUNUSED_RESULT "Add [[nodiscard]] to some functions. \
-CMake must be at least 3.21 to force C23" OFF)
 if(WEBP_LINK_STATIC)
   if(WIN32)
@@ -132,7 +133,7 @@ if(WEBP_UNICODE)
   add_definitions(-DUNICODE -D_UNICODE)
 endif()
-if(WIN32 AND BUILD_SHARED_LIBS)
+if(MSVC AND BUILD_SHARED_LIBS)
   add_definitions(-DWEBP_DLL)
 endif()
@@ -160,20 +161,7 @@ if(MSVC)
   set(CMAKE_STATIC_LIBRARY_PREFIX "${webp_libname_prefix}")
 endif()
-if(NOT WIN32)
-  set(CMAKE_C_VISIBILITY_PRESET hidden)
-endif()
-if(WEBP_ENABLE_WUNUSED_RESULT)
-  if(CMAKE_VERSION VERSION_GREATER_EQUAL 3.21.0)
-    set(CMAKE_C_STANDARD 23)
-  else()
-    unset(CMAKE_C_STANDARD)
-    add_compile_options($<$<COMPILE_LANGUAGE:C>:-std=gnu2x>)
-  endif()
-  add_compile_options(-Wunused-result)
-  add_definitions(-DWEBP_ENABLE_NODISCARD=1)
-endif()
+set(CMAKE_C_VISIBILITY_PRESET hidden)
 # ##############################################################################
 # Android only.
@@ -372,11 +360,9 @@ if(XCODE)
 endif()
 target_link_libraries(webpdecoder ${WEBP_DEP_LIBRARIES})
 target_include_directories(
-  webpdecoder
-  PRIVATE ${CMAKE_CURRENT_BINARY_DIR} ${CMAKE_CURRENT_SOURCE_DIR}
-  INTERFACE
-    "$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR};${CMAKE_CURRENT_BINARY_DIR}>"
-    $<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}>)
+  webpdecoder PRIVATE ${CMAKE_CURRENT_BINARY_DIR} ${CMAKE_CURRENT_SOURCE_DIR}
+  INTERFACE $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}>
+            $<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}>)
 set_target_properties(
   webpdecoder
   PROPERTIES PUBLIC_HEADER "${CMAKE_CURRENT_SOURCE_DIR}/src/webp/decode.h;\
@@ -477,8 +463,6 @@ endif()
 if(WEBP_BUILD_ANIM_UTILS
    OR WEBP_BUILD_CWEBP
    OR WEBP_BUILD_DWEBP
-   OR WEBP_BUILD_EXTRAS
-   OR WEBP_BUILD_FUZZTEST
    OR WEBP_BUILD_GIF2WEBP
    OR WEBP_BUILD_IMG2WEBP
    OR WEBP_BUILD_VWEBP
@@ -515,8 +499,6 @@ if(WEBP_BUILD_ANIM_UTILS
     TARGET exampleutil imageioutil imagedec imageenc
     PROPERTY INCLUDE_DIRECTORIES ${CMAKE_CURRENT_SOURCE_DIR}/src
              ${CMAKE_CURRENT_BINARY_DIR}/src)
-  target_include_directories(imagedec PRIVATE ${WEBP_DEP_IMG_INCLUDE_DIRS})
-  target_include_directories(imageenc PRIVATE ${WEBP_DEP_IMG_INCLUDE_DIRS})
 endif()
 if(WEBP_BUILD_DWEBP)
@@ -563,8 +545,7 @@ if(WEBP_BUILD_GIF2WEBP)
   add_executable(gif2webp ${GIF2WEBP_SRCS})
   target_link_libraries(gif2webp exampleutil imageioutil webp libwebpmux
                         ${WEBP_DEP_GIF_LIBRARIES})
-  target_include_directories(gif2webp PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/src
-                                              ${CMAKE_CURRENT_SOURCE_DIR})
+  target_include_directories(gif2webp PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/src)
   install(TARGETS gif2webp RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR})
 endif()
@@ -657,30 +638,15 @@ if(WEBP_BUILD_EXTRAS)
                               ${CMAKE_CURRENT_BINARY_DIR})
   # vwebp_sdl
-  find_package(SDL2 QUIET)
-  if(WEBP_BUILD_VWEBP AND SDL2_FOUND)
+  find_package(SDL)
+  if(WEBP_BUILD_VWEBP AND SDL_FOUND)
     add_executable(vwebp_sdl ${VWEBP_SDL_SRCS})
-    target_link_libraries(vwebp_sdl ${SDL2_LIBRARIES} imageioutil webp)
+    target_link_libraries(vwebp_sdl ${SDL_LIBRARY} imageioutil webp)
     target_include_directories(
       vwebp_sdl PRIVATE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}
-                        ${CMAKE_CURRENT_BINARY_DIR}/src ${SDL2_INCLUDE_DIRS})
+                        ${CMAKE_CURRENT_BINARY_DIR}/src ${SDL_INCLUDE_DIR})
     set(WEBP_HAVE_SDL 1)
     target_compile_definitions(vwebp_sdl PUBLIC WEBP_HAVE_SDL)
-    set(CMAKE_REQUIRED_INCLUDES "${SDL2_INCLUDE_DIRS}")
-    check_c_source_compiles(
-      "
-      #define SDL_MAIN_HANDLED
-      #include \"SDL.h\"
-      int main(void) {
-        return 0;
-      }
-      "
-      HAVE_JUST_SDL_H)
-    set(CMAKE_REQUIRED_INCLUDES)
-    if(HAVE_JUST_SDL_H)
-      target_compile_definitions(vwebp_sdl PRIVATE WEBP_HAVE_JUST_SDL_H)
-    endif()
   endif()
 endif()
@@ -695,44 +661,31 @@ if(WEBP_BUILD_WEBP_JS)
   else()
     set(emscripten_stack_size "-sTOTAL_STACK=5MB")
   endif()
-  find_package(SDL2 REQUIRED)
   # wasm2js does not support SIMD.
   if(NOT WEBP_ENABLE_SIMD)
     # JavaScript version
     add_executable(webp_js ${CMAKE_CURRENT_SOURCE_DIR}/extras/webp_to_sdl.c)
-    target_link_libraries(webp_js webpdecoder SDL2)
+    target_link_libraries(webp_js webpdecoder SDL)
     target_include_directories(webp_js PRIVATE ${CMAKE_CURRENT_BINARY_DIR})
     set(WEBP_HAVE_SDL 1)
     set_target_properties(
       webp_js
-      PROPERTIES
-      # Emscripten puts -sUSE_SDL2=1 in this variable, though it's needed at
-      # compile time to ensure the headers are downloaded.
-      COMPILE_OPTIONS "${SDL2_LIBRARIES}"
-      LINK_FLAGS
-        "-sWASM=0 ${emscripten_stack_size} \
+      PROPERTIES LINK_FLAGS "-sWASM=0 ${emscripten_stack_size} \
 -sEXPORTED_FUNCTIONS=_WebPToSDL -sINVOKE_RUN=0 \
--sEXPORTED_RUNTIME_METHODS=cwrap ${SDL2_LIBRARIES} \
--sALLOW_MEMORY_GROWTH")
+-sEXPORTED_RUNTIME_METHODS=cwrap")
     set_target_properties(webp_js PROPERTIES OUTPUT_NAME webp)
     target_compile_definitions(webp_js PUBLIC EMSCRIPTEN WEBP_HAVE_SDL)
   endif()
   # WASM version
   add_executable(webp_wasm ${CMAKE_CURRENT_SOURCE_DIR}/extras/webp_to_sdl.c)
-  target_link_libraries(webp_wasm webpdecoder SDL2)
+  target_link_libraries(webp_wasm webpdecoder SDL)
   target_include_directories(webp_wasm PRIVATE ${CMAKE_CURRENT_BINARY_DIR})
   set_target_properties(
     webp_wasm
-    PROPERTIES
-    # Emscripten puts -sUSE_SDL2=1 in this variable, though it's needed at
-    # compile time to ensure the headers are downloaded.
-    COMPILE_OPTIONS "${SDL2_LIBRARIES}"
-    LINK_FLAGS
-      "-sWASM=1 ${emscripten_stack_size} \
+    PROPERTIES LINK_FLAGS "-sWASM=1 ${emscripten_stack_size} \
 -sEXPORTED_FUNCTIONS=_WebPToSDL -sINVOKE_RUN=0 \
--sEXPORTED_RUNTIME_METHODS=cwrap ${SDL2_LIBRARIES} \
--sALLOW_MEMORY_GROWTH")
+-sEXPORTED_RUNTIME_METHODS=cwrap")
   target_compile_definitions(webp_wasm PUBLIC EMSCRIPTEN WEBP_HAVE_SDL)
   target_compile_definitions(webpdspdecode PUBLIC EMSCRIPTEN)
@@ -772,10 +725,6 @@ if(WEBP_BUILD_ANIM_UTILS)
   target_include_directories(anim_dump PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/src)
 endif()
-if(WEBP_BUILD_FUZZTEST)
-  add_subdirectory(tests/fuzzer)
-endif()
 # Install the different headers and libraries.
 install(
   TARGETS ${INSTALLED_LIBRARIES}

ChangeLog

@@ -1,309 +1,17 @@
c3d85ce4 update NEWS
ad14e811 tests/fuzzer/*: add missing <string_view> include
74cd026e fuzz_utils.cc: fix build error w/WEBP_REDUCE_SIZE
a027aa93 mux_demux_api_fuzzer.cc: fix -Wshadow warning
25e17c68 update ChangeLog (tag: v1.5.0-rc1)
aa2684fc update NEWS
36923846 bump version to 1.5.0
ceea8ff6 update AUTHORS
e4f7a9f0 img2webp: add a warning for unused options
1b4c967f Merge "Properly check the data size against the end of the RIFF chunk" into main
9e5ecfaf Properly check the data size against the end of the RIFF chunk
da0d9c7d examples: exit w/failure w/no args
fcff86c7 {gif,img}2webp: sync -m help w/cwebp
b76c4a84 man/img2webp.1: sync -m text w/cwebp.1 & gif2webp.1
30633519 muxread: fix reading of buffers > riff size
4c85d860 yuv.h: update RGB<->YUV coefficients in comment
0ab789e0 Merge changes I6dfedfd5,I2376e2dc into main
03236450 {ios,xcframework}build.sh: fix compilation w/Xcode 16
61e2cfda rework AddVectorEq_SSE2
7bda3deb rework AddVector_SSE2
2ddaaf0a Fix variable names in SharpYuvComputeConversionMatrix
a3ba6f19 Makefile.vc: fix gif2webp link error
f999d94f gif2webp: add -sharp_yuv/-near_lossless
dfdcb7f9 Merge "lossless.h: fix function declaration mismatches" into main (tag: webp-rfc9649)
78ed6839 fix overread in Intra4Preds_NEON
d516a68e lossless.h: fix function declaration mismatches
87406904 Merge "Improve documentation of SharpYuvConversionMatrix." into main
fdb229ea Merge changes I07a7e36a,Ib29980f7,I2316122d,I2356e314,I32b53dd3, ... into main
0c3cd9cc Improve documentation of SharpYuvConversionMatrix.
169dfbf9 disable Intra4Preds_NEON
2dd5eb98 dsp/yuv*: use WEBP_RESTRICT qualifier
23bbafbe dsp/upsampling*: use WEBP_RESTRICT qualifier
35915b38 dsp/rescaler*: use WEBP_RESTRICT qualifier
a32b436b dsp/lossless*: use WEBP_RESTRICT qualifier
04d4b4f3 dsp/filters*: use WEBP_RESTRICT qualifier
b1cb37e6 dsp/enc*: use WEBP_RESTRICT qualifier
201894ef dsp/dec*: use WEBP_RESTRICT qualifier
02eac8a7 dsp/cost*: use WEBP_RESTRICT qualifier
84b118c9 Merge "webp-container-spec: normalize notes & unknown chunk link" into main
052cf42f webp-container-spec: normalize notes & unknown chunk link
220ee529 Search for best predictor transform bits
78619478 Try to reduce the sampling for the entropy image
14f09ab7 webp-container-spec: reorder chunk size - N text
a78c5356 Remove a useless malloc for entropy image
bc491763 Merge "Refactor predictor finding" into main
34f92238 man/{cwebp,img2webp}.1: rm 'if needed' from -sharp_yuv
367ca938 Refactor predictor finding
a582b53b webp-lossless-bitstream-spec: clarify some text
0fd25d84 Merge "anim_encode.c: fix function ref in comment" into main
f8882913 anim_encode.c: fix function ref in comment
40e4ca60 specs_generation.md: update kramdown command line
57883c78 img2webp: add -exact/-noexact per-frame options
1c8eba97 img2webp,cosmetics: add missing '.' spacers to help
2e81017c Convert predictor_enc.c to fixed point
94de6c7f Merge "Fix fuzztest link errors w/-DBUILD_SHARED_LIBS=1" into main
51d9832a Fix fuzztest link errors w/-DBUILD_SHARED_LIBS=1
7bcb36b8 Merge "Fix static overflow warning." into main
8e0cc14c Fix static overflow warning.
cea68462 README.md: add security report note
615e5874 Merge "make VP8LPredictor[01]_C() static" into main
233e86b9 Merge changes Ie43dc5ef,I94cd8bab into main
1a29fd2f make VP8LPredictor[01]_C() static
dd9d3770 Do*Filter_*: remove row & num_rows parameters
ab451a49 Do*Filter_C: remove dead 'inverse' code paths
f9a480f7 {TrueMotion,TM16}_NEON: remove zero extension
04834aca Merge changes I25c30a9e,I0a192fc6,I4cf89575 into main
39a602af webp-lossless-bitstream-spec: normalize predictor transform ref
f28c837d Merge "webp-container-spec: align anim pseudocode w/prose" into main
74be8e22 Fix implicit conversion issues
0c01db7c Merge "Increase the transform bits if possible." into main
f2d6dc1e Increase the transform bits if possible.
caa19e5b update link to issue tracker
c9dd9bd4 webp-container-spec: align anim pseudocode w/prose
8a7c8dc6 WASM: Enable VP8L_USE_FAST_LOAD
f0c53cd9 WASM: don't use USE_GENERIC_TREE
eef903d0 WASM: Enable 64-bit BITS caching
6296cc8d iterator_enc: make VP8IteratorReset() static
fbd93896 histogram_enc: make VP8LGetHistogramSize static
cc7ff545 cost_enc: make VP8CalculateLevelCosts[] static
4e2828ba vp8l_dec: make VP8LClear() static
d742b24a Intra16Preds_NEON: fix truemotion saturation
c7bb4cb5 Intra4Preds_NEON: fix truemotion saturation
952a989b Merge "Remove TODO now that log is using fixed point." into main
dde11574 Remove TODO now that log is using fixed point.
a1ca153d Fix hidden myerr in my_error_exit
3bd94202 Merge changes Iff6e47ed,I24c67cd5,Id781e761 into main
d27d246e Merge "Convert VP8LFastSLog2 to fixed point" into main
4838611f Disable msg_code use in fuzzing mode
314a142a Use QuantizeBlock_NEON for VP8EncQuantizeBlockWHT on Arm
3bfb05e3 Add AArch64 Neon implementation of Intra16Preds
baa93808 Add AArch64 Neon implementation of Intra4Preds
41a5e582 Fix errors when compiling code as C++
fb444b69 Convert VP8LFastSLog2 to fixed point
c1c89f51 Fix WEBP_NODISCARD comment and C++ version
66408c2c Switch the histogram_enc.h API to fixed point
ac1e410d Remove leftover tiff dep
b78d3957 Disable TIFF on fuzztest.
cff21a7d Do not build statically on oss-fuzz.
6853a8e5 Merge "Move more internal fuzzers to public." into main
9bc09db4 Merge "Convert VP8LFastLog2 to fixed point" into main
0a9f1c19 Convert VP8LFastLog2 to fixed point
db0cb9c2 Move more internal fuzzers to public.
ff2b5b15 Merge "advanced_api_fuzzer.cc: use crop dims in OOM check" into main
c4af79d0 Put 0 at the end of a palette and do not store it.
0ec80aef Delete last references to delta palettization
96d79f84 advanced_api_fuzzer.cc: use crop dims in OOM check
c35c7e02 Fix huffman fuzzer to not leak.
f2fe8dec Bump fuzztest dependency.
9ce982fd Fix fuzz tests to work on oss-fuzz
3ba8af1a Do not escape quotes anymore in build.sh
ea0e121b Allow centipede to be used as a fuzzing engine.
27731afd make VP8I4ModeOffsets & VP8MakeIntra4Preds static
ddd6245e oss-fuzz/build.sh: use heredoc for script creation
50074930 oss-fuzz/build.sh,cosmetics: fix indent
20e92f7d Limit the possible fuzz engines.
4f200de5 Switch public fuzz tests to fuzztest.
64186bb3 Add huffman_fuzzer to .gitignore
0905f61c Move build script from oss-fuzz repo to here.
e8678758 Fix link to Javascript documentation
5e5b8f0c Fix SSE2 Transform_AC3 function name
45129ee0 Revert "Check all the rows."
ee26766a Check all the rows.
7ec51c59 Increase the transform bits if possible.
3cd16fd3 Revert "Increase the transform bits if possible."
971a03d8 Increase the transform bits if possible.
1bf198a2 Allow transform_bits to be different during encoding.
1e462ca8 Define MAX_TRANSFORM_BITS according to the specification.
64d1ec23 Use (MIN/NUM)_(TRANSFORM/HUFFMAN)_BITS where appropriate
a90160e1 Refactor histograms in predictors.
a7aa7525 Fix some function declarations
68ff4e1e Merge "jpegdec: add a hint for EOF/READ errors" into main
79e7968a jpegdec: add a hint for EOF/READ errors
d33455cd man/*: s/BUGS/REPORTING BUGS/
a67ff735 normalize example exit status
edc28909 upsampling_{neon,sse41}: fix int sanitizer warning
3cada4ce ImgIoUtilReadFile: check ftell() return
dc950585 Merge tag 'v1.4.0'
845d5476 update ChangeLog (tag: v1.4.0, origin/1.4.0)
8a6a55bb update NEWS
cf7c5a5d provide a way to opt-out/override WEBP_NODISCARD
cc34288a update ChangeLog (tag: v1.4.0-rc1)
f13c0886 NEWS: fix date
74555950 Merge "vwebp: fix window title when options are given" into 1.4.0
d781646c vwebp: fix window title when options are given
c2e394de update NEWS
f6d15cb7 bump version to 1.4.0
57c388b8 update AUTHORS
b3d1b2cb Merge changes I26f4aa22,I83386b6c,I320ed1a2 into main
07216886 webp-container-spec: fix VP8 chunk ref ('VP8'->'VP8 ')
f88666eb webp_js/*.html: fix canvas mapping
e2c8f233 cmake,wasm: simplify SDL2 related flags
d537cd37 cmake: fix vwebp_sdl compile w/libsdl-org release
6c484cbf CMakeLists.txt: add missing WEBP_BUILD_EXTRAS check
7b0bc235 man/cwebp.1: add more detail to -partition_limit
3c0011bb WebPMuxGetChunk: add an assert
955a3d14 Merge "muxread,MuxGet: add an assert" into main
00abc000 muxread,MuxGet: add an assert
40e85a0b Have the window title reflect the filename.
1bf46358 man/cwebp.1: clarify -pass > 1 behavior w/o -size/-psnr
eba03acb webp-container-spec: replace 'above' with 'earlier'
a16d30cb webp-container-spec: clarify chunk order requirements
8a7e9112 Merge "CMakeLists.txt: apply cmake-format" into main
7fac6c1b Merge "Copy C code to not have multiplication overflow" into main
e2922e43 Merge "Check for the presence of the ANDROID_ABI variable" into main
501d9274 Copy C code to not have multiplication overflow
fba7d62e CMakeLists.txt: apply cmake-format
661c1b66 Merge "windows exports: use dllexport attribute, instead of visibility." into main
8487860a windows exports: use dllexport attribute, instead of visibility.
8ea678b9 webp/mux.h: data lifetime note w/copy_data=0
79e05c7f Check for the presence of the ANDROID_ABI variable
45f995a3 Expose functions for managing non-image chunks on WebPAnimEncoder
1fb9f3dc gifdec: fix ErrorGIFNotAvailable() declaration
4723db65 cosmetics: s/SANITY_CHECK/DCHECK/
f4b9bc9e clear -Wextra-semi-stmt warnings
713982b8 Limit animdecoder_fuzzer to 320MB
cbe825e4 cmake: fix sharpyuv simd files' build
f99305e9 Makefile.vc: add ARM64 support
5efd6300 mv SharpYuvEstimate420Risk to extras/
e78e924f Makefile.vc: add sharpyuv_risk_table.obj
d7a0506d Add YUV420 riskiness metric.
89c5b917 Merge "BuildHuffmanTable check sorted[] array bounds before writing" into main
34c80749 Remove alpha encoding pessimization.
13d9c30b Add a WEBP_NODISCARD
24d7f9cb Switch code to SDL2.
0b56dedc BuildHuffmanTable check sorted[] array bounds before writing
a429c0de sharpyuv: convert some for() to do/while
f0cd7861 DoSharpArgbToYuv: remove constant from loop
339231cc SharpYuvConvertWithOptions,cosmetics: fix formatting
307071f1 Remove medium/large code model-specific inline asm
deadc339 Fix transfer functions where toGamma and toLinear are swapped.
e7b78d43 Merge "Fix bug in FromLinearLog100." into main
15a1309e Merge "webp-lossless-bitstream-spec: delete extra blank line" into main
54ca9752 Fix bug in FromLinearLog100.
d2cb2d8c Dereference after NULL check.
e9d50107 webp-lossless-bitstream-spec: delete extra blank line
78657971 Merge changes Ief442c90,Ie6e9c9a5 into main
e30a5884 webp-lossless-bitstream-spec: update variable names
09ca1368 Merge "webp-container-spec: change assert to MUST be TRUE" into main
38cb4fc0 iosbuild,xcframeworkbuild: add SharpYuv framework
40afa926 webp-lossless-bitstream-spec: simplify abstract
9db21143 webp-container-spec: change assert to MUST be TRUE
cdbf88ae Fix typo in API docs for incremental decoding
05c46984 Reformat vcpkg build instructions.
8534f539 Merge "Never send VP8_STATUS_SUSPENDED back in non-incremental." into main
35e197bd Never send VP8_STATUS_SUSPENDED back in non-incremental.
61441425 Add vcpkg installation instructions
dce8397f Fix next is invalid pointer when WebPSafeMalloc fails
57c58105 Cmake: wrong public macro WEBP_INCLUDE_DIRS
c1ffd9ac Merge "vp8l_enc: fix non-C90 code" into main
a3965948 Merge changes If628bb93,Ic79f6309,I45f0db23 into main
f80e9b7e vp8l_enc: fix non-C90 code
accd141d Update lossless spec for two simple codes.
ac17ffff Fix non-C90 code.
433c7dca Fix static analyzer warnings.
5fac76cf Merge tag 'v1.3.2'
ca332209 update ChangeLog (tag: v1.3.2)
1ace578c update NEWS
63234c42 bump version to 1.3.2
a35ea50d Add a fuzzer for ReadHuffmanCodes
95ea5226 Fix invalid incremental decoding check.
2af26267 Fix OOB write in BuildHuffmanTable.
902bc919 Fix OOB write in BuildHuffmanTable.
7ba44f80 Homogenize "__asm__ volatile" vs "asm volatile"
68e27135 webp-container-spec: reorder example chunk layout
943b932a Merge changes I6a4d0a04,Ibc37b91e into main
1cc94f95 decode.h: wrap idec example in /* */
63acdd1e decode.h: fix decode example
aac5c5d0 ReadHuffmanCode: rm redundant num code lengths check
a2de25f6 webp-lossless-bitstream-spec: normalize list item case
68820f0e webp-lossless-bitstream-spec: normalize pixel ref
cdb31aa8 webp-lossless-bitstream-spec: add missing periods
0535a8cf webp-lossless-bitstream-spec: fix grammar
b6c4ce26 normalize numbered list item format
dd7364c3 Merge "palette.c: fix msvc warnings" into main
c63c5df6 palette.c: fix msvc warnings
0a2cad51 webp-container-spec: move terms from intro section
dd88d2ff webp-lossless-bitstream-spec: color_cache -> color cache
6e750547 Merge changes I644d7d39,Icf05491e,Ic02e6652,I63b11258 into main
67a7cc2b webp-lossless-bitstream-spec: fix code blocks
1432ebba Refactor palette sorting computation.
cd436142 webp-lossless-bitstream-spec: block -> chunk
3cb66f64 webp-lossless-bitstream-spec: add some missing commas
56471a53 webp-lossless-bitstream-spec: normalize item text in 5.1
af7fbfd2 vp8l_dec,ReadTransform: improve error status reporting
7d8e0896 vp8l_dec: add VP8LSetError()
a71ce1cf animencoder_fuzzer: fix error check w/Nallocfuzz
e94b36d6 webp-lossless-bitstream-spec: relocate details from 5.1
84628e56 webp-lossless-bitstream-spec: clarify image width changes
ee722997 alpha_dec: add missing VP8SetError()
0081693d enc_dec_fuzzer: use WebPDecode()
0fcb311c enc_dec_fuzzer: fix WebPEncode/pic.error_code check
982c177c webp-lossless-bitstream-spec: fix struct member refs
56cf5625 webp-lossless-bitstream-spec: use RFC 7405 for ABNF
6c6b3fd3 webp-lossless-bitstream-spec,cosmetics: delete blank lines
29b9eb15 Merge changes Id56ca4fd,I662bd1d7 into main
47c0af8d ReadHuffmanCodes: rm max_alphabet_size calc
b92deba3 animencoder_fuzzer: no WebPAnimEncoderAssemble check w/nallocfuzz
6be9bf8b animencoder_fuzzer: fix leak on alloc failure
5c965e55 vp8l_dec,cosmetics: add some /*param=*/ comments
e4fc2f78 webp-lossless-bitstream-spec: add validity note for max_symbol
71916726 webp-lossless-bitstream-spec: fix max_symbol definition
eac3bd5c Have the palette code be in its own file.
e2c85878 Add an initializer for the SharpYuvOptions struct.
4222b006 Merge tag 'v1.3.1'
25d94f47 Implement more transfer functions in libsharpyuv
2153a679 Merge changes Id0300937,I5dba5ccf,I57bb68e0,I2dba7b4e,I172aca36, ... into main
4298e976 webp-lossless-bitstream-spec: add PredictorTransformOutput
cd7e02be webp-lossless-bitstream-spec: fix RIFF-header ABNF
6c3845f9 webp-lossless-bitstream-spec: split LZ77 Backward Ref section
7f1b6799 webp-lossless-bitstream-spec: split Meta Prefix Codes section
7b634d8f webp-lossless-bitstream-spec: note transform order
6d6d4915 webp-lossless-bitstream-spec: update transformations text
fd7bb21c update ChangeLog (tag: v1.3.1-rc2, tag: v1.3.1)
e1adea50 update NEWS e1adea50 update NEWS
6b1c722a lossless_common.h,cosmetics: fix a typo
08d60d60 webp-lossless-bitstream-spec: split code length section
7a12afcc webp-lossless-bitstream-spec: rm unused anchor
43393320 enc/*: normalize WebPEncodingSetError() calls 43393320 enc/*: normalize WebPEncodingSetError() calls
287fdefe enc/*: add missing WebPEncodingSetError() calls 287fdefe enc/*: add missing WebPEncodingSetError() calls
c3bd7cff EncodeAlphaInternal: add missing error check c3bd7cff EncodeAlphaInternal: add missing error check
14a9dbfb webp-lossless-bitstream-spec: refine single node text
64819c7c Implement ExtractGreen_SSE2
d49cfbb3 vp8l_enc,WriteImage: add missing error check d49cfbb3 vp8l_enc,WriteImage: add missing error check
2e5a9ec3 muxread,MuxImageParse: add missing error checks 2e5a9ec3 muxread,MuxImageParse: add missing error checks
ebb6f949 cmake,emscripten: explicitly set stack size ebb6f949 cmake,emscripten: explicitly set stack size
59a2b1f9 WebPDecodeYUV: check u/v/stride/uv_stride ptrs 59a2b1f9 WebPDecodeYUV: check u/v/stride/uv_stride ptrs
8e965ccb Call png_get_channels() to see if image has alpha 8e965ccb Call png_get_channels() to see if image has alpha
fe80fbbd webp-container-spec: add some missing commas
e8ed3176 Merge "treat FILTER_NONE as a regular Unfilter[] call" into main
03a7a048 webp-lossless-bitstream-spec: rm redundant statement
c437c7aa webp-lossless-bitstream-spec: mv up prefix code group def
e4f17a31 webp-lossless-bitstream-spec: fix section reference
e2ecd5e9 webp-lossless-bitstream-spec: clarify ABNF syntax
8b55425a webp-lossless-bitstream-spec: refine pixel copy text
29c9f2d4 webp-lossless-bitstream-spec: minor wording updates
6b02f660 treat FILTER_NONE as a regular Unfilter[] call
7f75c91c webp-container-spec: fix location of informative msg
f6499943 webp-container-spec: consistently quote FourCCs
49918af3 webp-container-spec: minor wording updates
7f0a3419 update ChangeLog (tag: v1.3.1-rc1) 7f0a3419 update ChangeLog (tag: v1.3.1-rc1)
bab7efbe update NEWS bab7efbe update NEWS
7138bf8f bump version to 1.3.1 7138bf8f bump version to 1.3.1
435b4ded update AUTHORS 435b4ded update AUTHORS
47351229 update .mailmap 47351229 update .mailmap
46bc4fc9 Merge "Switch ExtraCost to ints and implement it in SSE." into main
828b4ce0 Switch ExtraCost to ints and implement it in SSE.
ff6c7f4e CONTRIBUTING.md: add C style / cmake-format notes ff6c7f4e CONTRIBUTING.md: add C style / cmake-format notes
dd530437 add .cmake-format.py dd530437 add .cmake-format.py
adbe2cb1 cmake,cosmetics: apply cmake-format adbe2cb1 cmake,cosmetics: apply cmake-format
@@ -1501,7 +1209,7 @@ b016cb91 NEON: faster fancy upsampling
 f04eb376 Merge tag 'v0.5.2'
 341d711c NEON: 5% faster conversion to RGB565 and RGBA4444
 abb54827 remove Clang warnings with unused arch arguments.
-ece9684f update ChangeLog (tag: v0.5.2-rc2, tag: v0.5.2)
+ece9684f update ChangeLog (tag: v0.5.2-rc2, tag: v0.5.2, origin/0.5.2)
 aa7744ca anim_util: quiet implicit conv warnings in 32-bit
 d9120271 jpegdec: correct ContextFill signature
 24eb3940 Remove some errors when compiling the code as C++.
@@ -1788,7 +1496,7 @@ bbb6ecd9 Merge "Add MSA optimized distortion functions"
 c0991a14 io,EmitRescaledAlphaYUV: factor out a common expr
 48bf5ed1 build.gradle: remove tab
 bfef6c9f Merge tag 'v0.5.1'
-3d97bb75 update ChangeLog (tag: v0.5.1)
+3d97bb75 update ChangeLog (tag: v0.5.1, origin/0.5.1)
 deb54d91 Clarify the expected 'config' lifespan in WebPIDecode()
 435308e0 Add MSA optimized encoder transform functions
 dce64bfa Add MSA optimized alpha filter functions


@@ -12,8 +12,6 @@ LIBSHARPYUV_BASENAME = libsharpyuv
 ARCH = x86
 !ELSE IF ! [ cl 2>&1 | find "x64" > NUL ]
 ARCH = x64
-!ELSE IF ! [ cl 2>&1 | find "ARM64" > NUL ]
-ARCH = ARM64
 !ELSE IF ! [ cl 2>&1 | find "ARM" > NUL ]
 ARCH = ARM
 !ELSE
@@ -32,7 +30,7 @@ PLATFORM_LDFLAGS = /SAFESEH
 NOLOGO = /nologo
 CCNODBG = cl.exe $(NOLOGO) /O2 /DNDEBUG
 CCDEBUG = cl.exe $(NOLOGO) /Od /Zi /D_DEBUG /RTC1
-CFLAGS = /I. /Isrc $(NOLOGO) /MP /W3 /EHsc /c
+CFLAGS = /I. /Isrc $(NOLOGO) /W3 /EHsc /c
 CFLAGS = $(CFLAGS) /DWIN32 /D_CRT_SECURE_NO_WARNINGS /DWIN32_LEAN_AND_MEAN
 LDFLAGS = /LARGEADDRESSAWARE /MANIFEST:EMBED /NXCOMPAT /DYNAMICBASE
 LDFLAGS = $(LDFLAGS) $(PLATFORM_LDFLAGS)
@@ -231,7 +229,6 @@ DSP_DEC_OBJS = \
     $(DIROBJ)\dsp\lossless_neon.obj \
     $(DIROBJ)\dsp\lossless_sse2.obj \
     $(DIROBJ)\dsp\lossless_sse41.obj \
-    $(DIROBJ)\dsp\lossless_avx2.obj \
     $(DIROBJ)\dsp\rescaler.obj \
     $(DIROBJ)\dsp\rescaler_mips32.obj \
     $(DIROBJ)\dsp\rescaler_mips_dsp_r2.obj \
@@ -271,7 +268,6 @@ DSP_ENC_OBJS = \
     $(DIROBJ)\dsp\lossless_enc_neon.obj \
     $(DIROBJ)\dsp\lossless_enc_sse2.obj \
     $(DIROBJ)\dsp\lossless_enc_sse41.obj \
-    $(DIROBJ)\dsp\lossless_enc_avx2.obj \
     $(DIROBJ)\dsp\ssim.obj \
     $(DIROBJ)\dsp\ssim_sse2.obj \
@@ -325,7 +321,6 @@ ENC_OBJS = \
 EXTRAS_OBJS = \
     $(DIROBJ)\extras\extras.obj \
     $(DIROBJ)\extras\quality_estimate.obj \
-    $(DIROBJ)\extras\sharpyuv_risk_table.obj \
 IMAGEIO_UTIL_OBJS = \
     $(DIROBJ)\imageio\imageio_util.obj \
@@ -341,7 +336,6 @@ UTILS_DEC_OBJS = \
     $(DIROBJ)\utils\color_cache_utils.obj \
     $(DIROBJ)\utils\filters_utils.obj \
     $(DIROBJ)\utils\huffman_utils.obj \
-    $(DIROBJ)\utils\palette.obj \
     $(DIROBJ)\utils\quant_levels_dec_utils.obj \
     $(DIROBJ)\utils\rescaler_utils.obj \
     $(DIROBJ)\utils\random_utils.obj \
@@ -395,7 +389,7 @@ $(DIRBIN)\dwebp.exe: $(IMAGEIO_UTIL_OBJS)
 $(DIRBIN)\dwebp.exe: $(LIBWEBPDEMUX)
 $(DIRBIN)\gif2webp.exe: $(DIROBJ)\examples\gif2webp.obj $(EX_GIF_DEC_OBJS)
 $(DIRBIN)\gif2webp.exe: $(EX_UTIL_OBJS) $(IMAGEIO_UTIL_OBJS) $(LIBWEBPMUX)
-$(DIRBIN)\gif2webp.exe: $(LIBWEBP) $(LIBSHARPYUV)
+$(DIRBIN)\gif2webp.exe: $(LIBWEBP)
 $(DIRBIN)\vwebp.exe: $(DIROBJ)\examples\vwebp.obj $(EX_UTIL_OBJS)
 $(DIRBIN)\vwebp.exe: $(IMAGEIO_UTIL_OBJS) $(LIBWEBPDEMUX) $(LIBWEBP)
 $(DIRBIN)\vwebp_sdl.exe: $(DIROBJ)\extras\vwebp_sdl.obj

NEWS

@@ -1,44 +1,3 @@
-- 12/19/2024 version 1.5.0
-  This is a binary compatible release.
-  API changes:
-    - `cross_color_transform_bits` added to WebPAuxStats
-  * minor lossless encoder speed and compression improvements
-  * lossless encoding does not use floats anymore
-  * additional Arm optimizations for lossy & lossless + general code generation
-    improvements
-  * improvements to WASM performance (#643)
-  * improvements and corrections in webp-container-spec.txt and
-    webp-lossless-bitstream-spec.txt (#646, #355607636)
-  * further security related hardening and increased fuzzing coverage w/fuzztest
-    (oss-fuzz: #382816119, #70112, #70102, #69873, #69825, #69508, #69208)
-  * miscellaneous warning, bug & build fixes (#499, #562, #381372617,
-    #381109771, #42340561, #375011696, #372109644, chromium: #334120888)
-  Tool updates:
-  * gif2webp: add -sharp_yuv & -near_lossless
-  * img2webp: add -exact & -noexact
-  * exit codes normalized; running an example program with no
-    arguments will output its help and exit with an error (#42340557,
-    #381372617)
-- 4/12/2024: version 1.4.0
-  This is a binary compatible release.
-  * API changes:
-    - libwebpmux: WebPAnimEncoderSetChunk, WebPAnimEncoderGetChunk,
-      WebPAnimEncoderDeleteChunk
-    - libsharpyuv: SharpYuvOptionsInit, SharpYuvConvertWithOptions
-    - extras: SharpYuvEstimate420Risk
-  * further security related hardening in libwebp & examples
-  * some minor optimizations in the lossless encoder
-  * added WEBP_NODISCARD to report unused result warnings; enable with
-    -DWEBP_ENABLE_NODISCARD=1
-  * improvements and corrections in webp-container-spec.txt and
-    webp-lossless-bitstream-spec.txt (#611)
-  * miscellaneous warning, bug & build fixes (#615, #619, #632, #635)
-- 9/13/2023: version 1.3.2
-  This is a binary compatible release.
-  * security fix for lossless decoder (chromium: #1479274, CVE-2023-4863)
 - 6/23/2023: version 1.3.1
   This is a binary compatible release.
   * security fixes for lossless encoder (#603, chromium: #1420107, #1455619,


@@ -7,7 +7,7 @@
 \__\__/\____/\_____/__/ ____ ___
 / _/ / \ \ / _ \/ _/
 / \_/ / / \ \ __/ \__
-\____/____/\_____/_____/____/v1.5.0
+\____/____/\_____/_____/____/v1.3.1
 ```
 WebP codec is a library to encode and decode images in WebP format. This package
@@ -42,8 +42,7 @@ See the [APIs documentation](doc/api.md), and API usage examples in the
 ## Bugs
-Please report all bugs to the [issue tracker](https://issues.webmproject.org).
-For security reports, select 'Security report' from the Template dropdown.
+Please report all bugs to the issue tracker: https://bugs.chromium.org/p/webp
 Patches welcome! See [how to contribute](CONTRIBUTING.md).


@@ -173,7 +173,6 @@ model {
                 include "color_cache_utils.c"
                 include "filters_utils.c"
                 include "huffman_utils.c"
-                include "palette.c"
                 include "quant_levels_dec_utils.c"
                 include "random_utils.c"
                 include "rescaler_utils.c"


@@ -11,8 +11,7 @@ endif()
 include("${CMAKE_CURRENT_LIST_DIR}/@PROJECT_NAME@Targets.cmake")
 set_and_check(WebP_INCLUDE_DIR "@PACKAGE_CMAKE_INSTALL_INCLUDEDIR@")
-set(WebP_INCLUDE_DIRS ${WebP_INCLUDE_DIR})
-set(WEBP_INCLUDE_DIRS ${WebP_INCLUDE_DIR})
+set(WEBP_INCLUDE_DIRS ${WebP_INCLUDE_DIRS})
 set(WebP_LIBRARIES "@INSTALLED_LIBRARIES@")
 set(WEBP_LIBRARIES "${WebP_LIBRARIES}")


@@ -94,9 +94,6 @@
 /* Set to 1 if SSE4.1 is supported */
 #cmakedefine WEBP_HAVE_SSE41 1
-/* Set to 1 if AVX2 is supported */
-#cmakedefine WEBP_HAVE_AVX2 1
 /* Set to 1 if TIFF library is installed */
 #cmakedefine WEBP_HAVE_TIFF 1


@@ -38,9 +38,9 @@ function(webp_check_compiler_flag WEBP_SIMD_FLAG ENABLE_SIMD)
 endfunction()
 # those are included in the names of WEBP_USE_* in c++ code.
-set(WEBP_SIMD_FLAGS "AVX2;SSE41;SSE2;MIPS32;MIPS_DSP_R2;NEON;MSA")
+set(WEBP_SIMD_FLAGS "SSE41;SSE2;MIPS32;MIPS_DSP_R2;NEON;MSA")
 set(WEBP_SIMD_FILE_EXTENSIONS
-    "_avx2.c;_sse41.c;_sse2.c;_mips32.c;_mips_dsp_r2.c;_neon.c;_msa.c")
+    "_sse41.c;_sse2.c;_mips32.c;_mips_dsp_r2.c;_neon.c;_msa.c")
 if(MSVC AND CMAKE_C_COMPILER_ID STREQUAL "MSVC")
   # With at least Visual Studio 12 (2013)+ /arch is not necessary to build SSE2
   # or SSE4 code unless a lesser /arch is forced. MSVC does not have a SSE4
@@ -50,18 +50,18 @@ if(MSVC AND CMAKE_C_COMPILER_ID STREQUAL "MSVC")
   if(MSVC_VERSION GREATER_EQUAL 1800 AND NOT CMAKE_C_FLAGS MATCHES "/arch:")
     set(SIMD_ENABLE_FLAGS)
   else()
-    set(SIMD_ENABLE_FLAGS "/arch:AVX2;/arch:AVX;/arch:SSE2;;;;")
+    set(SIMD_ENABLE_FLAGS "/arch:AVX;/arch:SSE2;;;;")
   endif()
   set(SIMD_DISABLE_FLAGS)
 else()
-  set(SIMD_ENABLE_FLAGS "-mavx2;-msse4.1;-msse2;-mips32;-mdspr2;-mfpu=neon;-mmsa")
-  set(SIMD_DISABLE_FLAGS "-mno-avx2;-mno-sse4.1;-mno-sse2;;-mno-dspr2;;-mno-msa")
+  set(SIMD_ENABLE_FLAGS "-msse4.1;-msse2;-mips32;-mdspr2;-mfpu=neon;-mmsa")
+  set(SIMD_DISABLE_FLAGS "-mno-sse4.1;-mno-sse2;;-mno-dspr2;;-mno-msa")
 endif()
 set(WEBP_SIMD_FILES_TO_INCLUDE)
 set(WEBP_SIMD_FLAGS_TO_INCLUDE)
-if(ANDROID AND ANDROID_ABI)
+if(${ANDROID})
   if(${ANDROID_ABI} STREQUAL "armeabi-v7a")
     # This is because Android studio uses the configuration "-march=armv7-a
     # -mfloat-abi=softfp -mfpu=vfpv3-d16" that does not trigger neon
@@ -106,9 +106,8 @@ foreach(I_SIMD RANGE ${WEBP_SIMD_FLAGS_RANGE})
   endif()
   # Check which files we should include or not.
   list(GET WEBP_SIMD_FILE_EXTENSIONS ${I_SIMD} WEBP_SIMD_FILE_EXTENSION)
-  file(GLOB SIMD_FILES
-       "${CMAKE_CURRENT_LIST_DIR}/../sharpyuv/*${WEBP_SIMD_FILE_EXTENSION}"
-       "${CMAKE_CURRENT_LIST_DIR}/../src/dsp/*${WEBP_SIMD_FILE_EXTENSION}")
+  file(GLOB SIMD_FILES "${CMAKE_CURRENT_LIST_DIR}/../"
+       "src/dsp/*${WEBP_SIMD_FILE_EXTENSION}")
   if(WEBP_HAVE_${WEBP_SIMD_FLAG})
     # Memorize the file and flags.
     foreach(FILE ${SIMD_FILES})


@@ -1,5 +1,5 @@
-AC_INIT([libwebp], [1.5.0],
-        [https://issues.webmproject.org],,
+AC_INIT([libwebp], [1.3.1],
+        [https://bugs.chromium.org/p/webp],,
         [https://developers.google.com/speed/webp])
 AC_CANONICAL_HOST
 AC_PREREQ([2.60])
@@ -106,7 +106,6 @@ TEST_AND_ADD_CFLAGS([AM_CFLAGS], [-Wall])
 TEST_AND_ADD_CFLAGS([AM_CFLAGS], [-Wconstant-conversion])
 TEST_AND_ADD_CFLAGS([AM_CFLAGS], [-Wdeclaration-after-statement])
 TEST_AND_ADD_CFLAGS([AM_CFLAGS], [-Wextra])
-TEST_AND_ADD_CFLAGS([AM_CFLAGS], [-Wextra-semi-stmt])
 TEST_AND_ADD_CFLAGS([AM_CFLAGS], [-Wfloat-conversion])
 TEST_AND_ADD_CFLAGS([AM_CFLAGS], [-Wformat -Wformat-nonliteral])
 TEST_AND_ADD_CFLAGS([AM_CFLAGS], [-Wformat -Wformat-security])
@@ -116,7 +115,6 @@ TEST_AND_ADD_CFLAGS([AM_CFLAGS], [-Wold-style-definition])
 TEST_AND_ADD_CFLAGS([AM_CFLAGS], [-Wparentheses-equality])
 TEST_AND_ADD_CFLAGS([AM_CFLAGS], [-Wshadow])
 TEST_AND_ADD_CFLAGS([AM_CFLAGS], [-Wshorten-64-to-32])
-TEST_AND_ADD_CFLAGS([AM_CFLAGS], [-Wstrict-prototypes])
 TEST_AND_ADD_CFLAGS([AM_CFLAGS], [-Wundef])
 TEST_AND_ADD_CFLAGS([AM_CFLAGS], [-Wunreachable-code-aggressive])
 TEST_AND_ADD_CFLAGS([AM_CFLAGS], [-Wunreachable-code])
@@ -161,25 +159,6 @@ AS_IF([test "$GCC" = "yes" ], [
 AC_SUBST([AM_CFLAGS])
 dnl === Check for machine specific flags
-AC_ARG_ENABLE([avx2],
-              AS_HELP_STRING([--disable-avx2],
-                             [Disable detection of AVX2 support
-                              @<:@default=auto@:>@]))
-AS_IF([test "x$enable_avx2" != "xno" -a "x$enable_sse4_1" != "xno"
-       -a "x$enable_sse2" != "xno"], [
-  AVX2_FLAGS="$INTRINSICS_CFLAGS $AVX2_FLAGS"
-  TEST_AND_ADD_CFLAGS([AVX2_FLAGS], [-mavx2])
-  AS_IF([test -n "$AVX2_FLAGS"], [
-    SAVED_CFLAGS=$CFLAGS
-    CFLAGS="$CFLAGS $AVX2_FLAGS"
-    AC_CHECK_HEADER([immintrin.h],
-                    [AC_DEFINE(WEBP_HAVE_AVX2, [1],
-                               [Set to 1 if AVX2 is supported])],
-                    [AVX2_FLAGS=""])
-    CFLAGS=$SAVED_CFLAGS])
-  AC_SUBST([AVX2_FLAGS])])
 AC_ARG_ENABLE([sse4.1],
               AS_HELP_STRING([--disable-sse4.1],
                              [Disable detection of SSE4.1 support
@@ -485,7 +464,7 @@ AC_ARG_ENABLE([sdl],
                              @<:@default=auto@:>@]))
 AS_IF([test "x$enable_sdl" != "xno"], [
   CLEAR_LIBVARS([SDL])
-  AC_PATH_PROGS([LIBSDL_CONFIG], [sdl2-config])
+  AC_PATH_PROGS([LIBSDL_CONFIG], [sdl-config])
   if test -n "$LIBSDL_CONFIG"; then
     SDL_INCLUDES=`$LIBSDL_CONFIG --cflags`
     SDL_LIBS="`$LIBSDL_CONFIG --libs`"
@@ -495,12 +474,13 @@ AS_IF([test "x$enable_sdl" != "xno"], [
   sdl_header="no"
   LIBCHECK_PROLOGUE([SDL])
-  AC_CHECK_HEADER([SDL2/SDL.h], [sdl_header="SDL2/SDL.h"],
-                  [AC_MSG_WARN(SDL2 library not available - no SDL.h)])
+  AC_CHECK_HEADER([SDL/SDL.h], [sdl_header="SDL/SDL.h"],
+                  [AC_CHECK_HEADER([SDL.h], [sdl_header="SDL.h"],
+                                   [AC_MSG_WARN(SDL library not available - no sdl.h)])])
   if test x"$sdl_header" != "xno"; then
     AC_LANG_PUSH(C)
     SDL_SAVED_LIBS="$LIBS"
-    for lib in "" "-lSDL2" "-lSDL2main -lSDL2"; do
+    for lib in "" "-lSDL" "-lSDLmain -lSDL"; do
      LIBS="$SDL_SAVED_LIBS $lib"
      # Perform a full link to ensure SDL_main is resolved if needed.
      AC_LINK_IFELSE(
@@ -782,8 +762,7 @@ AC_CONFIG_FILES([Makefile src/Makefile man/Makefile \
                  src/libwebp.pc src/libwebpdecoder.pc \
                  src/demux/libwebpdemux.pc src/mux/libwebpmux.pc])
-dnl fix exports from MinGW builds
-AC_CONFIG_COMMANDS_POST([$SED -i 's/-DDLL_EXPORT/-DWEBP_DLL/' config.status])
 AC_OUTPUT
 AC_MSG_NOTICE([


@@ -157,7 +157,7 @@ decoding is not finished yet or VP8_STATUS_OK when decoding is done. Any other
 status is an error condition.
 The 'idec' object must always be released (even upon an error condition) by
-calling: WebPIDelete(idec).
+calling: WebPDelete(idec).
 To retrieve partially decoded picture samples, one must use the corresponding
 method: WebPIDecGetRGB or WebPIDecGetYUVA. It will return the last displayable
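
For readers of this excerpt, the incremental interface it refers to (WebPIAppend, WebPIDecGetRGB/WebPIDecGetYUVA, WebPIDelete) is normally driven by a loop along these lines. This is only a sketch assuming the usual `webp/decode.h` entry points; `read_more_bytes` is a hypothetical callback standing in for the application's data source, and error handling is abbreviated.

```c
#include <stddef.h>
#include <stdint.h>
#include "webp/decode.h"

// Minimal sketch: feed data chunks to an incremental decoder as they arrive.
// Returns 1 on success, 0 on failure. read_more_bytes() is hypothetical.
static int DecodeIncrementally(size_t (*read_more_bytes)(uint8_t* buf,
                                                         size_t max)) {
  WebPIDecoder* const idec = WebPINewDecoder(NULL);  // decoder-managed output
  uint8_t chunk[4096];
  size_t len;
  int ok = 0;
  if (idec == NULL) return 0;
  while ((len = read_more_bytes(chunk, sizeof(chunk))) > 0) {
    const VP8StatusCode status = WebPIAppend(idec, chunk, len);
    if (status == VP8_STATUS_OK) { ok = 1; break; }   // picture fully decoded
    if (status != VP8_STATUS_SUSPENDED) break;        // genuine error
    // VP8_STATUS_SUSPENDED: more input is needed; partially decoded rows can
    // be inspected here with WebPIDecGetRGB() or WebPIDecGetYUVA() if desired.
  }
  WebPIDelete(idec);  // must always be called, even on error
  return ok;
}
```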


@@ -96,24 +96,6 @@ make
 make install
 ```
-## Building libwebp - Using vcpkg
-You can download and install libwebp using the
-[vcpkg](https://github.com/Microsoft/vcpkg) dependency manager:
-```shell
-git clone https://github.com/Microsoft/vcpkg.git
-cd vcpkg
-./bootstrap-vcpkg.sh
-./vcpkg integrate install
-./vcpkg install libwebp
-```
-The libwebp port in vcpkg is kept up to date by Microsoft team members and
-community contributors. If the version is out of date, please
-[create an issue or pull request](https://github.com/Microsoft/vcpkg) on the
-vcpkg repository.
 ## CMake
 With CMake, you can compile libwebp, cwebp, dwebp, gif2webp, img2webp, webpinfo
@@ -228,4 +210,4 @@ generated code, but is untested.
 ## Javascript decoder
 Libwebp can be compiled into a JavaScript decoder using Emscripten and CMake.
-See the [corresponding documentation](../webp_js/README.md)
+See the [corresponding documentation](../README.md)


@@ -17,11 +17,10 @@ rubygems will install automatically. The following will apply inline CSS
 styling; an external stylesheet is not needed.
 ```shell
-$ kramdown doc/webp-lossless-bitstream-spec.txt \
-    --template doc/template.html \
-    -x syntax-coderay --syntax-highlighter coderay \
-    --syntax-highlighter-opts "{default_lang: c, line_numbers: , css: style}" \
-    > doc/output/webp-lossless-bitstream-spec.html
+$ kramdown doc/webp-lossless-bitstream-spec.txt --template \
+    doc/template.html --coderay-css style --coderay-line-numbers ' ' \
+    --coderay-default-lang c > \
+    doc/output/webp-lossless-bitstream-spec.html
 ```
 Optimally, use kramdown 0.13.7 or newer if syntax highlighting desired.


@@ -321,13 +321,10 @@ Per-frame options (only used for subsequent images input):
 ```
  -d <int> ............. frame duration in ms (default: 100)
- -lossless ............ use lossless mode (default)
- -lossy ............... use lossy mode
+ -lossless ........... use lossless mode (default)
+ -lossy ... ........... use lossy mode
  -q <float> ........... quality
- -m <int> ............. compression method (0=fast, 6=slowest), default=4
- -exact, -noexact ..... preserve or alter RGB values in transparent area
-                        (default: -noexact, may cause artifacts
-                         with lossy animations)
+ -m <int> ............. method to use
 ```
 example: `img2webp -loop 2 in0.png -lossy in1.jpg -d 80 in2.tiff -o out.webp`
@@ -354,12 +351,8 @@ Options:
  -lossy ................. encode image using lossy compression
  -mixed ................. for each frame in the image, pick lossy
                           or lossless compression heuristically
- -near_lossless <int> ... use near-lossless image preprocessing
-                          (0..100=off), default=100
- -sharp_yuv ............. use sharper (and slower) RGB->YUV conversion
-                          (lossy only)
  -q <float> ............. quality factor (0:small..100:big)
- -m <int> ............... compression method (0=fast, 6=slowest), default=4
+ -m <int> ............... compression method (0=fast, 6=slowest)
  -min_size .............. minimize output size (default:off)
                           lossless compression by default; can be
                           combined with -q, -m, -lossy or -mixed


@@ -21,9 +21,9 @@ Introduction
 ------------
 WebP is an image format that uses either (i) the VP8 key frame encoding to
-compress image data in a lossy way or (ii) the WebP lossless encoding. These
-encoding schemes should make it more efficient than older formats, such as JPEG,
-GIF, and PNG. It is optimized for fast image transfer over the network (for
+compress image data in a lossy way, or (ii) the WebP lossless encoding. These
+encoding schemes should make it more efficient than older formats such as JPEG,
+GIF and PNG. It is optimized for fast image transfer over the network (for
 example, for websites). The WebP format has feature parity (color profile,
 metadata, animation, etc.) with other formats as well. This document describes
 the structure of a WebP file.
@@ -31,37 +31,36 @@ the structure of a WebP file.
 The WebP container (that is, the RIFF container for WebP) allows feature support
 over and above the basic use case of WebP (that is, a file containing a single
 image encoded as a VP8 key frame). The WebP container provides additional
-support for the following:
+support for:
-  * Lossless Compression: An image can be losslessly compressed, using the
+  * **Lossless compression.** An image can be losslessly compressed, using the
     WebP Lossless Format.
-  * Metadata: An image may have metadata stored in Exchangeable Image File
-    Format (Exif) or Extensible Metadata Platform (XMP) format.
+  * **Metadata.** An image may have metadata stored in Exif or XMP formats.
-  * Transparency: An image may have transparency, that is, an alpha channel.
+  * **Transparency.** An image may have transparency, that is, an alpha channel.
-  * Color Profile: An image may have an embedded ICC profile as described
+  * **Color Profile.** An image may have an embedded ICC profile as described
     by the [International Color Consortium][iccspec].
-  * Animation: An image may have multiple frames with pauses between them,
+  * **Animation.** An image may have multiple frames with pauses between them,
     making it an animation.
-Terminology & Basics
---------------------
 The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT", "SHOULD",
 "SHOULD NOT", "RECOMMENDED", "NOT RECOMMENDED", "MAY", and "OPTIONAL" in this
 document are to be interpreted as described in BCP 14 [RFC 2119][] [RFC 8174][]
 when, and only when, they appear in all capitals, as shown here.
-Bit numbering in chunk diagrams starts at `0` for the most significant bit
-('MSB 0') as described in [RFC 1166][].
+Terminology & Basics
+--------------------
 A WebP file contains either a still image (that is, an encoded matrix of pixels)
 or an [animation](#animation). Optionally, it can also contain transparency
-information, a color profile and metadata. We refer to the matrix of pixels as
-the _canvas_ of the image.
+information, color profile and metadata. We refer to the matrix of pixels as the
+_canvas_ of the image.
+Bit numbering in chunk diagrams starts at `0` for the most significant bit
+('MSB 0'), as described in [RFC 1166][].
 Below are additional terms used throughout this document:
@ -84,7 +83,7 @@ _uint32_
_FourCC_ _FourCC_
: A four-character code (FourCC) is a _uint32_ created by concatenating four : A _FourCC_ (four-character code) is a _uint32_ created by concatenating four
ASCII characters in little-endian order. This means 'aaaa' (0x61616161) and ASCII characters in little-endian order. This means 'aaaa' (0x61616161) and
'AAAA' (0x41414141) are treated as different _FourCCs_. 'AAAA' (0x41414141) are treated as different _FourCCs_.
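As an illustrative sketch only (the helper name `MakeFourCC` is not part of the specification), a FourCC can be formed by packing the first character into the least significant byte:

~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#include <stdint.h>

/* Packs four ASCII characters into a uint32_t with the first character in
 * the least significant byte, so 'aaaa' -> 0x61616161 and
 * 'AAAA' -> 0x41414141. */
static uint32_t MakeFourCC(char a, char b, char c, char d) {
  return (uint32_t)(unsigned char)a |
         ((uint32_t)(unsigned char)b << 8) |
         ((uint32_t)(unsigned char)c << 16) |
         ((uint32_t)(unsigned char)d << 24);
}

/* For example, MakeFourCC('V', 'P', '8', ' ') and MakeFourCC('V', 'P', '8', 'L')
 * give the identifiers of the lossy and lossless bitstream chunks. */
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~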
@ -95,8 +94,9 @@ _1-based_
_ChunkHeader('ABCD')_ _ChunkHeader('ABCD')_
: Used to describe the _FourCC_ and _Chunk Size_ header of individual chunks, : This is used to describe the _FourCC_ and _Chunk Size_ header of individual
where 'ABCD' is the FourCC for the chunk. This element's size is 8 bytes. chunks, where 'ABCD' is the FourCC for the chunk. This element's size is 8
bytes.
RIFF File Format RIFF File Format
@ -124,14 +124,14 @@ Chunk FourCC: 32 bits
Chunk Size: 32 bits (_uint32_) Chunk Size: 32 bits (_uint32_)
: The size of the chunk in bytes, not including this field, the chunk : The size of the chunk in bytes, not including this field, the chunk
identifier, or padding. identifier or padding.
Chunk Payload: _Chunk Size_ bytes Chunk Payload: _Chunk Size_ bytes
: The data payload. If _Chunk Size_ is odd, a single padding byte -- which MUST : The data payload. If _Chunk Size_ is odd, a single padding byte -- that MUST
be `0` to conform with RIFF -- is added. be `0` to conform with RIFF -- is added.
**Note**: RIFF has a convention that all-uppercase chunk FourCCs are standard **Note:** RIFF has a convention that all-uppercase chunk FourCCs are standard
chunks that apply to any RIFF file format, while FourCCs specific to a file chunks that apply to any RIFF file format, while FourCCs specific to a file
format are all lowercase. WebP does not follow this convention. format are all lowercase. WebP does not follow this convention.
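A minimal sketch of the resulting size arithmetic, using a hypothetical helper `ChunkTotalSize` that returns the number of bytes a chunk occupies on disk, including its header and the optional padding byte:

~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#include <stdint.h>

/* Bytes a chunk occupies on disk: 4 (FourCC) + 4 (Chunk Size field) +
 * payload + one zero padding byte when the payload size is odd. */
static uint64_t ChunkTotalSize(uint32_t chunk_size) {
  return 8ull + chunk_size + (chunk_size & 1u);
}
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~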
@ -151,17 +151,17 @@ WebP File Header
'RIFF': 32 bits 'RIFF': 32 bits
: The ASCII characters 'R', 'I', 'F', 'F'. : The ASCII characters 'R' 'I' 'F' 'F'.
File Size: 32 bits (_uint32_) File Size: 32 bits (_uint32_)
: The size of the file in bytes, starting at offset 8. The maximum value of : The size of the file in bytes starting at offset 8. The maximum value of
this field is 2^32 minus 10 bytes and thus the size of the whole file is at this field is 2^32 minus 10 bytes and thus the size of the whole file is at
most 4 GiB minus 2 bytes. most 4GiB minus 2 bytes.
'WEBP': 32 bits 'WEBP': 32 bits
: The ASCII characters 'W', 'E', 'B', 'P'. : The ASCII characters 'W' 'E' 'B' 'P'.
A WebP file MUST begin with a RIFF header with the FourCC 'WEBP'. The file size A WebP file MUST begin with a RIFF header with the FourCC 'WEBP'. The file size
in the header is the total size of the chunks that follow plus `4` bytes for in the header is the total size of the chunks that follow plus `4` bytes for
@ -188,10 +188,10 @@ Simple WebP (lossy) file format:
| WebP file header (12 bytes) | | WebP file header (12 bytes) |
| | | |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
: 'VP8 ' Chunk : : VP8 chunk :
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
'VP8 ' Chunk: VP8 chunk:
0 1 2 3 0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
@ -206,21 +206,21 @@ VP8 data: _Chunk Size_ bytes
: VP8 bitstream data. : VP8 bitstream data.
Note that the fourth character in the 'VP8 ' FourCC is an ASCII space (0x20). Note the fourth character in the 'VP8 ' FourCC is an ASCII space (0x20).
The VP8 bitstream format specification is described in [VP8 Data Format and The VP8 bitstream format specification can be found at [VP8 Data Format and
Decoding Guide][rfc 6386]. Note that the VP8 frame header contains the VP8 frame Decoding Guide][vp8spec]. Note that the VP8 frame header contains the VP8 frame
width and height. That is assumed to be the width and height of the canvas. width and height. That is assumed to be the width and height of the canvas.
The VP8 specification describes how to decode the image into Y'CbCr format. To The VP8 specification describes how to decode the image into Y'CbCr format. To
convert to RGB, [Recommendation BT.601][rec601] SHOULD be used. Applications MAY convert to RGB, Rec. 601 SHOULD be used. Applications MAY use another
use another conversion method, but visual results may differ among decoders. conversion method, but visual results may differ among decoders.
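For illustration only, one common floating-point form of the BT.601 limited-range conversion is sketched below; real decoders typically use fixed-point arithmetic, and the exact rounding is implementation-specific:

~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/* Clamps a floating-point value to the 8-bit range with rounding. */
static unsigned char Clip255(double v) {
  return (unsigned char)(v < 0. ? 0. : (v > 255. ? 255. : v + .5));
}

/* BT.601 limited-range Y'CbCr (Y in [16..235], Cb/Cr in [16..240]) to RGB. */
static void YCbCrToRGB601(int y, int cb, int cr,
                          unsigned char* r, unsigned char* g,
                          unsigned char* b) {
  const double yy = 1.164 * (y - 16);
  *r = Clip255(yy + 1.596 * (cr - 128));
  *g = Clip255(yy - 0.813 * (cr - 128) - 0.391 * (cb - 128));
  *b = Clip255(yy + 2.018 * (cb - 128));
}
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~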
Simple File Format (Lossless) Simple File Format (Lossless)
----------------------------- -----------------------------
**Note**: Older readers may not support files using the lossless format. **Note:** Older readers may not support files using the lossless format.
This layout SHOULD be used if the image requires _lossless_ encoding (with an This layout SHOULD be used if the image requires _lossless_ encoding (with an
optional transparency channel) and does not require advanced features provided optional transparency channel) and does not require advanced features provided
@ -235,10 +235,10 @@ Simple WebP (lossless) file format:
| WebP file header (12 bytes) | | WebP file header (12 bytes) |
| | | |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
: 'VP8L' Chunk : : VP8L chunk :
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
'VP8L' Chunk: VP8L chunk:
0 1 2 3 0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
@ -262,21 +262,21 @@ and height of the canvas.
Extended File Format Extended File Format
-------------------- --------------------
**Note**: Older readers may not support files using the extended format. **Note:** Older readers may not support files using the extended format.
An extended format file consists of: An extended format file consists of:
* A 'VP8X' Chunk with information about features used in the file. * A 'VP8X' chunk with information about features used in the file.
* An optional 'ICCP' Chunk with a color profile. * An optional 'ICCP' chunk with color profile.
* An optional 'ANIM' Chunk with animation control data. * An optional 'ANIM' chunk with animation control data.
* Image data. * Image data.
* An optional 'EXIF' Chunk with Exif metadata. * An optional 'EXIF' chunk with Exif metadata.
* An optional 'XMP ' Chunk with XMP metadata. * An optional 'XMP ' chunk with XMP metadata.
* An optional list of [unknown chunks](#unknown-chunks). * An optional list of [unknown chunks](#unknown-chunks).
@ -290,18 +290,15 @@ up of:
For an _animated image_, the _image data_ consists of multiple frames. More For an _animated image_, the _image data_ consists of multiple frames. More
details about frames can be found in the [Animation](#animation) section. details about frames can be found in the [Animation](#animation) section.
All chunks necessary for reconstruction and color correction, that is, 'VP8X', All chunks SHOULD be placed in the same order as listed above. If a chunk
'ICCP', 'ANIM', 'ANMF', 'ALPH', 'VP8 ', and 'VP8L', MUST appear in the order appears in the wrong place, the file is invalid, but readers MAY parse the
described earlier. Readers SHOULD fail when chunks necessary for reconstruction file, ignoring the chunks that are out of order.
and color correction are out of order.
[Metadata](#metadata) and [unknown chunks](#unknown-chunks) MAY appear out of **Rationale:** Setting the order of chunks should allow quicker file
order. parsing. For example, if an 'ALPH' chunk does not appear in its required
position, a decoder can choose to stop searching for it. The rule of
**Rationale:** The chunks necessary for reconstruction should appear first in ignoring late chunks should make programs that need to do a full search
the file to allow a reader to begin decoding an image before receiving all of give the same results as the ones stopping early.
the data. An application may benefit from varying the order of metadata and
custom chunks to suit the implementation.
Extended WebP file header: Extended WebP file header:
{:#extended_header} {:#extended_header}
@ -329,7 +326,7 @@ Reserved (Rsv): 2 bits
ICC profile (I): 1 bit ICC profile (I): 1 bit
: Set if the file contains an 'ICCP' Chunk. : Set if the file contains an ICC profile.
Alpha (L): 1 bit Alpha (L): 1 bit
@ -346,7 +343,7 @@ XMP metadata (X): 1 bit
Animation (A): 1 bit Animation (A): 1 bit
: Set if this is an animated image. Data in 'ANIM' and 'ANMF' Chunks should be : Set if this is an animated image. Data in 'ANIM' and 'ANMF' chunks should be
used to control the animation. used to control the animation.
Reserved (R): 1 bit Reserved (R): 1 bit
@ -375,9 +372,9 @@ Future specifications may add more fields. Unknown fields MUST be ignored.
#### Animation #### Animation
An animation is controlled by 'ANIM' and 'ANMF' Chunks. An animation is controlled by ANIM and ANMF chunks.
'ANIM' Chunk: ANIM Chunk:
{:#anim_chunk} {:#anim_chunk}
For an animated image, this chunk contains the _global parameters_ of the For an animated image, this chunk contains the _global parameters_ of the
@ -399,14 +396,14 @@ Background Color: 32 bits (_uint32_)
: The default background color of the canvas in \[Blue, Green, Red, Alpha\] : The default background color of the canvas in \[Blue, Green, Red, Alpha\]
byte order. This color MAY be used to fill the unused space on the canvas byte order. This color MAY be used to fill the unused space on the canvas
around the frames, as well as the transparent pixels of the first frame. around the frames, as well as the transparent pixels of the first frame.
The background color is also used when the Disposal method is `1`. Background color is also used when disposal method is `1`.
**Notes**: **Note**:
* The background color MAY contain a non-opaque alpha value, even if the * Background color MAY contain a non-opaque alpha value, even if the _Alpha_
_Alpha_ flag in the ['VP8X' Chunk](#extended_header) is unset. flag in [VP8X chunk](#extended_header) is unset.
* Viewer applications SHOULD treat the background color value as a hint and * Viewer applications SHOULD treat the background color value as a hint, and
are not required to use it. are not required to use it.
* The canvas is cleared at the start of each loop. The background color MAY be * The canvas is cleared at the start of each loop. The background color MAY be
@ -414,14 +411,13 @@ Background Color: 32 bits (_uint32_)
Loop Count: 16 bits (_uint16_) Loop Count: 16 bits (_uint16_)
: The number of times to loop the animation. If it is `0`, this means : The number of times to loop the animation. `0` means infinitely.
infinitely.
This chunk MUST appear if the _Animation_ flag in the 'VP8X' Chunk is set. This chunk MUST appear if the _Animation_ flag in the VP8X chunk is set.
If the _Animation_ flag is not set and this chunk is present, it MUST be If the _Animation_ flag is not set and this chunk is present, it MUST be
ignored. ignored.
'ANMF' Chunk: ANMF chunk:
For animated images, this chunk contains information about a _single_ frame. For animated images, this chunk contains information about a _single_ frame.
If the _Animation flag_ is not set, then this chunk SHOULD NOT be present. If the _Animation flag_ is not set, then this chunk SHOULD NOT be present.
@ -463,10 +459,10 @@ Frame Height Minus One: 24 bits (_uint24_)
Frame Duration: 24 bits (_uint24_) Frame Duration: 24 bits (_uint24_)
: The time to wait before displaying the next frame, in 1-millisecond units. : The time to wait before displaying the next frame, in 1 millisecond units.
Note that the interpretation of the Frame Duration of 0 (and often <= 10) is Note the interpretation of frame duration of 0 (and often <= 10) is
defined by the implementation. Many tools and browsers assign a minimum implementation defined. Many tools and browsers assign a minimum duration
duration similar to GIF. similar to GIF.
Reserved: 6 bits Reserved: 6 bits
@ -477,10 +473,10 @@ Blending method (B): 1 bit
: Indicates how transparent pixels of _the current frame_ are to be blended : Indicates how transparent pixels of _the current frame_ are to be blended
with corresponding pixels of the previous canvas: with corresponding pixels of the previous canvas:
* `0`: Use alpha-blending. After disposing of the previous frame, render the * `0`: Use alpha blending. After disposing of the previous frame, render the
current frame on the canvas using [alpha-blending](#alpha-blending). If current frame on the canvas using [alpha-blending](#alpha-blending). If
the current frame does not have an alpha channel, assume the alpha value the current frame does not have an alpha channel, assume alpha value of
is 255, effectively replacing the rectangle. 255, effectively replacing the rectangle.
* `1`: Do not blend. After disposing of the previous frame, render the * `1`: Do not blend. After disposing of the previous frame, render the
current frame on the canvas by overwriting the rectangle covered by the current frame on the canvas by overwriting the rectangle covered by the
@ -493,20 +489,20 @@ Disposal method (D): 1 bit
* `0`: Do not dispose. Leave the canvas as is. * `0`: Do not dispose. Leave the canvas as is.
* `1`: Dispose to the background color. Fill the _rectangle_ on the canvas * `1`: Dispose to background color. Fill the _rectangle_ on the canvas
covered by the _current frame_ with the background color specified in the covered by the _current frame_ with background color specified in the
['ANIM' Chunk](#anim_chunk). [ANIM chunk](#anim_chunk).
**Notes**: **Notes**:
* The frame disposal only applies to the _frame rectangle_, that is, the * The frame disposal only applies to the _frame rectangle_, that is, the
rectangle defined by _Frame X_, _Frame Y_, _frame width_, and _frame rectangle defined by _Frame X_, _Frame Y_, _frame width_ and _frame height_.
height_. It may or may not cover the whole canvas. It may or may not cover the whole canvas.
{:#alpha-blending} {:#alpha-blending}
* Alpha-blending: * **Alpha-blending**:
Given that each of the R, G, B, and A channels is 8 bits, and the RGB Given that each of the R, G, B and A channels is 8-bit, and the RGB
channels are _not premultiplied_ by alpha, the formula for blending channels are _not premultiplied_ by alpha, the formula for blending
'dst' onto 'src' is: 'dst' onto 'src' is:
@ -522,10 +518,10 @@ Disposal method (D): 1 bit
* Alpha-blending SHOULD be done in linear color space, by taking into account * Alpha-blending SHOULD be done in linear color space, by taking into account
the [color profile](#color-profile) of the image. If the color profile is the [color profile](#color-profile) of the image. If the color profile is
not present, standard RGB (sRGB) is to be assumed. (Note that sRGB also not present, sRGB is to be assumed. (Note that sRGB also needs to be
needs to be linearized due to a gamma of ~2.2.) linearized due to a gamma of ~2.2).
Frame Data: _Chunk Size_ bytes - `16` Frame Data: _Chunk Size_ - `16` bytes
: Consists of: : Consists of:
@ -535,8 +531,8 @@ Frame Data: _Chunk Size_ bytes - `16`
* An optional list of [unknown chunks](#unknown-chunks). * An optional list of [unknown chunks](#unknown-chunks).
**Note**: The 'ANMF' payload, _Frame Data_, consists of individual **Note**: The 'ANMF' payload, _Frame Data_ above, consists of individual
_padded_ chunks, as described by the [RIFF file format](#riff-file-format). _padded_ chunks as described by the [RIFF file format](#riff-file-format).
#### Alpha #### Alpha
@ -553,20 +549,18 @@ Reserved (Rsv): 2 bits
: MUST be `0`. Readers MUST ignore this field. : MUST be `0`. Readers MUST ignore this field.
Preprocessing (P): 2 bits Pre-processing (P): 2 bits
: These _informative_ bits are used to signal the preprocessing that has : These _informative_ bits are used to signal the pre-processing that has
been performed during compression. The decoder can use this information to been performed during compression. The decoder can use this information to
for example, dither the values or smooth the gradients prior to display. for example, dither the values or smooth the gradients prior to display.
* `0`: No preprocessing. * `0`: No pre-processing.
* `1`: Level reduction. * `1`: Level reduction.
Decoders are not required to use this information in any specified way.
Filtering method (F): 2 bits Filtering method (F): 2 bits
: The filtering methods used are described as follows: : The filtering method used:
* `0`: None. * `0`: None.
* `1`: Horizontal filter. * `1`: Horizontal filter.
@ -590,8 +584,8 @@ made depending on the filtering method:
where `clip(v)` is equal to: where `clip(v)` is equal to:
* 0 if v < 0, * 0 if v < 0
* 255 if v > 255, or * 255 if v > 255
* v otherwise * v otherwise
The final value is derived by adding the decompressed value `X` to the The final value is derived by adding the decompressed value `X` to the
@ -600,15 +594,17 @@ into the \[0..255\] one:
`alpha = (predictor + X) % 256` `alpha = (predictor + X) % 256`
There are special cases for the left-most and top-most pixel positions. For There are special cases for the left-most and top-most pixel positions:
example, the top-left value at location (0, 0) uses 0 as the predictor value.
Otherwise:
* The top-left value at location (0, 0) uses 0 as predictor value. Otherwise,
* For horizontal or gradient filtering methods, the left-most pixels at * For horizontal or gradient filtering methods, the left-most pixels at
location (0, y) are predicted using the location (0, y-1) just above. location (0, y) are predicted using the location (0, y-1) just above.
* For vertical or gradient filtering methods, the top-most pixels at * For vertical or gradient filtering methods, the top-most pixels at
location (x, 0) are predicted using the location (x-1, 0) on the left. location (x, 0) are predicted using the location (x-1, 0) on the left.
Decoders are not required to use this information in any specified way.
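A minimal sketch of this reconstruction for the horizontal filtering method, assuming the in-row predictor is the pixel immediately to the left and applying the special cases described above (the function name and in-place layout are illustrative):

~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#include <stdint.h>

/* Rebuilds alpha values in place from horizontally filtered residuals.
 * 'data' holds the decoded values X in scan order (width * height bytes). */
static void UnfilterAlphaHorizontal(uint8_t* data, int width, int height) {
  int x, y;
  for (y = 0; y < height; ++y) {
    uint8_t* const row = data + y * width;
    /* Left-most pixel: the predictor is 0 at (0, 0), otherwise the value
     * just above at (0, y - 1). */
    const int top = (y == 0) ? 0 : data[(y - 1) * width];
    row[0] = (uint8_t)((top + row[0]) & 0xff);
    for (x = 1; x < width; ++x) {  /* predictor is the pixel to the left */
      row[x] = (uint8_t)((row[x - 1] + row[x]) & 0xff);
    }
  }
}
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~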
Compression method (C): 2 bits Compression method (C): 2 bits
: The compression method used: : The compression method used:
@ -616,37 +612,37 @@ Compression method (C): 2 bits
* `0`: No compression. * `0`: No compression.
* `1`: Compressed using the WebP lossless format. * `1`: Compressed using the WebP lossless format.
Alpha bitstream: _Chunk Size_ bytes - `1` Alpha bitstream: _Chunk Size_ - `1` bytes
: Encoded alpha bitstream. : Encoded alpha bitstream.
This optional chunk contains encoded alpha data for this frame. A frame This optional chunk contains encoded alpha data for this frame. A frame
containing a 'VP8L' Chunk SHOULD NOT contain this chunk. containing a 'VP8L' chunk SHOULD NOT contain this chunk.
**Rationale**: The transparency information is already part of the 'VP8L' **Rationale**: The transparency information is already part of the 'VP8L'
Chunk. chunk.
The alpha channel data is stored as uncompressed raw data (when the The alpha channel data is stored as uncompressed raw data (when
compression method is '0') or compressed using the lossless format compression method is '0') or compressed using the lossless format
(when the compression method is '1'). (when the compression method is '1').
* Raw data: This consists of a byte sequence of length = width * height, * Raw data: consists of a byte sequence of length width * height,
containing all the 8-bit transparency values in scan order. containing all the 8-bit transparency values in scan order.
* Lossless format compression: The byte sequence is a compressed * Lossless format compression: the byte sequence is a compressed
image-stream (as described in ["WebP Lossless Bitstream Format"] image-stream (as described in the [WebP Lossless Bitstream Format]
[webpllspec]) of implicit dimensions width x height. That is, this [webpllspec]) of implicit dimension width x height. That is, this
image-stream does NOT contain any headers describing the image dimensions. image-stream does NOT contain any headers describing the image dimension.
**Rationale**: The dimensions are already known from other sources, **Rationale**: the dimension is already known from other sources,
so storing them again would be redundant and prone to error. so storing it again would be redundant and error-prone.
Once the image-stream is decoded into Alpha, Red, Green, Blue (ARGB) color Once the image-stream is decoded into ARGB color values, following
values, following the process described in the lossless format the process described in the lossless format specification, the
specification, the transparency information must be extracted from the transparency information must be extracted from the *green* channel
*green* channel of the ARGB quadruplet. of the ARGB quadruplet.
**Rationale**: The green channel is allowed extra transformation **Rationale**: the green channel is allowed extra transformation
steps in the specification -- unlike the other channels -- that can steps in the specification -- unlike the other channels -- that can
improve compression. improve compression.
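As a sketch only, assuming each decoded pixel is packed as a `0xAARRGGBB` _uint32_ (the in-memory layout is decoder-specific), the transparency value is simply the green byte:

~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#include <stdint.h>

/* Extracts the transparency value from one decoded pixel, assuming the
 * pixel is packed as 0xAARRGGBB: the alpha plane value is the green byte. */
static uint8_t AlphaFromLosslessPixel(uint32_t argb) {
  return (uint8_t)((argb >> 8) & 0xff);
}
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~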
@ -654,13 +650,13 @@ compression method is '0') or compressed using the lossless format
This chunk contains compressed bitstream data for a single frame. This chunk contains compressed bitstream data for a single frame.
A bitstream chunk may be either (i) a 'VP8 ' Chunk, using 'VP8 ' (note the A bitstream chunk may be either (i) a VP8 chunk, using "VP8 " (note the
significant fourth-character space) as its FourCC, _or_ (ii) a 'VP8L' Chunk, significant fourth-character space) as its tag _or_ (ii) a VP8L chunk, using
using 'VP8L' as its FourCC. "VP8L" as its tag.
The formats of 'VP8 ' and 'VP8L' Chunks are as described in sections The formats of VP8 and VP8L chunks are as described in sections
[Simple File Format (Lossy)](#simple-file-format-lossy) [Simple File Format (Lossy)](#simple-file-format-lossy)
and [Simple File Format (Lossless)](#simple-file-format-lossless), respectively. and [Simple File Format (Lossless)](#simple-file-format-lossless) respectively.
#### Color Profile #### Color Profile
@ -687,14 +683,14 @@ If this chunk is not present, sRGB SHOULD be assumed.
#### Metadata #### Metadata
Metadata can be stored in 'EXIF' or 'XMP ' Chunks. Metadata can be stored in 'EXIF' or 'XMP ' chunks.
There SHOULD be at most one chunk of each type ('EXIF' and 'XMP '). If there There SHOULD be at most one chunk of each type ('EXIF' and 'XMP '). If there
are more such chunks, readers MAY ignore all except the first one. are more such chunks, readers MAY ignore all except the first one.
The chunks are defined as follows: The chunks are defined as follows:
'EXIF' Chunk: EXIF chunk:
0 1 2 3 0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
@ -709,7 +705,7 @@ Exif Metadata: _Chunk Size_ bytes
: Image metadata in Exif format. : Image metadata in Exif format.
'XMP ' Chunk: XMP chunk:
0 1 2 3 0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
@ -724,73 +720,72 @@ XMP Metadata: _Chunk Size_ bytes
: Image metadata in XMP format. : Image metadata in XMP format.
Note that the fourth character in the 'XMP ' FourCC is an ASCII space (0x20). Note the fourth character in the 'XMP ' FourCC is an ASCII space (0x20).
Additional guidance about handling metadata can be found in the Additional guidance about handling metadata can be found in the
Metadata Working Group's ["Guidelines for Handling Metadata"][metadata]. Metadata Working Group's [Guidelines for Handling Metadata][metadata].
#### Unknown Chunks #### Unknown Chunks
A RIFF chunk (described in the [RIFF File Format](#riff-file-format) section) A RIFF chunk (described in [this](#terminology-amp-basics) section) whose _chunk
whose FourCC is different from any of the chunks described in this document, is tag_ is different from any of the chunks described in this document, is
considered an _unknown chunk_. considered an _unknown chunk_.
**Rationale**: Allowing unknown chunks gives a provision for future extension **Rationale**: Allowing unknown chunks gives a provision for future extension
of the format and also allows storage of any application-specific data. of the format, and also allows storage of any application-specific data.
A file MAY contain unknown chunks: A file MAY contain unknown chunks:
* at the end of the file, as described in [Extended WebP file * At the end of the file as described in [Extended WebP file
header](#extended_header) section, or header](#extended_header) section.
* at the end of 'ANMF' Chunks, as described in the * At the end of ANMF chunks as described in the
[Animation](#animation) section. [Animation](#animation) section.
Readers SHOULD ignore these chunks. Writers SHOULD preserve them in their Readers SHOULD ignore these chunks. Writers SHOULD preserve them in their
original order (unless they specifically intend to modify these chunks). original order (unless they specifically intend to modify these chunks).
### Canvas Assembly from Frames ### Assembling the Canvas From Frames
Here we provide an overview of how a reader MUST assemble a canvas in the case Here we provide an overview of how a reader MUST assemble a canvas in the case
of an animated image. of an animated image.
The process begins with creating a canvas using the dimensions given in the The process begins with creating a canvas using the dimensions given in the
'VP8X' Chunk, `Canvas Width Minus One + 1` pixels wide by `Canvas Height Minus 'VP8X' chunk, `Canvas Width Minus One + 1` pixels wide by `Canvas Height Minus
One + 1` pixels high. The `Loop Count` field from the 'ANIM' Chunk controls how One + 1` pixels high. The `Loop Count` field from the 'ANIM' chunk controls how
many times the animation process is repeated. This is `Loop Count - 1` for many times the animation process is repeated. This is `Loop Count - 1` for
nonzero `Loop Count` values or infinite if the `Loop Count` is zero. non-zero `Loop Count` values or infinitely if `Loop Count` is zero.
At the beginning of each loop iteration, the canvas is filled using the At the beginning of each loop iteration the canvas is filled using the
background color from the 'ANIM' Chunk or an application-defined color. background color from the 'ANIM' chunk or an application defined color.
'ANMF' Chunks contain individual frames given in display order. Before rendering 'ANMF' chunks contain individual frames given in display order. Before rendering
each frame, the previous frame's `Disposal method` is applied. each frame, the previous frame's `Disposal method` is applied.
The rendering of the decoded frame begins at the Cartesian coordinates (`2 * The rendering of the decoded frame begins at the Cartesian coordinates (`2 *
Frame X`, `2 * Frame Y`), using the top-left corner of the canvas as the origin. Frame X`, `2 * Frame Y`) using the top-left corner of the canvas as the origin.
`Frame Width Minus One + 1` pixels wide by `Frame Height Minus One + 1` pixels `Frame Width Minus One + 1` pixels wide by `Frame Height Minus One + 1` pixels
high are rendered onto the canvas using the `Blending method`. high are rendered onto the canvas using the `Blending method`.
The canvas is displayed for `Frame Duration` milliseconds. This continues until The canvas is displayed for `Frame Duration` milliseconds. This continues until
all frames given by 'ANMF' Chunks have been displayed. A new loop iteration is all frames given by 'ANMF' chunks have been displayed. A new loop iteration is
then begun, or the canvas is left in its final state if all iterations have been then begun or the canvas is left in its final state if all iterations have been
completed. completed.
The following pseudocode illustrates the rendering process. The notation The following pseudocode illustrates the rendering process. The notation
_VP8X.field_ means the field in the 'VP8X' Chunk with the same description. _VP8X.field_ means the field in the 'VP8X' chunk with the same description.
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
VP8X.flags.hasAnimation MUST be TRUE assert VP8X.flags.hasAnimation
canvas ← new image of size VP8X.canvasWidth x VP8X.canvasHeight with canvas ← new image of size VP8X.canvasWidth x VP8X.canvasHeight with
background color ANIM.background_color or background color ANIM.background_color.
application-defined color.
loop_count ← ANIM.loopCount loop_count ← ANIM.loopCount
dispose_method ← Dispose to background color dispose_method ← Dispose to background color
if loop_count == 0: if loop_count == 0:
loop_count = ∞ loop_count = ∞
frame_params ← nil frame_params ← nil
next chunk in image_data is ANMF MUST be TRUE assert next chunk in image_data is ANMF
for loop = 0..loop_count - 1 for loop = 0..loop_count - 1
clear canvas to ANIM.background_color or application-defined color clear canvas to ANIM.background_color or application defined color
until eof or non-ANMF chunk until eof or non-ANMF chunk
frame_params.frameX = Frame X frame_params.frameX = Frame X
frame_params.frameY = Frame Y frame_params.frameY = Frame Y
@ -799,25 +794,22 @@ for loop = 0..loop_count - 1
frame_params.frameDuration = Frame Duration frame_params.frameDuration = Frame Duration
frame_right = frame_params.frameX + frame_params.frameWidth frame_right = frame_params.frameX + frame_params.frameWidth
frame_bottom = frame_params.frameY + frame_params.frameHeight frame_bottom = frame_params.frameY + frame_params.frameHeight
VP8X.canvasWidth >= frame_right MUST be TRUE assert VP8X.canvasWidth >= frame_right
VP8X.canvasHeight >= frame_bottom MUST be TRUE assert VP8X.canvasHeight >= frame_bottom
for subchunk in 'Frame Data': for subchunk in 'Frame Data':
if subchunk.tag == "ALPH": if subchunk.tag == "ALPH":
alpha subchunks not found in 'Frame Data' earlier MUST be assert alpha subchunks not found in 'Frame Data' earlier
TRUE
frame_params.alpha = alpha_data frame_params.alpha = alpha_data
else if subchunk.tag == "VP8 " OR subchunk.tag == "VP8L": else if subchunk.tag == "VP8 " OR subchunk.tag == "VP8L":
bitstream subchunks not found in 'Frame Data' earlier MUST assert bitstream subchunks not found in 'Frame Data' earlier
be TRUE
frame_params.bitstream = bitstream_data frame_params.bitstream = bitstream_data
apply dispose_method.
render frame with frame_params.alpha and frame_params.bitstream render frame with frame_params.alpha and frame_params.bitstream
on canvas with top-left corner at (frame_params.frameX, on canvas with top-left corner at (frame_params.frameX,
frame_params.frameY), using Blending method frame_params.frameY), using blending method
frame_params.blendingMethod. frame_params.blendingMethod.
canvas contains the decoded image. canvas contains the decoded image.
Show the contents of the canvas for Show the contents of the canvas for
frame_params.frameDuration * 1 ms. frame_params.frameDuration * 1ms.
dispose_method = frame_params.disposeMethod dispose_method = frame_params.disposeMethod
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@ -825,7 +817,7 @@ for loop = 0..loop_count - 1
Example File Layouts Example File Layouts
-------------------- --------------------
A lossy-encoded image with alpha may look as follows: A lossy encoded image with alpha may look as follows:
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
RIFF/WEBP RIFF/WEBP
@ -834,16 +826,16 @@ RIFF/WEBP
+- VP8 (bitstream) +- VP8 (bitstream)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
A lossless-encoded image may look as follows: A losslessly encoded image may look as follows:
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
RIFF/WEBP RIFF/WEBP
+- VP8X (descriptions of features used) +- VP8X (descriptions of features used)
+- VP8L (lossless bitstream)
+- XYZW (unknown chunk) +- XYZW (unknown chunk)
+- VP8L (lossless bitstream)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
A lossless image with an ICC profile and XMP metadata may A lossless image with ICC profile and XMP metadata may
look as follows: look as follows:
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@ -867,11 +859,10 @@ RIFF/WEBP
+- EXIF (metadata) +- EXIF (metadata)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
[vp8spec]: https://datatracker.ietf.org/doc/html/rfc6386
[webpllspec]: https://chromium.googlesource.com/webm/libwebp/+/HEAD/doc/webp-lossless-bitstream-spec.txt [webpllspec]: https://chromium.googlesource.com/webm/libwebp/+/HEAD/doc/webp-lossless-bitstream-spec.txt
[iccspec]: https://www.color.org/icc_specs2.xalter [iccspec]: https://www.color.org/icc_specs2.xalter
[metadata]: https://web.archive.org/web/20180919181934/http://www.metadataworkinggroup.org/pdf/mwg_guidance.pdf [metadata]: https://web.archive.org/web/20180919181934/http://www.metadataworkinggroup.org/pdf/mwg_guidance.pdf
[rec601]: https://www.itu.int/rec/R-REC-BT.601
[rfc 1166]: https://datatracker.ietf.org/doc/html/rfc1166 [rfc 1166]: https://datatracker.ietf.org/doc/html/rfc1166
[rfc 2119]: https://datatracker.ietf.org/doc/html/rfc2119 [rfc 2119]: https://datatracker.ietf.org/doc/html/rfc2119
[rfc 6386]: https://datatracker.ietf.org/doc/html/rfc6386
[rfc 8174]: https://datatracker.ietf.org/doc/html/rfc8174 [rfc 8174]: https://datatracker.ietf.org/doc/html/rfc8174
File diff suppressed because it is too large.
@ -67,7 +67,7 @@ dwebp_LDADD += ../src/libwebp.la
dwebp_LDADD +=$(PNG_LIBS) $(JPEG_LIBS) dwebp_LDADD +=$(PNG_LIBS) $(JPEG_LIBS)
gif2webp_SOURCES = gif2webp.c gifdec.c gifdec.h gif2webp_SOURCES = gif2webp.c gifdec.c gifdec.h
gif2webp_CPPFLAGS = $(AM_CPPFLAGS) $(GIF_INCLUDES) -I$(top_srcdir) gif2webp_CPPFLAGS = $(AM_CPPFLAGS) $(GIF_INCLUDES)
gif2webp_LDADD = gif2webp_LDADD =
gif2webp_LDADD += libexample_util.la gif2webp_LDADD += libexample_util.la
gif2webp_LDADD += ../imageio/libimageio_util.la gif2webp_LDADD += ../imageio/libimageio_util.la
@ -16,7 +16,7 @@
#include <assert.h> #include <assert.h>
#include <limits.h> #include <limits.h>
#include <stdio.h> #include <stdio.h>
#include <stdlib.h> #include <stdlib.h> // for 'strtod'.
#include <string.h> // for 'strcmp'. #include <string.h> // for 'strcmp'.
#include "./anim_util.h" #include "./anim_util.h"
@ -206,9 +206,8 @@ static void Help(void) {
printf(" -version ............ print version number and exit\n"); printf(" -version ............ print version number and exit\n");
} }
// Returns 0 on success, 1 if animation files differ, and 2 for any error.
int main(int argc, const char* argv[]) { int main(int argc, const char* argv[]) {
int return_code = 2; int return_code = -1;
int dump_frames = 0; int dump_frames = 0;
const char* dump_folder = NULL; const char* dump_folder = NULL;
double min_psnr = 0.; double min_psnr = 0.;
@ -270,18 +269,18 @@ int main(int argc, const char* argv[]) {
} }
if (parse_error) { if (parse_error) {
Help(); Help();
FREE_WARGV_AND_RETURN(return_code); FREE_WARGV_AND_RETURN(-1);
} }
} }
if (argc < 3) { if (argc < 3) {
Help(); Help();
FREE_WARGV_AND_RETURN(return_code); FREE_WARGV_AND_RETURN(-1);
} }
if (!got_input2) { if (!got_input2) {
Help(); Help();
FREE_WARGV_AND_RETURN(return_code); FREE_WARGV_AND_RETURN(-1);
} }
if (dump_frames) { if (dump_frames) {
@ -294,7 +293,7 @@ int main(int argc, const char* argv[]) {
if (!ReadAnimatedImage(files[i], &images[i], dump_frames, dump_folder)) { if (!ReadAnimatedImage(files[i], &images[i], dump_frames, dump_folder)) {
WFPRINTF(stderr, "Error decoding file: %s\n Aborting.\n", WFPRINTF(stderr, "Error decoding file: %s\n Aborting.\n",
(const W_CHAR*)files[i]); (const W_CHAR*)files[i]);
return_code = 2; return_code = -2;
goto End; goto End;
} else { } else {
MinimizeAnimationFrames(&images[i], max_diff); MinimizeAnimationFrames(&images[i], max_diff);
@ -305,7 +304,7 @@ int main(int argc, const char* argv[]) {
premultiply, min_psnr)) { premultiply, min_psnr)) {
WFPRINTF(stderr, "\nFiles %s and %s differ.\n", (const W_CHAR*)files[0], WFPRINTF(stderr, "\nFiles %s and %s differ.\n", (const W_CHAR*)files[0],
(const W_CHAR*)files[1]); (const W_CHAR*)files[1]);
return_code = 1; return_code = -3;
} else { } else {
WPRINTF("\nFiles %s and %s are identical.\n", (const W_CHAR*)files[0], WPRINTF("\nFiles %s and %s are identical.\n", (const W_CHAR*)files[0],
(const W_CHAR*)files[1]); (const W_CHAR*)files[1]);
@ -12,7 +12,6 @@
// Author: Skal (pascal.massimino@gmail.com) // Author: Skal (pascal.massimino@gmail.com)
#include <stdio.h> #include <stdio.h>
#include <stdlib.h>
#include <string.h> // for 'strcmp'. #include <string.h> // for 'strcmp'.
#include "./anim_util.h" #include "./anim_util.h"
@ -36,7 +35,6 @@ static void Help(void) {
printf(" -version ............ print version number and exit\n"); printf(" -version ............ print version number and exit\n");
} }
// Returns EXIT_SUCCESS on success, EXIT_FAILURE on failure.
int main(int argc, const char* argv[]) { int main(int argc, const char* argv[]) {
int error = 0; int error = 0;
const W_CHAR* dump_folder = TO_W_CHAR("."); const W_CHAR* dump_folder = TO_W_CHAR(".");
@ -49,7 +47,7 @@ int main(int argc, const char* argv[]) {
if (argc < 2) { if (argc < 2) {
Help(); Help();
FREE_WARGV_AND_RETURN(EXIT_FAILURE); FREE_WARGV_AND_RETURN(-1);
} }
for (c = 1; !error && c < argc; ++c) { for (c = 1; !error && c < argc; ++c) {
@ -75,7 +73,7 @@ int main(int argc, const char* argv[]) {
suffix = TO_W_CHAR("pam"); suffix = TO_W_CHAR("pam");
} else if (!strcmp(argv[c], "-h") || !strcmp(argv[c], "-help")) { } else if (!strcmp(argv[c], "-h") || !strcmp(argv[c], "-help")) {
Help(); Help();
FREE_WARGV_AND_RETURN(EXIT_SUCCESS); FREE_WARGV_AND_RETURN(0);
} else if (!strcmp(argv[c], "-version")) { } else if (!strcmp(argv[c], "-version")) {
int dec_version, demux_version; int dec_version, demux_version;
GetAnimatedImageVersions(&dec_version, &demux_version); GetAnimatedImageVersions(&dec_version, &demux_version);
@ -84,7 +82,7 @@ int main(int argc, const char* argv[]) {
(dec_version >> 0) & 0xff, (dec_version >> 0) & 0xff,
(demux_version >> 16) & 0xff, (demux_version >> 8) & 0xff, (demux_version >> 16) & 0xff, (demux_version >> 8) & 0xff,
(demux_version >> 0) & 0xff); (demux_version >> 0) & 0xff);
FREE_WARGV_AND_RETURN(EXIT_SUCCESS); FREE_WARGV_AND_RETURN(0);
} else { } else {
uint32_t i; uint32_t i;
AnimatedImage image; AnimatedImage image;
@ -100,11 +98,7 @@ int main(int argc, const char* argv[]) {
for (i = 0; !error && i < image.num_frames; ++i) { for (i = 0; !error && i < image.num_frames; ++i) {
W_CHAR out_file[1024]; W_CHAR out_file[1024];
WebPDecBuffer buffer; WebPDecBuffer buffer;
if (!WebPInitDecBuffer(&buffer)) { WebPInitDecBuffer(&buffer);
fprintf(stderr, "Cannot init dec buffer\n");
error = 1;
continue;
}
buffer.colorspace = MODE_RGBA; buffer.colorspace = MODE_RGBA;
buffer.is_external_memory = 1; buffer.is_external_memory = 1;
buffer.width = image.canvas_width; buffer.width = image.canvas_width;
@ -123,5 +117,5 @@ int main(int argc, const char* argv[]) {
ClearAnimatedImage(&image); ClearAnimatedImage(&image);
} }
} }
FREE_WARGV_AND_RETURN(error ? EXIT_FAILURE : EXIT_SUCCESS); FREE_WARGV_AND_RETURN(error ? 1 : 0);
} }
@ -771,7 +771,6 @@ void GetDiffAndPSNR(const uint8_t rgba1[], const uint8_t rgba2[],
*psnr = 99.; // PSNR when images are identical. *psnr = 99.; // PSNR when images are identical.
} else { } else {
sse /= stride * height; sse /= stride * height;
assert(sse != 0.0);
*psnr = 4.3429448 * log(255. * 255. / sse); *psnr = 4.3429448 * log(255. * 255. / sse);
} }
} }
@ -178,14 +178,8 @@ static void PrintFullLosslessInfo(const WebPAuxStats* const stats,
if (stats->lossless_features & 8) fprintf(stderr, " PALETTE"); if (stats->lossless_features & 8) fprintf(stderr, " PALETTE");
fprintf(stderr, "\n"); fprintf(stderr, "\n");
} }
fprintf(stderr, " * Precision Bits: histogram=%d", stats->histogram_bits); fprintf(stderr, " * Precision Bits: histogram=%d transform=%d cache=%d\n",
if (stats->lossless_features & 1) { stats->histogram_bits, stats->transform_bits, stats->cache_bits);
fprintf(stderr, " prediction=%d", stats->transform_bits);
}
if (stats->lossless_features & 2) {
fprintf(stderr, " cross-color=%d", stats->cross_color_transform_bits);
}
fprintf(stderr, " cache=%d\n", stats->cache_bits);
if (stats->palette_size > 0) { if (stats->palette_size > 0) {
fprintf(stderr, " * Palette size: %d\n", stats->palette_size); fprintf(stderr, " * Palette size: %d\n", stats->palette_size);
} }
@ -312,7 +306,6 @@ static int MyWriter(const uint8_t* data, size_t data_size,
// Dumps a picture as a PGM file using the IMC4 layout. // Dumps a picture as a PGM file using the IMC4 layout.
static int DumpPicture(const WebPPicture* const picture, const char* PGM_name) { static int DumpPicture(const WebPPicture* const picture, const char* PGM_name) {
int y; int y;
int ok = 0;
const int uv_width = (picture->width + 1) / 2; const int uv_width = (picture->width + 1) / 2;
const int uv_height = (picture->height + 1) / 2; const int uv_height = (picture->height + 1) / 2;
const int stride = (picture->width + 1) & ~1; const int stride = (picture->width + 1) & ~1;
@ -327,26 +320,23 @@ static int DumpPicture(const WebPPicture* const picture, const char* PGM_name) {
if (f == NULL) return 0; if (f == NULL) return 0;
fprintf(f, "P5\n%d %d\n255\n", stride, height); fprintf(f, "P5\n%d %d\n255\n", stride, height);
for (y = 0; y < picture->height; ++y) { for (y = 0; y < picture->height; ++y) {
if (fwrite(src_y, picture->width, 1, f) != 1) goto Error; if (fwrite(src_y, picture->width, 1, f) != 1) return 0;
if (picture->width & 1) fputc(0, f); // pad if (picture->width & 1) fputc(0, f); // pad
src_y += picture->y_stride; src_y += picture->y_stride;
} }
for (y = 0; y < uv_height; ++y) { for (y = 0; y < uv_height; ++y) {
if (fwrite(src_u, uv_width, 1, f) != 1) goto Error; if (fwrite(src_u, uv_width, 1, f) != 1) return 0;
if (fwrite(src_v, uv_width, 1, f) != 1) goto Error; if (fwrite(src_v, uv_width, 1, f) != 1) return 0;
src_u += picture->uv_stride; src_u += picture->uv_stride;
src_v += picture->uv_stride; src_v += picture->uv_stride;
} }
for (y = 0; y < alpha_height; ++y) { for (y = 0; y < alpha_height; ++y) {
if (fwrite(src_a, picture->width, 1, f) != 1) goto Error; if (fwrite(src_a, picture->width, 1, f) != 1) return 0;
if (picture->width & 1) fputc(0, f); // pad if (picture->width & 1) fputc(0, f); // pad
src_a += picture->a_stride; src_a += picture->a_stride;
} }
ok = 1;
Error:
fclose(f); fclose(f);
return ok; return 1;
} }
// ----------------------------------------------------------------------------- // -----------------------------------------------------------------------------
@ -657,9 +647,8 @@ static const char* const kErrorMessages[VP8_ENC_ERROR_LAST] = {
//------------------------------------------------------------------------------ //------------------------------------------------------------------------------
// Returns EXIT_SUCCESS on success, EXIT_FAILURE on failure.
int main(int argc, const char* argv[]) { int main(int argc, const char* argv[]) {
int return_value = EXIT_FAILURE; int return_value = -1;
const char* in_file = NULL, *out_file = NULL, *dump_file = NULL; const char* in_file = NULL, *out_file = NULL, *dump_file = NULL;
FILE* out = NULL; FILE* out = NULL;
int c; int c;
@ -693,22 +682,22 @@ int main(int argc, const char* argv[]) {
!WebPPictureInit(&original_picture) || !WebPPictureInit(&original_picture) ||
!WebPConfigInit(&config)) { !WebPConfigInit(&config)) {
fprintf(stderr, "Error! Version mismatch!\n"); fprintf(stderr, "Error! Version mismatch!\n");
FREE_WARGV_AND_RETURN(EXIT_FAILURE); FREE_WARGV_AND_RETURN(-1);
} }
if (argc == 1) { if (argc == 1) {
HelpShort(); HelpShort();
FREE_WARGV_AND_RETURN(EXIT_FAILURE); FREE_WARGV_AND_RETURN(0);
} }
for (c = 1; c < argc; ++c) { for (c = 1; c < argc; ++c) {
int parse_error = 0; int parse_error = 0;
if (!strcmp(argv[c], "-h") || !strcmp(argv[c], "-help")) { if (!strcmp(argv[c], "-h") || !strcmp(argv[c], "-help")) {
HelpShort(); HelpShort();
FREE_WARGV_AND_RETURN(EXIT_SUCCESS); FREE_WARGV_AND_RETURN(0);
} else if (!strcmp(argv[c], "-H") || !strcmp(argv[c], "-longhelp")) { } else if (!strcmp(argv[c], "-H") || !strcmp(argv[c], "-longhelp")) {
HelpLong(); HelpLong();
FREE_WARGV_AND_RETURN(EXIT_SUCCESS); FREE_WARGV_AND_RETURN(0);
} else if (!strcmp(argv[c], "-o") && c + 1 < argc) { } else if (!strcmp(argv[c], "-o") && c + 1 < argc) {
out_file = (const char*)GET_WARGV(argv, ++c); out_file = (const char*)GET_WARGV(argv, ++c);
} else if (!strcmp(argv[c], "-d") && c + 1 < argc) { } else if (!strcmp(argv[c], "-d") && c + 1 < argc) {
@ -849,7 +838,7 @@ int main(int argc, const char* argv[]) {
printf("libsharpyuv: %d.%d.%d\n", printf("libsharpyuv: %d.%d.%d\n",
(sharpyuv_version >> 24) & 0xff, (sharpyuv_version >> 16) & 0xffff, (sharpyuv_version >> 24) & 0xff, (sharpyuv_version >> 16) & 0xffff,
sharpyuv_version & 0xff); sharpyuv_version & 0xff);
FREE_WARGV_AND_RETURN(EXIT_SUCCESS); FREE_WARGV_AND_RETURN(0);
} else if (!strcmp(argv[c], "-progress")) { } else if (!strcmp(argv[c], "-progress")) {
show_progress = 1; show_progress = 1;
} else if (!strcmp(argv[c], "-quiet")) { } else if (!strcmp(argv[c], "-quiet")) {
@ -911,7 +900,7 @@ int main(int argc, const char* argv[]) {
if (i == kNumTokens) { if (i == kNumTokens) {
fprintf(stderr, "Error! Unknown metadata type '%.*s'\n", fprintf(stderr, "Error! Unknown metadata type '%.*s'\n",
(int)(token - start), start); (int)(token - start), start);
FREE_WARGV_AND_RETURN(EXIT_FAILURE); FREE_WARGV_AND_RETURN(-1);
} }
start = token + 1; start = token + 1;
} }
@ -930,14 +919,14 @@ int main(int argc, const char* argv[]) {
} else if (argv[c][0] == '-') { } else if (argv[c][0] == '-') {
fprintf(stderr, "Error! Unknown option '%s'\n", argv[c]); fprintf(stderr, "Error! Unknown option '%s'\n", argv[c]);
HelpLong(); HelpLong();
FREE_WARGV_AND_RETURN(EXIT_FAILURE); FREE_WARGV_AND_RETURN(-1);
} else { } else {
in_file = (const char*)GET_WARGV(argv, c); in_file = (const char*)GET_WARGV(argv, c);
} }
if (parse_error) { if (parse_error) {
HelpLong(); HelpLong();
FREE_WARGV_AND_RETURN(EXIT_FAILURE); FREE_WARGV_AND_RETURN(-1);
} }
} }
if (in_file == NULL) { if (in_file == NULL) {
@ -1238,7 +1227,7 @@ int main(int argc, const char* argv[]) {
PrintMetadataInfo(&metadata, metadata_written); PrintMetadataInfo(&metadata, metadata_written);
} }
} }
return_value = EXIT_SUCCESS; return_value = 0;
Error: Error:
WebPMemoryWriterClear(&memory_writer); WebPMemoryWriterClear(&memory_writer);
@ -177,7 +177,6 @@ static uint8_t* AllocateExternalBuffer(WebPDecoderConfig* config,
return external_buffer; return external_buffer;
} }
// Returns EXIT_SUCCESS on success, EXIT_FAILURE on failure.
int main(int argc, const char* argv[]) { int main(int argc, const char* argv[]) {
int ok = 0; int ok = 0;
const char* in_file = NULL; const char* in_file = NULL;
@ -198,14 +197,14 @@ int main(int argc, const char* argv[]) {
if (!WebPInitDecoderConfig(&config)) { if (!WebPInitDecoderConfig(&config)) {
fprintf(stderr, "Library version mismatch!\n"); fprintf(stderr, "Library version mismatch!\n");
FREE_WARGV_AND_RETURN(EXIT_FAILURE); FREE_WARGV_AND_RETURN(-1);
} }
for (c = 1; c < argc; ++c) { for (c = 1; c < argc; ++c) {
int parse_error = 0; int parse_error = 0;
if (!strcmp(argv[c], "-h") || !strcmp(argv[c], "-help")) { if (!strcmp(argv[c], "-h") || !strcmp(argv[c], "-help")) {
Help(); Help();
FREE_WARGV_AND_RETURN(EXIT_SUCCESS); FREE_WARGV_AND_RETURN(0);
} else if (!strcmp(argv[c], "-o") && c < argc - 1) { } else if (!strcmp(argv[c], "-o") && c < argc - 1) {
out_file = (const char*)GET_WARGV(argv, ++c); out_file = (const char*)GET_WARGV(argv, ++c);
} else if (!strcmp(argv[c], "-alpha")) { } else if (!strcmp(argv[c], "-alpha")) {
@ -228,7 +227,7 @@ int main(int argc, const char* argv[]) {
const int version = WebPGetDecoderVersion(); const int version = WebPGetDecoderVersion();
printf("%d.%d.%d\n", printf("%d.%d.%d\n",
(version >> 16) & 0xff, (version >> 8) & 0xff, version & 0xff); (version >> 16) & 0xff, (version >> 8) & 0xff, version & 0xff);
FREE_WARGV_AND_RETURN(EXIT_SUCCESS); FREE_WARGV_AND_RETURN(0);
} else if (!strcmp(argv[c], "-pgm")) { } else if (!strcmp(argv[c], "-pgm")) {
format = PGM; format = PGM;
} else if (!strcmp(argv[c], "-yuv")) { } else if (!strcmp(argv[c], "-yuv")) {
@ -294,21 +293,21 @@ int main(int argc, const char* argv[]) {
} else if (argv[c][0] == '-') { } else if (argv[c][0] == '-') {
fprintf(stderr, "Unknown option '%s'\n", argv[c]); fprintf(stderr, "Unknown option '%s'\n", argv[c]);
Help(); Help();
FREE_WARGV_AND_RETURN(EXIT_FAILURE); FREE_WARGV_AND_RETURN(-1);
} else { } else {
in_file = (const char*)GET_WARGV(argv, c); in_file = (const char*)GET_WARGV(argv, c);
} }
if (parse_error) { if (parse_error) {
Help(); Help();
FREE_WARGV_AND_RETURN(EXIT_FAILURE); FREE_WARGV_AND_RETURN(-1);
} }
} }
if (in_file == NULL) { if (in_file == NULL) {
fprintf(stderr, "missing input file!!\n"); fprintf(stderr, "missing input file!!\n");
Help(); Help();
FREE_WARGV_AND_RETURN(EXIT_FAILURE); FREE_WARGV_AND_RETURN(-1);
} }
if (quiet) verbose = 0; if (quiet) verbose = 0;
@ -317,7 +316,7 @@ int main(int argc, const char* argv[]) {
VP8StatusCode status = VP8_STATUS_OK; VP8StatusCode status = VP8_STATUS_OK;
size_t data_size = 0; size_t data_size = 0;
if (!LoadWebP(in_file, &data, &data_size, bitstream)) { if (!LoadWebP(in_file, &data, &data_size, bitstream)) {
FREE_WARGV_AND_RETURN(EXIT_FAILURE); FREE_WARGV_AND_RETURN(-1);
} }
switch (format) { switch (format) {
@ -416,7 +415,7 @@ int main(int argc, const char* argv[]) {
WebPFreeDecBuffer(output_buffer); WebPFreeDecBuffer(output_buffer);
WebPFree((void*)external_buffer); WebPFree((void*)external_buffer);
WebPFree((void*)data); WebPFree((void*)data);
FREE_WARGV_AND_RETURN(ok ? EXIT_SUCCESS : EXIT_FAILURE); FREE_WARGV_AND_RETURN(ok ? 0 : -1);
} }
//------------------------------------------------------------------------------ //------------------------------------------------------------------------------
@ -66,17 +66,17 @@ float ExUtilGetFloat(const char* const v, int* const error) {
static void ResetCommandLineArguments(int argc, const char* argv[], static void ResetCommandLineArguments(int argc, const char* argv[],
CommandLineArguments* const args) { CommandLineArguments* const args) {
assert(args != NULL); assert(args != NULL);
args->argc = argc; args->argc_ = argc;
args->argv = argv; args->argv_ = argv;
args->own_argv = 0; args->own_argv_ = 0;
WebPDataInit(&args->argv_data); WebPDataInit(&args->argv_data_);
} }
void ExUtilDeleteCommandLineArguments(CommandLineArguments* const args) { void ExUtilDeleteCommandLineArguments(CommandLineArguments* const args) {
if (args != NULL) { if (args != NULL) {
if (args->own_argv) { if (args->own_argv_) {
WebPFree((void*)args->argv); WebPFree((void*)args->argv_);
WebPDataClear(&args->argv_data); WebPDataClear(&args->argv_data_);
} }
ResetCommandLineArguments(0, NULL, args); ResetCommandLineArguments(0, NULL, args);
} }
@ -98,18 +98,18 @@ int ExUtilInitCommandLineArguments(int argc, const char* argv[],
return 0; return 0;
#endif #endif
if (!ExUtilReadFileToWebPData(argv[0], &args->argv_data)) { if (!ExUtilReadFileToWebPData(argv[0], &args->argv_data_)) {
return 0; return 0;
} }
args->own_argv = 1; args->own_argv_ = 1;
args->argv = (const char**)WebPMalloc(MAX_ARGC * sizeof(*args->argv)); args->argv_ = (const char**)WebPMalloc(MAX_ARGC * sizeof(*args->argv_));
if (args->argv == NULL) { if (args->argv_ == NULL) {
ExUtilDeleteCommandLineArguments(args); ExUtilDeleteCommandLineArguments(args);
return 0; return 0;
} }
argc = 0; argc = 0;
for (cur = strtok((char*)args->argv_data.bytes, sep); for (cur = strtok((char*)args->argv_data_.bytes, sep);
cur != NULL; cur != NULL;
cur = strtok(NULL, sep)) { cur = strtok(NULL, sep)) {
if (argc == MAX_ARGC) { if (argc == MAX_ARGC) {
@ -118,9 +118,9 @@ int ExUtilInitCommandLineArguments(int argc, const char* argv[],
return 0; return 0;
} }
assert(strlen(cur) != 0); assert(strlen(cur) != 0);
args->argv[argc++] = cur; args->argv_[argc++] = cur;
} }
args->argc = argc; args->argc_ = argc;
} }
return 1; return 1;
} }
@ -45,10 +45,10 @@ int ExUtilReadFileToWebPData(const char* const filename,
// Command-line arguments // Command-line arguments
typedef struct { typedef struct {
int argc; int argc_;
const char** argv; const char** argv_;
WebPData argv_data; WebPData argv_data_;
int own_argv; int own_argv_;
} CommandLineArguments; } CommandLineArguments;
// Initializes the structure from the command-line parameters. If there is // Initializes the structure from the command-line parameters. If there is
@ -28,7 +28,6 @@
#endif #endif
#include <gif_lib.h> #include <gif_lib.h>
#include "sharpyuv/sharpyuv.h"
#include "webp/encode.h" #include "webp/encode.h"
#include "webp/mux.h" #include "webp/mux.h"
#include "../examples/example_util.h" #include "../examples/example_util.h"
@ -71,14 +70,8 @@ static void Help(void) {
printf(" -lossy ................. encode image using lossy compression\n"); printf(" -lossy ................. encode image using lossy compression\n");
printf(" -mixed ................. for each frame in the image, pick lossy\n" printf(" -mixed ................. for each frame in the image, pick lossy\n"
" or lossless compression heuristically\n"); " or lossless compression heuristically\n");
printf(" -near_lossless <int> ... use near-lossless image preprocessing\n"
" (0..100=off), default=100\n");
printf(" -sharp_yuv ............. use sharper (and slower) RGB->YUV "
"conversion\n"
" (lossy only)\n");
printf(" -q <float> ............. quality factor (0:small..100:big)\n"); printf(" -q <float> ............. quality factor (0:small..100:big)\n");
printf(" -m <int> ............... compression method (0=fast, 6=slowest), " printf(" -m <int> ............... compression method (0=fast, 6=slowest)\n");
"default=4\n");
printf(" -min_size .............. minimize output size (default:off)\n" printf(" -min_size .............. minimize output size (default:off)\n"
" lossless compression by default; can be\n" " lossless compression by default; can be\n"
" combined with -q, -m, -lossy or -mixed\n" " combined with -q, -m, -lossy or -mixed\n"
@ -103,7 +96,6 @@ static void Help(void) {
//------------------------------------------------------------------------------ //------------------------------------------------------------------------------
// Returns EXIT_SUCCESS on success, EXIT_FAILURE on failure.
int main(int argc, const char* argv[]) { int main(int argc, const char* argv[]) {
int verbose = 0; int verbose = 0;
int gif_error = GIF_ERROR; int gif_error = GIF_ERROR;
@ -148,7 +140,7 @@ int main(int argc, const char* argv[]) {
!WebPPictureInit(&frame) || !WebPPictureInit(&curr_canvas) || !WebPPictureInit(&frame) || !WebPPictureInit(&curr_canvas) ||
!WebPPictureInit(&prev_canvas)) { !WebPPictureInit(&prev_canvas)) {
fprintf(stderr, "Error! Version mismatch!\n"); fprintf(stderr, "Error! Version mismatch!\n");
FREE_WARGV_AND_RETURN(EXIT_FAILURE); FREE_WARGV_AND_RETURN(-1);
} }
config.lossless = 1; // Use lossless compression by default. config.lossless = 1; // Use lossless compression by default.
@ -158,14 +150,14 @@ int main(int argc, const char* argv[]) {
if (argc == 1) { if (argc == 1) {
Help(); Help();
FREE_WARGV_AND_RETURN(EXIT_FAILURE); FREE_WARGV_AND_RETURN(0);
} }
for (c = 1; c < argc; ++c) { for (c = 1; c < argc; ++c) {
int parse_error = 0; int parse_error = 0;
if (!strcmp(argv[c], "-h") || !strcmp(argv[c], "-help")) { if (!strcmp(argv[c], "-h") || !strcmp(argv[c], "-help")) {
Help(); Help();
FREE_WARGV_AND_RETURN(EXIT_SUCCESS); FREE_WARGV_AND_RETURN(0);
} else if (!strcmp(argv[c], "-o") && c < argc - 1) { } else if (!strcmp(argv[c], "-o") && c < argc - 1) {
out_file = GET_WARGV(argv, ++c); out_file = GET_WARGV(argv, ++c);
} else if (!strcmp(argv[c], "-lossy")) { } else if (!strcmp(argv[c], "-lossy")) {
@ -173,10 +165,6 @@ int main(int argc, const char* argv[]) {
} else if (!strcmp(argv[c], "-mixed")) { } else if (!strcmp(argv[c], "-mixed")) {
enc_options.allow_mixed = 1; enc_options.allow_mixed = 1;
config.lossless = 0; config.lossless = 0;
} else if (!strcmp(argv[c], "-near_lossless") && c < argc - 1) {
config.near_lossless = ExUtilGetInt(argv[++c], 0, &parse_error);
} else if (!strcmp(argv[c], "-sharp_yuv")) {
config.use_sharp_yuv = 1;
} else if (!strcmp(argv[c], "-loop_compatibility")) { } else if (!strcmp(argv[c], "-loop_compatibility")) {
loop_compatibility = 1; loop_compatibility = 1;
} else if (!strcmp(argv[c], "-q") && c < argc - 1) { } else if (!strcmp(argv[c], "-q") && c < argc - 1) {
@ -228,7 +216,7 @@ int main(int argc, const char* argv[]) {
fprintf(stderr, "Error! Unknown metadata type '%.*s'\n", fprintf(stderr, "Error! Unknown metadata type '%.*s'\n",
(int)(token - start), start); (int)(token - start), start);
Help(); Help();
FREE_WARGV_AND_RETURN(EXIT_FAILURE); FREE_WARGV_AND_RETURN(-1);
} }
start = token + 1; start = token + 1;
} }
@ -237,14 +225,11 @@ int main(int argc, const char* argv[]) {
} else if (!strcmp(argv[c], "-version")) { } else if (!strcmp(argv[c], "-version")) {
const int enc_version = WebPGetEncoderVersion(); const int enc_version = WebPGetEncoderVersion();
const int mux_version = WebPGetMuxVersion(); const int mux_version = WebPGetMuxVersion();
const int sharpyuv_version = SharpYuvGetVersion();
printf("WebP Encoder version: %d.%d.%d\nWebP Mux version: %d.%d.%d\n", printf("WebP Encoder version: %d.%d.%d\nWebP Mux version: %d.%d.%d\n",
(enc_version >> 16) & 0xff, (enc_version >> 8) & 0xff, (enc_version >> 16) & 0xff, (enc_version >> 8) & 0xff,
enc_version & 0xff, (mux_version >> 16) & 0xff, enc_version & 0xff, (mux_version >> 16) & 0xff,
(mux_version >> 8) & 0xff, mux_version & 0xff); (mux_version >> 8) & 0xff, mux_version & 0xff);
printf("libsharpyuv: %d.%d.%d\n", (sharpyuv_version >> 24) & 0xff, FREE_WARGV_AND_RETURN(0);
(sharpyuv_version >> 16) & 0xffff, sharpyuv_version & 0xff);
FREE_WARGV_AND_RETURN(EXIT_SUCCESS);
} else if (!strcmp(argv[c], "-quiet")) { } else if (!strcmp(argv[c], "-quiet")) {
quiet = 1; quiet = 1;
enc_options.verbose = 0; enc_options.verbose = 0;
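
The three shifts here unpack the version numbers that WebPGetEncoderVersion() and WebPGetMuxVersion() return: major, minor and patch occupy one byte each. A small stand-alone illustration; the PrintVersion helper and the 0x010302 value are made up for the example (it decodes to 1.3.2). libsharpyuv packs its version differently, which is why the main side of this hunk uses different shifts for SharpYuvGetVersion().

#include <stdio.h>

static void PrintVersion(const char* name, int version) {
  printf("%s version: %d.%d.%d\n", name,
         (version >> 16) & 0xff, (version >> 8) & 0xff, version & 0xff);
}

int main(void) {
  PrintVersion("example", 0x010302);   // prints "example version: 1.3.2"
  return 0;
}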
@ -257,14 +242,14 @@ int main(int argc, const char* argv[]) {
} else if (argv[c][0] == '-') { } else if (argv[c][0] == '-') {
fprintf(stderr, "Error! Unknown option '%s'\n", argv[c]); fprintf(stderr, "Error! Unknown option '%s'\n", argv[c]);
Help(); Help();
FREE_WARGV_AND_RETURN(EXIT_FAILURE); FREE_WARGV_AND_RETURN(-1);
} else { } else {
in_file = GET_WARGV(argv, c); in_file = GET_WARGV(argv, c);
} }
if (parse_error) { if (parse_error) {
Help(); Help();
FREE_WARGV_AND_RETURN(EXIT_FAILURE); FREE_WARGV_AND_RETURN(-1);
} }
} }
@ -608,7 +593,7 @@ int main(int argc, const char* argv[]) {
#endif #endif
} }
FREE_WARGV_AND_RETURN(ok ? EXIT_SUCCESS : EXIT_FAILURE); FREE_WARGV_AND_RETURN(!ok);
} }
#else // !WEBP_HAVE_GIF #else // !WEBP_HAVE_GIF
@ -616,7 +601,7 @@ int main(int argc, const char* argv[]) {
int main(int argc, const char* argv[]) { int main(int argc, const char* argv[]) {
fprintf(stderr, "GIF support not enabled in %s.\n", argv[0]); fprintf(stderr, "GIF support not enabled in %s.\n", argv[0]);
(void)argc; (void)argc;
return EXIT_FAILURE; return 0;
} }
#endif #endif
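
Throughout these tools the main side returns EXIT_SUCCESS / EXIT_FAILURE from <stdlib.h> instead of raw 0 / -1. Besides reading better, a negative return from main() is only reported modulo 256 to a POSIX parent process, so -1 shows up as 255. A toy illustration of the convention, not taken from any of the tools:

#include <stdio.h>
#include <stdlib.h>

int main(int argc, const char* argv[]) {
  if (argc < 2) {
    fprintf(stderr, "usage: %s <input>\n", argv[0]);
    return EXIT_FAILURE;   // portable; a raw -1 would surface as 255 on POSIX
  }
  printf("processing %s\n", argv[1]);
  return EXIT_SUCCESS;
}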


@ -317,7 +317,7 @@ void GIFDisplayError(const GifFileType* const gif, int gif_error) {
#else // !WEBP_HAVE_GIF #else // !WEBP_HAVE_GIF
static void ErrorGIFNotAvailable(void) { static void ErrorGIFNotAvailable() {
fprintf(stderr, "GIF support not compiled. Please install the libgif-dev " fprintf(stderr, "GIF support not compiled. Please install the libgif-dev "
"package before building.\n"); "package before building.\n");
} }


@ -59,15 +59,10 @@ static void Help(void) {
printf("Per-frame options (only used for subsequent images input):\n"); printf("Per-frame options (only used for subsequent images input):\n");
printf(" -d <int> ............. frame duration in ms (default: 100)\n"); printf(" -d <int> ............. frame duration in ms (default: 100)\n");
printf(" -lossless ............ use lossless mode (default)\n"); printf(" -lossless ........... use lossless mode (default)\n");
printf(" -lossy ............... use lossy mode\n"); printf(" -lossy ... ........... use lossy mode\n");
printf(" -q <float> ........... quality\n"); printf(" -q <float> ........... quality\n");
printf(" -m <int> ............. compression method (0=fast, 6=slowest), " printf(" -m <int> ............. method to use\n");
"default=4\n");
printf(" -exact, -noexact ..... preserve or alter RGB values in transparent "
"area\n"
" (default: -noexact, may cause artifacts\n"
" with lossy animations)\n");
printf("\n"); printf("\n");
printf("example: img2webp -loop 2 in0.png -lossy in1.jpg\n" printf("example: img2webp -loop 2 in0.png -lossy in1.jpg\n"
@ -135,7 +130,6 @@ static int SetLoopCount(int loop_count, WebPData* const webp_data) {
//------------------------------------------------------------------------------ //------------------------------------------------------------------------------
// Returns EXIT_SUCCESS on success, EXIT_FAILURE on failure.
int main(int argc, const char* argv[]) { int main(int argc, const char* argv[]) {
const char* output = NULL; const char* output = NULL;
WebPAnimEncoder* enc = NULL; WebPAnimEncoder* enc = NULL;
@ -151,17 +145,16 @@ int main(int argc, const char* argv[]) {
WebPData webp_data; WebPData webp_data;
int c; int c;
int have_input = 0; int have_input = 0;
int last_input_index = 0;
CommandLineArguments cmd_args; CommandLineArguments cmd_args;
int ok; int ok;
INIT_WARGV(argc, argv); INIT_WARGV(argc, argv);
ok = ExUtilInitCommandLineArguments(argc - 1, argv + 1, &cmd_args); ok = ExUtilInitCommandLineArguments(argc - 1, argv + 1, &cmd_args);
if (!ok) FREE_WARGV_AND_RETURN(EXIT_FAILURE); if (!ok) FREE_WARGV_AND_RETURN(1);
argc = cmd_args.argc; argc = cmd_args.argc_;
argv = cmd_args.argv; argv = cmd_args.argv_;
WebPDataInit(&webp_data); WebPDataInit(&webp_data);
if (!WebPAnimEncoderOptionsInit(&anim_config) || if (!WebPAnimEncoderOptionsInit(&anim_config) ||
@ -206,7 +199,7 @@ int main(int argc, const char* argv[]) {
verbose = 1; verbose = 1;
} else if (!strcmp(argv[c], "-h") || !strcmp(argv[c], "-help")) { } else if (!strcmp(argv[c], "-h") || !strcmp(argv[c], "-help")) {
Help(); Help();
FREE_WARGV_AND_RETURN(EXIT_SUCCESS); FREE_WARGV_AND_RETURN(0);
} else if (!strcmp(argv[c], "-version")) { } else if (!strcmp(argv[c], "-version")) {
const int enc_version = WebPGetEncoderVersion(); const int enc_version = WebPGetEncoderVersion();
const int mux_version = WebPGetMuxVersion(); const int mux_version = WebPGetMuxVersion();
@ -230,8 +223,6 @@ int main(int argc, const char* argv[]) {
} }
if (!have_input) { if (!have_input) {
fprintf(stderr, "No input file(s) for generating animation!\n"); fprintf(stderr, "No input file(s) for generating animation!\n");
ok = 0;
Help();
goto End; goto End;
} }
@ -256,10 +247,6 @@ int main(int argc, const char* argv[]) {
fprintf(stderr, "Invalid negative duration (%d)\n", duration); fprintf(stderr, "Invalid negative duration (%d)\n", duration);
parse_error = 1; parse_error = 1;
} }
} else if (!strcmp(argv[c], "-exact")) {
config.exact = 1;
} else if (!strcmp(argv[c], "-noexact")) {
config.exact = 0;
} else { } else {
parse_error = 1; // shouldn't be here. parse_error = 1; // shouldn't be here.
fprintf(stderr, "Unknown option [%s]\n", argv[c]); fprintf(stderr, "Unknown option [%s]\n", argv[c]);
@ -280,7 +267,6 @@ int main(int argc, const char* argv[]) {
// read next input image // read next input image
pic.use_argb = 1; pic.use_argb = 1;
ok = ReadImage((const char*)GET_WARGV_SHIFTED(argv, c), &pic); ok = ReadImage((const char*)GET_WARGV_SHIFTED(argv, c), &pic);
last_input_index = c;
if (!ok) goto End; if (!ok) goto End;
if (enc == NULL) { if (enc == NULL) {
@ -319,13 +305,6 @@ int main(int argc, const char* argv[]) {
++pic_num; ++pic_num;
} }
for (c = last_input_index + 1; c < argc; ++c) {
if (argv[c] != NULL) {
fprintf(stderr, "Warning: unused option [%s]!"
" Frame options go before the input frame.\n", argv[c]);
}
}
// add a last fake frame to signal the last duration // add a last fake frame to signal the last duration
ok = ok && WebPAnimEncoderAdd(enc, NULL, timestamp_ms, NULL); ok = ok && WebPAnimEncoderAdd(enc, NULL, timestamp_ms, NULL);
ok = ok && WebPAnimEncoderAssemble(enc, &webp_data); ok = ok && WebPAnimEncoderAssemble(enc, &webp_data);
@ -356,5 +335,5 @@ int main(int argc, const char* argv[]) {
} }
WebPDataClear(&webp_data); WebPDataClear(&webp_data);
ExUtilDeleteCommandLineArguments(&cmd_args); ExUtilDeleteCommandLineArguments(&cmd_args);
FREE_WARGV_AND_RETURN(ok ? EXIT_SUCCESS : EXIT_FAILURE); FREE_WARGV_AND_RETURN(ok ? 0 : 1);
} }
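
The loop above feeds each decoded frame to the animation encoder with its starting timestamp, then adds a NULL frame so the encoder knows the duration of the final real frame before assembling the container. A condensed sketch of that flow using only public WebPAnimEncoder calls; the 64x64 solid-colour frames stand in for images read from disk, and error handling is trimmed:

#include <stdlib.h>
#include <string.h>
#include "webp/encode.h"
#include "webp/mux.h"

int main(void) {
  const int kW = 64, kH = 64, kDurationMs = 100;
  WebPAnimEncoderOptions anim_config;
  WebPConfig config;
  WebPPicture pic;
  WebPData webp_data;
  WebPAnimEncoder* enc;
  uint8_t* const rgba = (uint8_t*)malloc((size_t)4 * kW * kH);
  int i, timestamp_ms = 0;

  if (!WebPAnimEncoderOptionsInit(&anim_config) || !WebPConfigInit(&config) ||
      !WebPPictureInit(&pic) || rgba == NULL) return EXIT_FAILURE;
  WebPDataInit(&webp_data);
  enc = WebPAnimEncoderNew(kW, kH, &anim_config);

  for (i = 0; i < 3; ++i) {                          // three frames, 100 ms apart
    memset(rgba, 80 * (i + 1), (size_t)4 * kW * kH); // stand-in frame content
    pic.width = kW;
    pic.height = kH;
    pic.use_argb = 1;
    WebPPictureImportRGBA(&pic, rgba, 4 * kW);
    WebPAnimEncoderAdd(enc, &pic, timestamp_ms, &config);
    WebPPictureFree(&pic);
    timestamp_ms += kDurationMs;
  }
  WebPAnimEncoderAdd(enc, NULL, timestamp_ms, NULL);  // fake last frame: fixes final duration
  WebPAnimEncoderAssemble(enc, &webp_data);
  // ... write webp_data.bytes / webp_data.size to a file here ...
  WebPAnimEncoderDelete(enc);
  WebPDataClear(&webp_data);
  free(rgba);
  return EXIT_SUCCESS;
}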


@ -18,7 +18,6 @@
#define _POSIX_C_SOURCE 200112L // for setenv #define _POSIX_C_SOURCE 200112L // for setenv
#endif #endif
#include <assert.h>
#include <stdio.h> #include <stdio.h>
#include <stdlib.h> #include <stdlib.h>
#include <string.h> #include <string.h>
@ -431,13 +430,10 @@ static void HandleDisplay(void) {
#endif #endif
} }
static void StartDisplay(const char* filename) { static void StartDisplay(void) {
int width = kParams.canvas_width; int width = kParams.canvas_width;
int height = kParams.canvas_height; int height = kParams.canvas_height;
int screen_width, screen_height; int screen_width, screen_height;
const char viewername[] = " - WebP viewer";
// max linux file len + viewername string
char title[4096 + sizeof(viewername)] = "";
// TODO(webp:365) GLUT_DOUBLE results in flickering / old frames to be // TODO(webp:365) GLUT_DOUBLE results in flickering / old frames to be
// partially displayed with animated webp + alpha. // partially displayed with animated webp + alpha.
#if defined(__APPLE__) || defined(_WIN32) #if defined(__APPLE__) || defined(_WIN32)
@ -457,9 +453,8 @@ static void StartDisplay(const char* filename) {
height = screen_height; height = screen_height;
} }
} }
snprintf(title, sizeof(title), "%s%s", filename, viewername);
glutInitWindowSize(width, height); glutInitWindowSize(width, height);
glutCreateWindow(title); glutCreateWindow("WebP viewer");
glutDisplayFunc(HandleDisplay); glutDisplayFunc(HandleDisplay);
glutReshapeFunc(HandleReshape); glutReshapeFunc(HandleReshape);
glutIdleFunc(NULL); glutIdleFunc(NULL);
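
Composing the window title with snprintf() keeps the concatenation bounded: if the file name does not fit the budget, the result is truncated but still NUL-terminated, and the return value reports how much space would have been needed. A stand-alone illustration with a deliberately tiny buffer; the names are made up:

#include <stdio.h>

int main(void) {
  const char viewername[] = " - WebP viewer";
  char title[32 + sizeof(viewername)];            // deliberately small budget
  const char* filename = "a_rather_long_animation_name.webp";
  const int n = snprintf(title, sizeof(title), "%s%s", filename, viewername);
  printf("title: \"%s\"\n", title);               // truncated but NUL-terminated
  printf("would have needed %d bytes\n", n + 1);  // snprintf reports the full length
  return 0;
}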
@ -498,7 +493,7 @@ static void Help(void) {
} }
int main(int argc, char* argv[]) { int main(int argc, char* argv[]) {
int c, file_name_argv_index = 1; int c;
WebPDecoderConfig* const config = &kParams.config; WebPDecoderConfig* const config = &kParams.config;
WebPIterator* const curr = &kParams.curr_frame; WebPIterator* const curr = &kParams.curr_frame;
@ -506,7 +501,7 @@ int main(int argc, char* argv[]) {
if (!WebPInitDecoderConfig(config)) { if (!WebPInitDecoderConfig(config)) {
fprintf(stderr, "Library version mismatch!\n"); fprintf(stderr, "Library version mismatch!\n");
FREE_WARGV_AND_RETURN(EXIT_FAILURE); FREE_WARGV_AND_RETURN(-1);
} }
config->options.dithering_strength = 50; config->options.dithering_strength = 50;
config->options.alpha_dithering_strength = 100; config->options.alpha_dithering_strength = 100;
@ -518,7 +513,7 @@ int main(int argc, char* argv[]) {
int parse_error = 0; int parse_error = 0;
if (!strcmp(argv[c], "-h") || !strcmp(argv[c], "-help")) { if (!strcmp(argv[c], "-h") || !strcmp(argv[c], "-help")) {
Help(); Help();
FREE_WARGV_AND_RETURN(EXIT_SUCCESS); FREE_WARGV_AND_RETURN(0);
} else if (!strcmp(argv[c], "-noicc")) { } else if (!strcmp(argv[c], "-noicc")) {
kParams.use_color_profile = 0; kParams.use_color_profile = 0;
} else if (!strcmp(argv[c], "-nofancy")) { } else if (!strcmp(argv[c], "-nofancy")) {
@ -541,34 +536,30 @@ int main(int argc, char* argv[]) {
(dec_version >> 16) & 0xff, (dec_version >> 8) & 0xff, (dec_version >> 16) & 0xff, (dec_version >> 8) & 0xff,
dec_version & 0xff, (dmux_version >> 16) & 0xff, dec_version & 0xff, (dmux_version >> 16) & 0xff,
(dmux_version >> 8) & 0xff, dmux_version & 0xff); (dmux_version >> 8) & 0xff, dmux_version & 0xff);
FREE_WARGV_AND_RETURN(EXIT_SUCCESS); FREE_WARGV_AND_RETURN(0);
} else if (!strcmp(argv[c], "-mt")) { } else if (!strcmp(argv[c], "-mt")) {
config->options.use_threads = 1; config->options.use_threads = 1;
} else if (!strcmp(argv[c], "--")) { } else if (!strcmp(argv[c], "--")) {
if (c < argc - 1) { if (c < argc - 1) kParams.file_name = (const char*)GET_WARGV(argv, ++c);
kParams.file_name = (const char*)GET_WARGV(argv, ++c);
file_name_argv_index = c;
}
break; break;
} else if (argv[c][0] == '-') { } else if (argv[c][0] == '-') {
printf("Unknown option '%s'\n", argv[c]); printf("Unknown option '%s'\n", argv[c]);
Help(); Help();
FREE_WARGV_AND_RETURN(EXIT_FAILURE); FREE_WARGV_AND_RETURN(-1);
} else { } else {
kParams.file_name = (const char*)GET_WARGV(argv, c); kParams.file_name = (const char*)GET_WARGV(argv, c);
file_name_argv_index = c;
} }
if (parse_error) { if (parse_error) {
Help(); Help();
FREE_WARGV_AND_RETURN(EXIT_FAILURE); FREE_WARGV_AND_RETURN(-1);
} }
} }
if (kParams.file_name == NULL) { if (kParams.file_name == NULL) {
printf("missing input file!!\n"); printf("missing input file!!\n");
Help(); Help();
FREE_WARGV_AND_RETURN(EXIT_FAILURE); FREE_WARGV_AND_RETURN(0);
} }
if (!ImgIoUtilReadFile(kParams.file_name, if (!ImgIoUtilReadFile(kParams.file_name,
@ -622,7 +613,7 @@ int main(int argc, char* argv[]) {
// Position iterator to last frame. Next call to HandleDisplay will wrap over. // Position iterator to last frame. Next call to HandleDisplay will wrap over.
// We take this into account by bumping up loop_count. // We take this into account by bumping up loop_count.
if (!WebPDemuxGetFrame(kParams.dmux, 0, curr)) goto Error; WebPDemuxGetFrame(kParams.dmux, 0, curr);
if (kParams.loop_count) ++kParams.loop_count; if (kParams.loop_count) ++kParams.loop_count;
#if defined(__unix__) || defined(__CYGWIN__) #if defined(__unix__) || defined(__CYGWIN__)
@ -636,18 +627,18 @@ int main(int argc, char* argv[]) {
#ifdef FREEGLUT #ifdef FREEGLUT
glutSetOption(GLUT_ACTION_ON_WINDOW_CLOSE, GLUT_ACTION_CONTINUE_EXECUTION); glutSetOption(GLUT_ACTION_ON_WINDOW_CLOSE, GLUT_ACTION_CONTINUE_EXECUTION);
#endif #endif
StartDisplay(argv[file_name_argv_index]); StartDisplay();
if (kParams.has_animation) glutTimerFunc(0, decode_callback, 0); if (kParams.has_animation) glutTimerFunc(0, decode_callback, 0);
glutMainLoop(); glutMainLoop();
// Should only be reached when using FREEGLUT: // Should only be reached when using FREEGLUT:
ClearParams(); ClearParams();
FREE_WARGV_AND_RETURN(EXIT_SUCCESS); FREE_WARGV_AND_RETURN(0);
Error: Error:
ClearParams(); ClearParams();
FREE_WARGV_AND_RETURN(EXIT_FAILURE); FREE_WARGV_AND_RETURN(-1);
} }
#else // !WEBP_HAVE_GL #else // !WEBP_HAVE_GL
@ -655,7 +646,7 @@ int main(int argc, char* argv[]) {
int main(int argc, const char* argv[]) { int main(int argc, const char* argv[]) {
fprintf(stderr, "OpenGL support not enabled in %s.\n", argv[0]); fprintf(stderr, "OpenGL support not enabled in %s.\n", argv[0]);
(void)argc; (void)argc;
return EXIT_FAILURE; return 0;
} }
#endif #endif


@ -14,7 +14,6 @@
#include <assert.h> #include <assert.h>
#include <stdio.h> #include <stdio.h>
#include <stdlib.h>
#ifdef HAVE_CONFIG_H #ifdef HAVE_CONFIG_H
#include "webp/config.h" #include "webp/config.h"
@ -32,17 +31,17 @@
#define LOG_ERROR(MESSAGE) \ #define LOG_ERROR(MESSAGE) \
do { \ do { \
if (webp_info->show_diagnosis) { \ if (webp_info->show_diagnosis_) { \
fprintf(stderr, "Error: %s\n", MESSAGE); \ fprintf(stderr, "Error: %s\n", MESSAGE); \
} \ } \
} while (0) } while (0)
#define LOG_WARN(MESSAGE) \ #define LOG_WARN(MESSAGE) \
do { \ do { \
if (webp_info->show_diagnosis) { \ if (webp_info->show_diagnosis_) { \
fprintf(stderr, "Warning: %s\n", MESSAGE); \ fprintf(stderr, "Warning: %s\n", MESSAGE); \
} \ } \
++webp_info->num_warnings; \ ++webp_info->num_warnings_; \
} while (0) } while (0)
static const char* const kFormats[3] = { static const char* const kFormats[3] = {
@ -90,36 +89,36 @@ typedef enum ChunkID {
} ChunkID; } ChunkID;
typedef struct { typedef struct {
size_t start; size_t start_;
size_t end; size_t end_;
const uint8_t* buf; const uint8_t* buf_;
} MemBuffer; } MemBuffer;
typedef struct { typedef struct {
size_t offset; size_t offset_;
size_t size; size_t size_;
const uint8_t* payload; const uint8_t* payload_;
ChunkID id; ChunkID id_;
} ChunkData; } ChunkData;
typedef struct WebPInfo { typedef struct WebPInfo {
int canvas_width; int canvas_width_;
int canvas_height; int canvas_height_;
int loop_count; int loop_count_;
int num_frames; int num_frames_;
int chunk_counts[CHUNK_TYPES]; int chunk_counts_[CHUNK_TYPES];
int anmf_subchunk_counts[3]; // 0 VP8; 1 VP8L; 2 ALPH. int anmf_subchunk_counts_[3]; // 0 VP8; 1 VP8L; 2 ALPH.
uint32_t bgcolor; uint32_t bgcolor_;
int feature_flags; int feature_flags_;
int has_alpha; int has_alpha_;
// Used for parsing ANMF chunks. // Used for parsing ANMF chunks.
int frame_width, frame_height; int frame_width_, frame_height_;
size_t anim_frame_data_size; size_t anim_frame_data_size_;
int is_processing_anim_frame, seen_alpha_subchunk, seen_image_subchunk; int is_processing_anim_frame_, seen_alpha_subchunk_, seen_image_subchunk_;
// Print output control. // Print output control.
int quiet, show_diagnosis, show_summary; int quiet_, show_diagnosis_, show_summary_;
int num_warnings; int num_warnings_;
int parse_bitstream; int parse_bitstream_;
} WebPInfo; } WebPInfo;
static void WebPInfoInit(WebPInfo* const webp_info) { static void WebPInfoInit(WebPInfo* const webp_info) {
@ -185,25 +184,25 @@ static int ReadFileToWebPData(const char* const filename,
// MemBuffer object. // MemBuffer object.
static void InitMemBuffer(MemBuffer* const mem, const WebPData* webp_data) { static void InitMemBuffer(MemBuffer* const mem, const WebPData* webp_data) {
mem->buf = webp_data->bytes; mem->buf_ = webp_data->bytes;
mem->start = 0; mem->start_ = 0;
mem->end = webp_data->size; mem->end_ = webp_data->size;
} }
static size_t MemDataSize(const MemBuffer* const mem) { static size_t MemDataSize(const MemBuffer* const mem) {
return (mem->end - mem->start); return (mem->end_ - mem->start_);
} }
static const uint8_t* GetBuffer(MemBuffer* const mem) { static const uint8_t* GetBuffer(MemBuffer* const mem) {
return mem->buf + mem->start; return mem->buf_ + mem->start_;
} }
static void Skip(MemBuffer* const mem, size_t size) { static void Skip(MemBuffer* const mem, size_t size) {
mem->start += size; mem->start_ += size;
} }
static uint32_t ReadMemBufLE32(MemBuffer* const mem) { static uint32_t ReadMemBufLE32(MemBuffer* const mem) {
const uint8_t* const data = mem->buf + mem->start; const uint8_t* const data = mem->buf_ + mem->start_;
const uint32_t val = GetLE32(data); const uint32_t val = GetLE32(data);
assert(MemDataSize(mem) >= 4); assert(MemDataSize(mem) >= 4);
Skip(mem, 4); Skip(mem, 4);
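
These small helpers are all webpinfo needs to walk the RIFF container: each chunk starts with a four-byte tag and a four-byte little-endian payload size, and odd-sized payloads are padded to an even length, exactly as ParseChunk does further down. A hypothetical ListChunks() helper showing that walk with the routines defined above; it is a sketch, not part of the tool:

// Hypothetical helper reusing the MemBuffer routines above (not part of webpinfo).
static void ListChunks(MemBuffer* const mem) {
  while (MemDataSize(mem) >= CHUNK_HEADER_SIZE) {
    const uint32_t fourcc = ReadMemBufLE32(mem);                     // chunk tag
    const uint32_t payload_size = ReadMemBufLE32(mem);
    const uint32_t padded_size = payload_size + (payload_size & 1);  // even padding
    printf("chunk %c%c%c%c, payload %u bytes\n",
           (char)(fourcc & 0xff), (char)((fourcc >> 8) & 0xff),
           (char)((fourcc >> 16) & 0xff), (char)((fourcc >> 24) & 0xff),
           (unsigned)payload_size);
    if (MemDataSize(mem) < padded_size) break;                       // truncated file
    Skip(mem, padded_size);
  }
}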
@ -334,8 +333,8 @@ static WebPInfoStatus ParseLossyFilterHeader(const WebPInfo* const webp_info,
static WebPInfoStatus ParseLossyHeader(const ChunkData* const chunk_data, static WebPInfoStatus ParseLossyHeader(const ChunkData* const chunk_data,
const WebPInfo* const webp_info) { const WebPInfo* const webp_info) {
const uint8_t* data = chunk_data->payload; const uint8_t* data = chunk_data->payload_;
size_t data_size = chunk_data->size - CHUNK_HEADER_SIZE; size_t data_size = chunk_data->size_ - CHUNK_HEADER_SIZE;
const uint32_t bits = (uint32_t)data[0] | (data[1] << 8) | (data[2] << 16); const uint32_t bits = (uint32_t)data[0] | (data[1] << 8) | (data[2] << 16);
const int key_frame = !(bits & 1); const int key_frame = !(bits & 1);
const int profile = (bits >> 1) & 7; const int profile = (bits >> 1) & 7;
@ -347,7 +346,7 @@ static WebPInfoStatus ParseLossyHeader(const ChunkData* const chunk_data,
int colorspace, clamp_type; int colorspace, clamp_type;
printf(" Parsing lossy bitstream...\n"); printf(" Parsing lossy bitstream...\n");
// Calling WebPGetFeatures() in ProcessImageChunk() should ensure this. // Calling WebPGetFeatures() in ProcessImageChunk() should ensure this.
assert(chunk_data->size >= CHUNK_HEADER_SIZE + 10); assert(chunk_data->size_ >= CHUNK_HEADER_SIZE + 10);
if (profile > 3) { if (profile > 3) {
LOG_ERROR("Unknown profile."); LOG_ERROR("Unknown profile.");
return WEBP_INFO_BITSTREAM_ERROR; return WEBP_INFO_BITSTREAM_ERROR;
@ -358,12 +357,12 @@ static WebPInfoStatus ParseLossyHeader(const ChunkData* const chunk_data,
} }
data += 3; data += 3;
data_size -= 3; data_size -= 3;
printf( printf(" Key frame: %s\n"
" Key frame: %s\n" " Profile: %d\n"
" Profile: %d\n" " Display: %s\n"
" Display: Yes\n" " Part. 0 length: %d\n",
" Part. 0 length: %d\n", key_frame ? "Yes" : "No", profile,
key_frame ? "Yes" : "No", profile, partition0_length); display ? "Yes" : "No", partition0_length);
if (key_frame) { if (key_frame) {
if (!(data[0] == 0x9d && data[1] == 0x01 && data[2] == 0x2a)) { if (!(data[0] == 0x9d && data[1] == 0x01 && data[2] == 0x2a)) {
LOG_ERROR("Invalid lossy bitstream signature."); LOG_ERROR("Invalid lossy bitstream signature.");
@ -505,8 +504,8 @@ static WebPInfoStatus ParseLosslessTransform(WebPInfo* const webp_info,
static WebPInfoStatus ParseLosslessHeader(const ChunkData* const chunk_data, static WebPInfoStatus ParseLosslessHeader(const ChunkData* const chunk_data,
WebPInfo* const webp_info) { WebPInfo* const webp_info) {
const uint8_t* data = chunk_data->payload; const uint8_t* data = chunk_data->payload_;
size_t data_size = chunk_data->size - CHUNK_HEADER_SIZE; size_t data_size = chunk_data->size_ - CHUNK_HEADER_SIZE;
uint64_t bit_position = 0; uint64_t bit_position = 0;
uint64_t* const bit_pos = &bit_position; uint64_t* const bit_pos = &bit_position;
WebPInfoStatus status; WebPInfoStatus status;
@ -541,8 +540,8 @@ static WebPInfoStatus ParseLosslessHeader(const ChunkData* const chunk_data,
static WebPInfoStatus ParseAlphaHeader(const ChunkData* const chunk_data, static WebPInfoStatus ParseAlphaHeader(const ChunkData* const chunk_data,
WebPInfo* const webp_info) { WebPInfo* const webp_info) {
const uint8_t* data = chunk_data->payload; const uint8_t* data = chunk_data->payload_;
size_t data_size = chunk_data->size - CHUNK_HEADER_SIZE; size_t data_size = chunk_data->size_ - CHUNK_HEADER_SIZE;
if (data_size <= ALPHA_HEADER_LEN) { if (data_size <= ALPHA_HEADER_LEN) {
LOG_ERROR("Truncated ALPH chunk."); LOG_ERROR("Truncated ALPH chunk.");
return WEBP_INFO_TRUNCATED_DATA; return WEBP_INFO_TRUNCATED_DATA;
@ -607,14 +606,14 @@ static WebPInfoStatus ParseRIFFHeader(WebPInfo* const webp_info,
return WEBP_INFO_PARSE_ERROR; return WEBP_INFO_PARSE_ERROR;
} }
riff_size += CHUNK_HEADER_SIZE; riff_size += CHUNK_HEADER_SIZE;
if (!webp_info->quiet) { if (!webp_info->quiet_) {
printf("RIFF HEADER:\n"); printf("RIFF HEADER:\n");
printf(" File size: %6d\n", (int)riff_size); printf(" File size: %6d\n", (int)riff_size);
} }
if (riff_size < mem->end) { if (riff_size < mem->end_) {
LOG_WARN("RIFF size is smaller than the file size."); LOG_WARN("RIFF size is smaller than the file size.");
mem->end = riff_size; mem->end_ = riff_size;
} else if (riff_size > mem->end) { } else if (riff_size > mem->end_) {
LOG_ERROR("Truncated data detected when parsing RIFF payload."); LOG_ERROR("Truncated data detected when parsing RIFF payload.");
return WEBP_INFO_TRUNCATED_DATA; return WEBP_INFO_TRUNCATED_DATA;
} }
@ -630,7 +629,7 @@ static WebPInfoStatus ParseChunk(const WebPInfo* const webp_info,
LOG_ERROR("Truncated data detected when parsing chunk header."); LOG_ERROR("Truncated data detected when parsing chunk header.");
return WEBP_INFO_TRUNCATED_DATA; return WEBP_INFO_TRUNCATED_DATA;
} else { } else {
const size_t chunk_start_offset = mem->start; const size_t chunk_start_offset = mem->start_;
const uint32_t fourcc = ReadMemBufLE32(mem); const uint32_t fourcc = ReadMemBufLE32(mem);
const uint32_t payload_size = ReadMemBufLE32(mem); const uint32_t payload_size = ReadMemBufLE32(mem);
const uint32_t payload_size_padded = payload_size + (payload_size & 1); const uint32_t payload_size_padded = payload_size + (payload_size & 1);
@ -647,11 +646,11 @@ static WebPInfoStatus ParseChunk(const WebPInfo* const webp_info,
for (i = 0; i < CHUNK_TYPES; ++i) { for (i = 0; i < CHUNK_TYPES; ++i) {
if (kWebPChunkTags[i] == fourcc) break; if (kWebPChunkTags[i] == fourcc) break;
} }
chunk_data->offset = chunk_start_offset; chunk_data->offset_ = chunk_start_offset;
chunk_data->size = chunk_size; chunk_data->size_ = chunk_size;
chunk_data->id = (ChunkID)i; chunk_data->id_ = (ChunkID)i;
chunk_data->payload = GetBuffer(mem); chunk_data->payload_ = GetBuffer(mem);
if (chunk_data->id == CHUNK_ANMF) { if (chunk_data->id_ == CHUNK_ANMF) {
if (payload_size != payload_size_padded) { if (payload_size != payload_size_padded) {
LOG_ERROR("ANMF chunk size should always be even."); LOG_ERROR("ANMF chunk size should always be even.");
return WEBP_INFO_PARSE_ERROR; return WEBP_INFO_PARSE_ERROR;
@ -670,39 +669,39 @@ static WebPInfoStatus ParseChunk(const WebPInfo* const webp_info,
static WebPInfoStatus ProcessVP8XChunk(const ChunkData* const chunk_data, static WebPInfoStatus ProcessVP8XChunk(const ChunkData* const chunk_data,
WebPInfo* const webp_info) { WebPInfo* const webp_info) {
const uint8_t* data = chunk_data->payload; const uint8_t* data = chunk_data->payload_;
if (webp_info->chunk_counts[CHUNK_VP8] || if (webp_info->chunk_counts_[CHUNK_VP8] ||
webp_info->chunk_counts[CHUNK_VP8L] || webp_info->chunk_counts_[CHUNK_VP8L] ||
webp_info->chunk_counts[CHUNK_VP8X]) { webp_info->chunk_counts_[CHUNK_VP8X]) {
LOG_ERROR("Already seen a VP8/VP8L/VP8X chunk when parsing VP8X chunk."); LOG_ERROR("Already seen a VP8/VP8L/VP8X chunk when parsing VP8X chunk.");
return WEBP_INFO_PARSE_ERROR; return WEBP_INFO_PARSE_ERROR;
} }
if (chunk_data->size != VP8X_CHUNK_SIZE + CHUNK_HEADER_SIZE) { if (chunk_data->size_ != VP8X_CHUNK_SIZE + CHUNK_HEADER_SIZE) {
LOG_ERROR("Corrupted VP8X chunk."); LOG_ERROR("Corrupted VP8X chunk.");
return WEBP_INFO_PARSE_ERROR; return WEBP_INFO_PARSE_ERROR;
} }
++webp_info->chunk_counts[CHUNK_VP8X]; ++webp_info->chunk_counts_[CHUNK_VP8X];
webp_info->feature_flags = *data; webp_info->feature_flags_ = *data;
data += 4; data += 4;
webp_info->canvas_width = 1 + ReadLE24(&data); webp_info->canvas_width_ = 1 + ReadLE24(&data);
webp_info->canvas_height = 1 + ReadLE24(&data); webp_info->canvas_height_ = 1 + ReadLE24(&data);
if (!webp_info->quiet) { if (!webp_info->quiet_) {
printf(" ICCP: %d\n Alpha: %d\n EXIF: %d\n XMP: %d\n Animation: %d\n", printf(" ICCP: %d\n Alpha: %d\n EXIF: %d\n XMP: %d\n Animation: %d\n",
(webp_info->feature_flags & ICCP_FLAG) != 0, (webp_info->feature_flags_ & ICCP_FLAG) != 0,
(webp_info->feature_flags & ALPHA_FLAG) != 0, (webp_info->feature_flags_ & ALPHA_FLAG) != 0,
(webp_info->feature_flags & EXIF_FLAG) != 0, (webp_info->feature_flags_ & EXIF_FLAG) != 0,
(webp_info->feature_flags & XMP_FLAG) != 0, (webp_info->feature_flags_ & XMP_FLAG) != 0,
(webp_info->feature_flags & ANIMATION_FLAG) != 0); (webp_info->feature_flags_ & ANIMATION_FLAG) != 0);
printf(" Canvas size %d x %d\n", printf(" Canvas size %d x %d\n",
webp_info->canvas_width, webp_info->canvas_height); webp_info->canvas_width_, webp_info->canvas_height_);
} }
if (webp_info->canvas_width > MAX_CANVAS_SIZE) { if (webp_info->canvas_width_ > MAX_CANVAS_SIZE) {
LOG_WARN("Canvas width is out of range in VP8X chunk."); LOG_WARN("Canvas width is out of range in VP8X chunk.");
} }
if (webp_info->canvas_height > MAX_CANVAS_SIZE) { if (webp_info->canvas_height_ > MAX_CANVAS_SIZE) {
LOG_WARN("Canvas height is out of range in VP8X chunk."); LOG_WARN("Canvas height is out of range in VP8X chunk.");
} }
if ((uint64_t)webp_info->canvas_width * webp_info->canvas_height > if ((uint64_t)webp_info->canvas_width_ * webp_info->canvas_height_ >
MAX_IMAGE_AREA) { MAX_IMAGE_AREA) {
LOG_WARN("Canvas area is out of range in VP8X chunk."); LOG_WARN("Canvas area is out of range in VP8X chunk.");
} }
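
The VP8X canvas dimensions are stored minus one as 24-bit little-endian values right after the flags byte and three reserved bytes, which is why the parser adds 1 to each ReadLE24() result. A worked example with made-up payload bytes; the LE24() helper below mirrors what ReadLE24() computes:

#include <stdio.h>
#include <stdint.h>

// Illustrative 24-bit little-endian read (ReadLE24 above also advances its pointer).
static uint32_t LE24(const uint8_t* p) {
  return (uint32_t)p[0] | ((uint32_t)p[1] << 8) | ((uint32_t)p[2] << 16);
}

int main(void) {
  const uint8_t wh[6] = { 0x3F, 0x01, 0x00,    /* width  - 1 = 319 */
                          0xEF, 0x00, 0x00 };  /* height - 1 = 239 */
  printf("canvas: %u x %u\n", 1 + LE24(wh), 1 + LE24(wh + 3));  // 320 x 240
  return 0;
}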
@ -711,27 +710,27 @@ static WebPInfoStatus ProcessVP8XChunk(const ChunkData* const chunk_data,
static WebPInfoStatus ProcessANIMChunk(const ChunkData* const chunk_data, static WebPInfoStatus ProcessANIMChunk(const ChunkData* const chunk_data,
WebPInfo* const webp_info) { WebPInfo* const webp_info) {
const uint8_t* data = chunk_data->payload; const uint8_t* data = chunk_data->payload_;
if (!webp_info->chunk_counts[CHUNK_VP8X]) { if (!webp_info->chunk_counts_[CHUNK_VP8X]) {
LOG_ERROR("ANIM chunk detected before VP8X chunk."); LOG_ERROR("ANIM chunk detected before VP8X chunk.");
return WEBP_INFO_PARSE_ERROR; return WEBP_INFO_PARSE_ERROR;
} }
if (chunk_data->size != ANIM_CHUNK_SIZE + CHUNK_HEADER_SIZE) { if (chunk_data->size_ != ANIM_CHUNK_SIZE + CHUNK_HEADER_SIZE) {
LOG_ERROR("Corrupted ANIM chunk."); LOG_ERROR("Corrupted ANIM chunk.");
return WEBP_INFO_PARSE_ERROR; return WEBP_INFO_PARSE_ERROR;
} }
webp_info->bgcolor = ReadLE32(&data); webp_info->bgcolor_ = ReadLE32(&data);
webp_info->loop_count = ReadLE16(&data); webp_info->loop_count_ = ReadLE16(&data);
++webp_info->chunk_counts[CHUNK_ANIM]; ++webp_info->chunk_counts_[CHUNK_ANIM];
if (!webp_info->quiet) { if (!webp_info->quiet_) {
printf(" Background color:(ARGB) %02x %02x %02x %02x\n", printf(" Background color:(ARGB) %02x %02x %02x %02x\n",
(webp_info->bgcolor >> 24) & 0xff, (webp_info->bgcolor_ >> 24) & 0xff,
(webp_info->bgcolor >> 16) & 0xff, (webp_info->bgcolor_ >> 16) & 0xff,
(webp_info->bgcolor >> 8) & 0xff, (webp_info->bgcolor_ >> 8) & 0xff,
webp_info->bgcolor & 0xff); webp_info->bgcolor_ & 0xff);
printf(" Loop count : %d\n", webp_info->loop_count); printf(" Loop count : %d\n", webp_info->loop_count_);
} }
if (webp_info->loop_count > MAX_LOOP_COUNT) { if (webp_info->loop_count_ > MAX_LOOP_COUNT) {
LOG_WARN("Loop count is out of range in ANIM chunk."); LOG_WARN("Loop count is out of range in ANIM chunk.");
} }
return WEBP_INFO_OK; return WEBP_INFO_OK;
@ -739,17 +738,17 @@ static WebPInfoStatus ProcessANIMChunk(const ChunkData* const chunk_data,
static WebPInfoStatus ProcessANMFChunk(const ChunkData* const chunk_data, static WebPInfoStatus ProcessANMFChunk(const ChunkData* const chunk_data,
WebPInfo* const webp_info) { WebPInfo* const webp_info) {
const uint8_t* data = chunk_data->payload; const uint8_t* data = chunk_data->payload_;
int offset_x, offset_y, width, height, duration, blend, dispose, temp; int offset_x, offset_y, width, height, duration, blend, dispose, temp;
if (webp_info->is_processing_anim_frame) { if (webp_info->is_processing_anim_frame_) {
LOG_ERROR("ANMF chunk detected within another ANMF chunk."); LOG_ERROR("ANMF chunk detected within another ANMF chunk.");
return WEBP_INFO_PARSE_ERROR; return WEBP_INFO_PARSE_ERROR;
} }
if (!webp_info->chunk_counts[CHUNK_ANIM]) { if (!webp_info->chunk_counts_[CHUNK_ANIM]) {
LOG_ERROR("ANMF chunk detected before ANIM chunk."); LOG_ERROR("ANMF chunk detected before ANIM chunk.");
return WEBP_INFO_PARSE_ERROR; return WEBP_INFO_PARSE_ERROR;
} }
if (chunk_data->size <= CHUNK_HEADER_SIZE + ANMF_CHUNK_SIZE) { if (chunk_data->size_ <= CHUNK_HEADER_SIZE + ANMF_CHUNK_SIZE) {
LOG_ERROR("Truncated data detected when parsing ANMF chunk."); LOG_ERROR("Truncated data detected when parsing ANMF chunk.");
return WEBP_INFO_TRUNCATED_DATA; return WEBP_INFO_TRUNCATED_DATA;
} }
@ -761,8 +760,8 @@ static WebPInfoStatus ProcessANMFChunk(const ChunkData* const chunk_data,
temp = *data; temp = *data;
dispose = temp & 1; dispose = temp & 1;
blend = (temp >> 1) & 1; blend = (temp >> 1) & 1;
++webp_info->chunk_counts[CHUNK_ANMF]; ++webp_info->chunk_counts_[CHUNK_ANMF];
if (!webp_info->quiet) { if (!webp_info->quiet_) {
printf(" Offset_X: %d\n Offset_Y: %d\n Width: %d\n Height: %d\n" printf(" Offset_X: %d\n Offset_Y: %d\n Width: %d\n Height: %d\n"
" Duration: %d\n Dispose: %d\n Blend: %d\n", " Duration: %d\n Dispose: %d\n Blend: %d\n",
offset_x, offset_y, width, height, duration, dispose, blend); offset_x, offset_y, width, height, duration, dispose, blend);
@ -775,92 +774,92 @@ static WebPInfoStatus ProcessANMFChunk(const ChunkData* const chunk_data,
LOG_ERROR("Invalid offset parameters in ANMF chunk."); LOG_ERROR("Invalid offset parameters in ANMF chunk.");
return WEBP_INFO_INVALID_PARAM; return WEBP_INFO_INVALID_PARAM;
} }
if ((uint64_t)offset_x + width > (uint64_t)webp_info->canvas_width || if ((uint64_t)offset_x + width > (uint64_t)webp_info->canvas_width_ ||
(uint64_t)offset_y + height > (uint64_t)webp_info->canvas_height) { (uint64_t)offset_y + height > (uint64_t)webp_info->canvas_height_) {
LOG_ERROR("Frame exceeds canvas in ANMF chunk."); LOG_ERROR("Frame exceeds canvas in ANMF chunk.");
return WEBP_INFO_INVALID_PARAM; return WEBP_INFO_INVALID_PARAM;
} }
webp_info->is_processing_anim_frame = 1; webp_info->is_processing_anim_frame_ = 1;
webp_info->seen_alpha_subchunk = 0; webp_info->seen_alpha_subchunk_ = 0;
webp_info->seen_image_subchunk = 0; webp_info->seen_image_subchunk_ = 0;
webp_info->frame_width = width; webp_info->frame_width_ = width;
webp_info->frame_height = height; webp_info->frame_height_ = height;
webp_info->anim_frame_data_size = webp_info->anim_frame_data_size_ =
chunk_data->size - CHUNK_HEADER_SIZE - ANMF_CHUNK_SIZE; chunk_data->size_ - CHUNK_HEADER_SIZE - ANMF_CHUNK_SIZE;
return WEBP_INFO_OK; return WEBP_INFO_OK;
} }
static WebPInfoStatus ProcessImageChunk(const ChunkData* const chunk_data, static WebPInfoStatus ProcessImageChunk(const ChunkData* const chunk_data,
WebPInfo* const webp_info) { WebPInfo* const webp_info) {
const uint8_t* data = chunk_data->payload - CHUNK_HEADER_SIZE; const uint8_t* data = chunk_data->payload_ - CHUNK_HEADER_SIZE;
WebPBitstreamFeatures features; WebPBitstreamFeatures features;
const VP8StatusCode vp8_status = const VP8StatusCode vp8_status =
WebPGetFeatures(data, chunk_data->size, &features); WebPGetFeatures(data, chunk_data->size_, &features);
if (vp8_status != VP8_STATUS_OK) { if (vp8_status != VP8_STATUS_OK) {
LOG_ERROR("VP8/VP8L bitstream error."); LOG_ERROR("VP8/VP8L bitstream error.");
return WEBP_INFO_BITSTREAM_ERROR; return WEBP_INFO_BITSTREAM_ERROR;
} }
if (!webp_info->quiet) { if (!webp_info->quiet_) {
assert(features.format >= 0 && features.format <= 2); assert(features.format >= 0 && features.format <= 2);
printf(" Width: %d\n Height: %d\n Alpha: %d\n Animation: %d\n" printf(" Width: %d\n Height: %d\n Alpha: %d\n Animation: %d\n"
" Format: %s (%d)\n", " Format: %s (%d)\n",
features.width, features.height, features.has_alpha, features.width, features.height, features.has_alpha,
features.has_animation, kFormats[features.format], features.format); features.has_animation, kFormats[features.format], features.format);
} }
if (webp_info->is_processing_anim_frame) { if (webp_info->is_processing_anim_frame_) {
++webp_info->anmf_subchunk_counts[chunk_data->id == CHUNK_VP8 ? 0 : 1]; ++webp_info->anmf_subchunk_counts_[chunk_data->id_ == CHUNK_VP8 ? 0 : 1];
if (chunk_data->id == CHUNK_VP8L && webp_info->seen_alpha_subchunk) { if (chunk_data->id_ == CHUNK_VP8L && webp_info->seen_alpha_subchunk_) {
LOG_ERROR("Both VP8L and ALPH sub-chunks are present in an ANMF chunk."); LOG_ERROR("Both VP8L and ALPH sub-chunks are present in an ANMF chunk.");
return WEBP_INFO_PARSE_ERROR; return WEBP_INFO_PARSE_ERROR;
} }
if (webp_info->frame_width != features.width || if (webp_info->frame_width_ != features.width ||
webp_info->frame_height != features.height) { webp_info->frame_height_ != features.height) {
LOG_ERROR("Frame size in VP8/VP8L sub-chunk differs from ANMF header."); LOG_ERROR("Frame size in VP8/VP8L sub-chunk differs from ANMF header.");
return WEBP_INFO_PARSE_ERROR; return WEBP_INFO_PARSE_ERROR;
} }
if (webp_info->seen_image_subchunk) { if (webp_info->seen_image_subchunk_) {
LOG_ERROR("Consecutive VP8/VP8L sub-chunks in an ANMF chunk."); LOG_ERROR("Consecutive VP8/VP8L sub-chunks in an ANMF chunk.");
return WEBP_INFO_PARSE_ERROR; return WEBP_INFO_PARSE_ERROR;
} }
webp_info->seen_image_subchunk = 1; webp_info->seen_image_subchunk_ = 1;
} else { } else {
if (webp_info->chunk_counts[CHUNK_VP8] || if (webp_info->chunk_counts_[CHUNK_VP8] ||
webp_info->chunk_counts[CHUNK_VP8L]) { webp_info->chunk_counts_[CHUNK_VP8L]) {
LOG_ERROR("Multiple VP8/VP8L chunks detected."); LOG_ERROR("Multiple VP8/VP8L chunks detected.");
return WEBP_INFO_PARSE_ERROR; return WEBP_INFO_PARSE_ERROR;
} }
if (chunk_data->id == CHUNK_VP8L && if (chunk_data->id_ == CHUNK_VP8L &&
webp_info->chunk_counts[CHUNK_ALPHA]) { webp_info->chunk_counts_[CHUNK_ALPHA]) {
LOG_WARN("Both VP8L and ALPH chunks are detected."); LOG_WARN("Both VP8L and ALPH chunks are detected.");
} }
if (webp_info->chunk_counts[CHUNK_ANIM] || if (webp_info->chunk_counts_[CHUNK_ANIM] ||
webp_info->chunk_counts[CHUNK_ANMF]) { webp_info->chunk_counts_[CHUNK_ANMF]) {
LOG_ERROR("VP8/VP8L chunk and ANIM/ANMF chunk are both detected."); LOG_ERROR("VP8/VP8L chunk and ANIM/ANMF chunk are both detected.");
return WEBP_INFO_PARSE_ERROR; return WEBP_INFO_PARSE_ERROR;
} }
if (webp_info->chunk_counts[CHUNK_VP8X]) { if (webp_info->chunk_counts_[CHUNK_VP8X]) {
if (webp_info->canvas_width != features.width || if (webp_info->canvas_width_ != features.width ||
webp_info->canvas_height != features.height) { webp_info->canvas_height_ != features.height) {
LOG_ERROR("Image size in VP8/VP8L chunk differs from VP8X chunk."); LOG_ERROR("Image size in VP8/VP8L chunk differs from VP8X chunk.");
return WEBP_INFO_PARSE_ERROR; return WEBP_INFO_PARSE_ERROR;
} }
} else { } else {
webp_info->canvas_width = features.width; webp_info->canvas_width_ = features.width;
webp_info->canvas_height = features.height; webp_info->canvas_height_ = features.height;
if (webp_info->canvas_width < 1 || webp_info->canvas_height < 1 || if (webp_info->canvas_width_ < 1 || webp_info->canvas_height_ < 1 ||
webp_info->canvas_width > MAX_CANVAS_SIZE || webp_info->canvas_width_ > MAX_CANVAS_SIZE ||
webp_info->canvas_height > MAX_CANVAS_SIZE || webp_info->canvas_height_ > MAX_CANVAS_SIZE ||
(uint64_t)webp_info->canvas_width * webp_info->canvas_height > (uint64_t)webp_info->canvas_width_ * webp_info->canvas_height_ >
MAX_IMAGE_AREA) { MAX_IMAGE_AREA) {
LOG_WARN("Invalid parameters in VP8/VP8L chunk."); LOG_WARN("Invalid parameters in VP8/VP8L chunk.");
} }
} }
++webp_info->chunk_counts[chunk_data->id]; ++webp_info->chunk_counts_[chunk_data->id_];
} }
++webp_info->num_frames; ++webp_info->num_frames_;
webp_info->has_alpha |= features.has_alpha; webp_info->has_alpha_ |= features.has_alpha;
if (webp_info->parse_bitstream) { if (webp_info->parse_bitstream_) {
const int is_lossy = (chunk_data->id == CHUNK_VP8); const int is_lossy = (chunk_data->id_ == CHUNK_VP8);
const WebPInfoStatus status = const WebPInfoStatus status =
is_lossy ? ParseLossyHeader(chunk_data, webp_info) is_lossy ? ParseLossyHeader(chunk_data, webp_info)
: ParseLosslessHeader(chunk_data, webp_info); : ParseLosslessHeader(chunk_data, webp_info);
@ -871,41 +870,41 @@ static WebPInfoStatus ProcessImageChunk(const ChunkData* const chunk_data,
static WebPInfoStatus ProcessALPHChunk(const ChunkData* const chunk_data, static WebPInfoStatus ProcessALPHChunk(const ChunkData* const chunk_data,
WebPInfo* const webp_info) { WebPInfo* const webp_info) {
if (webp_info->is_processing_anim_frame) { if (webp_info->is_processing_anim_frame_) {
++webp_info->anmf_subchunk_counts[2]; ++webp_info->anmf_subchunk_counts_[2];
if (webp_info->seen_alpha_subchunk) { if (webp_info->seen_alpha_subchunk_) {
LOG_ERROR("Consecutive ALPH sub-chunks in an ANMF chunk."); LOG_ERROR("Consecutive ALPH sub-chunks in an ANMF chunk.");
return WEBP_INFO_PARSE_ERROR; return WEBP_INFO_PARSE_ERROR;
} }
webp_info->seen_alpha_subchunk = 1; webp_info->seen_alpha_subchunk_ = 1;
if (webp_info->seen_image_subchunk) { if (webp_info->seen_image_subchunk_) {
LOG_ERROR("ALPHA sub-chunk detected after VP8 sub-chunk " LOG_ERROR("ALPHA sub-chunk detected after VP8 sub-chunk "
"in an ANMF chunk."); "in an ANMF chunk.");
return WEBP_INFO_PARSE_ERROR; return WEBP_INFO_PARSE_ERROR;
} }
} else { } else {
if (webp_info->chunk_counts[CHUNK_ANIM] || if (webp_info->chunk_counts_[CHUNK_ANIM] ||
webp_info->chunk_counts[CHUNK_ANMF]) { webp_info->chunk_counts_[CHUNK_ANMF]) {
LOG_ERROR("ALPHA chunk and ANIM/ANMF chunk are both detected."); LOG_ERROR("ALPHA chunk and ANIM/ANMF chunk are both detected.");
return WEBP_INFO_PARSE_ERROR; return WEBP_INFO_PARSE_ERROR;
} }
if (!webp_info->chunk_counts[CHUNK_VP8X]) { if (!webp_info->chunk_counts_[CHUNK_VP8X]) {
LOG_ERROR("ALPHA chunk detected before VP8X chunk."); LOG_ERROR("ALPHA chunk detected before VP8X chunk.");
return WEBP_INFO_PARSE_ERROR; return WEBP_INFO_PARSE_ERROR;
} }
if (webp_info->chunk_counts[CHUNK_VP8]) { if (webp_info->chunk_counts_[CHUNK_VP8]) {
LOG_ERROR("ALPHA chunk detected after VP8 chunk."); LOG_ERROR("ALPHA chunk detected after VP8 chunk.");
return WEBP_INFO_PARSE_ERROR; return WEBP_INFO_PARSE_ERROR;
} }
if (webp_info->chunk_counts[CHUNK_ALPHA]) { if (webp_info->chunk_counts_[CHUNK_ALPHA]) {
LOG_ERROR("Multiple ALPHA chunks detected."); LOG_ERROR("Multiple ALPHA chunks detected.");
return WEBP_INFO_PARSE_ERROR; return WEBP_INFO_PARSE_ERROR;
} }
++webp_info->chunk_counts[CHUNK_ALPHA]; ++webp_info->chunk_counts_[CHUNK_ALPHA];
} }
webp_info->has_alpha = 1; webp_info->has_alpha_ = 1;
if (webp_info->parse_bitstream) { if (webp_info->parse_bitstream_) {
const WebPInfoStatus status = ParseAlphaHeader(chunk_data, webp_info); const WebPInfoStatus status = ParseAlphaHeader(chunk_data, webp_info);
if (status != WEBP_INFO_OK) return status; if (status != WEBP_INFO_OK) return status;
} }
@ -915,41 +914,41 @@ static WebPInfoStatus ProcessALPHChunk(const ChunkData* const chunk_data,
static WebPInfoStatus ProcessICCPChunk(const ChunkData* const chunk_data, static WebPInfoStatus ProcessICCPChunk(const ChunkData* const chunk_data,
WebPInfo* const webp_info) { WebPInfo* const webp_info) {
(void)chunk_data; (void)chunk_data;
if (!webp_info->chunk_counts[CHUNK_VP8X]) { if (!webp_info->chunk_counts_[CHUNK_VP8X]) {
LOG_ERROR("ICCP chunk detected before VP8X chunk."); LOG_ERROR("ICCP chunk detected before VP8X chunk.");
return WEBP_INFO_PARSE_ERROR; return WEBP_INFO_PARSE_ERROR;
} }
if (webp_info->chunk_counts[CHUNK_VP8] || if (webp_info->chunk_counts_[CHUNK_VP8] ||
webp_info->chunk_counts[CHUNK_VP8L] || webp_info->chunk_counts_[CHUNK_VP8L] ||
webp_info->chunk_counts[CHUNK_ANIM]) { webp_info->chunk_counts_[CHUNK_ANIM]) {
LOG_ERROR("ICCP chunk detected after image data."); LOG_ERROR("ICCP chunk detected after image data.");
return WEBP_INFO_PARSE_ERROR; return WEBP_INFO_PARSE_ERROR;
} }
++webp_info->chunk_counts[CHUNK_ICCP]; ++webp_info->chunk_counts_[CHUNK_ICCP];
return WEBP_INFO_OK; return WEBP_INFO_OK;
} }
static WebPInfoStatus ProcessChunk(const ChunkData* const chunk_data, static WebPInfoStatus ProcessChunk(const ChunkData* const chunk_data,
WebPInfo* const webp_info) { WebPInfo* const webp_info) {
WebPInfoStatus status = WEBP_INFO_OK; WebPInfoStatus status = WEBP_INFO_OK;
ChunkID id = chunk_data->id; ChunkID id = chunk_data->id_;
if (chunk_data->id == CHUNK_UNKNOWN) { if (chunk_data->id_ == CHUNK_UNKNOWN) {
char error_message[50]; char error_message[50];
snprintf(error_message, 50, "Unknown chunk at offset %6d, length %6d", snprintf(error_message, 50, "Unknown chunk at offset %6d, length %6d",
(int)chunk_data->offset, (int)chunk_data->size); (int)chunk_data->offset_, (int)chunk_data->size_);
LOG_WARN(error_message); LOG_WARN(error_message);
} else { } else {
if (!webp_info->quiet) { if (!webp_info->quiet_) {
char tag[4]; char tag[4];
uint32_t fourcc = kWebPChunkTags[chunk_data->id]; uint32_t fourcc = kWebPChunkTags[chunk_data->id_];
#ifdef WORDS_BIGENDIAN #ifdef WORDS_BIGENDIAN
fourcc = (fourcc >> 24) | ((fourcc >> 8) & 0xff00) | fourcc = (fourcc >> 24) | ((fourcc >> 8) & 0xff00) |
((fourcc << 8) & 0xff0000) | (fourcc << 24); ((fourcc << 8) & 0xff0000) | (fourcc << 24);
#endif #endif
memcpy(tag, &fourcc, sizeof(tag)); memcpy(tag, &fourcc, sizeof(tag));
printf("Chunk %c%c%c%c at offset %6d, length %6d\n", printf("Chunk %c%c%c%c at offset %6d, length %6d\n",
tag[0], tag[1], tag[2], tag[3], (int)chunk_data->offset, tag[0], tag[1], tag[2], tag[3], (int)chunk_data->offset_,
(int)chunk_data->size); (int)chunk_data->size_);
} }
} }
switch (id) { switch (id) {
@ -974,21 +973,21 @@ static WebPInfoStatus ProcessChunk(const ChunkData* const chunk_data,
break; break;
case CHUNK_EXIF: case CHUNK_EXIF:
case CHUNK_XMP: case CHUNK_XMP:
++webp_info->chunk_counts[id]; ++webp_info->chunk_counts_[id];
break; break;
case CHUNK_UNKNOWN: case CHUNK_UNKNOWN:
default: default:
break; break;
} }
if (webp_info->is_processing_anim_frame && id != CHUNK_ANMF) { if (webp_info->is_processing_anim_frame_ && id != CHUNK_ANMF) {
if (webp_info->anim_frame_data_size == chunk_data->size) { if (webp_info->anim_frame_data_size_ == chunk_data->size_) {
if (!webp_info->seen_image_subchunk) { if (!webp_info->seen_image_subchunk_) {
LOG_ERROR("No VP8/VP8L chunk detected in an ANMF chunk."); LOG_ERROR("No VP8/VP8L chunk detected in an ANMF chunk.");
return WEBP_INFO_PARSE_ERROR; return WEBP_INFO_PARSE_ERROR;
} }
webp_info->is_processing_anim_frame = 0; webp_info->is_processing_anim_frame_ = 0;
} else if (webp_info->anim_frame_data_size > chunk_data->size) { } else if (webp_info->anim_frame_data_size_ > chunk_data->size_) {
webp_info->anim_frame_data_size -= chunk_data->size; webp_info->anim_frame_data_size_ -= chunk_data->size_;
} else { } else {
LOG_ERROR("Truncated data detected when parsing ANMF chunk."); LOG_ERROR("Truncated data detected when parsing ANMF chunk.");
return WEBP_INFO_TRUNCATED_DATA; return WEBP_INFO_TRUNCATED_DATA;
@ -998,55 +997,55 @@ static WebPInfoStatus ProcessChunk(const ChunkData* const chunk_data,
} }
static WebPInfoStatus Validate(WebPInfo* const webp_info) { static WebPInfoStatus Validate(WebPInfo* const webp_info) {
if (webp_info->num_frames < 1) { if (webp_info->num_frames_ < 1) {
LOG_ERROR("No image/frame detected."); LOG_ERROR("No image/frame detected.");
return WEBP_INFO_MISSING_DATA; return WEBP_INFO_MISSING_DATA;
} }
if (webp_info->chunk_counts[CHUNK_VP8X]) { if (webp_info->chunk_counts_[CHUNK_VP8X]) {
const int iccp = !!(webp_info->feature_flags & ICCP_FLAG); const int iccp = !!(webp_info->feature_flags_ & ICCP_FLAG);
const int exif = !!(webp_info->feature_flags & EXIF_FLAG); const int exif = !!(webp_info->feature_flags_ & EXIF_FLAG);
const int xmp = !!(webp_info->feature_flags & XMP_FLAG); const int xmp = !!(webp_info->feature_flags_ & XMP_FLAG);
const int animation = !!(webp_info->feature_flags & ANIMATION_FLAG); const int animation = !!(webp_info->feature_flags_ & ANIMATION_FLAG);
const int alpha = !!(webp_info->feature_flags & ALPHA_FLAG); const int alpha = !!(webp_info->feature_flags_ & ALPHA_FLAG);
if (!alpha && webp_info->has_alpha) { if (!alpha && webp_info->has_alpha_) {
LOG_ERROR("Unexpected alpha data detected."); LOG_ERROR("Unexpected alpha data detected.");
return WEBP_INFO_PARSE_ERROR; return WEBP_INFO_PARSE_ERROR;
} }
if (alpha && !webp_info->has_alpha) { if (alpha && !webp_info->has_alpha_) {
LOG_WARN("Alpha flag is set with no alpha data present."); LOG_WARN("Alpha flag is set with no alpha data present.");
} }
if (iccp && !webp_info->chunk_counts[CHUNK_ICCP]) { if (iccp && !webp_info->chunk_counts_[CHUNK_ICCP]) {
LOG_ERROR("Missing ICCP chunk."); LOG_ERROR("Missing ICCP chunk.");
return WEBP_INFO_MISSING_DATA; return WEBP_INFO_MISSING_DATA;
} }
if (exif && !webp_info->chunk_counts[CHUNK_EXIF]) { if (exif && !webp_info->chunk_counts_[CHUNK_EXIF]) {
LOG_ERROR("Missing EXIF chunk."); LOG_ERROR("Missing EXIF chunk.");
return WEBP_INFO_MISSING_DATA; return WEBP_INFO_MISSING_DATA;
} }
if (xmp && !webp_info->chunk_counts[CHUNK_XMP]) { if (xmp && !webp_info->chunk_counts_[CHUNK_XMP]) {
LOG_ERROR("Missing XMP chunk."); LOG_ERROR("Missing XMP chunk.");
return WEBP_INFO_MISSING_DATA; return WEBP_INFO_MISSING_DATA;
} }
if (!iccp && webp_info->chunk_counts[CHUNK_ICCP]) { if (!iccp && webp_info->chunk_counts_[CHUNK_ICCP]) {
LOG_ERROR("Unexpected ICCP chunk detected."); LOG_ERROR("Unexpected ICCP chunk detected.");
return WEBP_INFO_PARSE_ERROR; return WEBP_INFO_PARSE_ERROR;
} }
if (!exif && webp_info->chunk_counts[CHUNK_EXIF]) { if (!exif && webp_info->chunk_counts_[CHUNK_EXIF]) {
LOG_ERROR("Unexpected EXIF chunk detected."); LOG_ERROR("Unexpected EXIF chunk detected.");
return WEBP_INFO_PARSE_ERROR; return WEBP_INFO_PARSE_ERROR;
} }
if (!xmp && webp_info->chunk_counts[CHUNK_XMP]) { if (!xmp && webp_info->chunk_counts_[CHUNK_XMP]) {
LOG_ERROR("Unexpected XMP chunk detected."); LOG_ERROR("Unexpected XMP chunk detected.");
return WEBP_INFO_PARSE_ERROR; return WEBP_INFO_PARSE_ERROR;
} }
// Incomplete animation frame. // Incomplete animation frame.
if (webp_info->is_processing_anim_frame) return WEBP_INFO_MISSING_DATA; if (webp_info->is_processing_anim_frame_) return WEBP_INFO_MISSING_DATA;
if (!animation && webp_info->num_frames > 1) { if (!animation && webp_info->num_frames_ > 1) {
LOG_ERROR("More than 1 frame detected in non-animation file."); LOG_ERROR("More than 1 frame detected in non-animation file.");
return WEBP_INFO_PARSE_ERROR; return WEBP_INFO_PARSE_ERROR;
} }
if (animation && (!webp_info->chunk_counts[CHUNK_ANIM] || if (animation && (!webp_info->chunk_counts_[CHUNK_ANIM] ||
!webp_info->chunk_counts[CHUNK_ANMF])) { !webp_info->chunk_counts_[CHUNK_ANMF])) {
LOG_ERROR("No ANIM/ANMF chunk detected in animation file."); LOG_ERROR("No ANIM/ANMF chunk detected in animation file.");
return WEBP_INFO_PARSE_ERROR; return WEBP_INFO_PARSE_ERROR;
} }
@ -1057,17 +1056,17 @@ static WebPInfoStatus Validate(WebPInfo* const webp_info) {
static void ShowSummary(const WebPInfo* const webp_info) { static void ShowSummary(const WebPInfo* const webp_info) {
int i; int i;
printf("Summary:\n"); printf("Summary:\n");
printf("Number of frames: %d\n", webp_info->num_frames); printf("Number of frames: %d\n", webp_info->num_frames_);
printf("Chunk type : VP8 VP8L VP8X ALPH ANIM ANMF(VP8 /VP8L/ALPH) ICCP " printf("Chunk type : VP8 VP8L VP8X ALPH ANIM ANMF(VP8 /VP8L/ALPH) ICCP "
"EXIF XMP\n"); "EXIF XMP\n");
printf("Chunk counts: "); printf("Chunk counts: ");
for (i = 0; i < CHUNK_TYPES; ++i) { for (i = 0; i < CHUNK_TYPES; ++i) {
printf("%4d ", webp_info->chunk_counts[i]); printf("%4d ", webp_info->chunk_counts_[i]);
if (i == CHUNK_ANMF) { if (i == CHUNK_ANMF) {
printf("%4d %4d %4d ", printf("%4d %4d %4d ",
webp_info->anmf_subchunk_counts[0], webp_info->anmf_subchunk_counts_[0],
webp_info->anmf_subchunk_counts[1], webp_info->anmf_subchunk_counts_[1],
webp_info->anmf_subchunk_counts[2]); webp_info->anmf_subchunk_counts_[2]);
} }
} }
printf("\n"); printf("\n");
@ -1090,20 +1089,20 @@ static WebPInfoStatus AnalyzeWebP(WebPInfo* const webp_info,
webp_info_status = ProcessChunk(&chunk_data, webp_info); webp_info_status = ProcessChunk(&chunk_data, webp_info);
} }
if (webp_info_status != WEBP_INFO_OK) goto Error; if (webp_info_status != WEBP_INFO_OK) goto Error;
if (webp_info->show_summary) ShowSummary(webp_info); if (webp_info->show_summary_) ShowSummary(webp_info);
// Final check. // Final check.
webp_info_status = Validate(webp_info); webp_info_status = Validate(webp_info);
Error: Error:
if (!webp_info->quiet) { if (!webp_info->quiet_) {
if (webp_info_status == WEBP_INFO_OK) { if (webp_info_status == WEBP_INFO_OK) {
printf("No error detected.\n"); printf("No error detected.\n");
} else { } else {
printf("Errors detected.\n"); printf("Errors detected.\n");
} }
if (webp_info->num_warnings > 0) { if (webp_info->num_warnings_ > 0) {
printf("There were %d warning(s).\n", webp_info->num_warnings); printf("There were %d warning(s).\n", webp_info->num_warnings_);
} }
} }
return webp_info_status; return webp_info_status;
@ -1121,7 +1120,6 @@ static void Help(void) {
" -bitstream_info .... Parse bitstream header.\n"); " -bitstream_info .... Parse bitstream header.\n");
} }
// Returns EXIT_SUCCESS on success, EXIT_FAILURE on failure.
int main(int argc, const char* argv[]) { int main(int argc, const char* argv[]) {
int c, quiet = 0, show_diag = 0, show_summary = 0; int c, quiet = 0, show_diag = 0, show_summary = 0;
int parse_bitstream = 0; int parse_bitstream = 0;
@ -1132,7 +1130,7 @@ int main(int argc, const char* argv[]) {
if (argc == 1) { if (argc == 1) {
Help(); Help();
FREE_WARGV_AND_RETURN(EXIT_FAILURE); FREE_WARGV_AND_RETURN(WEBP_INFO_OK);
} }
// Parse command-line input. // Parse command-line input.
@ -1140,7 +1138,7 @@ int main(int argc, const char* argv[]) {
if (!strcmp(argv[c], "-h") || !strcmp(argv[c], "-help") || if (!strcmp(argv[c], "-h") || !strcmp(argv[c], "-help") ||
!strcmp(argv[c], "-H") || !strcmp(argv[c], "-longhelp")) { !strcmp(argv[c], "-H") || !strcmp(argv[c], "-longhelp")) {
Help(); Help();
FREE_WARGV_AND_RETURN(EXIT_SUCCESS); FREE_WARGV_AND_RETURN(WEBP_INFO_OK);
} else if (!strcmp(argv[c], "-quiet")) { } else if (!strcmp(argv[c], "-quiet")) {
quiet = 1; quiet = 1;
} else if (!strcmp(argv[c], "-diag")) { } else if (!strcmp(argv[c], "-diag")) {
@ -1153,7 +1151,7 @@ int main(int argc, const char* argv[]) {
const int version = WebPGetDecoderVersion(); const int version = WebPGetDecoderVersion();
printf("WebP Decoder version: %d.%d.%d\n", printf("WebP Decoder version: %d.%d.%d\n",
(version >> 16) & 0xff, (version >> 8) & 0xff, version & 0xff); (version >> 16) & 0xff, (version >> 8) & 0xff, version & 0xff);
FREE_WARGV_AND_RETURN(EXIT_SUCCESS); FREE_WARGV_AND_RETURN(0);
} else { // Assume the remaining are all input files. } else { // Assume the remaining are all input files.
break; break;
} }
@ -1161,7 +1159,7 @@ int main(int argc, const char* argv[]) {
if (c == argc) { if (c == argc) {
Help(); Help();
FREE_WARGV_AND_RETURN(EXIT_FAILURE); FREE_WARGV_AND_RETURN(WEBP_INFO_INVALID_COMMAND);
} }
// Process input files one by one. // Process input files one by one.
@ -1169,10 +1167,10 @@ int main(int argc, const char* argv[]) {
WebPData webp_data; WebPData webp_data;
const W_CHAR* in_file = NULL; const W_CHAR* in_file = NULL;
WebPInfoInit(&webp_info); WebPInfoInit(&webp_info);
webp_info.quiet = quiet; webp_info.quiet_ = quiet;
webp_info.show_diagnosis = show_diag; webp_info.show_diagnosis_ = show_diag;
webp_info.show_summary = show_summary; webp_info.show_summary_ = show_summary;
webp_info.parse_bitstream = parse_bitstream; webp_info.parse_bitstream_ = parse_bitstream;
in_file = GET_WARGV(argv, c); in_file = GET_WARGV(argv, c);
if (in_file == NULL || if (in_file == NULL ||
!ReadFileToWebPData((const char*)in_file, &webp_data)) { !ReadFileToWebPData((const char*)in_file, &webp_data)) {
@ -1180,10 +1178,9 @@ int main(int argc, const char* argv[]) {
WFPRINTF(stderr, "Failed to open input file %s.\n", in_file); WFPRINTF(stderr, "Failed to open input file %s.\n", in_file);
continue; continue;
} }
if (!webp_info.quiet) WPRINTF("File: %s\n", in_file); if (!webp_info.quiet_) WPRINTF("File: %s\n", in_file);
webp_info_status = AnalyzeWebP(&webp_info, &webp_data); webp_info_status = AnalyzeWebP(&webp_info, &webp_data);
WebPDataClear(&webp_data); WebPDataClear(&webp_data);
} }
FREE_WARGV_AND_RETURN((webp_info_status == WEBP_INFO_OK) ? EXIT_SUCCESS FREE_WARGV_AND_RETURN(webp_info_status);
: EXIT_FAILURE);
} }


@ -59,7 +59,6 @@
#include <stdio.h> #include <stdio.h>
#include <stdlib.h> #include <stdlib.h>
#include <string.h> #include <string.h>
#include "webp/decode.h" #include "webp/decode.h"
#include "webp/mux.h" #include "webp/mux.h"
#include "../examples/example_util.h" #include "../examples/example_util.h"
@ -87,9 +86,9 @@ typedef enum {
} FeatureSubType; } FeatureSubType;
typedef struct { typedef struct {
FeatureSubType subtype; FeatureSubType subtype_;
const char* filename; const char* filename_;
const char* params; const char* params_;
} FeatureArg; } FeatureArg;
typedef enum { typedef enum {
@ -114,14 +113,14 @@ static const char* const kDescriptions[LAST_FEATURE] = {
}; };
typedef struct { typedef struct {
CommandLineArguments cmd_args; CommandLineArguments cmd_args_;
ActionType action_type; ActionType action_type_;
const char* input; const char* input_;
const char* output; const char* output_;
FeatureType type; FeatureType type_;
FeatureArg* args; FeatureArg* args_;
int arg_count; int arg_count_;
} Config; } Config;
//------------------------------------------------------------------------------ //------------------------------------------------------------------------------
@ -132,8 +131,8 @@ static int CountOccurrences(const CommandLineArguments* const args,
int i; int i;
int num_occurences = 0; int num_occurences = 0;
for (i = 0; i < args->argc; ++i) { for (i = 0; i < args->argc_; ++i) {
if (!strcmp(args->argv[i], arg)) { if (!strcmp(args->argv_[i], arg)) {
++num_occurences; ++num_occurences;
} }
} }
@ -151,20 +150,16 @@ static const char* ErrorString(WebPMuxError err) {
} }
 #define RETURN_IF_ERROR(ERR_MSG) \
-  do { \
-    if (err != WEBP_MUX_OK) { \
-      fprintf(stderr, ERR_MSG); \
-      return err; \
-    } \
-  } while (0)
+  if (err != WEBP_MUX_OK) { \
+    fprintf(stderr, ERR_MSG); \
+    return err; \
+  }
 #define RETURN_IF_ERROR3(ERR_MSG, FORMAT_STR1, FORMAT_STR2) \
-  do { \
-    if (err != WEBP_MUX_OK) { \
-      fprintf(stderr, ERR_MSG, FORMAT_STR1, FORMAT_STR2); \
-      return err; \
-    } \
-  } while (0)
+  if (err != WEBP_MUX_OK) { \
+    fprintf(stderr, ERR_MSG, FORMAT_STR1, FORMAT_STR2); \
+    return err; \
+  }
#define ERROR_GOTO1(ERR_MSG, LABEL) \ #define ERROR_GOTO1(ERR_MSG, LABEL) \
do { \ do { \
@ -527,8 +522,8 @@ static int ParseBgcolorArgs(const char* args, uint32_t* const bgcolor) {
static void DeleteConfig(Config* const config) { static void DeleteConfig(Config* const config) {
if (config != NULL) { if (config != NULL) {
free(config->args); free(config->args_);
ExUtilDeleteCommandLineArguments(&config->cmd_args); ExUtilDeleteCommandLineArguments(&config->cmd_args_);
memset(config, 0, sizeof(*config)); memset(config, 0, sizeof(*config));
} }
} }
@ -605,31 +600,25 @@ static int ValidateCommandLine(const CommandLineArguments* const cmd_args,
return ok; return ok;
} }
#define ACTION_IS_NIL (config->action_type == NIL_ACTION) #define ACTION_IS_NIL (config->action_type_ == NIL_ACTION)
#define FEATURETYPE_IS_NIL (config->type == NIL_FEATURE) #define FEATURETYPE_IS_NIL (config->type_ == NIL_FEATURE)
 #define CHECK_NUM_ARGS_AT_LEAST(NUM, LABEL) \
-  do { \
-    if (argc < i + (NUM)) { \
-      fprintf(stderr, "ERROR: Too few arguments for '%s'.\n", argv[i]); \
-      goto LABEL; \
-    } \
-  } while (0)
+  if (argc < i + (NUM)) { \
+    fprintf(stderr, "ERROR: Too few arguments for '%s'.\n", argv[i]); \
+    goto LABEL; \
+  }
 #define CHECK_NUM_ARGS_AT_MOST(NUM, LABEL) \
-  do { \
-    if (argc > i + (NUM)) { \
-      fprintf(stderr, "ERROR: Too many arguments for '%s'.\n", argv[i]); \
-      goto LABEL; \
-    } \
-  } while (0)
+  if (argc > i + (NUM)) { \
+    fprintf(stderr, "ERROR: Too many arguments for '%s'.\n", argv[i]); \
+    goto LABEL; \
+  }
 #define CHECK_NUM_ARGS_EXACTLY(NUM, LABEL) \
-  do { \
-    CHECK_NUM_ARGS_AT_LEAST(NUM, LABEL); \
-    CHECK_NUM_ARGS_AT_MOST(NUM, LABEL); \
-  } while (0)
+  CHECK_NUM_ARGS_AT_LEAST(NUM, LABEL); \
+  CHECK_NUM_ARGS_AT_MOST(NUM, LABEL);
// Parses command-line arguments to fill up config object. Also performs some // Parses command-line arguments to fill up config object. Also performs some
// semantic checks. unicode_argv contains wchar_t arguments or is null. // semantic checks. unicode_argv contains wchar_t arguments or is null.
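
In the macro hunks above, the main side wraps RETURN_IF_ERROR, RETURN_IF_ERROR3 and the CHECK_NUM_ARGS_* helpers in do { ... } while (0), while the v1.3.1-rc2 side leaves them as bare if blocks. A small illustration (not code from the diff; CHECK_POSITIVE and UseCheck are made-up names) of why the wrapper matters for multi-statement macros:

    /* A macro that expands to a bare `if (...) { ... }` cannot be used like an
     * ordinary statement:
     *     if (flag) BARE_CHECK(x); else ...   // the ';' before 'else' is a syntax error
     * The do { ... } while (0) form is a single statement that consumes exactly
     * one trailing ';', so it composes safely with if/else. */
    #include <stdio.h>

    #define CHECK_POSITIVE(x)                                  \
      do {                                                     \
        if ((x) <= 0) { fprintf(stderr, "bad\n"); return -1; } \
      } while (0)

    static int UseCheck(int flag, int value) {
      if (flag) CHECK_POSITIVE(value); else fprintf(stderr, "flag not set\n");
      return 0;
    }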
@ -637,98 +626,98 @@ static int ParseCommandLine(Config* config, const W_CHAR** const unicode_argv) {
int i = 0; int i = 0;
int feature_arg_index = 0; int feature_arg_index = 0;
int ok = 1; int ok = 1;
int argc = config->cmd_args.argc; int argc = config->cmd_args_.argc_;
const char* const* argv = config->cmd_args.argv; const char* const* argv = config->cmd_args_.argv_;
// Unicode file paths will be used if available. // Unicode file paths will be used if available.
const char* const* wargv = const char* const* wargv =
(unicode_argv != NULL) ? (const char**)(unicode_argv + 1) : argv; (unicode_argv != NULL) ? (const char**)(unicode_argv + 1) : argv;
while (i < argc) { while (i < argc) {
FeatureArg* const arg = &config->args[feature_arg_index]; FeatureArg* const arg = &config->args_[feature_arg_index];
if (argv[i][0] == '-') { // One of the action types or output. if (argv[i][0] == '-') { // One of the action types or output.
if (!strcmp(argv[i], "-set")) { if (!strcmp(argv[i], "-set")) {
if (ACTION_IS_NIL) { if (ACTION_IS_NIL) {
config->action_type = ACTION_SET; config->action_type_ = ACTION_SET;
} else { } else {
ERROR_GOTO1("ERROR: Multiple actions specified.\n", ErrParse); ERROR_GOTO1("ERROR: Multiple actions specified.\n", ErrParse);
} }
++i; ++i;
} else if (!strcmp(argv[i], "-duration")) { } else if (!strcmp(argv[i], "-duration")) {
CHECK_NUM_ARGS_AT_LEAST(2, ErrParse); CHECK_NUM_ARGS_AT_LEAST(2, ErrParse);
if (ACTION_IS_NIL || config->action_type == ACTION_DURATION) { if (ACTION_IS_NIL || config->action_type_ == ACTION_DURATION) {
config->action_type = ACTION_DURATION; config->action_type_ = ACTION_DURATION;
} else { } else {
ERROR_GOTO1("ERROR: Multiple actions specified.\n", ErrParse); ERROR_GOTO1("ERROR: Multiple actions specified.\n", ErrParse);
} }
if (FEATURETYPE_IS_NIL || config->type == FEATURE_DURATION) { if (FEATURETYPE_IS_NIL || config->type_ == FEATURE_DURATION) {
config->type = FEATURE_DURATION; config->type_ = FEATURE_DURATION;
} else { } else {
ERROR_GOTO1("ERROR: Multiple features specified.\n", ErrParse); ERROR_GOTO1("ERROR: Multiple features specified.\n", ErrParse);
} }
arg->params = argv[i + 1]; arg->params_ = argv[i + 1];
++feature_arg_index; ++feature_arg_index;
i += 2; i += 2;
} else if (!strcmp(argv[i], "-get")) { } else if (!strcmp(argv[i], "-get")) {
if (ACTION_IS_NIL) { if (ACTION_IS_NIL) {
config->action_type = ACTION_GET; config->action_type_ = ACTION_GET;
} else { } else {
ERROR_GOTO1("ERROR: Multiple actions specified.\n", ErrParse); ERROR_GOTO1("ERROR: Multiple actions specified.\n", ErrParse);
} }
++i; ++i;
} else if (!strcmp(argv[i], "-strip")) { } else if (!strcmp(argv[i], "-strip")) {
if (ACTION_IS_NIL) { if (ACTION_IS_NIL) {
config->action_type = ACTION_STRIP; config->action_type_ = ACTION_STRIP;
config->arg_count = 0; config->arg_count_ = 0;
} else { } else {
ERROR_GOTO1("ERROR: Multiple actions specified.\n", ErrParse); ERROR_GOTO1("ERROR: Multiple actions specified.\n", ErrParse);
} }
++i; ++i;
} else if (!strcmp(argv[i], "-frame")) { } else if (!strcmp(argv[i], "-frame")) {
CHECK_NUM_ARGS_AT_LEAST(3, ErrParse); CHECK_NUM_ARGS_AT_LEAST(3, ErrParse);
if (ACTION_IS_NIL || config->action_type == ACTION_SET) { if (ACTION_IS_NIL || config->action_type_ == ACTION_SET) {
config->action_type = ACTION_SET; config->action_type_ = ACTION_SET;
} else { } else {
ERROR_GOTO1("ERROR: Multiple actions specified.\n", ErrParse); ERROR_GOTO1("ERROR: Multiple actions specified.\n", ErrParse);
} }
if (FEATURETYPE_IS_NIL || config->type == FEATURE_ANMF) { if (FEATURETYPE_IS_NIL || config->type_ == FEATURE_ANMF) {
config->type = FEATURE_ANMF; config->type_ = FEATURE_ANMF;
} else { } else {
ERROR_GOTO1("ERROR: Multiple features specified.\n", ErrParse); ERROR_GOTO1("ERROR: Multiple features specified.\n", ErrParse);
} }
arg->subtype = SUBTYPE_ANMF; arg->subtype_ = SUBTYPE_ANMF;
arg->filename = wargv[i + 1]; arg->filename_ = wargv[i + 1];
arg->params = argv[i + 2]; arg->params_ = argv[i + 2];
++feature_arg_index; ++feature_arg_index;
i += 3; i += 3;
} else if (!strcmp(argv[i], "-loop") || !strcmp(argv[i], "-bgcolor")) { } else if (!strcmp(argv[i], "-loop") || !strcmp(argv[i], "-bgcolor")) {
CHECK_NUM_ARGS_AT_LEAST(2, ErrParse); CHECK_NUM_ARGS_AT_LEAST(2, ErrParse);
if (ACTION_IS_NIL || config->action_type == ACTION_SET) { if (ACTION_IS_NIL || config->action_type_ == ACTION_SET) {
config->action_type = ACTION_SET; config->action_type_ = ACTION_SET;
} else { } else {
ERROR_GOTO1("ERROR: Multiple actions specified.\n", ErrParse); ERROR_GOTO1("ERROR: Multiple actions specified.\n", ErrParse);
} }
if (FEATURETYPE_IS_NIL || config->type == FEATURE_ANMF) { if (FEATURETYPE_IS_NIL || config->type_ == FEATURE_ANMF) {
config->type = FEATURE_ANMF; config->type_ = FEATURE_ANMF;
} else { } else {
ERROR_GOTO1("ERROR: Multiple features specified.\n", ErrParse); ERROR_GOTO1("ERROR: Multiple features specified.\n", ErrParse);
} }
arg->subtype = arg->subtype_ =
!strcmp(argv[i], "-loop") ? SUBTYPE_LOOP : SUBTYPE_BGCOLOR; !strcmp(argv[i], "-loop") ? SUBTYPE_LOOP : SUBTYPE_BGCOLOR;
arg->params = argv[i + 1]; arg->params_ = argv[i + 1];
++feature_arg_index; ++feature_arg_index;
i += 2; i += 2;
} else if (!strcmp(argv[i], "-o")) { } else if (!strcmp(argv[i], "-o")) {
CHECK_NUM_ARGS_AT_LEAST(2, ErrParse); CHECK_NUM_ARGS_AT_LEAST(2, ErrParse);
config->output = wargv[i + 1]; config->output_ = wargv[i + 1];
i += 2; i += 2;
} else if (!strcmp(argv[i], "-info")) { } else if (!strcmp(argv[i], "-info")) {
CHECK_NUM_ARGS_EXACTLY(2, ErrParse); CHECK_NUM_ARGS_EXACTLY(2, ErrParse);
if (config->action_type != NIL_ACTION) { if (config->action_type_ != NIL_ACTION) {
ERROR_GOTO1("ERROR: Multiple actions specified.\n", ErrParse); ERROR_GOTO1("ERROR: Multiple actions specified.\n", ErrParse);
} else { } else {
config->action_type = ACTION_INFO; config->action_type_ = ACTION_INFO;
config->arg_count = 0; config->arg_count_ = 0;
config->input = wargv[i + 1]; config->input_ = wargv[i + 1];
} }
i += 2; i += 2;
} else if (!strcmp(argv[i], "-h") || !strcmp(argv[i], "-help")) { } else if (!strcmp(argv[i], "-h") || !strcmp(argv[i], "-help")) {
@ -746,8 +735,8 @@ static int ParseCommandLine(Config* config, const W_CHAR** const unicode_argv) {
} else if (!strcmp(argv[i], "--")) { } else if (!strcmp(argv[i], "--")) {
if (i < argc - 1) { if (i < argc - 1) {
++i; ++i;
if (config->input == NULL) { if (config->input_ == NULL) {
config->input = wargv[i]; config->input_ = wargv[i];
} else { } else {
ERROR_GOTO2("ERROR at '%s': Multiple input files specified.\n", ERROR_GOTO2("ERROR at '%s': Multiple input files specified.\n",
argv[i], ErrParse); argv[i], ErrParse);
@ -765,43 +754,43 @@ static int ParseCommandLine(Config* config, const W_CHAR** const unicode_argv) {
if (!strcmp(argv[i], "icc") || !strcmp(argv[i], "exif") || if (!strcmp(argv[i], "icc") || !strcmp(argv[i], "exif") ||
!strcmp(argv[i], "xmp")) { !strcmp(argv[i], "xmp")) {
if (FEATURETYPE_IS_NIL) { if (FEATURETYPE_IS_NIL) {
config->type = (!strcmp(argv[i], "icc")) ? FEATURE_ICCP : config->type_ = (!strcmp(argv[i], "icc")) ? FEATURE_ICCP :
(!strcmp(argv[i], "exif")) ? FEATURE_EXIF : FEATURE_XMP; (!strcmp(argv[i], "exif")) ? FEATURE_EXIF : FEATURE_XMP;
} else { } else {
ERROR_GOTO1("ERROR: Multiple features specified.\n", ErrParse); ERROR_GOTO1("ERROR: Multiple features specified.\n", ErrParse);
} }
if (config->action_type == ACTION_SET) { if (config->action_type_ == ACTION_SET) {
CHECK_NUM_ARGS_AT_LEAST(2, ErrParse); CHECK_NUM_ARGS_AT_LEAST(2, ErrParse);
arg->filename = wargv[i + 1]; arg->filename_ = wargv[i + 1];
++feature_arg_index; ++feature_arg_index;
i += 2; i += 2;
} else { } else {
++i; ++i;
} }
} else if (!strcmp(argv[i], "frame") && } else if (!strcmp(argv[i], "frame") &&
(config->action_type == ACTION_GET)) { (config->action_type_ == ACTION_GET)) {
CHECK_NUM_ARGS_AT_LEAST(2, ErrParse); CHECK_NUM_ARGS_AT_LEAST(2, ErrParse);
config->type = FEATURE_ANMF; config->type_ = FEATURE_ANMF;
arg->params = argv[i + 1]; arg->params_ = argv[i + 1];
++feature_arg_index; ++feature_arg_index;
i += 2; i += 2;
} else if (!strcmp(argv[i], "loop") && } else if (!strcmp(argv[i], "loop") &&
(config->action_type == ACTION_SET)) { (config->action_type_ == ACTION_SET)) {
CHECK_NUM_ARGS_AT_LEAST(2, ErrParse); CHECK_NUM_ARGS_AT_LEAST(2, ErrParse);
config->type = FEATURE_LOOP; config->type_ = FEATURE_LOOP;
arg->params = argv[i + 1]; arg->params_ = argv[i + 1];
++feature_arg_index; ++feature_arg_index;
i += 2; i += 2;
} else if (!strcmp(argv[i], "bgcolor") && } else if (!strcmp(argv[i], "bgcolor") &&
(config->action_type == ACTION_SET)) { (config->action_type_ == ACTION_SET)) {
CHECK_NUM_ARGS_AT_LEAST(2, ErrParse); CHECK_NUM_ARGS_AT_LEAST(2, ErrParse);
config->type = FEATURE_BGCOLOR; config->type_ = FEATURE_BGCOLOR;
arg->params = argv[i + 1]; arg->params_ = argv[i + 1];
++feature_arg_index; ++feature_arg_index;
i += 2; i += 2;
} else { // Assume input file. } else { // Assume input file.
if (config->input == NULL) { if (config->input_ == NULL) {
config->input = wargv[i]; config->input_ = wargv[i];
} else { } else {
ERROR_GOTO2("ERROR at '%s': Multiple input files specified.\n", ERROR_GOTO2("ERROR at '%s': Multiple input files specified.\n",
argv[i], ErrParse); argv[i], ErrParse);
@ -824,21 +813,21 @@ static int ValidateConfig(Config* const config) {
} }
// Feature type. // Feature type.
if (FEATURETYPE_IS_NIL && config->action_type != ACTION_INFO) { if (FEATURETYPE_IS_NIL && config->action_type_ != ACTION_INFO) {
ERROR_GOTO1("ERROR: No feature specified.\n", ErrValidate2); ERROR_GOTO1("ERROR: No feature specified.\n", ErrValidate2);
} }
// Input file. // Input file.
if (config->input == NULL) { if (config->input_ == NULL) {
if (config->action_type != ACTION_SET) { if (config->action_type_ != ACTION_SET) {
ERROR_GOTO1("ERROR: No input file specified.\n", ErrValidate2); ERROR_GOTO1("ERROR: No input file specified.\n", ErrValidate2);
} else if (config->type != FEATURE_ANMF) { } else if (config->type_ != FEATURE_ANMF) {
ERROR_GOTO1("ERROR: No input file specified.\n", ErrValidate2); ERROR_GOTO1("ERROR: No input file specified.\n", ErrValidate2);
} }
} }
// Output file. // Output file.
if (config->output == NULL && config->action_type != ACTION_INFO) { if (config->output_ == NULL && config->action_type_ != ACTION_INFO) {
ERROR_GOTO1("ERROR: No output file specified.\n", ErrValidate2); ERROR_GOTO1("ERROR: No output file specified.\n", ErrValidate2);
} }
@ -854,17 +843,17 @@ static int InitializeConfig(int argc, const char* argv[], Config* const config,
memset(config, 0, sizeof(*config)); memset(config, 0, sizeof(*config));
ok = ExUtilInitCommandLineArguments(argc, argv, &config->cmd_args); ok = ExUtilInitCommandLineArguments(argc, argv, &config->cmd_args_);
if (!ok) return 0; if (!ok) return 0;
// Validate command-line arguments. // Validate command-line arguments.
if (!ValidateCommandLine(&config->cmd_args, &num_feature_args)) { if (!ValidateCommandLine(&config->cmd_args_, &num_feature_args)) {
ERROR_GOTO1("Exiting due to command-line parsing error.\n", Err1); ERROR_GOTO1("Exiting due to command-line parsing error.\n", Err1);
} }
config->arg_count = num_feature_args; config->arg_count_ = num_feature_args;
config->args = (FeatureArg*)calloc(num_feature_args, sizeof(*config->args)); config->args_ = (FeatureArg*)calloc(num_feature_args, sizeof(*config->args_));
if (config->args == NULL) { if (config->args_ == NULL) {
ERROR_GOTO1("ERROR: Memory allocation error.\n", Err1); ERROR_GOTO1("ERROR: Memory allocation error.\n", Err1);
} }
@ -896,7 +885,7 @@ static int GetFrame(const WebPMux* mux, const Config* config) {
WebPMuxFrameInfo info; WebPMuxFrameInfo info;
WebPDataInit(&info.bitstream); WebPDataInit(&info.bitstream);
num = ExUtilGetInt(config->args[0].params, 10, &parse_error); num = ExUtilGetInt(config->args_[0].params_, 10, &parse_error);
if (num < 0) { if (num < 0) {
ERROR_GOTO1("ERROR: Frame/Fragment index must be non-negative.\n", ErrGet); ERROR_GOTO1("ERROR: Frame/Fragment index must be non-negative.\n", ErrGet);
} }
@ -921,7 +910,7 @@ static int GetFrame(const WebPMux* mux, const Config* config) {
ErrorString(err), ErrGet); ErrorString(err), ErrGet);
} }
ok = WriteWebP(mux_single, config->output); ok = WriteWebP(mux_single, config->output_);
ErrGet: ErrGet:
WebPDataClear(&info.bitstream); WebPDataClear(&info.bitstream);
@ -936,11 +925,11 @@ static int Process(const Config* config) {
WebPMuxError err = WEBP_MUX_OK; WebPMuxError err = WEBP_MUX_OK;
int ok = 1; int ok = 1;
switch (config->action_type) { switch (config->action_type_) {
case ACTION_GET: { case ACTION_GET: {
ok = CreateMux(config->input, &mux); ok = CreateMux(config->input_, &mux);
if (!ok) goto Err2; if (!ok) goto Err2;
switch (config->type) { switch (config->type_) {
case FEATURE_ANMF: case FEATURE_ANMF:
ok = GetFrame(mux, config); ok = GetFrame(mux, config);
break; break;
@ -948,12 +937,12 @@ static int Process(const Config* config) {
case FEATURE_ICCP: case FEATURE_ICCP:
case FEATURE_EXIF: case FEATURE_EXIF:
case FEATURE_XMP: case FEATURE_XMP:
err = WebPMuxGetChunk(mux, kFourccList[config->type], &chunk); err = WebPMuxGetChunk(mux, kFourccList[config->type_], &chunk);
if (err != WEBP_MUX_OK) { if (err != WEBP_MUX_OK) {
ERROR_GOTO3("ERROR (%s): Could not get the %s.\n", ERROR_GOTO3("ERROR (%s): Could not get the %s.\n",
ErrorString(err), kDescriptions[config->type], Err2); ErrorString(err), kDescriptions[config->type_], Err2);
} }
ok = WriteData(config->output, &chunk); ok = WriteData(config->output_, &chunk);
break; break;
default: default:
@ -963,7 +952,7 @@ static int Process(const Config* config) {
break; break;
} }
case ACTION_SET: { case ACTION_SET: {
switch (config->type) { switch (config->type_) {
case FEATURE_ANMF: { case FEATURE_ANMF: {
int i; int i;
WebPMuxAnimParams params = { 0xFFFFFFFF, 0 }; WebPMuxAnimParams params = { 0xFFFFFFFF, 0 };
@ -972,11 +961,11 @@ static int Process(const Config* config) {
ERROR_GOTO2("ERROR (%s): Could not allocate a mux object.\n", ERROR_GOTO2("ERROR (%s): Could not allocate a mux object.\n",
ErrorString(WEBP_MUX_MEMORY_ERROR), Err2); ErrorString(WEBP_MUX_MEMORY_ERROR), Err2);
} }
for (i = 0; i < config->arg_count; ++i) { for (i = 0; i < config->arg_count_; ++i) {
switch (config->args[i].subtype) { switch (config->args_[i].subtype_) {
case SUBTYPE_BGCOLOR: { case SUBTYPE_BGCOLOR: {
uint32_t bgcolor; uint32_t bgcolor;
ok = ParseBgcolorArgs(config->args[i].params, &bgcolor); ok = ParseBgcolorArgs(config->args_[i].params_, &bgcolor);
if (!ok) { if (!ok) {
ERROR_GOTO1("ERROR: Could not parse the background color \n", ERROR_GOTO1("ERROR: Could not parse the background color \n",
Err2); Err2);
@ -987,7 +976,7 @@ static int Process(const Config* config) {
case SUBTYPE_LOOP: { case SUBTYPE_LOOP: {
int parse_error = 0; int parse_error = 0;
const int loop_count = const int loop_count =
ExUtilGetInt(config->args[i].params, 10, &parse_error); ExUtilGetInt(config->args_[i].params_, 10, &parse_error);
if (loop_count < 0 || loop_count > 65535) { if (loop_count < 0 || loop_count > 65535) {
// Note: This is only a 'necessary' condition for loop_count // Note: This is only a 'necessary' condition for loop_count
// to be valid. The 'sufficient' conditioned in checked in // to be valid. The 'sufficient' conditioned in checked in
@ -1003,10 +992,10 @@ static int Process(const Config* config) {
case SUBTYPE_ANMF: { case SUBTYPE_ANMF: {
WebPMuxFrameInfo frame; WebPMuxFrameInfo frame;
frame.id = WEBP_CHUNK_ANMF; frame.id = WEBP_CHUNK_ANMF;
ok = ExUtilReadFileToWebPData(config->args[i].filename, ok = ExUtilReadFileToWebPData(config->args_[i].filename_,
&frame.bitstream); &frame.bitstream);
if (!ok) goto Err2; if (!ok) goto Err2;
ok = ParseFrameArgs(config->args[i].params, &frame); ok = ParseFrameArgs(config->args_[i].params_, &frame);
if (!ok) { if (!ok) {
WebPDataClear(&frame.bitstream); WebPDataClear(&frame.bitstream);
ERROR_GOTO1("ERROR: Could not parse frame properties.\n", ERROR_GOTO1("ERROR: Could not parse frame properties.\n",
@ -1037,15 +1026,15 @@ static int Process(const Config* config) {
case FEATURE_ICCP: case FEATURE_ICCP:
case FEATURE_EXIF: case FEATURE_EXIF:
case FEATURE_XMP: { case FEATURE_XMP: {
ok = CreateMux(config->input, &mux); ok = CreateMux(config->input_, &mux);
if (!ok) goto Err2; if (!ok) goto Err2;
ok = ExUtilReadFileToWebPData(config->args[0].filename, &chunk); ok = ExUtilReadFileToWebPData(config->args_[0].filename_, &chunk);
if (!ok) goto Err2; if (!ok) goto Err2;
err = WebPMuxSetChunk(mux, kFourccList[config->type], &chunk, 1); err = WebPMuxSetChunk(mux, kFourccList[config->type_], &chunk, 1);
WebPDataClear(&chunk); WebPDataClear(&chunk);
if (err != WEBP_MUX_OK) { if (err != WEBP_MUX_OK) {
ERROR_GOTO3("ERROR (%s): Could not set the %s.\n", ERROR_GOTO3("ERROR (%s): Could not set the %s.\n",
ErrorString(err), kDescriptions[config->type], Err2); ErrorString(err), kDescriptions[config->type_], Err2);
} }
break; break;
} }
@ -1053,12 +1042,12 @@ static int Process(const Config* config) {
WebPMuxAnimParams params = { 0xFFFFFFFF, 0 }; WebPMuxAnimParams params = { 0xFFFFFFFF, 0 };
int parse_error = 0; int parse_error = 0;
const int loop_count = const int loop_count =
ExUtilGetInt(config->args[0].params, 10, &parse_error); ExUtilGetInt(config->args_[0].params_, 10, &parse_error);
if (loop_count < 0 || loop_count > 65535 || parse_error) { if (loop_count < 0 || loop_count > 65535 || parse_error) {
ERROR_GOTO1("ERROR: Loop count must be in the range 0 to 65535.\n", ERROR_GOTO1("ERROR: Loop count must be in the range 0 to 65535.\n",
Err2); Err2);
} }
ok = CreateMux(config->input, &mux); ok = CreateMux(config->input_, &mux);
if (!ok) goto Err2; if (!ok) goto Err2;
ok = (WebPMuxGetAnimationParams(mux, &params) == WEBP_MUX_OK); ok = (WebPMuxGetAnimationParams(mux, &params) == WEBP_MUX_OK);
if (!ok) { if (!ok) {
@ -1077,12 +1066,12 @@ static int Process(const Config* config) {
case FEATURE_BGCOLOR: { case FEATURE_BGCOLOR: {
WebPMuxAnimParams params = { 0xFFFFFFFF, 0 }; WebPMuxAnimParams params = { 0xFFFFFFFF, 0 };
uint32_t bgcolor; uint32_t bgcolor;
ok = ParseBgcolorArgs(config->args[0].params, &bgcolor); ok = ParseBgcolorArgs(config->args_[0].params_, &bgcolor);
if (!ok) { if (!ok) {
ERROR_GOTO1("ERROR: Could not parse the background color.\n", ERROR_GOTO1("ERROR: Could not parse the background color.\n",
Err2); Err2);
} }
ok = CreateMux(config->input, &mux); ok = CreateMux(config->input_, &mux);
if (!ok) goto Err2; if (!ok) goto Err2;
ok = (WebPMuxGetAnimationParams(mux, &params) == WEBP_MUX_OK); ok = (WebPMuxGetAnimationParams(mux, &params) == WEBP_MUX_OK);
if (!ok) { if (!ok) {
@ -1103,12 +1092,12 @@ static int Process(const Config* config) {
break; break;
} }
} }
ok = WriteWebP(mux, config->output); ok = WriteWebP(mux, config->output_);
break; break;
} }
case ACTION_DURATION: { case ACTION_DURATION: {
int num_frames; int num_frames;
ok = CreateMux(config->input, &mux); ok = CreateMux(config->input_, &mux);
if (!ok) goto Err2; if (!ok) goto Err2;
err = WebPMuxNumChunks(mux, WEBP_CHUNK_ANMF, &num_frames); err = WebPMuxNumChunks(mux, WEBP_CHUNK_ANMF, &num_frames);
ok = (err == WEBP_MUX_OK); ok = (err == WEBP_MUX_OK);
@ -1118,7 +1107,7 @@ static int Process(const Config* config) {
if (num_frames == 0) { if (num_frames == 0) {
fprintf(stderr, "Doesn't look like the source is animated. " fprintf(stderr, "Doesn't look like the source is animated. "
"Skipping duration setting.\n"); "Skipping duration setting.\n");
ok = WriteWebP(mux, config->output); ok = WriteWebP(mux, config->output_);
if (!ok) goto Err2; if (!ok) goto Err2;
} else { } else {
int i; int i;
@ -1130,11 +1119,11 @@ static int Process(const Config* config) {
for (i = 0; i < num_frames; ++i) durations[i] = -1; for (i = 0; i < num_frames; ++i) durations[i] = -1;
// Parse intervals to process. // Parse intervals to process.
for (i = 0; i < config->arg_count; ++i) { for (i = 0; i < config->arg_count_; ++i) {
int k; int k;
int args[3]; int args[3];
int duration, start, end; int duration, start, end;
const int nb_args = ExUtilGetInts(config->args[i].params, const int nb_args = ExUtilGetInts(config->args_[i].params_,
10, 3, args); 10, 3, args);
ok = (nb_args >= 1); ok = (nb_args >= 1);
if (!ok) goto Err3; if (!ok) goto Err3;
@ -1178,7 +1167,7 @@ static int Process(const Config* config) {
WebPDataClear(&frame.bitstream); WebPDataClear(&frame.bitstream);
} }
WebPMuxDelete(mux); WebPMuxDelete(mux);
ok = WriteWebP(new_mux, config->output); ok = WriteWebP(new_mux, config->output_);
mux = new_mux; // transfer for the WebPMuxDelete() call mux = new_mux; // transfer for the WebPMuxDelete() call
new_mux = NULL; new_mux = NULL;
@ -1190,24 +1179,24 @@ static int Process(const Config* config) {
break; break;
} }
case ACTION_STRIP: { case ACTION_STRIP: {
ok = CreateMux(config->input, &mux); ok = CreateMux(config->input_, &mux);
if (!ok) goto Err2; if (!ok) goto Err2;
if (config->type == FEATURE_ICCP || config->type == FEATURE_EXIF || if (config->type_ == FEATURE_ICCP || config->type_ == FEATURE_EXIF ||
config->type == FEATURE_XMP) { config->type_ == FEATURE_XMP) {
err = WebPMuxDeleteChunk(mux, kFourccList[config->type]); err = WebPMuxDeleteChunk(mux, kFourccList[config->type_]);
if (err != WEBP_MUX_OK) { if (err != WEBP_MUX_OK) {
ERROR_GOTO3("ERROR (%s): Could not strip the %s.\n", ERROR_GOTO3("ERROR (%s): Could not strip the %s.\n",
ErrorString(err), kDescriptions[config->type], Err2); ErrorString(err), kDescriptions[config->type_], Err2);
} }
} else { } else {
ERROR_GOTO1("ERROR: Invalid feature for action 'strip'.\n", Err2); ERROR_GOTO1("ERROR: Invalid feature for action 'strip'.\n", Err2);
break; break;
} }
ok = WriteWebP(mux, config->output); ok = WriteWebP(mux, config->output_);
break; break;
} }
case ACTION_INFO: { case ACTION_INFO: {
ok = CreateMux(config->input, &mux); ok = CreateMux(config->input_, &mux);
if (!ok) goto Err2; if (!ok) goto Err2;
ok = (DisplayInfo(mux) == WEBP_MUX_OK); ok = (DisplayInfo(mux) == WEBP_MUX_OK);
break; break;
@ -1226,7 +1215,6 @@ static int Process(const Config* config) {
//------------------------------------------------------------------------------ //------------------------------------------------------------------------------
// Main. // Main.
// Returns EXIT_SUCCESS on success, EXIT_FAILURE on failure.
int main(int argc, const char* argv[]) { int main(int argc, const char* argv[]) {
Config config; Config config;
int ok; int ok;
@ -1240,7 +1228,7 @@ int main(int argc, const char* argv[]) {
PrintHelp(); PrintHelp();
} }
DeleteConfig(&config); DeleteConfig(&config);
FREE_WARGV_AND_RETURN(ok ? EXIT_SUCCESS : EXIT_FAILURE); FREE_WARGV_AND_RETURN(!ok);
} }
//------------------------------------------------------------------------------ //------------------------------------------------------------------------------
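
Process() above funnels every failure through the ERROR_GOTO1/2/3 macros to labels such as Err2 and Err3, so partially-built mux objects and WebPData buffers can be released on a single exit path. A reduced sketch of that goto-cleanup idiom with stand-in resources (DoWork is a hypothetical helper, not a libwebp call):

    #include <stdio.h>
    #include <stdlib.h>

    static int DoWork(const char* path) {
      int ok = 0;
      char* buffer = NULL;
      FILE* file = fopen(path, "rb");
      if (file == NULL) { fprintf(stderr, "open failed\n"); goto Err; }
      buffer = (char*)malloc(1024);
      if (buffer == NULL) { fprintf(stderr, "alloc failed\n"); goto Err; }
      /* ... real work would go here ... */
      ok = 1;
     Err:
      free(buffer);                 /* free(NULL) is a no-op */
      if (file != NULL) fclose(file);
      return ok;
    }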

View File

@ -7,7 +7,6 @@ noinst_HEADERS += ../src/webp/types.h
libwebpextras_la_SOURCES = libwebpextras_la_SOURCES =
libwebpextras_la_SOURCES += extras.c extras.h quality_estimate.c libwebpextras_la_SOURCES += extras.c extras.h quality_estimate.c
libwebpextras_la_SOURCES += sharpyuv_risk_table.c sharpyuv_risk_table.h
libwebpextras_la_CPPFLAGS = $(AM_CPPFLAGS) libwebpextras_la_CPPFLAGS = $(AM_CPPFLAGS)
libwebpextras_la_LDFLAGS = -lm libwebpextras_la_LDFLAGS = -lm

View File

@ -11,21 +11,15 @@
// //
#include "extras/extras.h" #include "extras/extras.h"
#include "webp/format_constants.h"
#include "src/dsp/dsp.h"
#include <assert.h> #include <assert.h>
#include <limits.h>
#include <string.h> #include <string.h>
#include "extras/sharpyuv_risk_table.h"
#include "sharpyuv/sharpyuv.h"
#include "src/dsp/dsp.h"
#include "src/utils/utils.h"
#include "webp/format_constants.h"
#include "webp/types.h"
#define XTRA_MAJ_VERSION 1 #define XTRA_MAJ_VERSION 1
#define XTRA_MIN_VERSION 5 #define XTRA_MIN_VERSION 3
#define XTRA_REV_VERSION 0 #define XTRA_REV_VERSION 1
//------------------------------------------------------------------------------ //------------------------------------------------------------------------------
@ -166,159 +160,3 @@ int WebPUnmultiplyARGB(WebPPicture* pic) {
} }
//------------------------------------------------------------------------------ //------------------------------------------------------------------------------
// 420 risk metric
#define YUV_FIX 16 // fixed-point precision for RGB->YUV
static const int kYuvHalf = 1 << (YUV_FIX - 1);
// Maps a value in [0, (256 << YUV_FIX) - 1] to [0,
// precomputed_scores_table_sampling - 1]. It is important that the extremal
// values are preserved and 1:1 mapped:
// ConvertValue(0) = 0
// ConvertValue((256 << 16) - 1) = rgb_sampling_size - 1
static int SharpYuvConvertValueToSampledIdx(int v, int rgb_sampling_size) {
v = (v + kYuvHalf) >> YUV_FIX;
v = (v < 0) ? 0 : (v > 255) ? 255 : v;
return (v * (rgb_sampling_size - 1)) / 255;
}
#undef YUV_FIX
// For each pixel, computes the index to look up that color in a precomputed
// risk score table where the YUV space is subsampled to a size of
// precomputed_scores_table_sampling^3 (see sharpyuv_risk_table.h)
static int SharpYuvConvertToYuvSharpnessIndex(
int r, int g, int b, const SharpYuvConversionMatrix* matrix,
int precomputed_scores_table_sampling) {
const int y = SharpYuvConvertValueToSampledIdx(
matrix->rgb_to_y[0] * r + matrix->rgb_to_y[1] * g +
matrix->rgb_to_y[2] * b + matrix->rgb_to_y[3],
precomputed_scores_table_sampling);
const int u = SharpYuvConvertValueToSampledIdx(
matrix->rgb_to_u[0] * r + matrix->rgb_to_u[1] * g +
matrix->rgb_to_u[2] * b + matrix->rgb_to_u[3],
precomputed_scores_table_sampling);
const int v = SharpYuvConvertValueToSampledIdx(
matrix->rgb_to_v[0] * r + matrix->rgb_to_v[1] * g +
matrix->rgb_to_v[2] * b + matrix->rgb_to_v[3],
precomputed_scores_table_sampling);
return y + u * precomputed_scores_table_sampling +
v * precomputed_scores_table_sampling *
precomputed_scores_table_sampling;
}
static void SharpYuvRowToYuvSharpnessIndex(
const uint8_t* r_ptr, const uint8_t* g_ptr, const uint8_t* b_ptr,
int rgb_step, int rgb_bit_depth, int width, uint16_t* dst,
const SharpYuvConversionMatrix* matrix,
int precomputed_scores_table_sampling) {
int i;
assert(rgb_bit_depth == 8);
(void)rgb_bit_depth; // Unused for now.
for (i = 0; i < width;
++i, r_ptr += rgb_step, g_ptr += rgb_step, b_ptr += rgb_step) {
dst[i] =
SharpYuvConvertToYuvSharpnessIndex(r_ptr[0], g_ptr[0], b_ptr[0], matrix,
precomputed_scores_table_sampling);
}
}
#define SAFE_ALLOC(W, H, T) ((T*)WebPSafeMalloc((uint64_t)(W) * (H), sizeof(T)))
static int DoEstimateRisk(const uint8_t* r_ptr, const uint8_t* g_ptr,
const uint8_t* b_ptr, int rgb_step, int rgb_stride,
int rgb_bit_depth, int width, int height,
const SharpYuvOptions* options,
const uint8_t precomputed_scores_table[],
int precomputed_scores_table_sampling,
float* score_out) {
const int sampling3 = precomputed_scores_table_sampling *
precomputed_scores_table_sampling *
precomputed_scores_table_sampling;
const int kNoiseLevel = 4;
double total_score = 0;
double count = 0;
// Rows of indices in
uint16_t* row1 = SAFE_ALLOC(width, 1, uint16_t);
uint16_t* row2 = SAFE_ALLOC(width, 1, uint16_t);
uint16_t* tmp;
int i, j;
if (row1 == NULL || row2 == NULL) {
WebPFree(row1);
WebPFree(row2);
return 0;
}
// Convert the first row ahead.
SharpYuvRowToYuvSharpnessIndex(r_ptr, g_ptr, b_ptr, rgb_step, rgb_bit_depth,
width, row2, options->yuv_matrix,
precomputed_scores_table_sampling);
for (j = 1; j < height; ++j) {
r_ptr += rgb_stride;
g_ptr += rgb_stride;
b_ptr += rgb_stride;
// Swap row 1 and row 2.
tmp = row1;
row1 = row2;
row2 = tmp;
// Convert the row below.
SharpYuvRowToYuvSharpnessIndex(r_ptr, g_ptr, b_ptr, rgb_step, rgb_bit_depth,
width, row2, options->yuv_matrix,
precomputed_scores_table_sampling);
for (i = 0; i < width - 1; ++i) {
const int idx0 = row1[i + 0];
const int idx1 = row1[i + 1];
const int idx2 = row2[i + 0];
const int score = precomputed_scores_table[idx0 + sampling3 * idx1] +
precomputed_scores_table[idx0 + sampling3 * idx2] +
precomputed_scores_table[idx1 + sampling3 * idx2];
if (score > kNoiseLevel) {
total_score += score;
count += 1.0;
}
}
}
if (count > 0.) total_score /= count;
// If less than 1% of pixels were evaluated -> below noise level.
if (100. * count / (width * height) < 1.) total_score = 0.;
// Rescale to [0:100]
total_score = (total_score > 25.) ? 100. : total_score * 100. / 25.;
WebPFree(row1);
WebPFree(row2);
*score_out = (float)total_score;
return 1;
}
#undef SAFE_ALLOC
int SharpYuvEstimate420Risk(const void* r_ptr, const void* g_ptr,
const void* b_ptr, int rgb_step, int rgb_stride,
int rgb_bit_depth, int width, int height,
const SharpYuvOptions* options, float* score) {
if (width < 1 || height < 1 || width == INT_MAX || height == INT_MAX ||
r_ptr == NULL || g_ptr == NULL || b_ptr == NULL || options == NULL ||
score == NULL) {
return 0;
}
if (rgb_bit_depth != 8) {
return 0;
}
if (width <= 4 || height <= 4) {
*score = 0.0f; // too small, no real risk.
return 1;
}
return DoEstimateRisk(
(const uint8_t*)r_ptr, (const uint8_t*)g_ptr, (const uint8_t*)b_ptr,
rgb_step, rgb_stride, rgb_bit_depth, width, height, options,
kSharpYuvPrecomputedRisk, kSharpYuvPrecomputedRiskYuvSampling, score);
}
//------------------------------------------------------------------------------
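
The removed risk-metric code above works in YUV_FIX = 16 fixed point and relies on SharpYuvConvertValueToSampledIdx() mapping the extremes of the fixed-point range onto the first and last cells of the sampled grid. A stand-alone check of that endpoint property, restating the same arithmetic with stand-in names:

    /* Endpoint check for the mapping used by the removed
     * SharpYuvConvertValueToSampledIdx() (YUV_FIX = 16, kYuvHalf = 1 << 15):
     *   v = 0               -> ((0 + 32768) >> 16) = 0      -> index 0
     *   v = (256 << 16) - 1 -> rounds to 256, clamps to 255 -> index size - 1 */
    #include <assert.h>

    static int ToSampledIdx(int v, int rgb_sampling_size) {
      v = (v + (1 << 15)) >> 16;
      v = (v < 0) ? 0 : (v > 255) ? 255 : v;
      return (v * (rgb_sampling_size - 1)) / 255;
    }

    static void CheckEndpoints(void) {
      const int size = 17;  /* any sampling size > 1 behaves the same way */
      assert(ToSampledIdx(0, size) == 0);
      assert(ToSampledIdx((256 << 16) - 1, size) == size - 1);
    }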

View File

@ -17,10 +17,9 @@
extern "C" { extern "C" {
#endif #endif
#include "sharpyuv/sharpyuv.h"
#include "webp/encode.h" #include "webp/encode.h"
#define WEBP_EXTRAS_ABI_VERSION 0x0003 // MAJOR(8b) + MINOR(8b) #define WEBP_EXTRAS_ABI_VERSION 0x0002 // MAJOR(8b) + MINOR(8b)
//------------------------------------------------------------------------------ //------------------------------------------------------------------------------
@ -71,38 +70,6 @@ WEBP_EXTERN int VP8EstimateQuality(const uint8_t* const data, size_t size);
//------------------------------------------------------------------------------ //------------------------------------------------------------------------------
// Computes a score between 0 and 100 which represents the risk of having visual
// quality loss from converting an RGB image to YUV420.
// A low score, typically < 40, means there is a low risk of artifacts from
// chroma subsampling and a simple averaging algorithm can be used instead of
// the more expensive SharpYuvConvert function.
// A medium score, typically >= 40 and < 70, means that simple chroma
// subsampling will produce artifacts and it may be advisable to use the more
// costly SharpYuvConvert for YUV420 conversion.
// A high score, typically >= 70, means there is a very high risk of artifacts
// from chroma subsampling even with SharpYuvConvert, and best results might be
// achieved by using YUV444.
// If not using SharpYuvConvert, a threshold of about 50 can be used to decide
// between (simple averaging) 420 and 444.
// r_ptr, g_ptr, b_ptr: pointers to the source r, g and b channels. Should point
// to uint8_t buffers if rgb_bit_depth is 8, or uint16_t buffers otherwise.
// rgb_step: distance in bytes between two horizontally adjacent pixels on the
// r, g and b channels. If rgb_bit_depth is > 8, it should be a
// multiple of 2.
// rgb_stride: distance in bytes between two vertically adjacent pixels on the
// r, g, and b channels. If rgb_bit_depth is > 8, it should be a
// multiple of 2.
// rgb_bit_depth: number of bits for each r/g/b value. Only a value of 8 is
// currently supported.
// width, height: width and height of the image in pixels
// Returns 0 on failure.
WEBP_EXTERN int SharpYuvEstimate420Risk(
const void* r_ptr, const void* g_ptr, const void* b_ptr, int rgb_step,
int rgb_stride, int rgb_bit_depth, int width, int height,
const SharpYuvOptions* options, float* score);
//------------------------------------------------------------------------------
#ifdef __cplusplus #ifdef __cplusplus
} // extern "C" } // extern "C"
#endif #endif
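
The comment block removed above documents the score bands for SharpYuvEstimate420Risk(): below roughly 40 plain averaging is fine, 40 to 70 favours SharpYuvConvert(), 70 and above suggests staying with YUV444, with about 50 as a simple 420-versus-444 cut-off. A hedged usage sketch follows; Pick420Strategy is a hypothetical helper, and only the yuv_matrix field of SharpYuvOptions is filled in because that is the only field the removed extras.c implementation reads (a real caller would initialize the options fully).

    #include <stdint.h>
    #include "extras/extras.h"
    #include "sharpyuv/sharpyuv.h"

    /* Returns 0 for plain 420 averaging, 1 for SharpYuvConvert(), 2 for YUV444. */
    static int Pick420Strategy(const uint8_t* r, const uint8_t* g, const uint8_t* b,
                               int step, int stride, int width, int height,
                               const SharpYuvConversionMatrix* matrix) {
      float score = 0.f;
      SharpYuvOptions options;
      options.yuv_matrix = matrix;  /* assumption: other fields unused by the estimator */
      if (!SharpYuvEstimate420Risk(r, g, b, step, stride, /*rgb_bit_depth=*/8,
                                   width, height, &options, &score)) {
        return 1;  /* on failure, fall back to the safer SharpYuvConvert() path */
      }
      if (score < 40.f) return 0;
      if (score < 70.f) return 1;
      return 2;
    }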

View File

@ -227,11 +227,10 @@ static void Help(void) {
WebPGetEnabledInputFileFormats()); WebPGetEnabledInputFileFormats());
} }
// Returns EXIT_SUCCESS on success, EXIT_FAILURE on failure.
int main(int argc, const char* argv[]) { int main(int argc, const char* argv[]) {
WebPPicture pic1, pic2; WebPPicture pic1, pic2;
size_t size1 = 0, size2 = 0; size_t size1 = 0, size2 = 0;
int ret = EXIT_FAILURE; int ret = 1;
float disto[5]; float disto[5];
int type = 0; int type = 0;
int c; int c;
@ -247,7 +246,7 @@ int main(int argc, const char* argv[]) {
if (!WebPPictureInit(&pic1) || !WebPPictureInit(&pic2)) { if (!WebPPictureInit(&pic1) || !WebPPictureInit(&pic2)) {
fprintf(stderr, "Can't init pictures\n"); fprintf(stderr, "Can't init pictures\n");
FREE_WARGV_AND_RETURN(EXIT_FAILURE); FREE_WARGV_AND_RETURN(1);
} }
for (c = 1; c < argc; ++c) { for (c = 1; c < argc; ++c) {
@ -263,7 +262,7 @@ int main(int argc, const char* argv[]) {
use_gray = 1; use_gray = 1;
} else if (!strcmp(argv[c], "-h")) { } else if (!strcmp(argv[c], "-h")) {
help = 1; help = 1;
ret = EXIT_SUCCESS; ret = 0;
} else if (!strcmp(argv[c], "-o")) { } else if (!strcmp(argv[c], "-o")) {
if (++c == argc) { if (++c == argc) {
fprintf(stderr, "missing file name after %s option.\n", argv[c - 1]); fprintf(stderr, "missing file name after %s option.\n", argv[c - 1]);
@ -338,8 +337,7 @@ int main(int argc, const char* argv[]) {
fprintf(stderr, "Error during lossless encoding.\n"); fprintf(stderr, "Error during lossless encoding.\n");
goto End; goto End;
} }
ret = ImgIoUtilWriteFile(output, data, data_size) ? EXIT_SUCCESS ret = ImgIoUtilWriteFile(output, data, data_size) ? 0 : 1;
: EXIT_FAILURE;
WebPFree(data); WebPFree(data);
if (ret) goto End; if (ret) goto End;
#else #else
@ -347,10 +345,9 @@ int main(int argc, const char* argv[]) {
(void)data_size; (void)data_size;
fprintf(stderr, "Cannot save the difference map. Please recompile " fprintf(stderr, "Cannot save the difference map. Please recompile "
"without the WEBP_REDUCE_CSP flag.\n"); "without the WEBP_REDUCE_CSP flag.\n");
goto End;
#endif // WEBP_REDUCE_CSP #endif // WEBP_REDUCE_CSP
} }
ret = EXIT_SUCCESS; ret = 0;
End: End:
WebPPictureFree(&pic1); WebPPictureFree(&pic1);

View File

@ -76,7 +76,7 @@ int VP8EstimateQuality(const uint8_t* const data, size_t size) {
GET_BIT(2); // colorspace + clamp type GET_BIT(2); // colorspace + clamp type
// Segment header // Segment header
if (GET_BIT(1)) { // use_segment if (GET_BIT(1)) { // use_segment_
int s; int s;
const int update_map = GET_BIT(1); const int update_map = GET_BIT(1);
if (GET_BIT(1)) { // update data if (GET_BIT(1)) { // update data

File diff suppressed because it is too large

View File

@ -1,27 +0,0 @@
// Copyright 2023 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// Precomputed data for 420 risk estimation.
#ifndef WEBP_EXTRAS_SHARPYUV_RISK_TABLE_H_
#define WEBP_EXTRAS_SHARPYUV_RISK_TABLE_H_
#include "src/webp/types.h"
extern const int kSharpYuvPrecomputedRiskYuvSampling;
// Table of precomputed risk scores when chroma subsampling images with two
// given colors.
// Since precomputing values for all possible YUV colors would create a huge
// table, the YUV space (i.e. [0, 255]^3) is reduced to
// [0, kSharpYuvPrecomputedRiskYuvSampling-1]^3
// where 255 maps to kSharpYuvPrecomputedRiskYuvSampling-1.
// Table size: kSharpYuvPrecomputedRiskYuvSampling^6 bytes or 114 KiB
extern const uint8_t kSharpYuvPrecomputedRisk[];
#endif // WEBP_EXTRAS_SHARPYUV_RISK_TABLE_H_
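
The quoted table size is enough to pin down the sampling: the table stores one byte per ordered pair of quantized colors, that is S^3 * S^3 entries for S sample points per Y/U/V axis, and S = 7 gives 7^6 = 117,649 bytes, which truncates to the 114 KiB mentioned above. This is an inference from the comment, not a value read from the (suppressed) table source:

    /* Compile-time restatement of the size arithmetic; kAssumedSampling is an
     * inference from the "114 KiB" figure, not a constant taken from the table. */
    enum { kAssumedSampling = 7 };
    typedef char CheckRiskTableSize[
        (kAssumedSampling * kAssumedSampling * kAssumedSampling *
         kAssumedSampling * kAssumedSampling * kAssumedSampling == 117649) ? 1 : -1];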

View File

@ -15,7 +15,6 @@
// Author: James Zern (jzern@google.com) // Author: James Zern (jzern@google.com)
#include <stdio.h> #include <stdio.h>
#include <stdlib.h>
#ifdef HAVE_CONFIG_H #ifdef HAVE_CONFIG_H
#include "webp/config.h" #include "webp/config.h"
@ -31,7 +30,7 @@
#if defined(WEBP_HAVE_JUST_SDL_H) #if defined(WEBP_HAVE_JUST_SDL_H)
#include <SDL.h> #include <SDL.h>
#else #else
#include <SDL2/SDL.h> #include <SDL/SDL.h>
#endif #endif
static void ProcessEvents(void) { static void ProcessEvents(void) {
@ -50,26 +49,19 @@ static void ProcessEvents(void) {
} }
} }
// Returns EXIT_SUCCESS on success, EXIT_FAILURE on failure.
int main(int argc, char* argv[]) { int main(int argc, char* argv[]) {
int c; int c;
int ok = 0; int ok = 0;
INIT_WARGV(argc, argv); INIT_WARGV(argc, argv);
if (argc == 1) {
fprintf(stderr, "Usage: %s [-h] image.webp [more_files.webp...]\n",
argv[0]);
goto Error;
}
for (c = 1; c < argc; ++c) { for (c = 1; c < argc; ++c) {
const char* file = NULL; const char* file = NULL;
const uint8_t* webp = NULL; const uint8_t* webp = NULL;
size_t webp_size = 0; size_t webp_size = 0;
if (!strcmp(argv[c], "-h")) { if (!strcmp(argv[c], "-h")) {
printf("Usage: %s [-h] image.webp [more_files.webp...]\n", argv[0]); printf("Usage: %s [-h] image.webp [more_files.webp...]\n", argv[0]);
FREE_WARGV_AND_RETURN(EXIT_SUCCESS); FREE_WARGV_AND_RETURN(0);
} else { } else {
file = (const char*)GET_WARGV(argv, c); file = (const char*)GET_WARGV(argv, c);
} }
@ -95,7 +87,7 @@ int main(int argc, char* argv[]) {
Error: Error:
SDL_Quit(); SDL_Quit();
FREE_WARGV_AND_RETURN(ok ? EXIT_SUCCESS : EXIT_FAILURE); FREE_WARGV_AND_RETURN(ok ? 0 : 1);
} }
#else // !WEBP_HAVE_SDL #else // !WEBP_HAVE_SDL

View File

@ -15,7 +15,6 @@
#include "imageio/imageio_util.h" #include "imageio/imageio_util.h"
#include "../examples/unicode.h" #include "../examples/unicode.h"
// Returns EXIT_SUCCESS on success, EXIT_FAILURE on failure.
int main(int argc, const char* argv[]) { int main(int argc, const char* argv[]) {
int c; int c;
int quiet = 0; int quiet = 0;
@ -28,7 +27,7 @@ int main(int argc, const char* argv[]) {
quiet = 1; quiet = 1;
} else if (!strcmp(argv[c], "-help") || !strcmp(argv[c], "-h")) { } else if (!strcmp(argv[c], "-help") || !strcmp(argv[c], "-h")) {
printf("webp_quality [-h][-quiet] webp_files...\n"); printf("webp_quality [-h][-quiet] webp_files...\n");
FREE_WARGV_AND_RETURN(EXIT_SUCCESS); FREE_WARGV_AND_RETURN(0);
} else { } else {
const char* const filename = (const char*)GET_WARGV(argv, c); const char* const filename = (const char*)GET_WARGV(argv, c);
const uint8_t* data = NULL; const uint8_t* data = NULL;
@ -51,5 +50,5 @@ int main(int argc, const char* argv[]) {
free((void*)data); free((void*)data);
} }
} }
FREE_WARGV_AND_RETURN(ok ? EXIT_SUCCESS : EXIT_FAILURE); FREE_WARGV_AND_RETURN(ok ? 0 : 1);
} }

View File

@ -20,75 +20,88 @@
#include "webp_to_sdl.h" #include "webp_to_sdl.h"
#include <stdio.h> #include <stdio.h>
#include "src/webp/decode.h" #include "src/webp/decode.h"
#if defined(WEBP_HAVE_JUST_SDL_H) #if defined(WEBP_HAVE_JUST_SDL_H)
#include <SDL.h> #include <SDL.h>
#else #else
#include <SDL2/SDL.h> #include <SDL/SDL.h>
#endif #endif
static int init_ok = 0; static int init_ok = 0;
int WebPToSDL(const char* data, unsigned int data_size) { int WebPToSDL(const char* data, unsigned int data_size) {
int ok = 0; int ok = 0;
VP8StatusCode status; VP8StatusCode status;
-  WebPBitstreamFeatures input;
-  uint8_t* output = NULL;
-  SDL_Window* window = NULL;
-  SDL_Renderer* renderer = NULL;
-  SDL_Texture* texture = NULL;
-  int width, height;
+  WebPDecoderConfig config;
+  WebPBitstreamFeatures* const input = &config.input;
+  WebPDecBuffer* const output = &config.output;
+  SDL_Surface* screen = NULL;
+  SDL_Surface* surface = NULL;
+  if (!WebPInitDecoderConfig(&config)) {
+    fprintf(stderr, "Library version mismatch!\n");
+    return 0;
+  }
   if (!init_ok) {
     SDL_Init(SDL_INIT_VIDEO);
     init_ok = 1;
   }
-  status = WebPGetFeatures((uint8_t*)data, (size_t)data_size, &input);
+  status = WebPGetFeatures((uint8_t*)data, (size_t)data_size, &config.input);
   if (status != VP8_STATUS_OK) goto Error;
-  width = input.width;
-  height = input.height;
-  SDL_CreateWindowAndRenderer(width, height, 0, &window, &renderer);
-  if (window == NULL || renderer == NULL) {
-    fprintf(stderr, "Unable to create window or renderer!\n");
+  screen = SDL_SetVideoMode(input->width, input->height, 32, SDL_SWSURFACE);
+  if (screen == NULL) {
+    fprintf(stderr, "Unable to set video mode (32bpp %dx%d)!\n",
+            input->width, input->height);
     goto Error;
   }
-  SDL_SetHint(SDL_HINT_RENDER_SCALE_QUALITY,
-              "linear");  // make the scaled rendering look smoother.
-  SDL_RenderSetLogicalSize(renderer, width, height);
-  texture = SDL_CreateTexture(renderer, SDL_PIXELFORMAT_ABGR8888,
-                              SDL_TEXTUREACCESS_STREAMING, width, height);
-  if (texture == NULL) {
-    fprintf(stderr, "Unable to create %dx%d RGBA texture!\n", width, height);
+  surface = SDL_CreateRGBSurface(SDL_SWSURFACE,
+                                 input->width, input->height, 32,
+                                 0x000000ffu,   // R mask
+                                 0x0000ff00u,   // G mask
+                                 0x00ff0000u,   // B mask
+                                 0xff000000u);  // A mask
+  if (surface == NULL) {
+    fprintf(stderr, "Unable to create %dx%d RGBA surface!\n",
+            input->width, input->height);
     goto Error;
   }
+  if (SDL_MUSTLOCK(surface)) SDL_LockSurface(surface);
 #if SDL_BYTEORDER == SDL_BIG_ENDIAN
-  output = WebPDecodeBGRA((const uint8_t*)data, (size_t)data_size, &width,
-                          &height);
+  output->colorspace = MODE_BGRA;
 #else
-  output = WebPDecodeRGBA((const uint8_t*)data, (size_t)data_size, &width,
-                          &height);
+  output->colorspace = MODE_RGBA;
 #endif
-  if (output == NULL) {
+  output->width = surface->w;
+  output->height = surface->h;
+  output->u.RGBA.rgba = surface->pixels;
+  output->u.RGBA.stride = surface->pitch;
+  output->u.RGBA.size = surface->pitch * surface->h;
+  output->is_external_memory = 1;
+  status = WebPDecode((const uint8_t*)data, (size_t)data_size, &config);
+  if (status != VP8_STATUS_OK) {
     fprintf(stderr, "Error decoding image (%d)\n", status);
     goto Error;
   }
-  SDL_UpdateTexture(texture, NULL, output, width * sizeof(uint32_t));
-  SDL_RenderClear(renderer);
-  SDL_RenderCopy(renderer, texture, NULL, NULL);
-  SDL_RenderPresent(renderer);
+  if (SDL_MUSTLOCK(surface)) SDL_UnlockSurface(surface);
+  if (SDL_BlitSurface(surface, NULL, screen, NULL) ||
+      SDL_Flip(screen)) {
+    goto Error;
+  }
   ok = 1;
  Error:
-  // We should call SDL_DestroyWindow(window) but that makes .js fail.
-  SDL_DestroyRenderer(renderer);
-  SDL_DestroyTexture(texture);
-  WebPFree(output);
+  SDL_FreeSurface(surface);
+  SDL_FreeSurface(screen);
+  WebPFreeDecBuffer(output);
return ok; return ok;
} }
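
The two sides of this hunk reach the screen differently: the main side decodes with WebPDecodeRGBA()/WebPDecodeBGRA() into a buffer owned by libwebp and uploads it to an SDL2 texture, while the v1.3.1-rc2 side points a WebPDecBuffer at the SDL 1.2 surface's pixels and lets WebPDecode() write there directly via is_external_memory, avoiding a copy. A hedged sketch of that external-memory pattern on its own, without SDL; DecodeIntoBuffer is a stand-in name:

    #include <stddef.h>
    #include <stdint.h>
    #include "src/webp/decode.h"

    static int DecodeIntoBuffer(const uint8_t* data, size_t data_size,
                                uint8_t* rgba, int width, int height, int stride) {
      WebPDecoderConfig config;
      if (!WebPInitDecoderConfig(&config)) return 0;  /* version mismatch */
      config.output.colorspace = MODE_RGBA;
      config.output.width = width;    /* mirrors the hunk above */
      config.output.height = height;
      config.output.u.RGBA.rgba = rgba;
      config.output.u.RGBA.stride = stride;
      config.output.u.RGBA.size = (size_t)stride * height;
      config.output.is_external_memory = 1;
      return WebPDecode(data, data_size, &config) == VP8_STATUS_OK;
    }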

View File

@ -260,20 +260,14 @@ int WebPWritePAM(FILE* fout, const WebPDecBuffer* const buffer) {
// Save 16b mode (RGBA4444, RGB565, ...) for debugging purpose. // Save 16b mode (RGBA4444, RGB565, ...) for debugging purpose.
int WebPWrite16bAsPGM(FILE* fout, const WebPDecBuffer* const buffer) { int WebPWrite16bAsPGM(FILE* fout, const WebPDecBuffer* const buffer) {
-  uint32_t width, height;
-  uint8_t* rgba;
-  int stride;
+  const uint32_t width = buffer->width;
+  const uint32_t height = buffer->height;
+  const uint8_t* rgba = buffer->u.RGBA.rgba;
+  const int stride = buffer->u.RGBA.stride;
   const uint32_t bytes_per_px = 2;
   uint32_t y;
-  if (fout == NULL || buffer == NULL) return 0;
-  width = buffer->width;
-  height = buffer->height;
-  rgba = buffer->u.RGBA.rgba;
-  stride = buffer->u.RGBA.stride;
-  if (rgba == NULL) return 0;
+  if (fout == NULL || buffer == NULL || rgba == NULL) return 0;
fprintf(fout, "P5\n%u %u\n255\n", width * bytes_per_px, height); fprintf(fout, "P5\n%u %u\n255\n", width * bytes_per_px, height);
for (y = 0; y < height; ++y) { for (y = 0; y < height; ++y) {
@ -301,29 +295,22 @@ static void PutLE32(uint8_t* const dst, uint32_t value) {
#define BMP_HEADER_SIZE 54 #define BMP_HEADER_SIZE 54
#define BMP_HEADER_ALPHA_EXTRA_SIZE 16 // for alpha info #define BMP_HEADER_ALPHA_EXTRA_SIZE 16 // for alpha info
int WebPWriteBMP(FILE* fout, const WebPDecBuffer* const buffer) { int WebPWriteBMP(FILE* fout, const WebPDecBuffer* const buffer) {
-  int has_alpha, header_size;
-  uint32_t width, height;
-  uint8_t* rgba;
-  int stride;
+  const int has_alpha = WebPIsAlphaMode(buffer->colorspace);
+  const int header_size =
+      BMP_HEADER_SIZE + (has_alpha ? BMP_HEADER_ALPHA_EXTRA_SIZE : 0);
+  const uint32_t width = buffer->width;
+  const uint32_t height = buffer->height;
+  const uint8_t* rgba = buffer->u.RGBA.rgba;
+  const int stride = buffer->u.RGBA.stride;
+  const uint32_t bytes_per_px = has_alpha ? 4 : 3;
   uint32_t y;
-  uint32_t bytes_per_px, line_size, image_size, bmp_stride, total_size;
+  const uint32_t line_size = bytes_per_px * width;
+  const uint32_t bmp_stride = (line_size + 3) & ~3;  // pad to 4
+  const uint32_t image_size = bmp_stride * height;
+  const uint32_t total_size = image_size + header_size;
   uint8_t bmp_header[BMP_HEADER_SIZE + BMP_HEADER_ALPHA_EXTRA_SIZE] = { 0 };
-  if (fout == NULL || buffer == NULL) return 0;
-  has_alpha = WebPIsAlphaMode(buffer->colorspace);
-  header_size = BMP_HEADER_SIZE + (has_alpha ? BMP_HEADER_ALPHA_EXTRA_SIZE : 0);
-  width = buffer->width;
-  height = buffer->height;
-  rgba = buffer->u.RGBA.rgba;
-  stride = buffer->u.RGBA.stride;
-  bytes_per_px = has_alpha ? 4 : 3;
-  line_size = bytes_per_px * width;
-  bmp_stride = (line_size + 3) & ~3;  // pad to 4
-  image_size = bmp_stride * height;
-  total_size = image_size + header_size;
-  if (rgba == NULL) return 0;
+  if (fout == NULL || buffer == NULL || rgba == NULL) return 0;
// bitmap file header // bitmap file header
PutLE16(bmp_header + 0, 0x4d42); // signature 'BM' PutLE16(bmp_header + 0, 0x4d42); // signature 'BM'
@ -385,14 +372,17 @@ int WebPWriteBMP(FILE* fout, const WebPDecBuffer* const buffer) {
#define TIFF_HEADER_SIZE (EXTRA_DATA_OFFSET + EXTRA_DATA_SIZE) #define TIFF_HEADER_SIZE (EXTRA_DATA_OFFSET + EXTRA_DATA_SIZE)
int WebPWriteTIFF(FILE* fout, const WebPDecBuffer* const buffer) { int WebPWriteTIFF(FILE* fout, const WebPDecBuffer* const buffer) {
-  int has_alpha;
-  uint32_t width, height;
-  uint8_t* rgba;
-  int stride;
-  uint8_t bytes_per_px = 0;
-  const uint8_t assoc_alpha = 0;
+  const int has_alpha = WebPIsAlphaMode(buffer->colorspace);
+  const uint32_t width = buffer->width;
+  const uint32_t height = buffer->height;
+  const uint8_t* rgba = buffer->u.RGBA.rgba;
+  const int stride = buffer->u.RGBA.stride;
+  const uint8_t bytes_per_px = has_alpha ? 4 : 3;
+  const uint8_t assoc_alpha =
+      WebPIsPremultipliedMode(buffer->colorspace) ? 1 : 2;
   // For non-alpha case, we omit tag 0x152 (ExtraSamples).
-  const uint8_t num_ifd_entries = 0;
+  const uint8_t num_ifd_entries = has_alpha ? NUM_IFD_ENTRIES
+                                            : NUM_IFD_ENTRIES - 1;
uint8_t tiff_header[TIFF_HEADER_SIZE] = { uint8_t tiff_header[TIFF_HEADER_SIZE] = {
0x49, 0x49, 0x2a, 0x00, // little endian signature 0x49, 0x49, 0x2a, 0x00, // little endian signature
8, 0, 0, 0, // offset to the unique IFD that follows 8, 0, 0, 0, // offset to the unique IFD that follows
@ -426,20 +416,7 @@ int WebPWriteTIFF(FILE* fout, const WebPDecBuffer* const buffer) {
}; };
uint32_t y; uint32_t y;
-  if (fout == NULL || buffer == NULL) return 0;
-  has_alpha = WebPIsAlphaMode(buffer->colorspace);
-  width = buffer->width;
-  height = buffer->height;
-  rgba = buffer->u.RGBA.rgba;
-  stride = buffer->u.RGBA.stride;
-  if (rgba == NULL) return 0;
-  // Update bytes_per_px, num_ifd_entries and assoc_alpha.
-  tiff_header[38] = tiff_header[102] = bytes_per_px = has_alpha ? 4 : 3;
-  tiff_header[8] = has_alpha ? NUM_IFD_ENTRIES : NUM_IFD_ENTRIES - 1;
-  tiff_header[186] = WebPIsPremultipliedMode(buffer->colorspace) ? 1 : 2;
+  if (fout == NULL || buffer == NULL || rgba == NULL) return 0;
// Fill placeholders in IFD: // Fill placeholders in IFD:
PutLE32(tiff_header + 10 + 8, width); PutLE32(tiff_header + 10 + 8, width);
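
Both sides of the WebPWriteBMP() hunk above compute the row stride as (line_size + 3) & ~3, rounding every row up to the 4-byte boundary the BMP format requires. A small worked check of that padding formula; the helper names are stand-ins:

    /* (line_size + 3) & ~3 rounds up to a multiple of 4:
     *   24-bit pixels, width 5 -> line_size 15 -> stride 16
     *   32-bit pixels, width 5 -> line_size 20 -> stride 20 (already aligned) */
    #include <assert.h>

    static unsigned int BmpStride(unsigned int line_size) {
      return (line_size + 3) & ~3u;
    }

    static void CheckBmpStride(void) {
      assert(BmpStride(15) == 16);
      assert(BmpStride(20) == 20);
    }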

View File

@ -89,11 +89,6 @@ int ImgIoUtilReadFile(const char* const file_name,
} }
fseek(in, 0, SEEK_END); fseek(in, 0, SEEK_END);
file_size = ftell(in); file_size = ftell(in);
if (file_size == (size_t)-1) {
fclose(in);
WFPRINTF(stderr, "error getting size of '%s'\n", (const W_CHAR*)file_name);
return 0;
}
fseek(in, 0, SEEK_SET); fseek(in, 0, SEEK_SET);
// we allocate one extra byte for the \0 terminator // we allocate one extra byte for the \0 terminator
file_data = (uint8_t*)WebPMalloc(file_size + 1); file_data = (uint8_t*)WebPMalloc(file_size + 1);
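
The check shown above (present on the main side, absent in v1.3.1-rc2) guards against ftell() failing: ftell() reports errors as -1L, which becomes (size_t)-1 once stored in the size_t file_size. A minimal sketch of the same pattern with the error handled while the value is still a long; GetFileSize is a stand-in name:

    #include <stddef.h>
    #include <stdio.h>

    static int GetFileSize(FILE* in, size_t* size) {
      long pos;
      if (fseek(in, 0, SEEK_END) != 0) return 0;
      pos = ftell(in);
      if (pos < 0) return 0;              /* ftell() failed */
      if (fseek(in, 0, SEEK_SET) != 0) return 0;
      *size = (size_t)pos;
      return 1;
    }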

View File

@ -206,18 +206,8 @@ struct my_error_mgr {
static void my_error_exit(j_common_ptr dinfo) { static void my_error_exit(j_common_ptr dinfo) {
struct my_error_mgr* myerr = (struct my_error_mgr*)dinfo->err; struct my_error_mgr* myerr = (struct my_error_mgr*)dinfo->err;
// The following code is disabled in fuzzing mode because:
// - the logs can be flooded due to invalid JPEG files
// - msg_code is wrongfully seen as uninitialized by msan when the libjpeg
// dependency is not built with sanitizers enabled
#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
const int msg_code = myerr->pub.msg_code;
fprintf(stderr, "libjpeg error: "); fprintf(stderr, "libjpeg error: ");
dinfo->err->output_message(dinfo); dinfo->err->output_message(dinfo);
if (msg_code == JERR_INPUT_EOF || msg_code == JERR_FILE_READ) {
fprintf(stderr, "`jpegtran -copy all` MAY be able to process this file.\n");
}
#endif
longjmp(myerr->setjmp_buffer, 1); longjmp(myerr->setjmp_buffer, 1);
} }
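
my_error_exit() above is the hook libjpeg calls on a fatal error; instead of letting the default handler exit(), it longjmps back to a setjmp point owned by the decoder. A minimal sketch of how such a handler is wired up, following libjpeg's documented pattern; TryDecompress is a stand-in and I/O plus header parsing are omitted:

    #include <setjmp.h>
    #include <stdio.h>
    #include <jpeglib.h>

    struct my_error_mgr {
      struct jpeg_error_mgr pub;
      jmp_buf setjmp_buffer;
    };

    static void my_error_exit(j_common_ptr dinfo) {
      struct my_error_mgr* const myerr = (struct my_error_mgr*)dinfo->err;
      longjmp(myerr->setjmp_buffer, 1);
    }

    static int TryDecompress(void) {
      struct jpeg_decompress_struct dinfo;
      struct my_error_mgr jerr;
      dinfo.err = jpeg_std_error(&jerr.pub);
      jerr.pub.error_exit = my_error_exit;
      if (setjmp(jerr.setjmp_buffer)) {   /* reached again via longjmp() on error */
        jpeg_destroy_decompress(&dinfo);
        return 0;
      }
      jpeg_create_decompress(&dinfo);
      /* ... jpeg_stdio_src() / jpeg_read_header() etc. would go here ... */
      jpeg_destroy_decompress(&dinfo);
      return 1;
    }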

View File

@ -139,8 +139,6 @@ static const struct {
{ "Raw profile type xmp", ProcessRawProfile, METADATA_OFFSET(xmp) }, { "Raw profile type xmp", ProcessRawProfile, METADATA_OFFSET(xmp) },
// Exiftool puts exif data in APP1 chunk, too. // Exiftool puts exif data in APP1 chunk, too.
{ "Raw profile type APP1", ProcessRawProfile, METADATA_OFFSET(exif) }, { "Raw profile type APP1", ProcessRawProfile, METADATA_OFFSET(exif) },
// ImageMagick uses lowercase app1.
{ "Raw profile type app1", ProcessRawProfile, METADATA_OFFSET(exif) },
// XMP Specification Part 3, Section 3 #PNG // XMP Specification Part 3, Section 3 #PNG
{ "XML:com.adobe.xmp", MetadataCopy, METADATA_OFFSET(xmp) }, { "XML:com.adobe.xmp", MetadataCopy, METADATA_OFFSET(xmp) },
{ NULL, NULL, 0 }, { NULL, NULL, 0 },
@ -161,20 +159,6 @@ static int ExtractMetadataFromPNG(png_structp png,
png_textp text = NULL; png_textp text = NULL;
const png_uint_32 num = png_get_text(png, info, &text, NULL); const png_uint_32 num = png_get_text(png, info, &text, NULL);
png_uint_32 i; png_uint_32 i;
#ifdef PNG_eXIf_SUPPORTED
// Look for an 'eXIf' tag. Preference is given to this tag as it's newer
// than the TextualData tags.
{
png_bytep exif;
png_uint_32 len;
if (png_get_eXIf_1(png, info, &len, &exif) == PNG_INFO_eXIf) {
if (!MetadataCopy((const char*)exif, len, &metadata->exif)) return 0;
}
}
#endif // PNG_eXIf_SUPPORTED
// Look for EXIF / XMP metadata. // Look for EXIF / XMP metadata.
for (i = 0; i < num; ++i, ++text) { for (i = 0; i < num; ++i, ++text) {
int j; int j;
@ -208,7 +192,6 @@ static int ExtractMetadataFromPNG(png_structp png,
} }
} }
} }
#ifdef PNG_iCCP_SUPPORTED
// Look for an ICC profile. // Look for an ICC profile.
{ {
png_charp name; png_charp name;
@ -225,7 +208,6 @@ static int ExtractMetadataFromPNG(png_structp png,
if (!MetadataCopy((const char*)profile, len, &metadata->iccp)) return 0; if (!MetadataCopy((const char*)profile, len, &metadata->iccp)) return 0;
} }
} }
#endif // PNG_iCCP_SUPPORTED
} }
return 1; return 1;
} }

View File

@ -41,7 +41,6 @@ readonly TARGETDIR="${TOPDIR}/WebP.framework"
readonly DECTARGETDIR="${TOPDIR}/WebPDecoder.framework" readonly DECTARGETDIR="${TOPDIR}/WebPDecoder.framework"
readonly MUXTARGETDIR="${TOPDIR}/WebPMux.framework" readonly MUXTARGETDIR="${TOPDIR}/WebPMux.framework"
readonly DEMUXTARGETDIR="${TOPDIR}/WebPDemux.framework" readonly DEMUXTARGETDIR="${TOPDIR}/WebPDemux.framework"
readonly SHARPYUVTARGETDIR="${TOPDIR}/SharpYuv.framework"
readonly DEVELOPER=$(xcode-select --print-path) readonly DEVELOPER=$(xcode-select --print-path)
readonly PLATFORMSROOT="${DEVELOPER}/Platforms" readonly PLATFORMSROOT="${DEVELOPER}/Platforms"
readonly LIPO=$(xcrun -sdk iphoneos${SDK} -find lipo) readonly LIPO=$(xcrun -sdk iphoneos${SDK} -find lipo)
@ -53,7 +52,7 @@ DEMUXLIBLIST=''
if [[ -z "${SDK}" ]]; then if [[ -z "${SDK}" ]]; then
echo "iOS SDK not available" echo "iOS SDK not available"
exit 1 exit 1
elif [[ ${SDK%%.*} -gt 8 && "${XCODE%%.*}" -lt 16 ]]; then elif [[ ${SDK%%.*} -gt 8 ]]; then
EXTRA_CFLAGS="-fembed-bitcode" EXTRA_CFLAGS="-fembed-bitcode"
elif [[ ${SDK%%.*} -le 6 ]]; then elif [[ ${SDK%%.*} -le 6 ]]; then
echo "You need iOS SDK version 6.0 or above" echo "You need iOS SDK version 6.0 or above"
@ -64,8 +63,7 @@ echo "Xcode Version: ${XCODE}"
echo "iOS SDK Version: ${SDK}" echo "iOS SDK Version: ${SDK}"
if [[ -e "${BUILDDIR}" || -e "${TARGETDIR}" || -e "${DECTARGETDIR}" \ if [[ -e "${BUILDDIR}" || -e "${TARGETDIR}" || -e "${DECTARGETDIR}" \
|| -e "${MUXTARGETDIR}" || -e "${DEMUXTARGETDIR}" \ || -e "${MUXTARGETDIR}" || -e "${DEMUXTARGETDIR}" ]]; then
|| -e "${SHARPYUVTARGETDIR}" ]]; then
cat << EOF cat << EOF
WARNING: The following directories will be deleted: WARNING: The following directories will be deleted:
WARNING: ${BUILDDIR} WARNING: ${BUILDDIR}
@ -73,16 +71,14 @@ WARNING: ${TARGETDIR}
WARNING: ${DECTARGETDIR} WARNING: ${DECTARGETDIR}
WARNING: ${MUXTARGETDIR} WARNING: ${MUXTARGETDIR}
WARNING: ${DEMUXTARGETDIR} WARNING: ${DEMUXTARGETDIR}
WARNING: ${SHARPYUVTARGETDIR}
WARNING: The build will continue in 5 seconds... WARNING: The build will continue in 5 seconds...
EOF EOF
sleep 5 sleep 5
fi fi
rm -rf ${BUILDDIR} ${TARGETDIR} ${DECTARGETDIR} \ rm -rf ${BUILDDIR} ${TARGETDIR} ${DECTARGETDIR} \
${MUXTARGETDIR} ${DEMUXTARGETDIR} ${SHARPYUVTARGETDIR} ${MUXTARGETDIR} ${DEMUXTARGETDIR}
mkdir -p ${BUILDDIR} ${TARGETDIR}/Headers/ ${DECTARGETDIR}/Headers/ \ mkdir -p ${BUILDDIR} ${TARGETDIR}/Headers/ ${DECTARGETDIR}/Headers/ \
${MUXTARGETDIR}/Headers/ ${DEMUXTARGETDIR}/Headers/ \ ${MUXTARGETDIR}/Headers/ ${DEMUXTARGETDIR}/Headers/
${SHARPYUVTARGETDIR}/Headers/
if [[ ! -e ${SRCDIR}/configure ]]; then if [[ ! -e ${SRCDIR}/configure ]]; then
if ! (cd ${SRCDIR} && sh autogen.sh); then if ! (cd ${SRCDIR} && sh autogen.sh); then
@ -138,14 +134,13 @@ for PLATFORM in ${PLATFORMS}; do
set +x set +x
# Build only the libraries, skip the examples. # Build only the libraries, skip the examples.
make V=0 -C sharpyuv install make V=0 -C sharpyuv
make V=0 -C src install make V=0 -C src install
LIBLIST+=" ${ROOTDIR}/lib/libwebp.a" LIBLIST+=" ${ROOTDIR}/lib/libwebp.a"
DECLIBLIST+=" ${ROOTDIR}/lib/libwebpdecoder.a" DECLIBLIST+=" ${ROOTDIR}/lib/libwebpdecoder.a"
MUXLIBLIST+=" ${ROOTDIR}/lib/libwebpmux.a" MUXLIBLIST+=" ${ROOTDIR}/lib/libwebpmux.a"
DEMUXLIBLIST+=" ${ROOTDIR}/lib/libwebpdemux.a" DEMUXLIBLIST+=" ${ROOTDIR}/lib/libwebpdemux.a"
SHARPYUVLIBLIST+=" ${ROOTDIR}/lib/libsharpyuv.a"
make clean make clean
@ -170,9 +165,4 @@ cp -a ${SRCDIR}/src/webp/{decode,types,mux_types,demux}.h \
${DEMUXTARGETDIR}/Headers/ ${DEMUXTARGETDIR}/Headers/
${LIPO} -create ${DEMUXLIBLIST} -output ${DEMUXTARGETDIR}/WebPDemux ${LIPO} -create ${DEMUXLIBLIST} -output ${DEMUXTARGETDIR}/WebPDemux
echo "SHARPYUVLIBLIST = ${SHARPYUVLIBLIST}"
cp -a ${SRCDIR}/sharpyuv/{sharpyuv,sharpyuv_csp}.h \
${SHARPYUVTARGETDIR}/Headers/
${LIPO} -create ${SHARPYUVLIBLIST} -output ${SHARPYUVTARGETDIR}/SharpYuv
echo "SUCCESS" echo "SUCCESS"

View File

@ -37,13 +37,13 @@ else
endif endif
# SDL flags: use sdl-config if it exists # SDL flags: use sdl-config if it exists
SDL_CONFIG = $(shell sdl2-config --version 2> /dev/null) SDL_CONFIG = $(shell sdl-config --version 2> /dev/null)
ifneq ($(SDL_CONFIG),) ifneq ($(SDL_CONFIG),)
SDL_LIBS = $(shell sdl2-config --libs) SDL_LIBS = $(shell sdl-config --libs)
SDL_FLAGS = $(shell sdl2-config --cflags) SDL_FLAGS = $(shell sdl-config --cflags)
else else
# use best-guess # use best-guess
SDL_LIBS = -lSDL2 SDL_LIBS = -lSDL
SDL_FLAGS = SDL_FLAGS =
endif endif
@ -276,7 +276,6 @@ UTILS_DEC_OBJS = \
src/utils/color_cache_utils.o \ src/utils/color_cache_utils.o \
src/utils/filters_utils.o \ src/utils/filters_utils.o \
src/utils/huffman_utils.o \ src/utils/huffman_utils.o \
src/utils/palette.o \
src/utils/quant_levels_dec_utils.o \ src/utils/quant_levels_dec_utils.o \
src/utils/random_utils.o \ src/utils/random_utils.o \
src/utils/rescaler_utils.o \ src/utils/rescaler_utils.o \
@ -291,7 +290,6 @@ UTILS_ENC_OBJS = \
EXTRA_OBJS = \ EXTRA_OBJS = \
extras/extras.o \ extras/extras.o \
extras/quality_estimate.o \ extras/quality_estimate.o \
extras/sharpyuv_risk_table.o \
LIBWEBPDECODER_OBJS = $(DEC_OBJS) $(DSP_DEC_OBJS) $(UTILS_DEC_OBJS) LIBWEBPDECODER_OBJS = $(DEC_OBJS) $(DSP_DEC_OBJS) $(UTILS_DEC_OBJS)
LIBWEBP_OBJS = $(LIBWEBPDECODER_OBJS) $(ENC_OBJS) \ LIBWEBP_OBJS = $(LIBWEBPDECODER_OBJS) $(ENC_OBJS) \
@ -345,7 +343,6 @@ HDRS = \
src/utils/filters_utils.h \ src/utils/filters_utils.h \
src/utils/huffman_utils.h \ src/utils/huffman_utils.h \
src/utils/huffman_encode_utils.h \ src/utils/huffman_encode_utils.h \
src/utils/palette.h \
src/utils/quant_levels_utils.h \ src/utils/quant_levels_utils.h \
src/utils/quant_levels_dec_utils.h \ src/utils/quant_levels_dec_utils.h \
src/utils/random_utils.h \ src/utils/random_utils.h \

View File

@ -1,5 +1,5 @@
.\" Hey, EMACS: -*- nroff -*- .\" Hey, EMACS: -*- nroff -*-
.TH CWEBP 1 "September 17, 2024" .TH CWEBP 1 "March 17, 2022"
.SH NAME .SH NAME
cwebp \- compress an image file to a WebP file cwebp \- compress an image file to a WebP file
.SH SYNOPSIS .SH SYNOPSIS
@ -135,9 +135,7 @@ are used, \fB\-size\fP value will prevail.
Set a maximum number of passes to use during the dichotomy used by Set a maximum number of passes to use during the dichotomy used by
options \fB\-size\fP or \fB\-psnr\fP. Maximum value is 10, default is 1. options \fB\-size\fP or \fB\-psnr\fP. Maximum value is 10, default is 1.
If options \fB\-size\fP or \fB\-psnr\fP were used, but \fB\-pass\fP wasn't If options \fB\-size\fP or \fB\-psnr\fP were used, but \fB\-pass\fP wasn't
specified, a default value of '6' passes will be used. If \fB\-pass\fP is specified, a default value of '6' passes will be used.
specified, but neither \fB-size\fP nor \fB-psnr\fP are, a target PSNR of 40dB
will be used.
.TP .TP
.BI \-qrange " int int .BI \-qrange " int int
Specifies the permissible interval for the quality factor. This is particularly Specifies the permissible interval for the quality factor. This is particularly
@ -180,8 +178,8 @@ Disable strong filtering (if filtering is being used thanks to the
\fB\-f\fP option) and use simple filtering instead. \fB\-f\fP option) and use simple filtering instead.
.TP .TP
.B \-sharp_yuv .B \-sharp_yuv
Use more accurate and sharper RGB->YUV conversion. Note that this process is Use more accurate and sharper RGB->YUV conversion if needed. Note that this
slower than the default 'fast' RGB->YUV conversion. process is slower than the default 'fast' RGB->YUV conversion.
.TP .TP
.BI \-sns " int .BI \-sns " int
Specify the amplitude of the spatial noise shaping. Spatial noise shaping Specify the amplitude of the spatial noise shaping. Spatial noise shaping
@ -204,8 +202,7 @@ In the VP8 format, the so\-called control partition has a limit of 512k and
is used to store the following information: whether the macroblock is skipped, is used to store the following information: whether the macroblock is skipped,
which segment it belongs to, whether it is coded as intra 4x4 or intra 16x16 which segment it belongs to, whether it is coded as intra 4x4 or intra 16x16
mode, and finally the prediction modes to use for each of the sub\-blocks. mode, and finally the prediction modes to use for each of the sub\-blocks.
For a very large image, 512k only leaves room for a few bits per 16x16 For a very large image, 512k only leaves room to few bits per 16x16 macroblock.
macroblock.
The absolute minimum is 4 bits per macroblock. Skip, segment, and mode The absolute minimum is 4 bits per macroblock. Skip, segment, and mode
information can use up almost all these 4 bits (although the case is unlikely), information can use up almost all these 4 bits (although the case is unlikely),
which is problematic for very large images. The partition_limit factor controls which is problematic for very large images. The partition_limit factor controls
@ -214,8 +211,7 @@ useful in case the 512k limit is reached and the following message is displayed:
\fIError code: 6 (PARTITION0_OVERFLOW: Partition #0 is too big to fit 512k)\fP. \fIError code: 6 (PARTITION0_OVERFLOW: Partition #0 is too big to fit 512k)\fP.
If using \fB\-partition_limit\fP is not enough to meet the 512k constraint, one If using \fB\-partition_limit\fP is not enough to meet the 512k constraint, one
should use fewer segments in order to save more header bits per macroblock. should use fewer segments in order to save more header bits per macroblock.
See the \fB\-segments\fP option. Note the \fB-m\fP and \fB-q\fP options also See the \fB\-segments\fP option.
influence the encoder's decisions and ability to hit this limit.
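As a sketch of the workaround described above (file name hypothetical), a large image that hits PARTITION0_OVERFLOW might be retried with a stricter partition limit and fewer segments:
  cwebp -q 90 -partition_limit 60 -segments 2 big_image.png -o big_image.webp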
.SS LOGGING OPTIONS .SS LOGGING OPTIONS
These options control the level of output: These options control the level of output:
@ -299,12 +295,12 @@ Note: each input format may not support all combinations.
.B \-noasm .B \-noasm
Disable all assembly optimizations. Disable all assembly optimizations.
.SH EXIT STATUS .SH BUGS
If there were no problems during execution, \fBcwebp\fP exits with the value of Please report all bugs to the issue tracker:
the C constant \fBEXIT_SUCCESS\fP. This is usually zero. https://bugs.chromium.org/p/webp
.PP .br
If an error occurs, \fBcwebp\fP exits with the value of the C constant Patches welcome! See this page to get started:
\fBEXIT_FAILURE\fP. This is usually one. https://www.webmproject.org/code/contribute/submitting\-patches/
.SH EXAMPLES .SH EXAMPLES
cwebp \-q 50 -lossless picture.png \-o picture_lossless.webp cwebp \-q 50 -lossless picture.png \-o picture_lossless.webp
@ -324,13 +320,6 @@ https://chromium.googlesource.com/webm/libwebp
This manual page was written by Pascal Massimino <pascal.massimino@gmail.com>, This manual page was written by Pascal Massimino <pascal.massimino@gmail.com>,
for the Debian project (and may be used by others). for the Debian project (and may be used by others).
.SH REPORTING BUGS
Please report all bugs to the issue tracker:
https://issues.webmproject.org
.br
Patches welcome! See this page to get started:
https://www.webmproject.org/code/contribute/submitting\-patches/
.SH SEE ALSO .SH SEE ALSO
.BR dwebp (1), .BR dwebp (1),
.BR gif2webp (1) .BR gif2webp (1)

View File

@ -1,5 +1,5 @@
.\" Hey, EMACS: -*- nroff -*- .\" Hey, EMACS: -*- nroff -*-
.TH DWEBP 1 "July 18, 2024" .TH DWEBP 1 "November 17, 2021"
.SH NAME .SH NAME
dwebp \- decompress a WebP file to an image file dwebp \- decompress a WebP file to an image file
.SH SYNOPSIS .SH SYNOPSIS
@ -108,12 +108,12 @@ Print extra information (decoding time in particular).
.B \-noasm .B \-noasm
Disable all assembly optimizations. Disable all assembly optimizations.
.SH EXIT STATUS .SH BUGS
If there were no problems during execution, \fBdwebp\fP exits with the value of Please report all bugs to the issue tracker:
the C constant \fBEXIT_SUCCESS\fP. This is usually zero. https://bugs.chromium.org/p/webp
.PP .br
If an error occurs, \fBdwebp\fP exits with the value of the C constant Patches welcome! See this page to get started:
\fBEXIT_FAILURE\fP. This is usually one. https://www.webmproject.org/code/contribute/submitting\-patches/
.SH EXAMPLES .SH EXAMPLES
dwebp picture.webp \-o output.png dwebp picture.webp \-o output.png
@ -133,13 +133,6 @@ https://chromium.googlesource.com/webm/libwebp
This manual page was written by Pascal Massimino <pascal.massimino@gmail.com>, This manual page was written by Pascal Massimino <pascal.massimino@gmail.com>,
for the Debian project (and may be used by others). for the Debian project (and may be used by others).
.SH REPORTING BUGS
Please report all bugs to the issue tracker:
https://issues.webmproject.org
.br
Patches welcome! See this page to get started:
https://www.webmproject.org/code/contribute/submitting\-patches/
.SH SEE ALSO .SH SEE ALSO
.BR cwebp (1), .BR cwebp (1),
.BR gif2webp (1), .BR gif2webp (1),

View File

@ -1,5 +1,5 @@
.\" Hey, EMACS: -*- nroff -*- .\" Hey, EMACS: -*- nroff -*-
.TH GIF2WEBP 1 "November 4, 2024" .TH GIF2WEBP 1 "November 17, 2021"
.SH NAME .SH NAME
gif2webp \- Convert a GIF image to WebP gif2webp \- Convert a GIF image to WebP
.SH SYNOPSIS .SH SYNOPSIS
@ -39,18 +39,6 @@ Encode the image using lossy compression.
Mixed compression mode: optimize compression of the image by picking either Mixed compression mode: optimize compression of the image by picking either
lossy or lossless compression for each frame heuristically. lossy or lossless compression for each frame heuristically.
.TP .TP
.BI \-near_lossless " int
Specify the level of near\-lossless image preprocessing. This option adjusts
pixel values to help compressibility, but has minimal impact on the visual
quality. It triggers lossless compression mode automatically. The range is 0
(maximum preprocessing) to 100 (no preprocessing, the default). The typical
value is around 60. Note that lossy with \fB\-q 100\fP can at times yield
better results.
.TP
.B \-sharp_yuv
Use more accurate and sharper RGB->YUV conversion. Note that this process is
slower than the default 'fast' RGB->YUV conversion.
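Hedged examples of the two options documented above (file names hypothetical); note \-sharp_yuv only matters when lossy encoding is selected:
  gif2webp -near_lossless 60 animation.gif -o animation_nl.webp
  gif2webp -lossy -sharp_yuv animation.gif -o animation_lossy.webp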
.TP
.BI \-q " float .BI \-q " float
Specify the compression factor for RGB channels between 0 and 100. The default Specify the compression factor for RGB channels between 0 and 100. The default
is 75. is 75.
@ -138,12 +126,12 @@ Print extra information.
.B \-quiet .B \-quiet
Do not print anything. Do not print anything.
.SH EXIT STATUS .SH BUGS
If there were no problems during execution, \fBgif2webp\fP exits with the value Please report all bugs to the issue tracker:
of the C constant \fBEXIT_SUCCESS\fP. This is usually zero. https://bugs.chromium.org/p/webp
.PP .br
If an error occurs, \fBgif2webp\fP exits with the value of the C constant Patches welcome! See this page to get started:
\fBEXIT_FAILURE\fP. This is usually one. https://www.webmproject.org/code/contribute/submitting\-patches/
.SH EXAMPLES .SH EXAMPLES
gif2webp picture.gif \-o picture.webp gif2webp picture.gif \-o picture.webp
@ -167,13 +155,6 @@ https://chromium.googlesource.com/webm/libwebp
This manual page was written by Urvang Joshi <urvang@google.com>, for the This manual page was written by Urvang Joshi <urvang@google.com>, for the
Debian project (and may be used by others). Debian project (and may be used by others).
.SH REPORTING BUGS
Please report all bugs to the issue tracker:
https://issues.webmproject.org
.br
Patches welcome! See this page to get started:
https://www.webmproject.org/code/contribute/submitting\-patches/
.SH SEE ALSO .SH SEE ALSO
.BR cwebp (1), .BR cwebp (1),
.BR dwebp (1), .BR dwebp (1),

View File

@ -1,5 +1,5 @@
.\" Hey, EMACS: -*- nroff -*- .\" Hey, EMACS: -*- nroff -*-
.TH IMG2WEBP 1 "November 26, 2024" .TH IMG2WEBP 1 "March 17, 2023"
.SH NAME .SH NAME
img2webp \- create animated WebP file from a sequence of input images. img2webp \- create animated WebP file from a sequence of input images.
.SH SYNOPSIS .SH SYNOPSIS
@ -53,8 +53,8 @@ value is around 60. Note that lossy with \fB\-q 100\fP can at times yield
better results. better results.
.TP .TP
.B \-sharp_yuv .B \-sharp_yuv
Use more accurate and sharper RGB->YUV conversion. Note that this process is Use more accurate and sharper RGB->YUV conversion if needed. Note that this
slower than the default 'fast' RGB->YUV conversion. process is slower than the default 'fast' RGB->YUV conversion.
.TP .TP
.BI \-loop " int .BI \-loop " int
Specifies the number of times the animation should loop. Using '0' Specifies the number of times the animation should loop. Using '0'
@ -88,27 +88,18 @@ Specify the compression factor between 0 and 100. The default is 75.
Specify the compression method to use. This parameter controls the Specify the compression method to use. This parameter controls the
trade-off between encoding speed and the compressed file size and quality. trade-off between encoding speed and the compressed file size and quality.
Possible values range from 0 to 6. Default value is 4. Possible values range from 0 to 6. Default value is 4.
When higher values are used, the encoder will spend more time inspecting
additional encoding possibilities and decide on the quality gain.
Lower value can result in faster processing time at the expense of
larger file size and lower compression quality.
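To make the speed/size trade-off concrete (file names hypothetical), a slow-but-small encode of two frames might be:
  img2webp -loop 0 -lossy -q 80 -m 6 frame1.png frame2.png -o anim.webp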
.TP
.B \-exact, \-noexact
Preserve or alter RGB values in transparent area. The default is
\fB-noexact\fP, to help compressibility. Note \fB\-noexact\fP may cause
artifacts in frames compressed with \fB\-lossy\fP.
.SH EXIT STATUS
If there were no problems during execution, \fBimg2webp\fP exits with the value
of the C constant \fBEXIT_SUCCESS\fP. This is usually zero.
.PP
If an error occurs, \fBimg2webp\fP exits with the value of the C constant
\fBEXIT_FAILURE\fP. This is usually one.
.SH EXAMPLE .SH EXAMPLE
img2webp -loop 2 in0.png -lossy in1.jpg -d 80 in2.tiff -o out.webp img2webp -loop 2 in0.png -lossy in1.jpg -d 80 in2.tiff -o out.webp
.br .br
.SH BUGS
Please report all bugs to the issue tracker:
https://bugs.chromium.org/p/webp
.br
Patches welcome! See this page to get started:
https://www.webmproject.org/code/contribute/submitting\-patches/
.SH AUTHORS .SH AUTHORS
\fBimg2webp\fP is a part of libwebp and was written by the WebP team. \fBimg2webp\fP is a part of libwebp and was written by the WebP team.
.br .br
@ -118,13 +109,6 @@ https://chromium.googlesource.com/webm/libwebp
This manual page was written by Pascal Massimino <pascal.massimino@gmail.com>, This manual page was written by Pascal Massimino <pascal.massimino@gmail.com>,
for the Debian project (and may be used by others). for the Debian project (and may be used by others).
.SH REPORTING BUGS
Please report all bugs to the issue tracker:
https://issues.webmproject.org
.br
Patches welcome! See this page to get started:
https://www.webmproject.org/code/contribute/submitting\-patches/
.SH SEE ALSO .SH SEE ALSO
.BR webpmux (1), .BR webpmux (1),
.BR gif2webp (1) .BR gif2webp (1)

View File

@ -1,5 +1,5 @@
.\" Hey, EMACS: -*- nroff -*- .\" Hey, EMACS: -*- nroff -*-
.TH VWEBP 1 "July 18, 2024" .TH VWEBP 1 "November 17, 2021"
.SH NAME .SH NAME
vwebp \- decompress a WebP file and display it in a window vwebp \- decompress a WebP file and display it in a window
.SH SYNOPSIS .SH SYNOPSIS
@ -72,12 +72,12 @@ Disable blending and disposal process, for debugging purposes.
.B 'q' / 'Q' / ESC .B 'q' / 'Q' / ESC
Quit. Quit.
.SH EXIT STATUS .SH BUGS
If there were no problems during execution, \fBvwebp\fP exits with the value of Please report all bugs to the issue tracker:
the C constant \fBEXIT_SUCCESS\fP. This is usually zero. https://bugs.chromium.org/p/webp
.PP .br
If an error occurs, \fBvwebp\fP exits with the value of the C constant Patches welcome! See this page to get started:
\fBEXIT_FAILURE\fP. This is usually one. https://www.webmproject.org/code/contribute/submitting\-patches/
.SH EXAMPLES .SH EXAMPLES
vwebp picture.webp vwebp picture.webp
@ -94,13 +94,6 @@ https://chromium.googlesource.com/webm/libwebp
.PP .PP
This manual page was written for the Debian project (and may be used by others). This manual page was written for the Debian project (and may be used by others).
.SH REPORTING BUGS
Please report all bugs to the issue tracker:
https://issues.webmproject.org
.br
Patches welcome! See this page to get started:
https://www.webmproject.org/code/contribute/submitting\-patches/
.SH SEE ALSO .SH SEE ALSO
.BR dwebp (1) .BR dwebp (1)
.br .br

View File

@ -1,5 +1,5 @@
.\" Hey, EMACS: -*- nroff -*- .\" Hey, EMACS: -*- nroff -*-
.TH WEBPINFO 1 "July 18, 2024" .TH WEBPINFO 1 "November 17, 2021"
.SH NAME .SH NAME
webpinfo \- print out the chunk level structure of WebP files webpinfo \- print out the chunk level structure of WebP files
along with basic integrity checks. along with basic integrity checks.
@ -47,12 +47,12 @@ Detailed usage instructions.
Input files in WebP format. Input files must come last, following Input files in WebP format. Input files must come last, following
options (if any). There can be multiple input files. options (if any). There can be multiple input files.
.SH EXIT STATUS .SH BUGS
If there were no problems during execution, \fBwebpinfo\fP exits with the value Please report all bugs to the issue tracker:
of the C constant \fBEXIT_SUCCESS\fP. This is usually zero. https://bugs.chromium.org/p/webp
.PP .br
If an error occurs, \fBwebpinfo\fP exits with the value of the C constant Patches welcome! See this page to get started:
\fBEXIT_FAILURE\fP. This is usually one. https://www.webmproject.org/code/contribute/submitting\-patches/
.SH EXAMPLES .SH EXAMPLES
.br .br
@ -73,13 +73,6 @@ https://chromium.googlesource.com/webm/libwebp
This manual page was written by Hui Su <huisu@google.com>, This manual page was written by Hui Su <huisu@google.com>,
for the Debian project (and may be used by others). for the Debian project (and may be used by others).
.SH REPORTING BUGS
Please report all bugs to the issue tracker:
https://issues.webmproject.org
.br
Patches welcome! See this page to get started:
https://www.webmproject.org/code/contribute/submitting\-patches/
.SH SEE ALSO .SH SEE ALSO
.BR webpmux (1) .BR webpmux (1)
.br .br

View File

@ -1,5 +1,5 @@
.\" Hey, EMACS: -*- nroff -*- .\" Hey, EMACS: -*- nroff -*-
.TH WEBPMUX 1 "July 18, 2024" .TH WEBPMUX 1 "November 17, 2021"
.SH NAME .SH NAME
webpmux \- create animated WebP files from non\-animated WebP images, extract webpmux \- create animated WebP files from non\-animated WebP images, extract
frames from animated WebP images, and manage XMP/EXIF metadata and ICC profile. frames from animated WebP images, and manage XMP/EXIF metadata and ICC profile.
@ -186,12 +186,12 @@ Output file in WebP format.
.TP .TP
The nature of EXIF, XMP and ICC data is not checked and is assumed to be valid. The nature of EXIF, XMP and ICC data is not checked and is assumed to be valid.
.SH EXIT STATUS .SH BUGS
If there were no problems during execution, \fBwebpmux\fP exits with the value Please report all bugs to the issue tracker:
of the C constant \fBEXIT_SUCCESS\fP. This is usually zero. https://bugs.chromium.org/p/webp
.PP .br
If an error occurs, \fBwebpmux\fP exits with the value of the C constant Patches welcome! See this page to get started:
\fBEXIT_FAILURE\fP. This is usually one. https://www.webmproject.org/code/contribute/submitting\-patches/
.SH EXAMPLES .SH EXAMPLES
.P .P
@ -262,13 +262,6 @@ https://chromium.googlesource.com/webm/libwebp
This manual page was written by Vikas Arora <vikaas.arora@gmail.com>, This manual page was written by Vikas Arora <vikaas.arora@gmail.com>,
for the Debian project (and may be used by others). for the Debian project (and may be used by others).
.SH REPORTING BUGS
Please report all bugs to the issue tracker:
https://issues.webmproject.org
.br
Patches welcome! See this page to get started:
https://www.webmproject.org/code/contribute/submitting\-patches/
.SH SEE ALSO .SH SEE ALSO
.BR cwebp (1), .BR cwebp (1),
.BR dwebp (1), .BR dwebp (1),

View File

@ -33,7 +33,7 @@ libsharpyuv_la_SOURCES += sharpyuv_gamma.c sharpyuv_gamma.h
libsharpyuv_la_SOURCES += sharpyuv.c sharpyuv.h libsharpyuv_la_SOURCES += sharpyuv.c sharpyuv.h
libsharpyuv_la_CPPFLAGS = $(AM_CPPFLAGS) libsharpyuv_la_CPPFLAGS = $(AM_CPPFLAGS)
libsharpyuv_la_LDFLAGS = -no-undefined -version-info 1:1:1 -lm libsharpyuv_la_LDFLAGS = -no-undefined -version-info 0:1:0 -lm
libsharpyuv_la_LIBADD = libsharpyuv_la_LIBADD =
libsharpyuv_la_LIBADD += libsharpyuv_sse2.la libsharpyuv_la_LIBADD += libsharpyuv_sse2.la
libsharpyuv_la_LIBADD += libsharpyuv_neon.la libsharpyuv_la_LIBADD += libsharpyuv_neon.la

View File

@ -6,8 +6,8 @@
LANGUAGE LANG_ENGLISH, SUBLANG_ENGLISH_US LANGUAGE LANG_ENGLISH, SUBLANG_ENGLISH_US
VS_VERSION_INFO VERSIONINFO VS_VERSION_INFO VERSIONINFO
FILEVERSION 0,0,4,1 FILEVERSION 0,0,2,1
PRODUCTVERSION 0,0,4,1 PRODUCTVERSION 0,0,2,1
FILEFLAGSMASK 0x3fL FILEFLAGSMASK 0x3fL
#ifdef _DEBUG #ifdef _DEBUG
FILEFLAGS 0x1L FILEFLAGS 0x1L
@ -24,12 +24,12 @@ BEGIN
BEGIN BEGIN
VALUE "CompanyName", "Google, Inc." VALUE "CompanyName", "Google, Inc."
VALUE "FileDescription", "libsharpyuv DLL" VALUE "FileDescription", "libsharpyuv DLL"
VALUE "FileVersion", "0.4.1" VALUE "FileVersion", "0.2.1"
VALUE "InternalName", "libsharpyuv.dll" VALUE "InternalName", "libsharpyuv.dll"
VALUE "LegalCopyright", "Copyright (C) 2024" VALUE "LegalCopyright", "Copyright (C) 2023"
VALUE "OriginalFilename", "libsharpyuv.dll" VALUE "OriginalFilename", "libsharpyuv.dll"
VALUE "ProductName", "SharpYuv Library" VALUE "ProductName", "SharpYuv Library"
VALUE "ProductVersion", "0.4.1" VALUE "ProductVersion", "0.2.1"
END END
END END
BLOCK "VarFileInfo" BLOCK "VarFileInfo"

View File

@ -75,48 +75,41 @@ static int RGBToGray(int64_t r, int64_t g, int64_t b) {
} }
static uint32_t ScaleDown(uint16_t a, uint16_t b, uint16_t c, uint16_t d, static uint32_t ScaleDown(uint16_t a, uint16_t b, uint16_t c, uint16_t d,
int rgb_bit_depth, int rgb_bit_depth) {
SharpYuvTransferFunctionType transfer_type) {
const int bit_depth = rgb_bit_depth + GetPrecisionShift(rgb_bit_depth); const int bit_depth = rgb_bit_depth + GetPrecisionShift(rgb_bit_depth);
const uint32_t A = SharpYuvGammaToLinear(a, bit_depth, transfer_type); const uint32_t A = SharpYuvGammaToLinear(a, bit_depth);
const uint32_t B = SharpYuvGammaToLinear(b, bit_depth, transfer_type); const uint32_t B = SharpYuvGammaToLinear(b, bit_depth);
const uint32_t C = SharpYuvGammaToLinear(c, bit_depth, transfer_type); const uint32_t C = SharpYuvGammaToLinear(c, bit_depth);
const uint32_t D = SharpYuvGammaToLinear(d, bit_depth, transfer_type); const uint32_t D = SharpYuvGammaToLinear(d, bit_depth);
return SharpYuvLinearToGamma((A + B + C + D + 2) >> 2, bit_depth, return SharpYuvLinearToGamma((A + B + C + D + 2) >> 2, bit_depth);
transfer_type);
} }
static WEBP_INLINE void UpdateW(const fixed_y_t* src, fixed_y_t* dst, int w, static WEBP_INLINE void UpdateW(const fixed_y_t* src, fixed_y_t* dst, int w,
int rgb_bit_depth, int rgb_bit_depth) {
SharpYuvTransferFunctionType transfer_type) {
const int bit_depth = rgb_bit_depth + GetPrecisionShift(rgb_bit_depth); const int bit_depth = rgb_bit_depth + GetPrecisionShift(rgb_bit_depth);
int i = 0; int i;
do { for (i = 0; i < w; ++i) {
const uint32_t R = const uint32_t R = SharpYuvGammaToLinear(src[0 * w + i], bit_depth);
SharpYuvGammaToLinear(src[0 * w + i], bit_depth, transfer_type); const uint32_t G = SharpYuvGammaToLinear(src[1 * w + i], bit_depth);
const uint32_t G = const uint32_t B = SharpYuvGammaToLinear(src[2 * w + i], bit_depth);
SharpYuvGammaToLinear(src[1 * w + i], bit_depth, transfer_type);
const uint32_t B =
SharpYuvGammaToLinear(src[2 * w + i], bit_depth, transfer_type);
const uint32_t Y = RGBToGray(R, G, B); const uint32_t Y = RGBToGray(R, G, B);
dst[i] = (fixed_y_t)SharpYuvLinearToGamma(Y, bit_depth, transfer_type); dst[i] = (fixed_y_t)SharpYuvLinearToGamma(Y, bit_depth);
} while (++i < w); }
} }
static void UpdateChroma(const fixed_y_t* src1, const fixed_y_t* src2, static void UpdateChroma(const fixed_y_t* src1, const fixed_y_t* src2,
fixed_t* dst, int uv_w, int rgb_bit_depth, fixed_t* dst, int uv_w, int rgb_bit_depth) {
SharpYuvTransferFunctionType transfer_type) { int i;
int i = 0; for (i = 0; i < uv_w; ++i) {
do {
const int r = const int r =
ScaleDown(src1[0 * uv_w + 0], src1[0 * uv_w + 1], src2[0 * uv_w + 0], ScaleDown(src1[0 * uv_w + 0], src1[0 * uv_w + 1], src2[0 * uv_w + 0],
src2[0 * uv_w + 1], rgb_bit_depth, transfer_type); src2[0 * uv_w + 1], rgb_bit_depth);
const int g = const int g =
ScaleDown(src1[2 * uv_w + 0], src1[2 * uv_w + 1], src2[2 * uv_w + 0], ScaleDown(src1[2 * uv_w + 0], src1[2 * uv_w + 1], src2[2 * uv_w + 0],
src2[2 * uv_w + 1], rgb_bit_depth, transfer_type); src2[2 * uv_w + 1], rgb_bit_depth);
const int b = const int b =
ScaleDown(src1[4 * uv_w + 0], src1[4 * uv_w + 1], src2[4 * uv_w + 0], ScaleDown(src1[4 * uv_w + 0], src1[4 * uv_w + 1], src2[4 * uv_w + 0],
src2[4 * uv_w + 1], rgb_bit_depth, transfer_type); src2[4 * uv_w + 1], rgb_bit_depth);
const int W = RGBToGray(r, g, b); const int W = RGBToGray(r, g, b);
dst[0 * uv_w] = (fixed_t)(r - W); dst[0 * uv_w] = (fixed_t)(r - W);
dst[1 * uv_w] = (fixed_t)(g - W); dst[1 * uv_w] = (fixed_t)(g - W);
@ -124,15 +117,15 @@ static void UpdateChroma(const fixed_y_t* src1, const fixed_y_t* src2,
dst += 1; dst += 1;
src1 += 2; src1 += 2;
src2 += 2; src2 += 2;
} while (++i < uv_w); }
} }
static void StoreGray(const fixed_y_t* rgb, fixed_y_t* y, int w) { static void StoreGray(const fixed_y_t* rgb, fixed_y_t* y, int w) {
int i = 0; int i;
assert(w > 0); assert(w > 0);
do { for (i = 0; i < w; ++i) {
y[i] = RGBToGray(rgb[0 * w + i], rgb[1 * w + i], rgb[2 * w + i]); y[i] = RGBToGray(rgb[0 * w + i], rgb[1 * w + i], rgb[2 * w + i]);
} while (++i < w); }
} }
//------------------------------------------------------------------------------ //------------------------------------------------------------------------------
@ -158,9 +151,9 @@ static void ImportOneRow(const uint8_t* const r_ptr,
// Convert the rgb_step from a number of bytes to a number of uint8_t or // Convert the rgb_step from a number of bytes to a number of uint8_t or
// uint16_t values depending the bit depth. // uint16_t values depending the bit depth.
const int step = (rgb_bit_depth > 8) ? rgb_step / 2 : rgb_step; const int step = (rgb_bit_depth > 8) ? rgb_step / 2 : rgb_step;
int i = 0; int i;
const int w = (pic_width + 1) & ~1; const int w = (pic_width + 1) & ~1;
do { for (i = 0; i < pic_width; ++i) {
const int off = i * step; const int off = i * step;
const int shift = GetPrecisionShift(rgb_bit_depth); const int shift = GetPrecisionShift(rgb_bit_depth);
if (rgb_bit_depth == 8) { if (rgb_bit_depth == 8) {
@ -172,7 +165,7 @@ static void ImportOneRow(const uint8_t* const r_ptr,
dst[i + 1 * w] = Shift(((uint16_t*)g_ptr)[off], shift); dst[i + 1 * w] = Shift(((uint16_t*)g_ptr)[off], shift);
dst[i + 2 * w] = Shift(((uint16_t*)b_ptr)[off], shift); dst[i + 2 * w] = Shift(((uint16_t*)b_ptr)[off], shift);
} }
} while (++i < pic_width); }
if (pic_width & 1) { // replicate rightmost pixel if (pic_width & 1) { // replicate rightmost pixel
dst[pic_width + 0 * w] = dst[pic_width + 0 * w - 1]; dst[pic_width + 0 * w] = dst[pic_width + 0 * w - 1];
dst[pic_width + 1 * w] = dst[pic_width + 1 * w - 1]; dst[pic_width + 1 * w] = dst[pic_width + 1 * w - 1];
@ -240,11 +233,8 @@ static int ConvertWRGBToYUV(const fixed_y_t* best_y, const fixed_t* best_uv,
const int sfix = GetPrecisionShift(rgb_bit_depth); const int sfix = GetPrecisionShift(rgb_bit_depth);
const int yuv_max = (1 << yuv_bit_depth) - 1; const int yuv_max = (1 << yuv_bit_depth) - 1;
best_uv = best_uv_base; for (best_uv = best_uv_base, j = 0; j < height; ++j) {
j = 0; for (i = 0; i < width; ++i) {
do {
i = 0;
do {
const int off = (i >> 1); const int off = (i >> 1);
const int W = best_y[i]; const int W = best_y[i];
const int r = best_uv[off + 0 * uv_w] + W; const int r = best_uv[off + 0 * uv_w] + W;
@ -256,22 +246,19 @@ static int ConvertWRGBToYUV(const fixed_y_t* best_y, const fixed_t* best_uv,
} else { } else {
((uint16_t*)y_ptr)[i] = clip(y, yuv_max); ((uint16_t*)y_ptr)[i] = clip(y, yuv_max);
} }
} while (++i < width); }
best_y += w; best_y += w;
best_uv += (j & 1) * 3 * uv_w; best_uv += (j & 1) * 3 * uv_w;
y_ptr += y_stride; y_ptr += y_stride;
} while (++j < height); }
for (best_uv = best_uv_base, j = 0; j < uv_h; ++j) {
best_uv = best_uv_base; for (i = 0; i < uv_w; ++i) {
j = 0; const int off = i;
do {
i = 0;
do {
// Note r, g and b values here are off by W, but a constant offset on all // Note r, g and b values here are off by W, but a constant offset on all
// 3 components doesn't change the value of u and v with a YCbCr matrix. // 3 components doesn't change the value of u and v with a YCbCr matrix.
const int r = best_uv[i + 0 * uv_w]; const int r = best_uv[off + 0 * uv_w];
const int g = best_uv[i + 1 * uv_w]; const int g = best_uv[off + 1 * uv_w];
const int b = best_uv[i + 2 * uv_w]; const int b = best_uv[off + 2 * uv_w];
const int u = RGBToYUVComponent(r, g, b, yuv_matrix->rgb_to_u, sfix); const int u = RGBToYUVComponent(r, g, b, yuv_matrix->rgb_to_u, sfix);
const int v = RGBToYUVComponent(r, g, b, yuv_matrix->rgb_to_v, sfix); const int v = RGBToYUVComponent(r, g, b, yuv_matrix->rgb_to_v, sfix);
if (yuv_bit_depth <= 8) { if (yuv_bit_depth <= 8) {
@ -281,11 +268,11 @@ static int ConvertWRGBToYUV(const fixed_y_t* best_y, const fixed_t* best_uv,
((uint16_t*)u_ptr)[i] = clip(u, yuv_max); ((uint16_t*)u_ptr)[i] = clip(u, yuv_max);
((uint16_t*)v_ptr)[i] = clip(v, yuv_max); ((uint16_t*)v_ptr)[i] = clip(v, yuv_max);
} }
} while (++i < uv_w); }
best_uv += 3 * uv_w; best_uv += 3 * uv_w;
u_ptr += u_stride; u_ptr += u_stride;
v_ptr += v_stride; v_ptr += v_stride;
} while (++j < uv_h); }
return 1; return 1;
} }
@ -298,7 +285,7 @@ static void* SafeMalloc(uint64_t nmemb, size_t size) {
return malloc((size_t)total_size); return malloc((size_t)total_size);
} }
#define SAFE_ALLOC(W, H, T) ((T*)SafeMalloc((uint64_t)(W) * (H), sizeof(T))) #define SAFE_ALLOC(W, H, T) ((T*)SafeMalloc((W) * (H), sizeof(T)))
static int DoSharpArgbToYuv(const uint8_t* r_ptr, const uint8_t* g_ptr, static int DoSharpArgbToYuv(const uint8_t* r_ptr, const uint8_t* g_ptr,
const uint8_t* b_ptr, int rgb_step, int rgb_stride, const uint8_t* b_ptr, int rgb_step, int rgb_stride,
@ -306,14 +293,12 @@ static int DoSharpArgbToYuv(const uint8_t* r_ptr, const uint8_t* g_ptr,
uint8_t* u_ptr, int u_stride, uint8_t* v_ptr, uint8_t* u_ptr, int u_stride, uint8_t* v_ptr,
int v_stride, int yuv_bit_depth, int width, int v_stride, int yuv_bit_depth, int width,
int height, int height,
const SharpYuvConversionMatrix* yuv_matrix, const SharpYuvConversionMatrix* yuv_matrix) {
SharpYuvTransferFunctionType transfer_type) {
// we expand the right/bottom border if needed // we expand the right/bottom border if needed
const int w = (width + 1) & ~1; const int w = (width + 1) & ~1;
const int h = (height + 1) & ~1; const int h = (height + 1) & ~1;
const int uv_w = w >> 1; const int uv_w = w >> 1;
const int uv_h = h >> 1; const int uv_h = h >> 1;
const int y_bit_depth = rgb_bit_depth + GetPrecisionShift(rgb_bit_depth);
uint64_t prev_diff_y_sum = ~0; uint64_t prev_diff_y_sum = ~0;
int j, iter; int j, iter;
@ -361,9 +346,9 @@ static int DoSharpArgbToYuv(const uint8_t* r_ptr, const uint8_t* g_ptr,
StoreGray(src1, best_y + 0, w); StoreGray(src1, best_y + 0, w);
StoreGray(src2, best_y + w, w); StoreGray(src2, best_y + w, w);
UpdateW(src1, target_y, w, rgb_bit_depth, transfer_type); UpdateW(src1, target_y, w, rgb_bit_depth);
UpdateW(src2, target_y + w, w, rgb_bit_depth, transfer_type); UpdateW(src2, target_y + w, w, rgb_bit_depth);
UpdateChroma(src1, src2, target_uv, uv_w, rgb_bit_depth, transfer_type); UpdateChroma(src1, src2, target_uv, uv_w, rgb_bit_depth);
memcpy(best_uv, target_uv, 3 * uv_w * sizeof(*best_uv)); memcpy(best_uv, target_uv, 3 * uv_w * sizeof(*best_uv));
best_y += 2 * w; best_y += 2 * w;
best_uv += 3 * uv_w; best_uv += 3 * uv_w;
@ -384,8 +369,7 @@ static int DoSharpArgbToYuv(const uint8_t* r_ptr, const uint8_t* g_ptr,
best_uv = best_uv_base; best_uv = best_uv_base;
target_y = target_y_base; target_y = target_y_base;
target_uv = target_uv_base; target_uv = target_uv_base;
j = 0; for (j = 0; j < h; j += 2) {
do {
fixed_y_t* const src1 = tmp_buffer + 0 * w; fixed_y_t* const src1 = tmp_buffer + 0 * w;
fixed_y_t* const src2 = tmp_buffer + 3 * w; fixed_y_t* const src2 = tmp_buffer + 3 * w;
{ {
@ -396,21 +380,21 @@ static int DoSharpArgbToYuv(const uint8_t* r_ptr, const uint8_t* g_ptr,
cur_uv = next_uv; cur_uv = next_uv;
} }
UpdateW(src1, best_rgb_y + 0 * w, w, rgb_bit_depth, transfer_type); UpdateW(src1, best_rgb_y + 0 * w, w, rgb_bit_depth);
UpdateW(src2, best_rgb_y + 1 * w, w, rgb_bit_depth, transfer_type); UpdateW(src2, best_rgb_y + 1 * w, w, rgb_bit_depth);
UpdateChroma(src1, src2, best_rgb_uv, uv_w, rgb_bit_depth, transfer_type); UpdateChroma(src1, src2, best_rgb_uv, uv_w, rgb_bit_depth);
// update two rows of Y and one row of RGB // update two rows of Y and one row of RGB
diff_y_sum += diff_y_sum +=
SharpYuvUpdateY(target_y, best_rgb_y, best_y, 2 * w, y_bit_depth); SharpYuvUpdateY(target_y, best_rgb_y, best_y, 2 * w,
rgb_bit_depth + GetPrecisionShift(rgb_bit_depth));
SharpYuvUpdateRGB(target_uv, best_rgb_uv, best_uv, 3 * uv_w); SharpYuvUpdateRGB(target_uv, best_rgb_uv, best_uv, 3 * uv_w);
best_y += 2 * w; best_y += 2 * w;
best_uv += 3 * uv_w; best_uv += 3 * uv_w;
target_y += 2 * w; target_y += 2 * w;
target_uv += 3 * uv_w; target_uv += 3 * uv_w;
j += 2; }
} while (j < h);
// test exit condition // test exit condition
if (iter > 0) { if (iter > 0) {
if (diff_y_sum < diff_y_threshold) break; if (diff_y_sum < diff_y_threshold) break;
@ -434,7 +418,6 @@ static int DoSharpArgbToYuv(const uint8_t* r_ptr, const uint8_t* g_ptr,
free(tmp_buffer); free(tmp_buffer);
return ok; return ok;
} }
#undef SAFE_ALLOC #undef SAFE_ALLOC
#if defined(WEBP_USE_THREAD) && !defined(_WIN32) #if defined(WEBP_USE_THREAD) && !defined(_WIN32)
@ -479,42 +462,12 @@ void SharpYuvInit(VP8CPUInfo cpu_info_func) {
UNLOCK_ACCESS_AND_RETURN; UNLOCK_ACCESS_AND_RETURN;
} }
int SharpYuvConvert(const void* r_ptr, const void* g_ptr, const void* b_ptr, int SharpYuvConvert(const void* r_ptr, const void* g_ptr,
int rgb_step, int rgb_stride, int rgb_bit_depth, const void* b_ptr, int rgb_step, int rgb_stride,
void* y_ptr, int y_stride, void* u_ptr, int u_stride, int rgb_bit_depth, void* y_ptr, int y_stride,
void* v_ptr, int v_stride, int yuv_bit_depth, int width, void* u_ptr, int u_stride, void* v_ptr,
int v_stride, int yuv_bit_depth, int width,
int height, const SharpYuvConversionMatrix* yuv_matrix) { int height, const SharpYuvConversionMatrix* yuv_matrix) {
SharpYuvOptions options;
options.yuv_matrix = yuv_matrix;
options.transfer_type = kSharpYuvTransferFunctionSrgb;
return SharpYuvConvertWithOptions(
r_ptr, g_ptr, b_ptr, rgb_step, rgb_stride, rgb_bit_depth, y_ptr, y_stride,
u_ptr, u_stride, v_ptr, v_stride, yuv_bit_depth, width, height, &options);
}
int SharpYuvOptionsInitInternal(const SharpYuvConversionMatrix* yuv_matrix,
SharpYuvOptions* options, int version) {
const int major = (version >> 24);
const int minor = (version >> 16) & 0xff;
if (options == NULL || yuv_matrix == NULL ||
(major == SHARPYUV_VERSION_MAJOR && major == 0 &&
minor != SHARPYUV_VERSION_MINOR) ||
(major != SHARPYUV_VERSION_MAJOR)) {
return 0;
}
options->yuv_matrix = yuv_matrix;
options->transfer_type = kSharpYuvTransferFunctionSrgb;
return 1;
}
int SharpYuvConvertWithOptions(const void* r_ptr, const void* g_ptr,
const void* b_ptr, int rgb_step, int rgb_stride,
int rgb_bit_depth, void* y_ptr, int y_stride,
void* u_ptr, int u_stride, void* v_ptr,
int v_stride, int yuv_bit_depth, int width,
int height, const SharpYuvOptions* options) {
const SharpYuvConversionMatrix* yuv_matrix = options->yuv_matrix;
SharpYuvTransferFunctionType transfer_type = options->transfer_type;
SharpYuvConversionMatrix scaled_matrix; SharpYuvConversionMatrix scaled_matrix;
const int rgb_max = (1 << rgb_bit_depth) - 1; const int rgb_max = (1 << rgb_bit_depth) - 1;
const int rgb_round = 1 << (rgb_bit_depth - 1); const int rgb_round = 1 << (rgb_bit_depth - 1);
@ -533,7 +486,7 @@ int SharpYuvConvertWithOptions(const void* r_ptr, const void* g_ptr,
if (yuv_bit_depth != 8 && yuv_bit_depth != 10 && yuv_bit_depth != 12) { if (yuv_bit_depth != 8 && yuv_bit_depth != 10 && yuv_bit_depth != 12) {
return 0; return 0;
} }
if (rgb_bit_depth > 8 && (rgb_step % 2 != 0 || rgb_stride % 2 != 0)) { if (rgb_bit_depth > 8 && (rgb_step % 2 != 0 || rgb_stride %2 != 0)) {
// Step/stride should be even for uint16_t buffers. // Step/stride should be even for uint16_t buffers.
return 0; return 0;
} }
@ -565,11 +518,10 @@ int SharpYuvConvertWithOptions(const void* r_ptr, const void* g_ptr,
scaled_matrix.rgb_to_u[3] = Shift(yuv_matrix->rgb_to_u[3], sfix); scaled_matrix.rgb_to_u[3] = Shift(yuv_matrix->rgb_to_u[3], sfix);
scaled_matrix.rgb_to_v[3] = Shift(yuv_matrix->rgb_to_v[3], sfix); scaled_matrix.rgb_to_v[3] = Shift(yuv_matrix->rgb_to_v[3], sfix);
return DoSharpArgbToYuv( return DoSharpArgbToYuv(r_ptr, g_ptr, b_ptr, rgb_step, rgb_stride,
(const uint8_t*)r_ptr, (const uint8_t*)g_ptr, (const uint8_t*)b_ptr, rgb_bit_depth, y_ptr, y_stride, u_ptr, u_stride,
rgb_step, rgb_stride, rgb_bit_depth, (uint8_t*)y_ptr, y_stride, v_ptr, v_stride, yuv_bit_depth, width, height,
(uint8_t*)u_ptr, u_stride, (uint8_t*)v_ptr, v_stride, yuv_bit_depth, &scaled_matrix);
width, height, &scaled_matrix, transfer_type);
} }
//------------------------------------------------------------------------------ //------------------------------------------------------------------------------

View File

@ -22,36 +22,21 @@ extern "C" {
#else #else
// This explicitly marks library functions and allows for changing the // This explicitly marks library functions and allows for changing the
// signature for e.g., Windows DLL builds. // signature for e.g., Windows DLL builds.
#if defined(_WIN32) && defined(WEBP_DLL) #if defined(__GNUC__) && __GNUC__ >= 4
#define SHARPYUV_EXTERN __declspec(dllexport)
#elif defined(__GNUC__) && __GNUC__ >= 4
#define SHARPYUV_EXTERN extern __attribute__((visibility("default"))) #define SHARPYUV_EXTERN extern __attribute__((visibility("default")))
#else #else
#if defined(_MSC_VER) && defined(WEBP_DLL)
#define SHARPYUV_EXTERN __declspec(dllexport)
#else
#define SHARPYUV_EXTERN extern #define SHARPYUV_EXTERN extern
#endif /* defined(_WIN32) && defined(WEBP_DLL) */ #endif /* _MSC_VER && WEBP_DLL */
#endif /* __GNUC__ >= 4 */
#endif /* WEBP_EXTERN */ #endif /* WEBP_EXTERN */
#endif /* SHARPYUV_EXTERN */ #endif /* SHARPYUV_EXTERN */
#ifndef SHARPYUV_INLINE
#ifdef WEBP_INLINE
#define SHARPYUV_INLINE WEBP_INLINE
#else
#ifndef _MSC_VER
#if defined(__cplusplus) || !defined(__STRICT_ANSI__) || \
(defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L)
#define SHARPYUV_INLINE inline
#else
#define SHARPYUV_INLINE
#endif
#else
#define SHARPYUV_INLINE __forceinline
#endif /* _MSC_VER */
#endif /* WEBP_INLINE */
#endif /* SHARPYUV_INLINE */
// SharpYUV API version following the convention from semver.org // SharpYUV API version following the convention from semver.org
#define SHARPYUV_VERSION_MAJOR 0 #define SHARPYUV_VERSION_MAJOR 0
#define SHARPYUV_VERSION_MINOR 4 #define SHARPYUV_VERSION_MINOR 2
#define SHARPYUV_VERSION_PATCH 1 #define SHARPYUV_VERSION_PATCH 1
// Version as a uint32_t. The major number is the high 8 bits. // Version as a uint32_t. The major number is the high 8 bits.
// The minor number is the middle 8 bits. The patch number is the low 16 bits. // The minor number is the middle 8 bits. The patch number is the low 16 bits.
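A minimal C sketch of the packing described in the comment above; the helper name is invented here and mirrors the decoding done in SharpYuvOptionsInitInternal later in this diff.

// Hypothetical helper: splits a SHARPYUV_VERSION-style value into its parts.
static void SplitSharpYuvVersion(int version, int* major, int* minor,
                                 int* patch) {
  *major = (version >> 24) & 0xff;  // high 8 bits
  *minor = (version >> 16) & 0xff;  // middle 8 bits
  *patch = version & 0xffff;        // low 16 bits
}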
@ -66,50 +51,16 @@ extern "C" {
SHARPYUV_EXTERN int SharpYuvGetVersion(void); SHARPYUV_EXTERN int SharpYuvGetVersion(void);
// RGB to YUV conversion matrix, in 16 bit fixed point. // RGB to YUV conversion matrix, in 16 bit fixed point.
// y_ = rgb_to_y[0] * r + rgb_to_y[1] * g + rgb_to_y[2] * b + rgb_to_y[3] // y = rgb_to_y[0] * r + rgb_to_y[1] * g + rgb_to_y[2] * b + rgb_to_y[3]
// u_ = rgb_to_u[0] * r + rgb_to_u[1] * g + rgb_to_u[2] * b + rgb_to_u[3] // u = rgb_to_u[0] * r + rgb_to_u[1] * g + rgb_to_u[2] * b + rgb_to_u[3]
// v_ = rgb_to_v[0] * r + rgb_to_v[1] * g + rgb_to_v[2] * b + rgb_to_v[3] // v = rgb_to_v[0] * r + rgb_to_v[1] * g + rgb_to_v[2] * b + rgb_to_v[3]
// Then the values are divided by 1<<16 and rounded. // Then y, u and v values are divided by 1<<16 and rounded.
// y = (y_ + (1 << 15)) >> 16
// u = (u_ + (1 << 15)) >> 16
// v = (v_ + (1 << 15)) >> 16
//
// Typically, the offset values rgb_to_y[3], rgb_to_u[3] and rgb_to_v[3] depend
// on the input's bit depth, e.g., rgb_to_u[3] = 1 << (rgb_bit_depth - 1 + 16).
// See also sharpyuv_csp.h to get a predefined matrix or generate a matrix.
typedef struct { typedef struct {
int rgb_to_y[4]; int rgb_to_y[4];
int rgb_to_u[4]; int rgb_to_u[4];
int rgb_to_v[4]; int rgb_to_v[4];
} SharpYuvConversionMatrix; } SharpYuvConversionMatrix;
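A minimal sketch (not library code) of how one row of this fixed-point matrix is meant to be applied, following the formula and rounding spelled out in the comment above:

#include <stdint.h>

// Hypothetical helper: applies one matrix row to an RGB triplet.
// The accumulator is in 16-bit fixed point; the result is rounded with
// (acc + (1 << 15)) >> 16, as described in the comment above.
static int ApplyMatrixRow(const int row[4], int r, int g, int b) {
  const int64_t acc = (int64_t)row[0] * r + (int64_t)row[1] * g +
                      (int64_t)row[2] * b + row[3];
  return (int)((acc + (1 << 15)) >> 16);
}
// e.g. const int y = ApplyMatrixRow(matrix->rgb_to_y, r, g, b);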
typedef struct SharpYuvOptions SharpYuvOptions;
// Enums for transfer functions, as defined in H.273,
// https://www.itu.int/rec/T-REC-H.273-202107-I/en
typedef enum SharpYuvTransferFunctionType {
// 0 is reserved
kSharpYuvTransferFunctionBt709 = 1,
// 2 is unspecified
// 3 is reserved
kSharpYuvTransferFunctionBt470M = 4,
kSharpYuvTransferFunctionBt470Bg = 5,
kSharpYuvTransferFunctionBt601 = 6,
kSharpYuvTransferFunctionSmpte240 = 7,
kSharpYuvTransferFunctionLinear = 8,
kSharpYuvTransferFunctionLog100 = 9,
kSharpYuvTransferFunctionLog100_Sqrt10 = 10,
kSharpYuvTransferFunctionIec61966 = 11,
kSharpYuvTransferFunctionBt1361 = 12,
kSharpYuvTransferFunctionSrgb = 13,
kSharpYuvTransferFunctionBt2020_10Bit = 14,
kSharpYuvTransferFunctionBt2020_12Bit = 15,
kSharpYuvTransferFunctionSmpte2084 = 16, // PQ
kSharpYuvTransferFunctionSmpte428 = 17,
kSharpYuvTransferFunctionHlg = 18,
kSharpYuvTransferFunctionNum
} SharpYuvTransferFunctionType;
// Converts RGB to YUV420 using a downsampling algorithm that minimizes // Converts RGB to YUV420 using a downsampling algorithm that minimizes
// artefacts caused by chroma subsampling. // artefacts caused by chroma subsampling.
// This is slower than standard downsampling (averaging of 4 UV values). // This is slower than standard downsampling (averaging of 4 UV values).
@ -134,10 +85,6 @@ typedef enum SharpYuvTransferFunctionType {
// adjacent pixels on the y, u and v channels. If yuv_bit_depth > 8, they // adjacent pixels on the y, u and v channels. If yuv_bit_depth > 8, they
// should be multiples of 2. // should be multiples of 2.
// width, height: width and height of the image in pixels // width, height: width and height of the image in pixels
// yuv_matrix: RGB to YUV conversion matrix. The matrix values typically
// depend on the input's rgb_bit_depth.
// This function calls SharpYuvConvertWithOptions with a default transfer
// function of kSharpYuvTransferFunctionSrgb.
SHARPYUV_EXTERN int SharpYuvConvert(const void* r_ptr, const void* g_ptr, SHARPYUV_EXTERN int SharpYuvConvert(const void* r_ptr, const void* g_ptr,
const void* b_ptr, int rgb_step, const void* b_ptr, int rgb_step,
int rgb_stride, int rgb_bit_depth, int rgb_stride, int rgb_bit_depth,
@ -146,31 +93,6 @@ SHARPYUV_EXTERN int SharpYuvConvert(const void* r_ptr, const void* g_ptr,
int yuv_bit_depth, int width, int height, int yuv_bit_depth, int width, int height,
const SharpYuvConversionMatrix* yuv_matrix); const SharpYuvConversionMatrix* yuv_matrix);
struct SharpYuvOptions {
// This matrix cannot be NULL and can be initialized by
// SharpYuvComputeConversionMatrix.
const SharpYuvConversionMatrix* yuv_matrix;
SharpYuvTransferFunctionType transfer_type;
};
// Internal, version-checked, entry point
SHARPYUV_EXTERN int SharpYuvOptionsInitInternal(const SharpYuvConversionMatrix*,
SharpYuvOptions*, int);
// Should always be called, to initialize a fresh SharpYuvOptions
// structure before modification. SharpYuvOptionsInit() must have succeeded
// before using the 'options' object.
static SHARPYUV_INLINE int SharpYuvOptionsInit(
const SharpYuvConversionMatrix* yuv_matrix, SharpYuvOptions* options) {
return SharpYuvOptionsInitInternal(yuv_matrix, options, SHARPYUV_VERSION);
}
SHARPYUV_EXTERN int SharpYuvConvertWithOptions(
const void* r_ptr, const void* g_ptr, const void* b_ptr, int rgb_step,
int rgb_stride, int rgb_bit_depth, void* y_ptr, int y_stride, void* u_ptr,
int u_stride, void* v_ptr, int v_stride, int yuv_bit_depth, int width,
int height, const SharpYuvOptions* options);
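A hedged usage sketch of the options-based entry point declared above. The buffer layout is an assumption (interleaved 8-bit RGB in, 8-bit 4:2:0 planes out), and SharpYuvGetConversionMatrix() is assumed to come from sharpyuv_csp.h; neither appears in this diff.

#include <stdint.h>
#include "sharpyuv/sharpyuv.h"
#include "sharpyuv/sharpyuv_csp.h"

// Convert an interleaved 8-bit RGB buffer to 8-bit YUV 4:2:0, using a
// BT.709 transfer function instead of the sRGB default.
static int ConvertRgbToYuv420(const uint8_t* rgb, int width, int height,
                              uint8_t* y, uint8_t* u, uint8_t* v) {
  SharpYuvOptions options;
  const SharpYuvConversionMatrix* matrix =
      SharpYuvGetConversionMatrix(kSharpYuvMatrixRec709Limited);  // assumed API
  if (!SharpYuvOptionsInit(matrix, &options)) return 0;  // version check
  options.transfer_type = kSharpYuvTransferFunctionBt709;
  return SharpYuvConvertWithOptions(
      rgb + 0, rgb + 1, rgb + 2,        // r_ptr, g_ptr, b_ptr
      /*rgb_step=*/3, /*rgb_stride=*/3 * width, /*rgb_bit_depth=*/8,
      y, /*y_stride=*/width, u, /*u_stride=*/(width + 1) / 2,
      v, /*v_stride=*/(width + 1) / 2, /*yuv_bit_depth=*/8,
      width, height, &options);
}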
// TODO(b/194336375): Add YUV444 to YUV420 conversion. Maybe also add 422 // TODO(b/194336375): Add YUV444 to YUV420 conversion. Maybe also add 422
// support (it's rarely used in practice, especially for images). // support (it's rarely used in practice, especially for images).

View File

@ -22,16 +22,16 @@ void SharpYuvComputeConversionMatrix(const SharpYuvColorSpace* yuv_color_space,
const float kr = yuv_color_space->kr; const float kr = yuv_color_space->kr;
const float kb = yuv_color_space->kb; const float kb = yuv_color_space->kb;
const float kg = 1.0f - kr - kb; const float kg = 1.0f - kr - kb;
const float cb = 0.5f / (1.0f - kb); const float cr = 0.5f / (1.0f - kb);
const float cr = 0.5f / (1.0f - kr); const float cb = 0.5f / (1.0f - kr);
const int shift = yuv_color_space->bit_depth - 8; const int shift = yuv_color_space->bit_depth - 8;
const float denom = (float)((1 << yuv_color_space->bit_depth) - 1); const float denom = (float)((1 << yuv_color_space->bit_depth) - 1);
float scale_y = 1.0f; float scale_y = 1.0f;
float add_y = 0.0f; float add_y = 0.0f;
float scale_u = cb; float scale_u = cr;
float scale_v = cr; float scale_v = cb;
float add_uv = (float)(128 << shift); float add_uv = (float)(128 << shift);
assert(yuv_color_space->bit_depth >= 8); assert(yuv_color_space->bit_depth >= 8);
@ -59,35 +59,31 @@ void SharpYuvComputeConversionMatrix(const SharpYuvColorSpace* yuv_color_space,
} }
// Matrices are in YUV_FIX fixed point precision. // Matrices are in YUV_FIX fixed point precision.
// WebP's matrix, similar but not identical to kRec601LimitedMatrix // WebP's matrix, similar but not identical to kRec601LimitedMatrix.
// Derived using the following formulas:
// Y = 0.2569 * R + 0.5044 * G + 0.0979 * B + 16
// U = -0.1483 * R - 0.2911 * G + 0.4394 * B + 128
// V = 0.4394 * R - 0.3679 * G - 0.0715 * B + 128
static const SharpYuvConversionMatrix kWebpMatrix = { static const SharpYuvConversionMatrix kWebpMatrix = {
{16839, 33059, 6420, 16 << 16}, {16839, 33059, 6420, 16 << 16},
{-9719, -19081, 28800, 128 << 16}, {-9719, -19081, 28800, 128 << 16},
{28800, -24116, -4684, 128 << 16}, {28800, -24116, -4684, 128 << 16},
}; };
// Kr=0.2990f Kb=0.1140f bit_depth=8 range=kSharpYuvRangeLimited // Kr=0.2990f Kb=0.1140f bits=8 range=kSharpYuvRangeLimited
static const SharpYuvConversionMatrix kRec601LimitedMatrix = { static const SharpYuvConversionMatrix kRec601LimitedMatrix = {
{16829, 33039, 6416, 16 << 16}, {16829, 33039, 6416, 16 << 16},
{-9714, -19071, 28784, 128 << 16}, {-9714, -19071, 28784, 128 << 16},
{28784, -24103, -4681, 128 << 16}, {28784, -24103, -4681, 128 << 16},
}; };
// Kr=0.2990f Kb=0.1140f bit_depth=8 range=kSharpYuvRangeFull // Kr=0.2990f Kb=0.1140f bits=8 range=kSharpYuvRangeFull
static const SharpYuvConversionMatrix kRec601FullMatrix = { static const SharpYuvConversionMatrix kRec601FullMatrix = {
{19595, 38470, 7471, 0}, {19595, 38470, 7471, 0},
{-11058, -21710, 32768, 128 << 16}, {-11058, -21710, 32768, 128 << 16},
{32768, -27439, -5329, 128 << 16}, {32768, -27439, -5329, 128 << 16},
}; };
// Kr=0.2126f Kb=0.0722f bit_depth=8 range=kSharpYuvRangeLimited // Kr=0.2126f Kb=0.0722f bits=8 range=kSharpYuvRangeLimited
static const SharpYuvConversionMatrix kRec709LimitedMatrix = { static const SharpYuvConversionMatrix kRec709LimitedMatrix = {
{11966, 40254, 4064, 16 << 16}, {11966, 40254, 4064, 16 << 16},
{-6596, -22189, 28784, 128 << 16}, {-6596, -22189, 28784, 128 << 16},
{28784, -26145, -2639, 128 << 16}, {28784, -26145, -2639, 128 << 16},
}; };
// Kr=0.2126f Kb=0.0722f bit_depth=8 range=kSharpYuvRangeFull // Kr=0.2126f Kb=0.0722f bits=8 range=kSharpYuvRangeFull
static const SharpYuvConversionMatrix kRec709FullMatrix = { static const SharpYuvConversionMatrix kRec709FullMatrix = {
{13933, 46871, 4732, 0}, {13933, 46871, 4732, 0},
{-7509, -25259, 32768, 128 << 16}, {-7509, -25259, 32768, 128 << 16},

View File

@ -41,15 +41,10 @@ SHARPYUV_EXTERN void SharpYuvComputeConversionMatrix(
// Enums for precomputed conversion matrices. // Enums for precomputed conversion matrices.
typedef enum { typedef enum {
// WebP's matrix, similar but not identical to kSharpYuvMatrixRec601Limited
kSharpYuvMatrixWebp = 0, kSharpYuvMatrixWebp = 0,
// Kr=0.2990f Kb=0.1140f bit_depth=8 range=kSharpYuvRangeLimited
kSharpYuvMatrixRec601Limited, kSharpYuvMatrixRec601Limited,
// Kr=0.2990f Kb=0.1140f bit_depth=8 range=kSharpYuvRangeFull
kSharpYuvMatrixRec601Full, kSharpYuvMatrixRec601Full,
// Kr=0.2126f Kb=0.0722f bit_depth=8 range=kSharpYuvRangeLimited
kSharpYuvMatrixRec709Limited, kSharpYuvMatrixRec709Limited,
// Kr=0.2126f Kb=0.0722f bit_depth=8 range=kSharpYuvRangeFull
kSharpYuvMatrixRec709Full, kSharpYuvMatrixRec709Full,
kSharpYuvMatrixNum kSharpYuvMatrixNum
} SharpYuvMatrixType; } SharpYuvMatrixType;

View File

@ -17,7 +17,6 @@
#include <stdlib.h> #include <stdlib.h>
#include "sharpyuv/sharpyuv_cpu.h" #include "sharpyuv/sharpyuv_cpu.h"
#include "src/webp/types.h"
//----------------------------------------------------------------------------- //-----------------------------------------------------------------------------
@ -70,7 +69,8 @@ uint64_t (*SharpYuvUpdateY)(const uint16_t* src, const uint16_t* ref,
void (*SharpYuvUpdateRGB)(const int16_t* src, const int16_t* ref, int16_t* dst, void (*SharpYuvUpdateRGB)(const int16_t* src, const int16_t* ref, int16_t* dst,
int len); int len);
void (*SharpYuvFilterRow)(const int16_t* A, const int16_t* B, int len, void (*SharpYuvFilterRow)(const int16_t* A, const int16_t* B, int len,
const uint16_t* best_y, uint16_t* out, int bit_depth); const uint16_t* best_y, uint16_t* out,
int bit_depth);
extern VP8CPUInfo SharpYuvGetCPUInfo; extern VP8CPUInfo SharpYuvGetCPUInfo;
extern void InitSharpYuvSSE2(void); extern void InitSharpYuvSSE2(void);

View File

@ -12,7 +12,6 @@
#include "sharpyuv/sharpyuv_gamma.h" #include "sharpyuv/sharpyuv_gamma.h"
#include <assert.h> #include <assert.h>
#include <float.h>
#include <math.h> #include <math.h>
#include "src/webp/types.h" #include "src/webp/types.h"
@ -98,7 +97,7 @@ static WEBP_INLINE uint32_t FixedPointInterpolation(int v, uint32_t* tab,
return result; return result;
} }
static uint32_t ToLinearSrgb(uint16_t v, int bit_depth) { uint32_t SharpYuvGammaToLinear(uint16_t v, int bit_depth) {
const int shift = GAMMA_TO_LINEAR_TAB_BITS - bit_depth; const int shift = GAMMA_TO_LINEAR_TAB_BITS - bit_depth;
if (shift > 0) { if (shift > 0) {
return kGammaToLinearTabS[v << shift]; return kGammaToLinearTabS[v << shift];
@ -106,314 +105,9 @@ static uint32_t ToLinearSrgb(uint16_t v, int bit_depth) {
return FixedPointInterpolation(v, kGammaToLinearTabS, -shift, 0); return FixedPointInterpolation(v, kGammaToLinearTabS, -shift, 0);
} }
static uint16_t FromLinearSrgb(uint32_t value, int bit_depth) { uint16_t SharpYuvLinearToGamma(uint32_t value, int bit_depth) {
return FixedPointInterpolation( return FixedPointInterpolation(
value, kLinearToGammaTabS, value, kLinearToGammaTabS,
(GAMMA_TO_LINEAR_BITS - LINEAR_TO_GAMMA_TAB_BITS), (GAMMA_TO_LINEAR_BITS - LINEAR_TO_GAMMA_TAB_BITS),
bit_depth - GAMMA_TO_LINEAR_BITS); bit_depth - GAMMA_TO_LINEAR_BITS);
} }
////////////////////////////////////////////////////////////////////////////////
#define CLAMP(x, low, high) \
(((x) < (low)) ? (low) : (((high) < (x)) ? (high) : (x)))
#define MIN(a, b) (((a) < (b)) ? (a) : (b))
#define MAX(a, b) (((a) > (b)) ? (a) : (b))
static WEBP_INLINE float Roundf(float x) {
if (x < 0)
return (float)ceil((double)(x - 0.5f));
else
return (float)floor((double)(x + 0.5f));
}
static WEBP_INLINE float Powf(float base, float exp) {
return (float)pow((double)base, (double)exp);
}
static WEBP_INLINE float Log10f(float x) { return (float)log10((double)x); }
static float ToLinear709(float gamma) {
if (gamma < 0.f) {
return 0.f;
} else if (gamma < 4.5f * 0.018053968510807f) {
return gamma / 4.5f;
} else if (gamma < 1.f) {
return Powf((gamma + 0.09929682680944f) / 1.09929682680944f, 1.f / 0.45f);
}
return 1.f;
}
static float FromLinear709(float linear) {
if (linear < 0.f) {
return 0.f;
} else if (linear < 0.018053968510807f) {
return linear * 4.5f;
} else if (linear < 1.f) {
return 1.09929682680944f * Powf(linear, 0.45f) - 0.09929682680944f;
}
return 1.f;
}
static float ToLinear470M(float gamma) {
return Powf(CLAMP(gamma, 0.f, 1.f), 2.2f);
}
static float FromLinear470M(float linear) {
return Powf(CLAMP(linear, 0.f, 1.f), 1.f / 2.2f);
}
static float ToLinear470Bg(float gamma) {
return Powf(CLAMP(gamma, 0.f, 1.f), 2.8f);
}
static float FromLinear470Bg(float linear) {
return Powf(CLAMP(linear, 0.f, 1.f), 1.f / 2.8f);
}
static float ToLinearSmpte240(float gamma) {
if (gamma < 0.f) {
return 0.f;
} else if (gamma < 4.f * 0.022821585529445f) {
return gamma / 4.f;
} else if (gamma < 1.f) {
return Powf((gamma + 0.111572195921731f) / 1.111572195921731f, 1.f / 0.45f);
}
return 1.f;
}
static float FromLinearSmpte240(float linear) {
if (linear < 0.f) {
return 0.f;
} else if (linear < 0.022821585529445f) {
return linear * 4.f;
} else if (linear < 1.f) {
return 1.111572195921731f * Powf(linear, 0.45f) - 0.111572195921731f;
}
return 1.f;
}
static float ToLinearLog100(float gamma) {
// The function is non-bijective so choose the middle of [0, 0.01].
const float mid_interval = 0.01f / 2.f;
return (gamma <= 0.0f) ? mid_interval
: Powf(10.0f, 2.f * (MIN(gamma, 1.f) - 1.0f));
}
static float FromLinearLog100(float linear) {
return (linear < 0.01f) ? 0.0f : 1.0f + Log10f(MIN(linear, 1.f)) / 2.0f;
}
static float ToLinearLog100Sqrt10(float gamma) {
// The function is non-bijective so choose the middle of [0, 0.00316227766f[.
const float mid_interval = 0.00316227766f / 2.f;
return (gamma <= 0.0f) ? mid_interval
: Powf(10.0f, 2.5f * (MIN(gamma, 1.f) - 1.0f));
}
static float FromLinearLog100Sqrt10(float linear) {
return (linear < 0.00316227766f) ? 0.0f
: 1.0f + Log10f(MIN(linear, 1.f)) / 2.5f;
}
static float ToLinearIec61966(float gamma) {
if (gamma <= -4.5f * 0.018053968510807f) {
return Powf((-gamma + 0.09929682680944f) / -1.09929682680944f, 1.f / 0.45f);
} else if (gamma < 4.5f * 0.018053968510807f) {
return gamma / 4.5f;
}
return Powf((gamma + 0.09929682680944f) / 1.09929682680944f, 1.f / 0.45f);
}
static float FromLinearIec61966(float linear) {
if (linear <= -0.018053968510807f) {
return -1.09929682680944f * Powf(-linear, 0.45f) + 0.09929682680944f;
} else if (linear < 0.018053968510807f) {
return linear * 4.5f;
}
return 1.09929682680944f * Powf(linear, 0.45f) - 0.09929682680944f;
}
static float ToLinearBt1361(float gamma) {
if (gamma < -0.25f) {
return -0.25f;
} else if (gamma < 0.f) {
return Powf((gamma - 0.02482420670236f) / -0.27482420670236f, 1.f / 0.45f) /
-4.f;
} else if (gamma < 4.5f * 0.018053968510807f) {
return gamma / 4.5f;
} else if (gamma < 1.f) {
return Powf((gamma + 0.09929682680944f) / 1.09929682680944f, 1.f / 0.45f);
}
return 1.f;
}
static float FromLinearBt1361(float linear) {
if (linear < -0.25f) {
return -0.25f;
} else if (linear < 0.f) {
return -0.27482420670236f * Powf(-4.f * linear, 0.45f) + 0.02482420670236f;
} else if (linear < 0.018053968510807f) {
return linear * 4.5f;
} else if (linear < 1.f) {
return 1.09929682680944f * Powf(linear, 0.45f) - 0.09929682680944f;
}
return 1.f;
}
static float ToLinearPq(float gamma) {
if (gamma > 0.f) {
const float pow_gamma = Powf(gamma, 32.f / 2523.f);
const float num = MAX(pow_gamma - 107.f / 128.f, 0.0f);
const float den = MAX(2413.f / 128.f - 2392.f / 128.f * pow_gamma, FLT_MIN);
return Powf(num / den, 4096.f / 653.f);
}
return 0.f;
}
static float FromLinearPq(float linear) {
if (linear > 0.f) {
const float pow_linear = Powf(linear, 653.f / 4096.f);
const float num = 107.f / 128.f + 2413.f / 128.f * pow_linear;
const float den = 1.0f + 2392.f / 128.f * pow_linear;
return Powf(num / den, 2523.f / 32.f);
}
return 0.f;
}
static float ToLinearSmpte428(float gamma) {
return Powf(MAX(gamma, 0.f), 2.6f) / 0.91655527974030934f;
}
static float FromLinearSmpte428(float linear) {
return Powf(0.91655527974030934f * MAX(linear, 0.f), 1.f / 2.6f);
}
// Conversion in BT.2100 requires RGB info. Simplify to gamma correction here.
static float ToLinearHlg(float gamma) {
if (gamma < 0.f) {
return 0.f;
} else if (gamma <= 0.5f) {
return Powf((gamma * gamma) * (1.f / 3.f), 1.2f);
}
return Powf((expf((gamma - 0.55991073f) / 0.17883277f) + 0.28466892f) / 12.0f,
1.2f);
}
static float FromLinearHlg(float linear) {
linear = Powf(linear, 1.f / 1.2f);
if (linear < 0.f) {
return 0.f;
} else if (linear <= (1.f / 12.f)) {
return sqrtf(3.f * linear);
}
return 0.17883277f * logf(12.f * linear - 0.28466892f) + 0.55991073f;
}
uint32_t SharpYuvGammaToLinear(uint16_t v, int bit_depth,
SharpYuvTransferFunctionType transfer_type) {
float v_float, linear;
if (transfer_type == kSharpYuvTransferFunctionSrgb) {
return ToLinearSrgb(v, bit_depth);
}
v_float = (float)v / ((1 << bit_depth) - 1);
switch (transfer_type) {
case kSharpYuvTransferFunctionBt709:
case kSharpYuvTransferFunctionBt601:
case kSharpYuvTransferFunctionBt2020_10Bit:
case kSharpYuvTransferFunctionBt2020_12Bit:
linear = ToLinear709(v_float);
break;
case kSharpYuvTransferFunctionBt470M:
linear = ToLinear470M(v_float);
break;
case kSharpYuvTransferFunctionBt470Bg:
linear = ToLinear470Bg(v_float);
break;
case kSharpYuvTransferFunctionSmpte240:
linear = ToLinearSmpte240(v_float);
break;
case kSharpYuvTransferFunctionLinear:
return v;
case kSharpYuvTransferFunctionLog100:
linear = ToLinearLog100(v_float);
break;
case kSharpYuvTransferFunctionLog100_Sqrt10:
linear = ToLinearLog100Sqrt10(v_float);
break;
case kSharpYuvTransferFunctionIec61966:
linear = ToLinearIec61966(v_float);
break;
case kSharpYuvTransferFunctionBt1361:
linear = ToLinearBt1361(v_float);
break;
case kSharpYuvTransferFunctionSmpte2084:
linear = ToLinearPq(v_float);
break;
case kSharpYuvTransferFunctionSmpte428:
linear = ToLinearSmpte428(v_float);
break;
case kSharpYuvTransferFunctionHlg:
linear = ToLinearHlg(v_float);
break;
default:
assert(0);
linear = 0;
break;
}
return (uint32_t)Roundf(linear * ((1 << 16) - 1));
}
uint16_t SharpYuvLinearToGamma(uint32_t v, int bit_depth,
SharpYuvTransferFunctionType transfer_type) {
float v_float, linear;
if (transfer_type == kSharpYuvTransferFunctionSrgb) {
return FromLinearSrgb(v, bit_depth);
}
v_float = (float)v / ((1 << 16) - 1);
switch (transfer_type) {
case kSharpYuvTransferFunctionBt709:
case kSharpYuvTransferFunctionBt601:
case kSharpYuvTransferFunctionBt2020_10Bit:
case kSharpYuvTransferFunctionBt2020_12Bit:
linear = FromLinear709(v_float);
break;
case kSharpYuvTransferFunctionBt470M:
linear = FromLinear470M(v_float);
break;
case kSharpYuvTransferFunctionBt470Bg:
linear = FromLinear470Bg(v_float);
break;
case kSharpYuvTransferFunctionSmpte240:
linear = FromLinearSmpte240(v_float);
break;
case kSharpYuvTransferFunctionLinear:
return v;
case kSharpYuvTransferFunctionLog100:
linear = FromLinearLog100(v_float);
break;
case kSharpYuvTransferFunctionLog100_Sqrt10:
linear = FromLinearLog100Sqrt10(v_float);
break;
case kSharpYuvTransferFunctionIec61966:
linear = FromLinearIec61966(v_float);
break;
case kSharpYuvTransferFunctionBt1361:
linear = FromLinearBt1361(v_float);
break;
case kSharpYuvTransferFunctionSmpte2084:
linear = FromLinearPq(v_float);
break;
case kSharpYuvTransferFunctionSmpte428:
linear = FromLinearSmpte428(v_float);
break;
case kSharpYuvTransferFunctionHlg:
linear = FromLinearHlg(v_float);
break;
default:
assert(0);
linear = 0;
break;
}
return (uint16_t)Roundf(linear * ((1 << bit_depth) - 1));
}
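The block above is the transfer-characteristics support present on main but not in v1.3.1-rc2: per-curve helpers (BT.709/601/2020, BT.470M/BG, SMPTE 240M/428/2084, log, IEC 61966, BT.1361, HLG) plus the two dispatch functions that now take a SharpYuvTransferFunctionType. A minimal usage sketch, assuming the internal header sharpyuv/sharpyuv_gamma.h is on the include path (it is not an installed public header); only main() and the printed message are invented here, every other name comes from the diff above:

/* Illustrative sketch only, not part of the diff above. */
#include <stdio.h>
#include "sharpyuv/sharpyuv_gamma.h"

int main(void) {
  const uint16_t coded = 512;  /* a mid-range 10-bit BT.709-coded sample */
  uint32_t linear;
  uint16_t back;
  SharpYuvInitGammaTables();   /* required before the conversion functions */
  linear = SharpYuvGammaToLinear(coded, 10, kSharpYuvTransferFunctionBt709);
  back = SharpYuvLinearToGamma(linear, 10, kSharpYuvTransferFunctionBt709);
  printf("coded=%u -> 16-bit linear=%u -> back=%u\n",
         (unsigned)coded, (unsigned)linear, (unsigned)back);
  return 0;
}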

sharpyuv/sharpyuv_gamma.h

@ -12,7 +12,6 @@
#ifndef WEBP_SHARPYUV_SHARPYUV_GAMMA_H_ #ifndef WEBP_SHARPYUV_SHARPYUV_GAMMA_H_
#define WEBP_SHARPYUV_SHARPYUV_GAMMA_H_ #define WEBP_SHARPYUV_SHARPYUV_GAMMA_H_
#include "sharpyuv/sharpyuv.h"
#include "src/webp/types.h" #include "src/webp/types.h"
#ifdef __cplusplus #ifdef __cplusplus
@ -23,13 +22,11 @@ extern "C" {
// SharpYuvGammaToLinear or SharpYuvLinearToGamma. // SharpYuvGammaToLinear or SharpYuvLinearToGamma.
void SharpYuvInitGammaTables(void); void SharpYuvInitGammaTables(void);
// Converts a 'bit_depth'-bit gamma color value to a 16-bit linear value. // Converts a gamma color value on 'bit_depth' bits to a 16 bit linear value.
uint32_t SharpYuvGammaToLinear(uint16_t v, int bit_depth, uint32_t SharpYuvGammaToLinear(uint16_t v, int bit_depth);
SharpYuvTransferFunctionType transfer_type);
// Converts a 16-bit linear color value to a 'bit_depth'-bit gamma value. // Converts a 16 bit linear color value to a gamma value on 'bit_depth' bits.
uint16_t SharpYuvLinearToGamma(uint32_t value, int bit_depth, uint16_t SharpYuvLinearToGamma(uint32_t value, int bit_depth);
SharpYuvTransferFunctionType transfer_type);
#ifdef __cplusplus #ifdef __cplusplus
} // extern "C" } // extern "C"

src/Makefile.am

@ -36,7 +36,7 @@ libwebp_la_LIBADD += utils/libwebputils.la
# other than the ones listed on the command line, i.e., after linking, it will # other than the ones listed on the command line, i.e., after linking, it will
# not have unresolved symbols. Some platforms (Windows among them) require all # not have unresolved symbols. Some platforms (Windows among them) require all
# symbols in shared libraries to be resolved at library creation. # symbols in shared libraries to be resolved at library creation.
libwebp_la_LDFLAGS = -no-undefined -version-info 8:10:1 libwebp_la_LDFLAGS = -no-undefined -version-info 8:7:1
libwebpincludedir = $(includedir)/webp libwebpincludedir = $(includedir)/webp
pkgconfig_DATA = libwebp.pc pkgconfig_DATA = libwebp.pc
@ -48,7 +48,7 @@ if BUILD_LIBWEBPDECODER
libwebpdecoder_la_LIBADD += dsp/libwebpdspdecode.la libwebpdecoder_la_LIBADD += dsp/libwebpdspdecode.la
libwebpdecoder_la_LIBADD += utils/libwebputilsdecode.la libwebpdecoder_la_LIBADD += utils/libwebputilsdecode.la
libwebpdecoder_la_LDFLAGS = -no-undefined -version-info 4:10:1 libwebpdecoder_la_LDFLAGS = -no-undefined -version-info 4:7:1
pkgconfig_DATA += libwebpdecoder.pc pkgconfig_DATA += libwebpdecoder.pc
endif endif
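The visible change in this file is the libtool interface version bump. With -no-undefined -version-info current:revision:age, libtool derives the ELF suffix as (current - age).(age).(revision), so v1.3.1-rc2's 8:7:1 and 4:7:1 yield libwebp.so.7.1.7 and libwebpdecoder.so.3.1.7, while main's 8:10:1 and 4:10:1 yield libwebp.so.7.1.10 and libwebpdecoder.so.3.1.10; the unchanged current - age means the binary interface is still compatible, only the revision advances.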

src/dec/alpha_dec.c

@ -13,20 +13,18 @@
#include <stdlib.h> #include <stdlib.h>
#include "src/dec/alphai_dec.h" #include "src/dec/alphai_dec.h"
#include "src/dec/vp8_dec.h"
#include "src/dec/vp8i_dec.h" #include "src/dec/vp8i_dec.h"
#include "src/dec/vp8li_dec.h" #include "src/dec/vp8li_dec.h"
#include "src/dsp/dsp.h" #include "src/dsp/dsp.h"
#include "src/utils/quant_levels_dec_utils.h" #include "src/utils/quant_levels_dec_utils.h"
#include "src/utils/utils.h" #include "src/utils/utils.h"
#include "src/webp/format_constants.h" #include "src/webp/format_constants.h"
#include "src/webp/types.h"
//------------------------------------------------------------------------------ //------------------------------------------------------------------------------
// ALPHDecoder object. // ALPHDecoder object.
// Allocates a new alpha decoder instance. // Allocates a new alpha decoder instance.
WEBP_NODISCARD static ALPHDecoder* ALPHNew(void) { static ALPHDecoder* ALPHNew(void) {
ALPHDecoder* const dec = (ALPHDecoder*)WebPSafeCalloc(1ULL, sizeof(*dec)); ALPHDecoder* const dec = (ALPHDecoder*)WebPSafeCalloc(1ULL, sizeof(*dec));
return dec; return dec;
} }
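On main, the allocation and init helpers in this file carry a WEBP_NODISCARD annotation so that ignoring their failure return triggers a compiler warning; the macro is presumably supplied by src/webp/types.h, which the hunk above newly includes. The exact libwebp definition is not shown in this diff; a typical definition of such a macro looks roughly like the following sketch, given only for orientation:

/* Illustrative only: one common way to define a nodiscard-style macro. */
#if defined(__cplusplus) && (__cplusplus >= 201703L)
#define WEBP_NODISCARD [[nodiscard]]
#elif defined(__GNUC__) || defined(__clang__)
#define WEBP_NODISCARD __attribute__((warn_unused_result))
#else
#define WEBP_NODISCARD /* no-op on other compilers */
#endif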
@ -34,8 +32,8 @@ WEBP_NODISCARD static ALPHDecoder* ALPHNew(void) {
// Clears and deallocates an alpha decoder instance. // Clears and deallocates an alpha decoder instance.
static void ALPHDelete(ALPHDecoder* const dec) { static void ALPHDelete(ALPHDecoder* const dec) {
if (dec != NULL) { if (dec != NULL) {
VP8LDelete(dec->vp8l_dec); VP8LDelete(dec->vp8l_dec_);
dec->vp8l_dec = NULL; dec->vp8l_dec_ = NULL;
WebPSafeFree(dec); WebPSafeFree(dec);
} }
} }
@ -47,43 +45,41 @@ static void ALPHDelete(ALPHDecoder* const dec) {
// header for alpha data stored using lossless compression. // header for alpha data stored using lossless compression.
// Returns false in case of error in alpha header (data too short, invalid // Returns false in case of error in alpha header (data too short, invalid
// compression method or filter, error in lossless header data etc). // compression method or filter, error in lossless header data etc).
WEBP_NODISCARD static int ALPHInit(ALPHDecoder* const dec, const uint8_t* data, static int ALPHInit(ALPHDecoder* const dec, const uint8_t* data,
size_t data_size, const VP8Io* const src_io, size_t data_size, const VP8Io* const src_io,
uint8_t* output) { uint8_t* output) {
int ok = 0; int ok = 0;
const uint8_t* const alpha_data = data + ALPHA_HEADER_LEN; const uint8_t* const alpha_data = data + ALPHA_HEADER_LEN;
const size_t alpha_data_size = data_size - ALPHA_HEADER_LEN; const size_t alpha_data_size = data_size - ALPHA_HEADER_LEN;
int rsrv; int rsrv;
VP8Io* const io = &dec->io; VP8Io* const io = &dec->io_;
assert(data != NULL && output != NULL && src_io != NULL); assert(data != NULL && output != NULL && src_io != NULL);
VP8FiltersInit(); VP8FiltersInit();
dec->output = output; dec->output_ = output;
dec->width = src_io->width; dec->width_ = src_io->width;
dec->height = src_io->height; dec->height_ = src_io->height;
assert(dec->width > 0 && dec->height > 0); assert(dec->width_ > 0 && dec->height_ > 0);
if (data_size <= ALPHA_HEADER_LEN) { if (data_size <= ALPHA_HEADER_LEN) {
return 0; return 0;
} }
dec->method = (data[0] >> 0) & 0x03; dec->method_ = (data[0] >> 0) & 0x03;
dec->filter = (WEBP_FILTER_TYPE)((data[0] >> 2) & 0x03); dec->filter_ = (WEBP_FILTER_TYPE)((data[0] >> 2) & 0x03);
dec->pre_processing = (data[0] >> 4) & 0x03; dec->pre_processing_ = (data[0] >> 4) & 0x03;
rsrv = (data[0] >> 6) & 0x03; rsrv = (data[0] >> 6) & 0x03;
if (dec->method < ALPHA_NO_COMPRESSION || if (dec->method_ < ALPHA_NO_COMPRESSION ||
dec->method > ALPHA_LOSSLESS_COMPRESSION || dec->method_ > ALPHA_LOSSLESS_COMPRESSION ||
dec->filter >= WEBP_FILTER_LAST || dec->filter_ >= WEBP_FILTER_LAST ||
dec->pre_processing > ALPHA_PREPROCESSED_LEVELS || dec->pre_processing_ > ALPHA_PREPROCESSED_LEVELS ||
rsrv != 0) { rsrv != 0) {
return 0; return 0;
} }
// Copy the necessary parameters from src_io to io // Copy the necessary parameters from src_io to io
if (!VP8InitIo(io)) { VP8InitIo(io);
return 0;
}
WebPInitCustomIo(NULL, io); WebPInitCustomIo(NULL, io);
io->opaque = dec; io->opaque = dec;
io->width = src_io->width; io->width = src_io->width;
@ -96,11 +92,11 @@ WEBP_NODISCARD static int ALPHInit(ALPHDecoder* const dec, const uint8_t* data,
io->crop_bottom = src_io->crop_bottom; io->crop_bottom = src_io->crop_bottom;
// No need to copy the scaling parameters. // No need to copy the scaling parameters.
if (dec->method == ALPHA_NO_COMPRESSION) { if (dec->method_ == ALPHA_NO_COMPRESSION) {
const size_t alpha_decoded_size = dec->width * dec->height; const size_t alpha_decoded_size = dec->width_ * dec->height_;
ok = (alpha_data_size >= alpha_decoded_size); ok = (alpha_data_size >= alpha_decoded_size);
} else { } else {
assert(dec->method == ALPHA_LOSSLESS_COMPRESSION); assert(dec->method_ == ALPHA_LOSSLESS_COMPRESSION);
ok = VP8LDecodeAlphaHeader(dec, alpha_data, alpha_data_size); ok = VP8LDecodeAlphaHeader(dec, alpha_data, alpha_data_size);
} }
@ -111,70 +107,76 @@ WEBP_NODISCARD static int ALPHInit(ALPHDecoder* const dec, const uint8_t* data,
// starting from row number 'row'. It assumes that rows up to (row - 1) have // starting from row number 'row'. It assumes that rows up to (row - 1) have
// already been decoded. // already been decoded.
// Returns false in case of bitstream error. // Returns false in case of bitstream error.
WEBP_NODISCARD static int ALPHDecode(VP8Decoder* const dec, int row, static int ALPHDecode(VP8Decoder* const dec, int row, int num_rows) {
int num_rows) { ALPHDecoder* const alph_dec = dec->alph_dec_;
ALPHDecoder* const alph_dec = dec->alph_dec; const int width = alph_dec->width_;
const int width = alph_dec->width; const int height = alph_dec->io_.crop_bottom;
const int height = alph_dec->io.crop_bottom; if (alph_dec->method_ == ALPHA_NO_COMPRESSION) {
if (alph_dec->method == ALPHA_NO_COMPRESSION) {
int y; int y;
const uint8_t* prev_line = dec->alpha_prev_line; const uint8_t* prev_line = dec->alpha_prev_line_;
const uint8_t* deltas = dec->alpha_data + ALPHA_HEADER_LEN + row * width; const uint8_t* deltas = dec->alpha_data_ + ALPHA_HEADER_LEN + row * width;
uint8_t* dst = dec->alpha_plane + row * width; uint8_t* dst = dec->alpha_plane_ + row * width;
assert(deltas <= &dec->alpha_data[dec->alpha_data_size]); assert(deltas <= &dec->alpha_data_[dec->alpha_data_size_]);
assert(WebPUnfilters[alph_dec->filter] != NULL); if (alph_dec->filter_ != WEBP_FILTER_NONE) {
for (y = 0; y < num_rows; ++y) { assert(WebPUnfilters[alph_dec->filter_] != NULL);
WebPUnfilters[alph_dec->filter](prev_line, deltas, dst, width); for (y = 0; y < num_rows; ++y) {
prev_line = dst; WebPUnfilters[alph_dec->filter_](prev_line, deltas, dst, width);
dst += width; prev_line = dst;
deltas += width; dst += width;
deltas += width;
}
} else {
for (y = 0; y < num_rows; ++y) {
memcpy(dst, deltas, width * sizeof(*dst));
prev_line = dst;
dst += width;
deltas += width;
}
} }
dec->alpha_prev_line = prev_line; dec->alpha_prev_line_ = prev_line;
} else { // alph_dec->method == ALPHA_LOSSLESS_COMPRESSION } else { // alph_dec->method_ == ALPHA_LOSSLESS_COMPRESSION
assert(alph_dec->vp8l_dec != NULL); assert(alph_dec->vp8l_dec_ != NULL);
if (!VP8LDecodeAlphaImageStream(alph_dec, row + num_rows)) { if (!VP8LDecodeAlphaImageStream(alph_dec, row + num_rows)) {
return 0; return 0;
} }
} }
if (row + num_rows >= height) { if (row + num_rows >= height) {
dec->is_alpha_decoded = 1; dec->is_alpha_decoded_ = 1;
} }
return 1; return 1;
} }
WEBP_NODISCARD static int AllocateAlphaPlane(VP8Decoder* const dec, static int AllocateAlphaPlane(VP8Decoder* const dec, const VP8Io* const io) {
const VP8Io* const io) {
const int stride = io->width; const int stride = io->width;
const int height = io->crop_bottom; const int height = io->crop_bottom;
const uint64_t alpha_size = (uint64_t)stride * height; const uint64_t alpha_size = (uint64_t)stride * height;
assert(dec->alpha_plane_mem == NULL); assert(dec->alpha_plane_mem_ == NULL);
dec->alpha_plane_mem = dec->alpha_plane_mem_ =
(uint8_t*)WebPSafeMalloc(alpha_size, sizeof(*dec->alpha_plane)); (uint8_t*)WebPSafeMalloc(alpha_size, sizeof(*dec->alpha_plane_));
if (dec->alpha_plane_mem == NULL) { if (dec->alpha_plane_mem_ == NULL) {
return VP8SetError(dec, VP8_STATUS_OUT_OF_MEMORY, return 0;
"Alpha decoder initialization failed.");
} }
dec->alpha_plane = dec->alpha_plane_mem; dec->alpha_plane_ = dec->alpha_plane_mem_;
dec->alpha_prev_line = NULL; dec->alpha_prev_line_ = NULL;
return 1; return 1;
} }
void WebPDeallocateAlphaMemory(VP8Decoder* const dec) { void WebPDeallocateAlphaMemory(VP8Decoder* const dec) {
assert(dec != NULL); assert(dec != NULL);
WebPSafeFree(dec->alpha_plane_mem); WebPSafeFree(dec->alpha_plane_mem_);
dec->alpha_plane_mem = NULL; dec->alpha_plane_mem_ = NULL;
dec->alpha_plane = NULL; dec->alpha_plane_ = NULL;
ALPHDelete(dec->alph_dec); ALPHDelete(dec->alph_dec_);
dec->alph_dec = NULL; dec->alph_dec_ = NULL;
} }
//------------------------------------------------------------------------------ //------------------------------------------------------------------------------
// Main entry point. // Main entry point.
WEBP_NODISCARD const uint8_t* VP8DecompressAlphaRows(VP8Decoder* const dec, const uint8_t* VP8DecompressAlphaRows(VP8Decoder* const dec,
const VP8Io* const io, const VP8Io* const io,
int row, int num_rows) { int row, int num_rows) {
const int width = io->width; const int width = io->width;
const int height = io->crop_bottom; const int height = io->crop_bottom;
@ -184,46 +186,37 @@ WEBP_NODISCARD const uint8_t* VP8DecompressAlphaRows(VP8Decoder* const dec,
return NULL; return NULL;
} }
if (!dec->is_alpha_decoded) { if (!dec->is_alpha_decoded_) {
if (dec->alph_dec == NULL) { // Initialize decoder. if (dec->alph_dec_ == NULL) { // Initialize decoder.
dec->alph_dec = ALPHNew(); dec->alph_dec_ = ALPHNew();
if (dec->alph_dec == NULL) { if (dec->alph_dec_ == NULL) return NULL;
VP8SetError(dec, VP8_STATUS_OUT_OF_MEMORY,
"Alpha decoder initialization failed.");
return NULL;
}
if (!AllocateAlphaPlane(dec, io)) goto Error; if (!AllocateAlphaPlane(dec, io)) goto Error;
if (!ALPHInit(dec->alph_dec, dec->alpha_data, dec->alpha_data_size, if (!ALPHInit(dec->alph_dec_, dec->alpha_data_, dec->alpha_data_size_,
io, dec->alpha_plane)) { io, dec->alpha_plane_)) {
VP8LDecoder* const vp8l_dec = dec->alph_dec->vp8l_dec;
VP8SetError(dec,
(vp8l_dec == NULL) ? VP8_STATUS_OUT_OF_MEMORY
: vp8l_dec->status,
"Alpha decoder initialization failed.");
goto Error; goto Error;
} }
// if we allowed use of alpha dithering, check whether it's needed at all // if we allowed use of alpha dithering, check whether it's needed at all
if (dec->alph_dec->pre_processing != ALPHA_PREPROCESSED_LEVELS) { if (dec->alph_dec_->pre_processing_ != ALPHA_PREPROCESSED_LEVELS) {
dec->alpha_dithering = 0; // disable dithering dec->alpha_dithering_ = 0; // disable dithering
} else { } else {
num_rows = height - row; // decode everything in one pass num_rows = height - row; // decode everything in one pass
} }
} }
assert(dec->alph_dec != NULL); assert(dec->alph_dec_ != NULL);
assert(row + num_rows <= height); assert(row + num_rows <= height);
if (!ALPHDecode(dec, row, num_rows)) goto Error; if (!ALPHDecode(dec, row, num_rows)) goto Error;
if (dec->is_alpha_decoded) { // finished? if (dec->is_alpha_decoded_) { // finished?
ALPHDelete(dec->alph_dec); ALPHDelete(dec->alph_dec_);
dec->alph_dec = NULL; dec->alph_dec_ = NULL;
if (dec->alpha_dithering > 0) { if (dec->alpha_dithering_ > 0) {
uint8_t* const alpha = dec->alpha_plane + io->crop_top * width uint8_t* const alpha = dec->alpha_plane_ + io->crop_top * width
+ io->crop_left; + io->crop_left;
if (!WebPDequantizeLevels(alpha, if (!WebPDequantizeLevels(alpha,
io->crop_right - io->crop_left, io->crop_right - io->crop_left,
io->crop_bottom - io->crop_top, io->crop_bottom - io->crop_top,
width, dec->alpha_dithering)) { width, dec->alpha_dithering_)) {
goto Error; goto Error;
} }
} }
@ -231,7 +224,7 @@ WEBP_NODISCARD const uint8_t* VP8DecompressAlphaRows(VP8Decoder* const dec,
} }
// Return a pointer to the current decoded row. // Return a pointer to the current decoded row.
return dec->alpha_plane + row * width; return dec->alpha_plane_ + row * width;
Error: Error:
WebPDeallocateAlphaMemory(dec); WebPDeallocateAlphaMemory(dec);
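For reference, ALPHInit() above unpacks the single ALPH chunk header byte into four 2-bit fields before validating them. A standalone sketch of that unpacking; the struct and helper names are hypothetical, not libwebp API, but the bit layout mirrors the code above:

/* Illustrative sketch of the header byte read in ALPHInit() above. */
#include <stdint.h>

typedef struct {
  int method;          /* bits 0-1: 0 = uncompressed, 1 = lossless (VP8L)    */
  int filter;          /* bits 2-3: WEBP_FILTER_NONE .. WEBP_FILTER_GRADIENT */
  int pre_processing;  /* bits 4-5: 0 or ALPHA_PREPROCESSED_LEVELS           */
  int reserved;        /* bits 6-7: must be zero                             */
} AlphaChunkHeader;

static int ParseAlphaHeaderByte(uint8_t b, AlphaChunkHeader* const h) {
  h->method = (b >> 0) & 0x03;
  h->filter = (b >> 2) & 0x03;
  h->pre_processing = (b >> 4) & 0x03;
  h->reserved = (b >> 6) & 0x03;
  return (h->reserved == 0);  /* mirrors the rsrv != 0 rejection above */
}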

src/dec/alphai_dec.h

@ -25,24 +25,24 @@ struct VP8LDecoder; // Defined in dec/vp8li.h.
typedef struct ALPHDecoder ALPHDecoder; typedef struct ALPHDecoder ALPHDecoder;
struct ALPHDecoder { struct ALPHDecoder {
int width; int width_;
int height; int height_;
int method; int method_;
WEBP_FILTER_TYPE filter; WEBP_FILTER_TYPE filter_;
int pre_processing; int pre_processing_;
struct VP8LDecoder* vp8l_dec; struct VP8LDecoder* vp8l_dec_;
VP8Io io; VP8Io io_;
int use_8b_decode; // Although alpha channel requires only 1 byte per int use_8b_decode_; // Although alpha channel requires only 1 byte per
// pixel, sometimes VP8LDecoder may need to allocate // pixel, sometimes VP8LDecoder may need to allocate
// 4 bytes per pixel internally during decode. // 4 bytes per pixel internally during decode.
uint8_t* output; uint8_t* output_;
const uint8_t* prev_line; // last output row (or NULL) const uint8_t* prev_line_; // last output row (or NULL)
}; };
//------------------------------------------------------------------------------ //------------------------------------------------------------------------------
// internal functions. Not public. // internal functions. Not public.
// Deallocate memory associated to dec->alpha_plane decoding // Deallocate memory associated to dec->alpha_plane_ decoding
void WebPDeallocateAlphaMemory(VP8Decoder* const dec); void WebPDeallocateAlphaMemory(VP8Decoder* const dec);
//------------------------------------------------------------------------------ //------------------------------------------------------------------------------

src/dec/buffer_dec.c

@ -26,9 +26,10 @@ static const uint8_t kModeBpp[MODE_LAST] = {
4, 4, 4, 2, // pre-multiplied modes 4, 4, 4, 2, // pre-multiplied modes
1, 1 }; 1, 1 };
// Check that webp_csp_mode is within the bounds of WEBP_CSP_MODE.
// Convert to an integer to handle both the unsigned/signed enum cases // Convert to an integer to handle both the unsigned/signed enum cases
// without the need for casting to remove type limit warnings. // without the need for casting to remove type limit warnings.
int IsValidColorspace(int webp_csp_mode) { static int IsValidColorspace(int webp_csp_mode) {
return (webp_csp_mode >= MODE_RGB && webp_csp_mode < MODE_LAST); return (webp_csp_mode >= MODE_RGB && webp_csp_mode < MODE_LAST);
} }
@ -74,7 +75,7 @@ static VP8StatusCode CheckDecBuffer(const WebPDecBuffer* const buffer) {
const WebPRGBABuffer* const buf = &buffer->u.RGBA; const WebPRGBABuffer* const buf = &buffer->u.RGBA;
const int stride = abs(buf->stride); const int stride = abs(buf->stride);
const uint64_t size = const uint64_t size =
MIN_BUFFER_SIZE((uint64_t)width * kModeBpp[mode], height, stride); MIN_BUFFER_SIZE(width * kModeBpp[mode], height, stride);
ok &= (size <= buf->size); ok &= (size <= buf->size);
ok &= (stride >= width * kModeBpp[mode]); ok &= (stride >= width * kModeBpp[mode]);
ok &= (buf->rgba != NULL); ok &= (buf->rgba != NULL);
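The one functional change in CheckDecBuffer() above is widening width * kModeBpp[mode] to uint64_t before it reaches MIN_BUFFER_SIZE, so the per-row byte count cannot wrap in 32-bit arithmetic for very large dimensions. A minimal, self-contained illustration of why the cast must happen before the multiplication (unsigned 32-bit values are used here so the wraparound is well defined; the numbers are hypothetical, not libwebp limits):

#include <stdint.h>
#include <stdio.h>

int main(void) {
  const uint32_t width = 1u << 30;  /* deliberately huge row width */
  const uint32_t bpp = 4;           /* e.g. 4 bytes per pixel (RGBA) */
  const uint64_t wrapped = (uint64_t)(width * bpp);  /* 32-bit product wraps to 0 */
  const uint64_t widened = (uint64_t)width * bpp;    /* widened first: 4294967296 */
  printf("wrapped=%llu widened=%llu\n",
         (unsigned long long)wrapped, (unsigned long long)widened);
  return 0;
}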

src/dec/common_dec.h

@ -51,7 +51,4 @@ enum { MB_FEATURE_TREE_PROBS = 3,
NUM_PROBAS = 11 NUM_PROBAS = 11
}; };
// Check that webp_csp_mode is within the bounds of WEBP_CSP_MODE.
int IsValidColorspace(int webp_csp_mode);
#endif // WEBP_DEC_COMMON_DEC_H_ #endif // WEBP_DEC_COMMON_DEC_H_

src/dec/frame_dec.c

@ -72,11 +72,11 @@ static void ReconstructRow(const VP8Decoder* const dec,
const VP8ThreadContext* ctx) { const VP8ThreadContext* ctx) {
int j; int j;
int mb_x; int mb_x;
const int mb_y = ctx->mb_y; const int mb_y = ctx->mb_y_;
const int cache_id = ctx->id; const int cache_id = ctx->id_;
uint8_t* const y_dst = dec->yuv_b + Y_OFF; uint8_t* const y_dst = dec->yuv_b_ + Y_OFF;
uint8_t* const u_dst = dec->yuv_b + U_OFF; uint8_t* const u_dst = dec->yuv_b_ + U_OFF;
uint8_t* const v_dst = dec->yuv_b + V_OFF; uint8_t* const v_dst = dec->yuv_b_ + V_OFF;
// Initialize left-most block. // Initialize left-most block.
for (j = 0; j < 16; ++j) { for (j = 0; j < 16; ++j) {
@ -99,8 +99,8 @@ static void ReconstructRow(const VP8Decoder* const dec,
} }
// Reconstruct one row. // Reconstruct one row.
for (mb_x = 0; mb_x < dec->mb_w; ++mb_x) { for (mb_x = 0; mb_x < dec->mb_w_; ++mb_x) {
const VP8MBData* const block = ctx->mb_data + mb_x; const VP8MBData* const block = ctx->mb_data_ + mb_x;
// Rotate in the left samples from previously decoded block. We move four // Rotate in the left samples from previously decoded block. We move four
// pixels at a time for alignment reason, and because of in-loop filter. // pixels at a time for alignment reason, and because of in-loop filter.
@ -115,9 +115,9 @@ static void ReconstructRow(const VP8Decoder* const dec,
} }
{ {
// bring top samples into the cache // bring top samples into the cache
VP8TopSamples* const top_yuv = dec->yuv_t + mb_x; VP8TopSamples* const top_yuv = dec->yuv_t_ + mb_x;
const int16_t* const coeffs = block->coeffs; const int16_t* const coeffs = block->coeffs_;
uint32_t bits = block->non_zero_y; uint32_t bits = block->non_zero_y_;
int n; int n;
if (mb_y > 0) { if (mb_y > 0) {
@ -127,11 +127,11 @@ static void ReconstructRow(const VP8Decoder* const dec,
} }
// predict and add residuals // predict and add residuals
if (block->is_i4x4) { // 4x4 if (block->is_i4x4_) { // 4x4
uint32_t* const top_right = (uint32_t*)(y_dst - BPS + 16); uint32_t* const top_right = (uint32_t*)(y_dst - BPS + 16);
if (mb_y > 0) { if (mb_y > 0) {
if (mb_x >= dec->mb_w - 1) { // on rightmost border if (mb_x >= dec->mb_w_ - 1) { // on rightmost border
memset(top_right, top_yuv[0].y[15], sizeof(*top_right)); memset(top_right, top_yuv[0].y[15], sizeof(*top_right));
} else { } else {
memcpy(top_right, top_yuv[1].y, sizeof(*top_right)); memcpy(top_right, top_yuv[1].y, sizeof(*top_right));
@ -143,11 +143,11 @@ static void ReconstructRow(const VP8Decoder* const dec,
// predict and add residuals for all 4x4 blocks in turn. // predict and add residuals for all 4x4 blocks in turn.
for (n = 0; n < 16; ++n, bits <<= 2) { for (n = 0; n < 16; ++n, bits <<= 2) {
uint8_t* const dst = y_dst + kScan[n]; uint8_t* const dst = y_dst + kScan[n];
VP8PredLuma4[block->imodes[n]](dst); VP8PredLuma4[block->imodes_[n]](dst);
DoTransform(bits, coeffs + n * 16, dst); DoTransform(bits, coeffs + n * 16, dst);
} }
} else { // 16x16 } else { // 16x16
const int pred_func = CheckMode(mb_x, mb_y, block->imodes[0]); const int pred_func = CheckMode(mb_x, mb_y, block->imodes_[0]);
VP8PredLuma16[pred_func](y_dst); VP8PredLuma16[pred_func](y_dst);
if (bits != 0) { if (bits != 0) {
for (n = 0; n < 16; ++n, bits <<= 2) { for (n = 0; n < 16; ++n, bits <<= 2) {
@ -157,8 +157,8 @@ static void ReconstructRow(const VP8Decoder* const dec,
} }
{ {
// Chroma // Chroma
const uint32_t bits_uv = block->non_zero_uv; const uint32_t bits_uv = block->non_zero_uv_;
const int pred_func = CheckMode(mb_x, mb_y, block->uvmode); const int pred_func = CheckMode(mb_x, mb_y, block->uvmode_);
VP8PredChroma8[pred_func](u_dst); VP8PredChroma8[pred_func](u_dst);
VP8PredChroma8[pred_func](v_dst); VP8PredChroma8[pred_func](v_dst);
DoUVTransform(bits_uv >> 0, coeffs + 16 * 16, u_dst); DoUVTransform(bits_uv >> 0, coeffs + 16 * 16, u_dst);
@ -166,25 +166,25 @@ static void ReconstructRow(const VP8Decoder* const dec,
} }
// stash away top samples for next block // stash away top samples for next block
if (mb_y < dec->mb_h - 1) { if (mb_y < dec->mb_h_ - 1) {
memcpy(top_yuv[0].y, y_dst + 15 * BPS, 16); memcpy(top_yuv[0].y, y_dst + 15 * BPS, 16);
memcpy(top_yuv[0].u, u_dst + 7 * BPS, 8); memcpy(top_yuv[0].u, u_dst + 7 * BPS, 8);
memcpy(top_yuv[0].v, v_dst + 7 * BPS, 8); memcpy(top_yuv[0].v, v_dst + 7 * BPS, 8);
} }
} }
// Transfer reconstructed samples from yuv_b cache to final destination. // Transfer reconstructed samples from yuv_b_ cache to final destination.
{ {
const int y_offset = cache_id * 16 * dec->cache_y_stride; const int y_offset = cache_id * 16 * dec->cache_y_stride_;
const int uv_offset = cache_id * 8 * dec->cache_uv_stride; const int uv_offset = cache_id * 8 * dec->cache_uv_stride_;
uint8_t* const y_out = dec->cache_y + mb_x * 16 + y_offset; uint8_t* const y_out = dec->cache_y_ + mb_x * 16 + y_offset;
uint8_t* const u_out = dec->cache_u + mb_x * 8 + uv_offset; uint8_t* const u_out = dec->cache_u_ + mb_x * 8 + uv_offset;
uint8_t* const v_out = dec->cache_v + mb_x * 8 + uv_offset; uint8_t* const v_out = dec->cache_v_ + mb_x * 8 + uv_offset;
for (j = 0; j < 16; ++j) { for (j = 0; j < 16; ++j) {
memcpy(y_out + j * dec->cache_y_stride, y_dst + j * BPS, 16); memcpy(y_out + j * dec->cache_y_stride_, y_dst + j * BPS, 16);
} }
for (j = 0; j < 8; ++j) { for (j = 0; j < 8; ++j) {
memcpy(u_out + j * dec->cache_uv_stride, u_dst + j * BPS, 8); memcpy(u_out + j * dec->cache_uv_stride_, u_dst + j * BPS, 8);
memcpy(v_out + j * dec->cache_uv_stride, v_dst + j * BPS, 8); memcpy(v_out + j * dec->cache_uv_stride_, v_dst + j * BPS, 8);
} }
} }
} }
@ -201,40 +201,40 @@ static void ReconstructRow(const VP8Decoder* const dec,
static const uint8_t kFilterExtraRows[3] = { 0, 2, 8 }; static const uint8_t kFilterExtraRows[3] = { 0, 2, 8 };
static void DoFilter(const VP8Decoder* const dec, int mb_x, int mb_y) { static void DoFilter(const VP8Decoder* const dec, int mb_x, int mb_y) {
const VP8ThreadContext* const ctx = &dec->thread_ctx; const VP8ThreadContext* const ctx = &dec->thread_ctx_;
const int cache_id = ctx->id; const int cache_id = ctx->id_;
const int y_bps = dec->cache_y_stride; const int y_bps = dec->cache_y_stride_;
const VP8FInfo* const f_info = ctx->f_info + mb_x; const VP8FInfo* const f_info = ctx->f_info_ + mb_x;
uint8_t* const y_dst = dec->cache_y + cache_id * 16 * y_bps + mb_x * 16; uint8_t* const y_dst = dec->cache_y_ + cache_id * 16 * y_bps + mb_x * 16;
const int ilevel = f_info->f_ilevel; const int ilevel = f_info->f_ilevel_;
const int limit = f_info->f_limit; const int limit = f_info->f_limit_;
if (limit == 0) { if (limit == 0) {
return; return;
} }
assert(limit >= 3); assert(limit >= 3);
if (dec->filter_type == 1) { // simple if (dec->filter_type_ == 1) { // simple
if (mb_x > 0) { if (mb_x > 0) {
VP8SimpleHFilter16(y_dst, y_bps, limit + 4); VP8SimpleHFilter16(y_dst, y_bps, limit + 4);
} }
if (f_info->f_inner) { if (f_info->f_inner_) {
VP8SimpleHFilter16i(y_dst, y_bps, limit); VP8SimpleHFilter16i(y_dst, y_bps, limit);
} }
if (mb_y > 0) { if (mb_y > 0) {
VP8SimpleVFilter16(y_dst, y_bps, limit + 4); VP8SimpleVFilter16(y_dst, y_bps, limit + 4);
} }
if (f_info->f_inner) { if (f_info->f_inner_) {
VP8SimpleVFilter16i(y_dst, y_bps, limit); VP8SimpleVFilter16i(y_dst, y_bps, limit);
} }
} else { // complex } else { // complex
const int uv_bps = dec->cache_uv_stride; const int uv_bps = dec->cache_uv_stride_;
uint8_t* const u_dst = dec->cache_u + cache_id * 8 * uv_bps + mb_x * 8; uint8_t* const u_dst = dec->cache_u_ + cache_id * 8 * uv_bps + mb_x * 8;
uint8_t* const v_dst = dec->cache_v + cache_id * 8 * uv_bps + mb_x * 8; uint8_t* const v_dst = dec->cache_v_ + cache_id * 8 * uv_bps + mb_x * 8;
const int hev_thresh = f_info->hev_thresh; const int hev_thresh = f_info->hev_thresh_;
if (mb_x > 0) { if (mb_x > 0) {
VP8HFilter16(y_dst, y_bps, limit + 4, ilevel, hev_thresh); VP8HFilter16(y_dst, y_bps, limit + 4, ilevel, hev_thresh);
VP8HFilter8(u_dst, v_dst, uv_bps, limit + 4, ilevel, hev_thresh); VP8HFilter8(u_dst, v_dst, uv_bps, limit + 4, ilevel, hev_thresh);
} }
if (f_info->f_inner) { if (f_info->f_inner_) {
VP8HFilter16i(y_dst, y_bps, limit, ilevel, hev_thresh); VP8HFilter16i(y_dst, y_bps, limit, ilevel, hev_thresh);
VP8HFilter8i(u_dst, v_dst, uv_bps, limit, ilevel, hev_thresh); VP8HFilter8i(u_dst, v_dst, uv_bps, limit, ilevel, hev_thresh);
} }
@ -242,7 +242,7 @@ static void DoFilter(const VP8Decoder* const dec, int mb_x, int mb_y) {
VP8VFilter16(y_dst, y_bps, limit + 4, ilevel, hev_thresh); VP8VFilter16(y_dst, y_bps, limit + 4, ilevel, hev_thresh);
VP8VFilter8(u_dst, v_dst, uv_bps, limit + 4, ilevel, hev_thresh); VP8VFilter8(u_dst, v_dst, uv_bps, limit + 4, ilevel, hev_thresh);
} }
if (f_info->f_inner) { if (f_info->f_inner_) {
VP8VFilter16i(y_dst, y_bps, limit, ilevel, hev_thresh); VP8VFilter16i(y_dst, y_bps, limit, ilevel, hev_thresh);
VP8VFilter8i(u_dst, v_dst, uv_bps, limit, ilevel, hev_thresh); VP8VFilter8i(u_dst, v_dst, uv_bps, limit, ilevel, hev_thresh);
} }
@ -252,9 +252,9 @@ static void DoFilter(const VP8Decoder* const dec, int mb_x, int mb_y) {
// Filter the decoded macroblock row (if needed) // Filter the decoded macroblock row (if needed)
static void FilterRow(const VP8Decoder* const dec) { static void FilterRow(const VP8Decoder* const dec) {
int mb_x; int mb_x;
const int mb_y = dec->thread_ctx.mb_y; const int mb_y = dec->thread_ctx_.mb_y_;
assert(dec->thread_ctx.filter_row); assert(dec->thread_ctx_.filter_row_);
for (mb_x = dec->tl_mb_x; mb_x < dec->br_mb_x; ++mb_x) { for (mb_x = dec->tl_mb_x_; mb_x < dec->br_mb_x_; ++mb_x) {
DoFilter(dec, mb_x, mb_y); DoFilter(dec, mb_x, mb_y);
} }
} }
@ -263,51 +263,51 @@ static void FilterRow(const VP8Decoder* const dec) {
// Precompute the filtering strength for each segment and each i4x4/i16x16 mode. // Precompute the filtering strength for each segment and each i4x4/i16x16 mode.
static void PrecomputeFilterStrengths(VP8Decoder* const dec) { static void PrecomputeFilterStrengths(VP8Decoder* const dec) {
if (dec->filter_type > 0) { if (dec->filter_type_ > 0) {
int s; int s;
const VP8FilterHeader* const hdr = &dec->filter_hdr; const VP8FilterHeader* const hdr = &dec->filter_hdr_;
for (s = 0; s < NUM_MB_SEGMENTS; ++s) { for (s = 0; s < NUM_MB_SEGMENTS; ++s) {
int i4x4; int i4x4;
// First, compute the initial level // First, compute the initial level
int base_level; int base_level;
if (dec->segment_hdr.use_segment) { if (dec->segment_hdr_.use_segment_) {
base_level = dec->segment_hdr.filter_strength[s]; base_level = dec->segment_hdr_.filter_strength_[s];
if (!dec->segment_hdr.absolute_delta) { if (!dec->segment_hdr_.absolute_delta_) {
base_level += hdr->level; base_level += hdr->level_;
} }
} else { } else {
base_level = hdr->level; base_level = hdr->level_;
} }
for (i4x4 = 0; i4x4 <= 1; ++i4x4) { for (i4x4 = 0; i4x4 <= 1; ++i4x4) {
VP8FInfo* const info = &dec->fstrengths[s][i4x4]; VP8FInfo* const info = &dec->fstrengths_[s][i4x4];
int level = base_level; int level = base_level;
if (hdr->use_lf_delta) { if (hdr->use_lf_delta_) {
level += hdr->ref_lf_delta[0]; level += hdr->ref_lf_delta_[0];
if (i4x4) { if (i4x4) {
level += hdr->mode_lf_delta[0]; level += hdr->mode_lf_delta_[0];
} }
} }
level = (level < 0) ? 0 : (level > 63) ? 63 : level; level = (level < 0) ? 0 : (level > 63) ? 63 : level;
if (level > 0) { if (level > 0) {
int ilevel = level; int ilevel = level;
if (hdr->sharpness > 0) { if (hdr->sharpness_ > 0) {
if (hdr->sharpness > 4) { if (hdr->sharpness_ > 4) {
ilevel >>= 2; ilevel >>= 2;
} else { } else {
ilevel >>= 1; ilevel >>= 1;
} }
if (ilevel > 9 - hdr->sharpness) { if (ilevel > 9 - hdr->sharpness_) {
ilevel = 9 - hdr->sharpness; ilevel = 9 - hdr->sharpness_;
} }
} }
if (ilevel < 1) ilevel = 1; if (ilevel < 1) ilevel = 1;
info->f_ilevel = ilevel; info->f_ilevel_ = ilevel;
info->f_limit = 2 * level + ilevel; info->f_limit_ = 2 * level + ilevel;
info->hev_thresh = (level >= 40) ? 2 : (level >= 15) ? 1 : 0; info->hev_thresh_ = (level >= 40) ? 2 : (level >= 15) ? 1 : 0;
} else { } else {
info->f_limit = 0; // no filtering info->f_limit_ = 0; // no filtering
} }
info->f_inner = i4x4; info->f_inner_ = i4x4;
} }
} }
} }
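PrecomputeFilterStrengths() above derives, per segment and per prediction mode, the interior level, edge limit, and high-edge-variance threshold used by the loop filter. The clamping and sharpness adjustment can be read in isolation; a small standalone sketch with a hypothetical helper name that mirrors the arithmetic above:

#include <stdio.h>

/* Mirrors the level -> (ilevel, limit, hev_thresh) derivation shown above. */
static void FilterParams(int level, int sharpness,
                         int* const ilevel, int* const limit,
                         int* const hev_thresh) {
  if (level < 0) level = 0; else if (level > 63) level = 63;
  if (level > 0) {
    int il = level;
    if (sharpness > 0) {
      il >>= (sharpness > 4) ? 2 : 1;
      if (il > 9 - sharpness) il = 9 - sharpness;
    }
    if (il < 1) il = 1;
    *ilevel = il;
    *limit = 2 * level + il;
    *hev_thresh = (level >= 40) ? 2 : (level >= 15) ? 1 : 0;
  } else {
    *ilevel = 0;
    *limit = 0;  /* 0 disables filtering for this configuration */
    *hev_thresh = 0;
  }
}

int main(void) {
  int ilevel, limit, hev;
  FilterParams(30, 5, &ilevel, &limit, &hev);
  printf("ilevel=%d limit=%d hev_thresh=%d\n", ilevel, limit, hev);  /* 4 64 1 */
  return 0;
}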
@ -321,7 +321,7 @@ static void PrecomputeFilterStrengths(VP8Decoder* const dec) {
#define DITHER_AMP_TAB_SIZE 12 #define DITHER_AMP_TAB_SIZE 12
static const uint8_t kQuantToDitherAmp[DITHER_AMP_TAB_SIZE] = { static const uint8_t kQuantToDitherAmp[DITHER_AMP_TAB_SIZE] = {
// roughly, it's dqm->uv_mat[1] // roughly, it's dqm->uv_mat_[1]
8, 7, 6, 4, 4, 2, 2, 2, 1, 1, 1, 1 8, 7, 6, 4, 4, 2, 2, 2, 1, 1, 1, 1
}; };
@ -336,24 +336,24 @@ void VP8InitDithering(const WebPDecoderOptions* const options,
int s; int s;
int all_amp = 0; int all_amp = 0;
for (s = 0; s < NUM_MB_SEGMENTS; ++s) { for (s = 0; s < NUM_MB_SEGMENTS; ++s) {
VP8QuantMatrix* const dqm = &dec->dqm[s]; VP8QuantMatrix* const dqm = &dec->dqm_[s];
if (dqm->uv_quant < DITHER_AMP_TAB_SIZE) { if (dqm->uv_quant_ < DITHER_AMP_TAB_SIZE) {
const int idx = (dqm->uv_quant < 0) ? 0 : dqm->uv_quant; const int idx = (dqm->uv_quant_ < 0) ? 0 : dqm->uv_quant_;
dqm->dither = (f * kQuantToDitherAmp[idx]) >> 3; dqm->dither_ = (f * kQuantToDitherAmp[idx]) >> 3;
} }
all_amp |= dqm->dither; all_amp |= dqm->dither_;
} }
if (all_amp != 0) { if (all_amp != 0) {
VP8InitRandom(&dec->dithering_rg, 1.0f); VP8InitRandom(&dec->dithering_rg_, 1.0f);
dec->dither = 1; dec->dither_ = 1;
} }
} }
// potentially allow alpha dithering // potentially allow alpha dithering
dec->alpha_dithering = options->alpha_dithering_strength; dec->alpha_dithering_ = options->alpha_dithering_strength;
if (dec->alpha_dithering > 100) { if (dec->alpha_dithering_ > 100) {
dec->alpha_dithering = 100; dec->alpha_dithering_ = 100;
} else if (dec->alpha_dithering < 0) { } else if (dec->alpha_dithering_ < 0) {
dec->alpha_dithering = 0; dec->alpha_dithering_ = 0;
} }
} }
} }
@ -370,17 +370,17 @@ static void Dither8x8(VP8Random* const rg, uint8_t* dst, int bps, int amp) {
static void DitherRow(VP8Decoder* const dec) { static void DitherRow(VP8Decoder* const dec) {
int mb_x; int mb_x;
assert(dec->dither); assert(dec->dither_);
for (mb_x = dec->tl_mb_x; mb_x < dec->br_mb_x; ++mb_x) { for (mb_x = dec->tl_mb_x_; mb_x < dec->br_mb_x_; ++mb_x) {
const VP8ThreadContext* const ctx = &dec->thread_ctx; const VP8ThreadContext* const ctx = &dec->thread_ctx_;
const VP8MBData* const data = ctx->mb_data + mb_x; const VP8MBData* const data = ctx->mb_data_ + mb_x;
const int cache_id = ctx->id; const int cache_id = ctx->id_;
const int uv_bps = dec->cache_uv_stride; const int uv_bps = dec->cache_uv_stride_;
if (data->dither >= MIN_DITHER_AMP) { if (data->dither_ >= MIN_DITHER_AMP) {
uint8_t* const u_dst = dec->cache_u + cache_id * 8 * uv_bps + mb_x * 8; uint8_t* const u_dst = dec->cache_u_ + cache_id * 8 * uv_bps + mb_x * 8;
uint8_t* const v_dst = dec->cache_v + cache_id * 8 * uv_bps + mb_x * 8; uint8_t* const v_dst = dec->cache_v_ + cache_id * 8 * uv_bps + mb_x * 8;
Dither8x8(&dec->dithering_rg, u_dst, uv_bps, data->dither); Dither8x8(&dec->dithering_rg_, u_dst, uv_bps, data->dither_);
Dither8x8(&dec->dithering_rg, v_dst, uv_bps, data->dither); Dither8x8(&dec->dithering_rg_, v_dst, uv_bps, data->dither_);
} }
} }
} }
@ -403,29 +403,29 @@ static int FinishRow(void* arg1, void* arg2) {
VP8Decoder* const dec = (VP8Decoder*)arg1; VP8Decoder* const dec = (VP8Decoder*)arg1;
VP8Io* const io = (VP8Io*)arg2; VP8Io* const io = (VP8Io*)arg2;
int ok = 1; int ok = 1;
const VP8ThreadContext* const ctx = &dec->thread_ctx; const VP8ThreadContext* const ctx = &dec->thread_ctx_;
const int cache_id = ctx->id; const int cache_id = ctx->id_;
const int extra_y_rows = kFilterExtraRows[dec->filter_type]; const int extra_y_rows = kFilterExtraRows[dec->filter_type_];
const int ysize = extra_y_rows * dec->cache_y_stride; const int ysize = extra_y_rows * dec->cache_y_stride_;
const int uvsize = (extra_y_rows / 2) * dec->cache_uv_stride; const int uvsize = (extra_y_rows / 2) * dec->cache_uv_stride_;
const int y_offset = cache_id * 16 * dec->cache_y_stride; const int y_offset = cache_id * 16 * dec->cache_y_stride_;
const int uv_offset = cache_id * 8 * dec->cache_uv_stride; const int uv_offset = cache_id * 8 * dec->cache_uv_stride_;
uint8_t* const ydst = dec->cache_y - ysize + y_offset; uint8_t* const ydst = dec->cache_y_ - ysize + y_offset;
uint8_t* const udst = dec->cache_u - uvsize + uv_offset; uint8_t* const udst = dec->cache_u_ - uvsize + uv_offset;
uint8_t* const vdst = dec->cache_v - uvsize + uv_offset; uint8_t* const vdst = dec->cache_v_ - uvsize + uv_offset;
const int mb_y = ctx->mb_y; const int mb_y = ctx->mb_y_;
const int is_first_row = (mb_y == 0); const int is_first_row = (mb_y == 0);
const int is_last_row = (mb_y >= dec->br_mb_y - 1); const int is_last_row = (mb_y >= dec->br_mb_y_ - 1);
if (dec->mt_method == 2) { if (dec->mt_method_ == 2) {
ReconstructRow(dec, ctx); ReconstructRow(dec, ctx);
} }
if (ctx->filter_row) { if (ctx->filter_row_) {
FilterRow(dec); FilterRow(dec);
} }
if (dec->dither) { if (dec->dither_) {
DitherRow(dec); DitherRow(dec);
} }
@ -438,9 +438,9 @@ static int FinishRow(void* arg1, void* arg2) {
io->u = udst; io->u = udst;
io->v = vdst; io->v = vdst;
} else { } else {
io->y = dec->cache_y + y_offset; io->y = dec->cache_y_ + y_offset;
io->u = dec->cache_u + uv_offset; io->u = dec->cache_u_ + uv_offset;
io->v = dec->cache_v + uv_offset; io->v = dec->cache_v_ + uv_offset;
} }
if (!is_last_row) { if (!is_last_row) {
@ -449,9 +449,9 @@ static int FinishRow(void* arg1, void* arg2) {
if (y_end > io->crop_bottom) { if (y_end > io->crop_bottom) {
y_end = io->crop_bottom; // make sure we don't overflow on last row. y_end = io->crop_bottom; // make sure we don't overflow on last row.
} }
// If dec->alpha_data is not NULL, we have some alpha plane present. // If dec->alpha_data_ is not NULL, we have some alpha plane present.
io->a = NULL; io->a = NULL;
if (dec->alpha_data != NULL && y_start < y_end) { if (dec->alpha_data_ != NULL && y_start < y_end) {
io->a = VP8DecompressAlphaRows(dec, io, y_start, y_end - y_start); io->a = VP8DecompressAlphaRows(dec, io, y_start, y_end - y_start);
if (io->a == NULL) { if (io->a == NULL) {
return VP8SetError(dec, VP8_STATUS_BITSTREAM_ERROR, return VP8SetError(dec, VP8_STATUS_BITSTREAM_ERROR,
@ -462,9 +462,9 @@ static int FinishRow(void* arg1, void* arg2) {
const int delta_y = io->crop_top - y_start; const int delta_y = io->crop_top - y_start;
y_start = io->crop_top; y_start = io->crop_top;
assert(!(delta_y & 1)); assert(!(delta_y & 1));
io->y += dec->cache_y_stride * delta_y; io->y += dec->cache_y_stride_ * delta_y;
io->u += dec->cache_uv_stride * (delta_y >> 1); io->u += dec->cache_uv_stride_ * (delta_y >> 1);
io->v += dec->cache_uv_stride * (delta_y >> 1); io->v += dec->cache_uv_stride_ * (delta_y >> 1);
if (io->a != NULL) { if (io->a != NULL) {
io->a += io->width * delta_y; io->a += io->width * delta_y;
} }
@ -483,11 +483,11 @@ static int FinishRow(void* arg1, void* arg2) {
} }
} }
// rotate top samples if needed // rotate top samples if needed
if (cache_id + 1 == dec->num_caches) { if (cache_id + 1 == dec->num_caches_) {
if (!is_last_row) { if (!is_last_row) {
memcpy(dec->cache_y - ysize, ydst + 16 * dec->cache_y_stride, ysize); memcpy(dec->cache_y_ - ysize, ydst + 16 * dec->cache_y_stride_, ysize);
memcpy(dec->cache_u - uvsize, udst + 8 * dec->cache_uv_stride, uvsize); memcpy(dec->cache_u_ - uvsize, udst + 8 * dec->cache_uv_stride_, uvsize);
memcpy(dec->cache_v - uvsize, vdst + 8 * dec->cache_uv_stride, uvsize); memcpy(dec->cache_v_ - uvsize, vdst + 8 * dec->cache_uv_stride_, uvsize);
} }
} }
@ -500,43 +500,43 @@ static int FinishRow(void* arg1, void* arg2) {
int VP8ProcessRow(VP8Decoder* const dec, VP8Io* const io) { int VP8ProcessRow(VP8Decoder* const dec, VP8Io* const io) {
int ok = 1; int ok = 1;
VP8ThreadContext* const ctx = &dec->thread_ctx; VP8ThreadContext* const ctx = &dec->thread_ctx_;
const int filter_row = const int filter_row =
(dec->filter_type > 0) && (dec->filter_type_ > 0) &&
(dec->mb_y >= dec->tl_mb_y) && (dec->mb_y <= dec->br_mb_y); (dec->mb_y_ >= dec->tl_mb_y_) && (dec->mb_y_ <= dec->br_mb_y_);
if (dec->mt_method == 0) { if (dec->mt_method_ == 0) {
// ctx->id and ctx->f_info are already set // ctx->id_ and ctx->f_info_ are already set
ctx->mb_y = dec->mb_y; ctx->mb_y_ = dec->mb_y_;
ctx->filter_row = filter_row; ctx->filter_row_ = filter_row;
ReconstructRow(dec, ctx); ReconstructRow(dec, ctx);
ok = FinishRow(dec, io); ok = FinishRow(dec, io);
} else { } else {
WebPWorker* const worker = &dec->worker; WebPWorker* const worker = &dec->worker_;
// Finish previous job *before* updating context // Finish previous job *before* updating context
ok &= WebPGetWorkerInterface()->Sync(worker); ok &= WebPGetWorkerInterface()->Sync(worker);
assert(worker->status == OK); assert(worker->status_ == OK);
if (ok) { // spawn a new deblocking/output job if (ok) { // spawn a new deblocking/output job
ctx->io = *io; ctx->io_ = *io;
ctx->id = dec->cache_id; ctx->id_ = dec->cache_id_;
ctx->mb_y = dec->mb_y; ctx->mb_y_ = dec->mb_y_;
ctx->filter_row = filter_row; ctx->filter_row_ = filter_row;
if (dec->mt_method == 2) { // swap macroblock data if (dec->mt_method_ == 2) { // swap macroblock data
VP8MBData* const tmp = ctx->mb_data; VP8MBData* const tmp = ctx->mb_data_;
ctx->mb_data = dec->mb_data; ctx->mb_data_ = dec->mb_data_;
dec->mb_data = tmp; dec->mb_data_ = tmp;
} else { } else {
// perform reconstruction directly in main thread // perform reconstruction directly in main thread
ReconstructRow(dec, ctx); ReconstructRow(dec, ctx);
} }
if (filter_row) { // swap filter info if (filter_row) { // swap filter info
VP8FInfo* const tmp = ctx->f_info; VP8FInfo* const tmp = ctx->f_info_;
ctx->f_info = dec->f_info; ctx->f_info_ = dec->f_info_;
dec->f_info = tmp; dec->f_info_ = tmp;
} }
// (reconstruct)+filter in parallel // (reconstruct)+filter in parallel
WebPGetWorkerInterface()->Launch(worker); WebPGetWorkerInterface()->Launch(worker);
if (++dec->cache_id == dec->num_caches) { if (++dec->cache_id_ == dec->num_caches_) {
dec->cache_id = 0; dec->cache_id_ = 0;
} }
} }
} }
@ -551,12 +551,12 @@ VP8StatusCode VP8EnterCritical(VP8Decoder* const dec, VP8Io* const io) {
// Note: Afterward, we must call teardown() no matter what. // Note: Afterward, we must call teardown() no matter what.
if (io->setup != NULL && !io->setup(io)) { if (io->setup != NULL && !io->setup(io)) {
VP8SetError(dec, VP8_STATUS_USER_ABORT, "Frame setup failed"); VP8SetError(dec, VP8_STATUS_USER_ABORT, "Frame setup failed");
return dec->status; return dec->status_;
} }
// Disable filtering per user request // Disable filtering per user request
if (io->bypass_filtering) { if (io->bypass_filtering) {
dec->filter_type = 0; dec->filter_type_ = 0;
} }
// Define the area where we can skip in-loop filtering, in case of cropping. // Define the area where we can skip in-loop filtering, in case of cropping.
@ -569,29 +569,29 @@ VP8StatusCode VP8EnterCritical(VP8Decoder* const dec, VP8Io* const io) {
// top-left corner of the picture (MB #0). We must filter all the previous // top-left corner of the picture (MB #0). We must filter all the previous
// macroblocks. // macroblocks.
{ {
const int extra_pixels = kFilterExtraRows[dec->filter_type]; const int extra_pixels = kFilterExtraRows[dec->filter_type_];
if (dec->filter_type == 2) { if (dec->filter_type_ == 2) {
// For complex filter, we need to preserve the dependency chain. // For complex filter, we need to preserve the dependency chain.
dec->tl_mb_x = 0; dec->tl_mb_x_ = 0;
dec->tl_mb_y = 0; dec->tl_mb_y_ = 0;
} else { } else {
// For simple filter, we can filter only the cropped region. // For simple filter, we can filter only the cropped region.
// We include 'extra_pixels' on the other side of the boundary, since // We include 'extra_pixels' on the other side of the boundary, since
// vertical or horizontal filtering of the previous macroblock can // vertical or horizontal filtering of the previous macroblock can
// modify some abutting pixels. // modify some abutting pixels.
dec->tl_mb_x = (io->crop_left - extra_pixels) >> 4; dec->tl_mb_x_ = (io->crop_left - extra_pixels) >> 4;
dec->tl_mb_y = (io->crop_top - extra_pixels) >> 4; dec->tl_mb_y_ = (io->crop_top - extra_pixels) >> 4;
if (dec->tl_mb_x < 0) dec->tl_mb_x = 0; if (dec->tl_mb_x_ < 0) dec->tl_mb_x_ = 0;
if (dec->tl_mb_y < 0) dec->tl_mb_y = 0; if (dec->tl_mb_y_ < 0) dec->tl_mb_y_ = 0;
} }
// We need some 'extra' pixels on the right/bottom. // We need some 'extra' pixels on the right/bottom.
dec->br_mb_y = (io->crop_bottom + 15 + extra_pixels) >> 4; dec->br_mb_y_ = (io->crop_bottom + 15 + extra_pixels) >> 4;
dec->br_mb_x = (io->crop_right + 15 + extra_pixels) >> 4; dec->br_mb_x_ = (io->crop_right + 15 + extra_pixels) >> 4;
if (dec->br_mb_x > dec->mb_w) { if (dec->br_mb_x_ > dec->mb_w_) {
dec->br_mb_x = dec->mb_w; dec->br_mb_x_ = dec->mb_w_;
} }
if (dec->br_mb_y > dec->mb_h) { if (dec->br_mb_y_ > dec->mb_h_) {
dec->br_mb_y = dec->mb_h; dec->br_mb_y_ = dec->mb_h_;
} }
} }
PrecomputeFilterStrengths(dec); PrecomputeFilterStrengths(dec);
@ -600,8 +600,8 @@ VP8StatusCode VP8EnterCritical(VP8Decoder* const dec, VP8Io* const io) {
int VP8ExitCritical(VP8Decoder* const dec, VP8Io* const io) { int VP8ExitCritical(VP8Decoder* const dec, VP8Io* const io) {
int ok = 1; int ok = 1;
if (dec->mt_method > 0) { if (dec->mt_method_ > 0) {
ok = WebPGetWorkerInterface()->Sync(&dec->worker); ok = WebPGetWorkerInterface()->Sync(&dec->worker_);
} }
if (io->teardown != NULL) { if (io->teardown != NULL) {
@ -639,20 +639,20 @@ int VP8ExitCritical(VP8Decoder* const dec, VP8Io* const io) {
// Initialize multi/single-thread worker // Initialize multi/single-thread worker
static int InitThreadContext(VP8Decoder* const dec) { static int InitThreadContext(VP8Decoder* const dec) {
dec->cache_id = 0; dec->cache_id_ = 0;
if (dec->mt_method > 0) { if (dec->mt_method_ > 0) {
WebPWorker* const worker = &dec->worker; WebPWorker* const worker = &dec->worker_;
if (!WebPGetWorkerInterface()->Reset(worker)) { if (!WebPGetWorkerInterface()->Reset(worker)) {
return VP8SetError(dec, VP8_STATUS_OUT_OF_MEMORY, return VP8SetError(dec, VP8_STATUS_OUT_OF_MEMORY,
"thread initialization failed."); "thread initialization failed.");
} }
worker->data1 = dec; worker->data1 = dec;
worker->data2 = (void*)&dec->thread_ctx.io; worker->data2 = (void*)&dec->thread_ctx_.io_;
worker->hook = FinishRow; worker->hook = FinishRow;
dec->num_caches = dec->num_caches_ =
(dec->filter_type > 0) ? MT_CACHE_LINES : MT_CACHE_LINES - 1; (dec->filter_type_ > 0) ? MT_CACHE_LINES : MT_CACHE_LINES - 1;
} else { } else {
dec->num_caches = ST_CACHE_LINES; dec->num_caches_ = ST_CACHE_LINES;
} }
return 1; return 1;
} }
@ -680,25 +680,25 @@ int VP8GetThreadMethod(const WebPDecoderOptions* const options,
// Memory setup // Memory setup
static int AllocateMemory(VP8Decoder* const dec) { static int AllocateMemory(VP8Decoder* const dec) {
const int num_caches = dec->num_caches; const int num_caches = dec->num_caches_;
const int mb_w = dec->mb_w; const int mb_w = dec->mb_w_;
// Note: we use 'size_t' when there's no overflow risk, uint64_t otherwise. // Note: we use 'size_t' when there's no overflow risk, uint64_t otherwise.
const size_t intra_pred_mode_size = 4 * mb_w * sizeof(uint8_t); const size_t intra_pred_mode_size = 4 * mb_w * sizeof(uint8_t);
const size_t top_size = sizeof(VP8TopSamples) * mb_w; const size_t top_size = sizeof(VP8TopSamples) * mb_w;
const size_t mb_info_size = (mb_w + 1) * sizeof(VP8MB); const size_t mb_info_size = (mb_w + 1) * sizeof(VP8MB);
const size_t f_info_size = const size_t f_info_size =
(dec->filter_type > 0) ? (dec->filter_type_ > 0) ?
mb_w * (dec->mt_method > 0 ? 2 : 1) * sizeof(VP8FInfo) mb_w * (dec->mt_method_ > 0 ? 2 : 1) * sizeof(VP8FInfo)
: 0; : 0;
const size_t yuv_size = YUV_SIZE * sizeof(*dec->yuv_b); const size_t yuv_size = YUV_SIZE * sizeof(*dec->yuv_b_);
const size_t mb_data_size = const size_t mb_data_size =
(dec->mt_method == 2 ? 2 : 1) * mb_w * sizeof(*dec->mb_data); (dec->mt_method_ == 2 ? 2 : 1) * mb_w * sizeof(*dec->mb_data_);
const size_t cache_height = (16 * num_caches const size_t cache_height = (16 * num_caches
+ kFilterExtraRows[dec->filter_type]) * 3 / 2; + kFilterExtraRows[dec->filter_type_]) * 3 / 2;
const size_t cache_size = top_size * cache_height; const size_t cache_size = top_size * cache_height;
// alpha_size is the only one that scales as width x height. // alpha_size is the only one that scales as width x height.
const uint64_t alpha_size = (dec->alpha_data != NULL) ? const uint64_t alpha_size = (dec->alpha_data_ != NULL) ?
(uint64_t)dec->pic_hdr.width * dec->pic_hdr.height : 0ULL; (uint64_t)dec->pic_hdr_.width_ * dec->pic_hdr_.height_ : 0ULL;
const uint64_t needed = (uint64_t)intra_pred_mode_size const uint64_t needed = (uint64_t)intra_pred_mode_size
+ top_size + mb_info_size + f_info_size + top_size + mb_info_size + f_info_size
+ yuv_size + mb_data_size + yuv_size + mb_data_size
@ -706,77 +706,77 @@ static int AllocateMemory(VP8Decoder* const dec) {
uint8_t* mem; uint8_t* mem;
if (!CheckSizeOverflow(needed)) return 0; // check for overflow if (!CheckSizeOverflow(needed)) return 0; // check for overflow
if (needed > dec->mem_size) { if (needed > dec->mem_size_) {
WebPSafeFree(dec->mem); WebPSafeFree(dec->mem_);
dec->mem_size = 0; dec->mem_size_ = 0;
dec->mem = WebPSafeMalloc(needed, sizeof(uint8_t)); dec->mem_ = WebPSafeMalloc(needed, sizeof(uint8_t));
if (dec->mem == NULL) { if (dec->mem_ == NULL) {
return VP8SetError(dec, VP8_STATUS_OUT_OF_MEMORY, return VP8SetError(dec, VP8_STATUS_OUT_OF_MEMORY,
"no memory during frame initialization."); "no memory during frame initialization.");
} }
// down-cast is ok, thanks to WebPSafeMalloc() above. // down-cast is ok, thanks to WebPSafeMalloc() above.
dec->mem_size = (size_t)needed; dec->mem_size_ = (size_t)needed;
} }
mem = (uint8_t*)dec->mem; mem = (uint8_t*)dec->mem_;
dec->intra_t = mem; dec->intra_t_ = mem;
mem += intra_pred_mode_size; mem += intra_pred_mode_size;
dec->yuv_t = (VP8TopSamples*)mem; dec->yuv_t_ = (VP8TopSamples*)mem;
mem += top_size; mem += top_size;
dec->mb_info = ((VP8MB*)mem) + 1; dec->mb_info_ = ((VP8MB*)mem) + 1;
mem += mb_info_size; mem += mb_info_size;
dec->f_info = f_info_size ? (VP8FInfo*)mem : NULL; dec->f_info_ = f_info_size ? (VP8FInfo*)mem : NULL;
mem += f_info_size; mem += f_info_size;
dec->thread_ctx.id = 0; dec->thread_ctx_.id_ = 0;
dec->thread_ctx.f_info = dec->f_info; dec->thread_ctx_.f_info_ = dec->f_info_;
if (dec->filter_type > 0 && dec->mt_method > 0) { if (dec->filter_type_ > 0 && dec->mt_method_ > 0) {
// secondary cache line. The deblocking process need to make use of the // secondary cache line. The deblocking process need to make use of the
// filtering strength from previous macroblock row, while the new ones // filtering strength from previous macroblock row, while the new ones
// are being decoded in parallel. We'll just swap the pointers. // are being decoded in parallel. We'll just swap the pointers.
dec->thread_ctx.f_info += mb_w; dec->thread_ctx_.f_info_ += mb_w;
} }
mem = (uint8_t*)WEBP_ALIGN(mem); mem = (uint8_t*)WEBP_ALIGN(mem);
assert((yuv_size & WEBP_ALIGN_CST) == 0); assert((yuv_size & WEBP_ALIGN_CST) == 0);
dec->yuv_b = mem; dec->yuv_b_ = mem;
mem += yuv_size; mem += yuv_size;
dec->mb_data = (VP8MBData*)mem; dec->mb_data_ = (VP8MBData*)mem;
dec->thread_ctx.mb_data = (VP8MBData*)mem; dec->thread_ctx_.mb_data_ = (VP8MBData*)mem;
if (dec->mt_method == 2) { if (dec->mt_method_ == 2) {
dec->thread_ctx.mb_data += mb_w; dec->thread_ctx_.mb_data_ += mb_w;
} }
mem += mb_data_size; mem += mb_data_size;
dec->cache_y_stride = 16 * mb_w; dec->cache_y_stride_ = 16 * mb_w;
dec->cache_uv_stride = 8 * mb_w; dec->cache_uv_stride_ = 8 * mb_w;
{ {
const int extra_rows = kFilterExtraRows[dec->filter_type]; const int extra_rows = kFilterExtraRows[dec->filter_type_];
const int extra_y = extra_rows * dec->cache_y_stride; const int extra_y = extra_rows * dec->cache_y_stride_;
const int extra_uv = (extra_rows / 2) * dec->cache_uv_stride; const int extra_uv = (extra_rows / 2) * dec->cache_uv_stride_;
dec->cache_y = mem + extra_y; dec->cache_y_ = mem + extra_y;
dec->cache_u = dec->cache_y dec->cache_u_ = dec->cache_y_
+ 16 * num_caches * dec->cache_y_stride + extra_uv; + 16 * num_caches * dec->cache_y_stride_ + extra_uv;
dec->cache_v = dec->cache_u dec->cache_v_ = dec->cache_u_
+ 8 * num_caches * dec->cache_uv_stride + extra_uv; + 8 * num_caches * dec->cache_uv_stride_ + extra_uv;
dec->cache_id = 0; dec->cache_id_ = 0;
} }
mem += cache_size; mem += cache_size;
// alpha plane // alpha plane
dec->alpha_plane = alpha_size ? mem : NULL; dec->alpha_plane_ = alpha_size ? mem : NULL;
mem += alpha_size; mem += alpha_size;
assert(mem <= (uint8_t*)dec->mem + dec->mem_size); assert(mem <= (uint8_t*)dec->mem_ + dec->mem_size_);
// note: left/top-info is initialized once for all. // note: left/top-info is initialized once for all.
memset(dec->mb_info - 1, 0, mb_info_size); memset(dec->mb_info_ - 1, 0, mb_info_size);
VP8InitScanline(dec); // initialize left too. VP8InitScanline(dec); // initialize left too.
// initialize top // initialize top
memset(dec->intra_t, B_DC_PRED, intra_pred_mode_size); memset(dec->intra_t_, B_DC_PRED, intra_pred_mode_size);
return 1; return 1;
} }
@ -784,16 +784,16 @@ static int AllocateMemory(VP8Decoder* const dec) {
static void InitIo(VP8Decoder* const dec, VP8Io* io) { static void InitIo(VP8Decoder* const dec, VP8Io* io) {
// prepare 'io' // prepare 'io'
io->mb_y = 0; io->mb_y = 0;
io->y = dec->cache_y; io->y = dec->cache_y_;
io->u = dec->cache_u; io->u = dec->cache_u_;
io->v = dec->cache_v; io->v = dec->cache_v_;
io->y_stride = dec->cache_y_stride; io->y_stride = dec->cache_y_stride_;
io->uv_stride = dec->cache_uv_stride; io->uv_stride = dec->cache_uv_stride_;
io->a = NULL; io->a = NULL;
} }
int VP8InitFrame(VP8Decoder* const dec, VP8Io* const io) { int VP8InitFrame(VP8Decoder* const dec, VP8Io* const io) {
if (!InitThreadContext(dec)) return 0; // call first. Sets dec->num_caches. if (!InitThreadContext(dec)) return 0; // call first. Sets dec->num_caches_.
if (!AllocateMemory(dec)) return 0; if (!AllocateMemory(dec)) return 0;
InitIo(dec, io); InitIo(dec, io);
VP8DspInit(); // Init critical function pointers and look-up tables. VP8DspInit(); // Init critical function pointers and look-up tables.

File diff suppressed because it is too large


@@ -12,9 +12,7 @@
// Author: Skal (pascal.massimino@gmail.com) // Author: Skal (pascal.massimino@gmail.com)
#include <assert.h> #include <assert.h>
#include <stddef.h>
#include <stdlib.h> #include <stdlib.h>
#include "src/dec/vp8i_dec.h" #include "src/dec/vp8i_dec.h"
#include "src/dec/webpi_dec.h" #include "src/dec/webpi_dec.h"
#include "src/dsp/dsp.h" #include "src/dsp/dsp.h"
@@ -27,9 +25,9 @@
static int EmitYUV(const VP8Io* const io, WebPDecParams* const p) { static int EmitYUV(const VP8Io* const io, WebPDecParams* const p) {
WebPDecBuffer* output = p->output; WebPDecBuffer* output = p->output;
const WebPYUVABuffer* const buf = &output->u.YUVA; const WebPYUVABuffer* const buf = &output->u.YUVA;
uint8_t* const y_dst = buf->y + (ptrdiff_t)io->mb_y * buf->y_stride; uint8_t* const y_dst = buf->y + (size_t)io->mb_y * buf->y_stride;
uint8_t* const u_dst = buf->u + (ptrdiff_t)(io->mb_y >> 1) * buf->u_stride; uint8_t* const u_dst = buf->u + (size_t)(io->mb_y >> 1) * buf->u_stride;
uint8_t* const v_dst = buf->v + (ptrdiff_t)(io->mb_y >> 1) * buf->v_stride; uint8_t* const v_dst = buf->v + (size_t)(io->mb_y >> 1) * buf->v_stride;
const int mb_w = io->mb_w; const int mb_w = io->mb_w;
const int mb_h = io->mb_h; const int mb_h = io->mb_h;
const int uv_w = (mb_w + 1) / 2; const int uv_w = (mb_w + 1) / 2;
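
The casts changed in this file all serve the same purpose: the row index is widened to a pointer-sized type before it is multiplied by (or offset against) the stride, so the arithmetic cannot overflow 32-bit int on very tall buffers; the two branches merely disagree on whether that wide type is ptrdiff_t or size_t. A hedged illustration with hypothetical names:

#include <stddef.h>
#include <stdint.h>

/* Start of row 'row' in a plane: widen before multiplying. */
static uint8_t* RowPtr(uint8_t* const base, int row, int stride) {
  /* (ptrdiff_t)row * stride is evaluated in a wide signed type;
     a plain 'row * stride' would be a 32-bit int multiplication. */
  return base + (ptrdiff_t)row * stride;
}
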
@@ -44,7 +42,7 @@ static int EmitYUV(const VP8Io* const io, WebPDecParams* const p) {
static int EmitSampledRGB(const VP8Io* const io, WebPDecParams* const p) { static int EmitSampledRGB(const VP8Io* const io, WebPDecParams* const p) {
WebPDecBuffer* const output = p->output; WebPDecBuffer* const output = p->output;
WebPRGBABuffer* const buf = &output->u.RGBA; WebPRGBABuffer* const buf = &output->u.RGBA;
uint8_t* const dst = buf->rgba + (ptrdiff_t)io->mb_y * buf->stride; uint8_t* const dst = buf->rgba + (size_t)io->mb_y * buf->stride;
WebPSamplerProcessPlane(io->y, io->y_stride, WebPSamplerProcessPlane(io->y, io->y_stride,
io->u, io->v, io->uv_stride, io->u, io->v, io->uv_stride,
dst, buf->stride, io->mb_w, io->mb_h, dst, buf->stride, io->mb_w, io->mb_h,
@@ -59,7 +57,7 @@ static int EmitSampledRGB(const VP8Io* const io, WebPDecParams* const p) {
static int EmitFancyRGB(const VP8Io* const io, WebPDecParams* const p) { static int EmitFancyRGB(const VP8Io* const io, WebPDecParams* const p) {
int num_lines_out = io->mb_h; // a priori guess int num_lines_out = io->mb_h; // a priori guess
const WebPRGBABuffer* const buf = &p->output->u.RGBA; const WebPRGBABuffer* const buf = &p->output->u.RGBA;
uint8_t* dst = buf->rgba + (ptrdiff_t)io->mb_y * buf->stride; uint8_t* dst = buf->rgba + (size_t)io->mb_y * buf->stride;
WebPUpsampleLinePairFunc upsample = WebPUpsamplers[p->output->colorspace]; WebPUpsampleLinePairFunc upsample = WebPUpsamplers[p->output->colorspace];
const uint8_t* cur_y = io->y; const uint8_t* cur_y = io->y;
const uint8_t* cur_u = io->u; const uint8_t* cur_u = io->u;
@@ -130,7 +128,7 @@ static int EmitAlphaYUV(const VP8Io* const io, WebPDecParams* const p,
const WebPYUVABuffer* const buf = &p->output->u.YUVA; const WebPYUVABuffer* const buf = &p->output->u.YUVA;
const int mb_w = io->mb_w; const int mb_w = io->mb_w;
const int mb_h = io->mb_h; const int mb_h = io->mb_h;
uint8_t* dst = buf->a + (ptrdiff_t)io->mb_y * buf->a_stride; uint8_t* dst = buf->a + (size_t)io->mb_y * buf->a_stride;
int j; int j;
(void)expected_num_lines_out; (void)expected_num_lines_out;
assert(expected_num_lines_out == mb_h); assert(expected_num_lines_out == mb_h);
@@ -183,8 +181,8 @@ static int EmitAlphaRGB(const VP8Io* const io, WebPDecParams* const p,
(colorspace == MODE_ARGB || colorspace == MODE_Argb); (colorspace == MODE_ARGB || colorspace == MODE_Argb);
const WebPRGBABuffer* const buf = &p->output->u.RGBA; const WebPRGBABuffer* const buf = &p->output->u.RGBA;
int num_rows; int num_rows;
const int start_y = GetAlphaSourceRow(io, &alpha, &num_rows); const size_t start_y = GetAlphaSourceRow(io, &alpha, &num_rows);
uint8_t* const base_rgba = buf->rgba + (ptrdiff_t)start_y * buf->stride; uint8_t* const base_rgba = buf->rgba + start_y * buf->stride;
uint8_t* const dst = base_rgba + (alpha_first ? 0 : 3); uint8_t* const dst = base_rgba + (alpha_first ? 0 : 3);
const int has_alpha = WebPDispatchAlpha(alpha, io->width, mb_w, const int has_alpha = WebPDispatchAlpha(alpha, io->width, mb_w,
num_rows, dst, buf->stride); num_rows, dst, buf->stride);
@@ -207,8 +205,8 @@ static int EmitAlphaRGBA4444(const VP8Io* const io, WebPDecParams* const p,
const WEBP_CSP_MODE colorspace = p->output->colorspace; const WEBP_CSP_MODE colorspace = p->output->colorspace;
const WebPRGBABuffer* const buf = &p->output->u.RGBA; const WebPRGBABuffer* const buf = &p->output->u.RGBA;
int num_rows; int num_rows;
const int start_y = GetAlphaSourceRow(io, &alpha, &num_rows); const size_t start_y = GetAlphaSourceRow(io, &alpha, &num_rows);
uint8_t* const base_rgba = buf->rgba + (ptrdiff_t)start_y * buf->stride; uint8_t* const base_rgba = buf->rgba + start_y * buf->stride;
#if (WEBP_SWAP_16BIT_CSP == 1) #if (WEBP_SWAP_16BIT_CSP == 1)
uint8_t* alpha_dst = base_rgba; uint8_t* alpha_dst = base_rgba;
#else #else
@@ -259,7 +257,7 @@ static int EmitRescaledYUV(const VP8Io* const io, WebPDecParams* const p) {
if (WebPIsAlphaMode(p->output->colorspace) && io->a != NULL) { if (WebPIsAlphaMode(p->output->colorspace) && io->a != NULL) {
// Before rescaling, we premultiply the luma directly into the io->y // Before rescaling, we premultiply the luma directly into the io->y
// internal buffer. This is OK since these samples are not used for // internal buffer. This is OK since these samples are not used for
// intra-prediction (the top samples are saved in cache_y/u/v). // intra-prediction (the top samples are saved in cache_y_/u_/v_).
// But we need to cast the const away, though. // But we need to cast the const away, though.
WebPMultRows((uint8_t*)io->y, io->y_stride, WebPMultRows((uint8_t*)io->y, io->y_stride,
io->a, io->width, io->mb_w, mb_h, 0); io->a, io->width, io->mb_w, mb_h, 0);
@@ -273,9 +271,9 @@ static int EmitRescaledYUV(const VP8Io* const io, WebPDecParams* const p) {
static int EmitRescaledAlphaYUV(const VP8Io* const io, WebPDecParams* const p, static int EmitRescaledAlphaYUV(const VP8Io* const io, WebPDecParams* const p,
int expected_num_lines_out) { int expected_num_lines_out) {
const WebPYUVABuffer* const buf = &p->output->u.YUVA; const WebPYUVABuffer* const buf = &p->output->u.YUVA;
uint8_t* const dst_a = buf->a + (ptrdiff_t)p->last_y * buf->a_stride; uint8_t* const dst_a = buf->a + (size_t)p->last_y * buf->a_stride;
if (io->a != NULL) { if (io->a != NULL) {
uint8_t* const dst_y = buf->y + (ptrdiff_t)p->last_y * buf->y_stride; uint8_t* const dst_y = buf->y + (size_t)p->last_y * buf->y_stride;
const int num_lines_out = Rescale(io->a, io->width, io->mb_h, p->scaler_a); const int num_lines_out = Rescale(io->a, io->width, io->mb_h, p->scaler_a);
assert(expected_num_lines_out == num_lines_out); assert(expected_num_lines_out == num_lines_out);
if (num_lines_out > 0) { // unmultiply the Y if (num_lines_out > 0) { // unmultiply the Y
@@ -364,7 +362,7 @@ static int ExportRGB(WebPDecParams* const p, int y_pos) {
const WebPYUV444Converter convert = const WebPYUV444Converter convert =
WebPYUV444Converters[p->output->colorspace]; WebPYUV444Converters[p->output->colorspace];
const WebPRGBABuffer* const buf = &p->output->u.RGBA; const WebPRGBABuffer* const buf = &p->output->u.RGBA;
uint8_t* dst = buf->rgba + (ptrdiff_t)y_pos * buf->stride; uint8_t* dst = buf->rgba + (size_t)y_pos * buf->stride;
int num_lines_out = 0; int num_lines_out = 0;
// For RGB rescaling, because of the YUV420, current scan position // For RGB rescaling, because of the YUV420, current scan position
// U/V can be +1/-1 line from the Y one. Hence the double test. // U/V can be +1/-1 line from the Y one. Hence the double test.
@@ -391,14 +389,14 @@ static int EmitRescaledRGB(const VP8Io* const io, WebPDecParams* const p) {
while (j < mb_h) { while (j < mb_h) {
const int y_lines_in = const int y_lines_in =
WebPRescalerImport(p->scaler_y, mb_h - j, WebPRescalerImport(p->scaler_y, mb_h - j,
io->y + (ptrdiff_t)j * io->y_stride, io->y_stride); io->y + (size_t)j * io->y_stride, io->y_stride);
j += y_lines_in; j += y_lines_in;
if (WebPRescaleNeededLines(p->scaler_u, uv_mb_h - uv_j)) { if (WebPRescaleNeededLines(p->scaler_u, uv_mb_h - uv_j)) {
const int u_lines_in = WebPRescalerImport( const int u_lines_in = WebPRescalerImport(
p->scaler_u, uv_mb_h - uv_j, io->u + (ptrdiff_t)uv_j * io->uv_stride, p->scaler_u, uv_mb_h - uv_j, io->u + (size_t)uv_j * io->uv_stride,
io->uv_stride); io->uv_stride);
const int v_lines_in = WebPRescalerImport( const int v_lines_in = WebPRescalerImport(
p->scaler_v, uv_mb_h - uv_j, io->v + (ptrdiff_t)uv_j * io->uv_stride, p->scaler_v, uv_mb_h - uv_j, io->v + (size_t)uv_j * io->uv_stride,
io->uv_stride); io->uv_stride);
(void)v_lines_in; // remove a gcc warning (void)v_lines_in; // remove a gcc warning
assert(u_lines_in == v_lines_in); assert(u_lines_in == v_lines_in);
@@ -411,7 +409,7 @@ static int EmitRescaledRGB(const VP8Io* const io, WebPDecParams* const p) {
static int ExportAlpha(WebPDecParams* const p, int y_pos, int max_lines_out) { static int ExportAlpha(WebPDecParams* const p, int y_pos, int max_lines_out) {
const WebPRGBABuffer* const buf = &p->output->u.RGBA; const WebPRGBABuffer* const buf = &p->output->u.RGBA;
uint8_t* const base_rgba = buf->rgba + (ptrdiff_t)y_pos * buf->stride; uint8_t* const base_rgba = buf->rgba + (size_t)y_pos * buf->stride;
const WEBP_CSP_MODE colorspace = p->output->colorspace; const WEBP_CSP_MODE colorspace = p->output->colorspace;
const int alpha_first = const int alpha_first =
(colorspace == MODE_ARGB || colorspace == MODE_Argb); (colorspace == MODE_ARGB || colorspace == MODE_Argb);
@@ -439,7 +437,7 @@ static int ExportAlpha(WebPDecParams* const p, int y_pos, int max_lines_out) {
static int ExportAlphaRGBA4444(WebPDecParams* const p, int y_pos, static int ExportAlphaRGBA4444(WebPDecParams* const p, int y_pos,
int max_lines_out) { int max_lines_out) {
const WebPRGBABuffer* const buf = &p->output->u.RGBA; const WebPRGBABuffer* const buf = &p->output->u.RGBA;
uint8_t* const base_rgba = buf->rgba + (ptrdiff_t)y_pos * buf->stride; uint8_t* const base_rgba = buf->rgba + (size_t)y_pos * buf->stride;
#if (WEBP_SWAP_16BIT_CSP == 1) #if (WEBP_SWAP_16BIT_CSP == 1)
uint8_t* alpha_dst = base_rgba; uint8_t* alpha_dst = base_rgba;
#else #else
@@ -478,7 +476,7 @@ static int EmitRescaledAlphaRGB(const VP8Io* const io, WebPDecParams* const p,
int lines_left = expected_num_out_lines; int lines_left = expected_num_out_lines;
const int y_end = p->last_y + lines_left; const int y_end = p->last_y + lines_left;
while (lines_left > 0) { while (lines_left > 0) {
const int64_t row_offset = (ptrdiff_t)scaler->src_y - io->mb_y; const int64_t row_offset = (int64_t)scaler->src_y - io->mb_y;
WebPRescalerImport(scaler, io->mb_h + io->mb_y - scaler->src_y, WebPRescalerImport(scaler, io->mb_h + io->mb_y - scaler->src_y,
io->a + row_offset * io->width, io->width); io->a + row_offset * io->width, io->width);
lines_left -= p->emit_alpha_row(p, y_end - lines_left, lines_left); lines_left -= p->emit_alpha_row(p, y_end - lines_left, lines_left);


@@ -60,7 +60,7 @@ static const uint16_t kAcTable[128] = {
// Paragraph 9.6 // Paragraph 9.6
void VP8ParseQuant(VP8Decoder* const dec) { void VP8ParseQuant(VP8Decoder* const dec) {
VP8BitReader* const br = &dec->br; VP8BitReader* const br = &dec->br_;
const int base_q0 = VP8GetValue(br, 7, "global-header"); const int base_q0 = VP8GetValue(br, 7, "global-header");
const int dqy1_dc = VP8Get(br, "global-header") ? const int dqy1_dc = VP8Get(br, "global-header") ?
VP8GetSignedValue(br, 4, "global-header") : 0; VP8GetSignedValue(br, 4, "global-header") : 0;
@@ -73,40 +73,40 @@ void VP8ParseQuant(VP8Decoder* const dec) {
const int dquv_ac = VP8Get(br, "global-header") ? const int dquv_ac = VP8Get(br, "global-header") ?
VP8GetSignedValue(br, 4, "global-header") : 0; VP8GetSignedValue(br, 4, "global-header") : 0;
const VP8SegmentHeader* const hdr = &dec->segment_hdr; const VP8SegmentHeader* const hdr = &dec->segment_hdr_;
int i; int i;
for (i = 0; i < NUM_MB_SEGMENTS; ++i) { for (i = 0; i < NUM_MB_SEGMENTS; ++i) {
int q; int q;
if (hdr->use_segment) { if (hdr->use_segment_) {
q = hdr->quantizer[i]; q = hdr->quantizer_[i];
if (!hdr->absolute_delta) { if (!hdr->absolute_delta_) {
q += base_q0; q += base_q0;
} }
} else { } else {
if (i > 0) { if (i > 0) {
dec->dqm[i] = dec->dqm[0]; dec->dqm_[i] = dec->dqm_[0];
continue; continue;
} else { } else {
q = base_q0; q = base_q0;
} }
} }
{ {
VP8QuantMatrix* const m = &dec->dqm[i]; VP8QuantMatrix* const m = &dec->dqm_[i];
m->y1_mat[0] = kDcTable[clip(q + dqy1_dc, 127)]; m->y1_mat_[0] = kDcTable[clip(q + dqy1_dc, 127)];
m->y1_mat[1] = kAcTable[clip(q + 0, 127)]; m->y1_mat_[1] = kAcTable[clip(q + 0, 127)];
m->y2_mat[0] = kDcTable[clip(q + dqy2_dc, 127)] * 2; m->y2_mat_[0] = kDcTable[clip(q + dqy2_dc, 127)] * 2;
// For all x in [0..284], x*155/100 is bitwise equal to (x*101581) >> 16. // For all x in [0..284], x*155/100 is bitwise equal to (x*101581) >> 16.
// The smallest precision for that is '(x*6349) >> 12' but 16 is a good // The smallest precision for that is '(x*6349) >> 12' but 16 is a good
// word size. // word size.
m->y2_mat[1] = (kAcTable[clip(q + dqy2_ac, 127)] * 101581) >> 16; m->y2_mat_[1] = (kAcTable[clip(q + dqy2_ac, 127)] * 101581) >> 16;
if (m->y2_mat[1] < 8) m->y2_mat[1] = 8; if (m->y2_mat_[1] < 8) m->y2_mat_[1] = 8;
m->uv_mat[0] = kDcTable[clip(q + dquv_dc, 117)]; m->uv_mat_[0] = kDcTable[clip(q + dquv_dc, 117)];
m->uv_mat[1] = kAcTable[clip(q + dquv_ac, 127)]; m->uv_mat_[1] = kAcTable[clip(q + dquv_ac, 127)];
m->uv_quant = q + dquv_ac; // for dithering strength evaluation m->uv_quant_ = q + dquv_ac; // for dithering strength evaluation
} }
} }
} }
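
The VP8ParseQuant() hunk relies on the comment's claim that x*155/100 is bitwise equal to (x*101581) >> 16 for every x in [0..284]; a throwaway check (not part of the library) confirms it:

#include <assert.h>

int main(void) {
  int x;
  for (x = 0; x <= 284; ++x) {
    assert((x * 155 / 100) == ((x * 101581) >> 16));
  }
  return 0;
}
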


@@ -16,8 +16,7 @@
#include "src/utils/bit_reader_inl_utils.h" #include "src/utils/bit_reader_inl_utils.h"
#if !defined(USE_GENERIC_TREE) #if !defined(USE_GENERIC_TREE)
#if !defined(__arm__) && !defined(_M_ARM) && !WEBP_AARCH64 && \ #if !defined(__arm__) && !defined(_M_ARM) && !WEBP_AARCH64
!defined(__wasm__)
// using a table is ~1-2% slower on ARM. Prefer the coded-tree approach then. // using a table is ~1-2% slower on ARM. Prefer the coded-tree approach then.
#define USE_GENERIC_TREE 1 // ALTERNATE_CODE #define USE_GENERIC_TREE 1 // ALTERNATE_CODE
#else #else
@@ -284,40 +283,40 @@ static const uint8_t kBModesProba[NUM_BMODES][NUM_BMODES][NUM_BMODES - 1] = {
}; };
void VP8ResetProba(VP8Proba* const proba) { void VP8ResetProba(VP8Proba* const proba) {
memset(proba->segments, 255u, sizeof(proba->segments)); memset(proba->segments_, 255u, sizeof(proba->segments_));
// proba->bands[][] is initialized later // proba->bands_[][] is initialized later
} }
static void ParseIntraMode(VP8BitReader* const br, static void ParseIntraMode(VP8BitReader* const br,
VP8Decoder* const dec, int mb_x) { VP8Decoder* const dec, int mb_x) {
uint8_t* const top = dec->intra_t + 4 * mb_x; uint8_t* const top = dec->intra_t_ + 4 * mb_x;
uint8_t* const left = dec->intra_l; uint8_t* const left = dec->intra_l_;
VP8MBData* const block = dec->mb_data + mb_x; VP8MBData* const block = dec->mb_data_ + mb_x;
// Note: we don't save segment map (yet), as we don't expect // Note: we don't save segment map (yet), as we don't expect
// to decode more than 1 keyframe. // to decode more than 1 keyframe.
if (dec->segment_hdr.update_map) { if (dec->segment_hdr_.update_map_) {
// Hardcoded tree parsing // Hardcoded tree parsing
block->segment = !VP8GetBit(br, dec->proba.segments[0], "segments") block->segment_ = !VP8GetBit(br, dec->proba_.segments_[0], "segments")
? VP8GetBit(br, dec->proba.segments[1], "segments") ? VP8GetBit(br, dec->proba_.segments_[1], "segments")
: VP8GetBit(br, dec->proba.segments[2], "segments") + 2; : VP8GetBit(br, dec->proba_.segments_[2], "segments") + 2;
} else { } else {
block->segment = 0; // default for intra block->segment_ = 0; // default for intra
} }
if (dec->use_skip_proba) block->skip = VP8GetBit(br, dec->skip_p, "skip"); if (dec->use_skip_proba_) block->skip_ = VP8GetBit(br, dec->skip_p_, "skip");
block->is_i4x4 = !VP8GetBit(br, 145, "block-size"); block->is_i4x4_ = !VP8GetBit(br, 145, "block-size");
if (!block->is_i4x4) { if (!block->is_i4x4_) {
// Hardcoded 16x16 intra-mode decision tree. // Hardcoded 16x16 intra-mode decision tree.
const int ymode = const int ymode =
VP8GetBit(br, 156, "pred-modes") ? VP8GetBit(br, 156, "pred-modes") ?
(VP8GetBit(br, 128, "pred-modes") ? TM_PRED : H_PRED) : (VP8GetBit(br, 128, "pred-modes") ? TM_PRED : H_PRED) :
(VP8GetBit(br, 163, "pred-modes") ? V_PRED : DC_PRED); (VP8GetBit(br, 163, "pred-modes") ? V_PRED : DC_PRED);
block->imodes[0] = ymode; block->imodes_[0] = ymode;
memset(top, ymode, 4 * sizeof(*top)); memset(top, ymode, 4 * sizeof(*top));
memset(left, ymode, 4 * sizeof(*left)); memset(left, ymode, 4 * sizeof(*left));
} else { } else {
uint8_t* modes = block->imodes; uint8_t* modes = block->imodes_;
int y; int y;
for (y = 0; y < 4; ++y) { for (y = 0; y < 4; ++y) {
int ymode = left[y]; int ymode = left[y];
@@ -354,17 +353,17 @@ static void ParseIntraMode(VP8BitReader* const br,
} }
} }
// Hardcoded UVMode decision tree // Hardcoded UVMode decision tree
block->uvmode = !VP8GetBit(br, 142, "pred-modes-uv") ? DC_PRED block->uvmode_ = !VP8GetBit(br, 142, "pred-modes-uv") ? DC_PRED
: !VP8GetBit(br, 114, "pred-modes-uv") ? V_PRED : !VP8GetBit(br, 114, "pred-modes-uv") ? V_PRED
: VP8GetBit(br, 183, "pred-modes-uv") ? TM_PRED : H_PRED; : VP8GetBit(br, 183, "pred-modes-uv") ? TM_PRED : H_PRED;
} }
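
ParseIntraMode() above decodes small enums with hardcoded binary trees: each VP8GetBit() call consumes one 8-bit probability and the branch pattern selects a leaf. A sketch of the same idea for the four-valued segment id, using a hypothetical get_bit(prob) boolean decoder:

/* Hypothetical boolean decoder standing in for VP8GetBit(br, prob, label). */
extern int get_bit(unsigned char prob);

/* Two-level tree: probas[0] splits {0,1} from {2,3}, probas[1] resolves
   0 vs 1, probas[2] resolves 2 vs 3 -- mirroring the hardcoded parse above. */
static int ReadSegmentId(const unsigned char probas[3]) {
  return !get_bit(probas[0]) ? get_bit(probas[1])
                             : get_bit(probas[2]) + 2;
}
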
int VP8ParseIntraModeRow(VP8BitReader* const br, VP8Decoder* const dec) { int VP8ParseIntraModeRow(VP8BitReader* const br, VP8Decoder* const dec) {
int mb_x; int mb_x;
for (mb_x = 0; mb_x < dec->mb_w; ++mb_x) { for (mb_x = 0; mb_x < dec->mb_w_; ++mb_x) {
ParseIntraMode(br, dec, mb_x); ParseIntraMode(br, dec, mb_x);
} }
return !dec->br.eof; return !dec->br_.eof_;
} }
//------------------------------------------------------------------------------ //------------------------------------------------------------------------------
@@ -514,7 +513,7 @@ static const uint8_t kBands[16 + 1] = {
}; };
void VP8ParseProba(VP8BitReader* const br, VP8Decoder* const dec) { void VP8ParseProba(VP8BitReader* const br, VP8Decoder* const dec) {
VP8Proba* const proba = &dec->proba; VP8Proba* const proba = &dec->proba_;
int t, b, c, p; int t, b, c, p;
for (t = 0; t < NUM_TYPES; ++t) { for (t = 0; t < NUM_TYPES; ++t) {
for (b = 0; b < NUM_BANDS; ++b) { for (b = 0; b < NUM_BANDS; ++b) {
@@ -524,16 +523,16 @@ void VP8ParseProba(VP8BitReader* const br, VP8Decoder* const dec) {
VP8GetBit(br, CoeffsUpdateProba[t][b][c][p], "global-header") ? VP8GetBit(br, CoeffsUpdateProba[t][b][c][p], "global-header") ?
VP8GetValue(br, 8, "global-header") : VP8GetValue(br, 8, "global-header") :
CoeffsProba0[t][b][c][p]; CoeffsProba0[t][b][c][p];
proba->bands[t][b].probas[c][p] = v; proba->bands_[t][b].probas_[c][p] = v;
} }
} }
} }
for (b = 0; b < 16 + 1; ++b) { for (b = 0; b < 16 + 1; ++b) {
proba->bands_ptr[t][b] = &proba->bands[t][kBands[b]]; proba->bands_ptr_[t][b] = &proba->bands_[t][kBands[b]];
} }
} }
dec->use_skip_proba = VP8Get(br, "global-header"); dec->use_skip_proba_ = VP8Get(br, "global-header");
if (dec->use_skip_proba) { if (dec->use_skip_proba_) {
dec->skip_p = VP8GetValue(br, 8, "global-header"); dec->skip_p_ = VP8GetValue(br, 8, "global-header");
} }
} }


@@ -40,8 +40,8 @@ static void InitGetCoeffs(void);
// VP8Decoder // VP8Decoder
static void SetOk(VP8Decoder* const dec) { static void SetOk(VP8Decoder* const dec) {
dec->status = VP8_STATUS_OK; dec->status_ = VP8_STATUS_OK;
dec->error_msg = "OK"; dec->error_msg_ = "OK";
} }
int VP8InitIoInternal(VP8Io* const io, int version) { int VP8InitIoInternal(VP8Io* const io, int version) {
@@ -58,9 +58,9 @@ VP8Decoder* VP8New(void) {
VP8Decoder* const dec = (VP8Decoder*)WebPSafeCalloc(1ULL, sizeof(*dec)); VP8Decoder* const dec = (VP8Decoder*)WebPSafeCalloc(1ULL, sizeof(*dec));
if (dec != NULL) { if (dec != NULL) {
SetOk(dec); SetOk(dec);
WebPGetWorkerInterface()->Init(&dec->worker); WebPGetWorkerInterface()->Init(&dec->worker_);
dec->ready = 0; dec->ready_ = 0;
dec->num_parts_minus_one = 0; dec->num_parts_minus_one_ = 0;
InitGetCoeffs(); InitGetCoeffs();
} }
return dec; return dec;
@@ -68,13 +68,13 @@ VP8Decoder* VP8New(void) {
VP8StatusCode VP8Status(VP8Decoder* const dec) { VP8StatusCode VP8Status(VP8Decoder* const dec) {
if (!dec) return VP8_STATUS_INVALID_PARAM; if (!dec) return VP8_STATUS_INVALID_PARAM;
return dec->status; return dec->status_;
} }
const char* VP8StatusMessage(VP8Decoder* const dec) { const char* VP8StatusMessage(VP8Decoder* const dec) {
if (dec == NULL) return "no object"; if (dec == NULL) return "no object";
if (!dec->error_msg) return "OK"; if (!dec->error_msg_) return "OK";
return dec->error_msg; return dec->error_msg_;
} }
void VP8Delete(VP8Decoder* const dec) { void VP8Delete(VP8Decoder* const dec) {
@@ -86,13 +86,11 @@ void VP8Delete(VP8Decoder* const dec) {
int VP8SetError(VP8Decoder* const dec, int VP8SetError(VP8Decoder* const dec,
VP8StatusCode error, const char* const msg) { VP8StatusCode error, const char* const msg) {
// VP8_STATUS_SUSPENDED is only meaningful in incremental decoding.
assert(dec->incremental || error != VP8_STATUS_SUSPENDED);
// The oldest error reported takes precedence over the new one. // The oldest error reported takes precedence over the new one.
if (dec->status == VP8_STATUS_OK) { if (dec->status_ == VP8_STATUS_OK) {
dec->status = error; dec->status_ = error;
dec->error_msg = msg; dec->error_msg_ = msg;
dec->ready = 0; dec->ready_ = 0;
} }
return 0; return 0;
} }
@@ -151,11 +149,11 @@ int VP8GetInfo(const uint8_t* data, size_t data_size, size_t chunk_size,
static void ResetSegmentHeader(VP8SegmentHeader* const hdr) { static void ResetSegmentHeader(VP8SegmentHeader* const hdr) {
assert(hdr != NULL); assert(hdr != NULL);
hdr->use_segment = 0; hdr->use_segment_ = 0;
hdr->update_map = 0; hdr->update_map_ = 0;
hdr->absolute_delta = 1; hdr->absolute_delta_ = 1;
memset(hdr->quantizer, 0, sizeof(hdr->quantizer)); memset(hdr->quantizer_, 0, sizeof(hdr->quantizer_));
memset(hdr->filter_strength, 0, sizeof(hdr->filter_strength)); memset(hdr->filter_strength_, 0, sizeof(hdr->filter_strength_));
} }
// Paragraph 9.3 // Paragraph 9.3
@@ -163,46 +161,46 @@ static int ParseSegmentHeader(VP8BitReader* br,
VP8SegmentHeader* hdr, VP8Proba* proba) { VP8SegmentHeader* hdr, VP8Proba* proba) {
assert(br != NULL); assert(br != NULL);
assert(hdr != NULL); assert(hdr != NULL);
hdr->use_segment = VP8Get(br, "global-header"); hdr->use_segment_ = VP8Get(br, "global-header");
if (hdr->use_segment) { if (hdr->use_segment_) {
hdr->update_map = VP8Get(br, "global-header"); hdr->update_map_ = VP8Get(br, "global-header");
if (VP8Get(br, "global-header")) { // update data if (VP8Get(br, "global-header")) { // update data
int s; int s;
hdr->absolute_delta = VP8Get(br, "global-header"); hdr->absolute_delta_ = VP8Get(br, "global-header");
for (s = 0; s < NUM_MB_SEGMENTS; ++s) { for (s = 0; s < NUM_MB_SEGMENTS; ++s) {
hdr->quantizer[s] = VP8Get(br, "global-header") ? hdr->quantizer_[s] = VP8Get(br, "global-header") ?
VP8GetSignedValue(br, 7, "global-header") : 0; VP8GetSignedValue(br, 7, "global-header") : 0;
} }
for (s = 0; s < NUM_MB_SEGMENTS; ++s) { for (s = 0; s < NUM_MB_SEGMENTS; ++s) {
hdr->filter_strength[s] = VP8Get(br, "global-header") ? hdr->filter_strength_[s] = VP8Get(br, "global-header") ?
VP8GetSignedValue(br, 6, "global-header") : 0; VP8GetSignedValue(br, 6, "global-header") : 0;
} }
} }
if (hdr->update_map) { if (hdr->update_map_) {
int s; int s;
for (s = 0; s < MB_FEATURE_TREE_PROBS; ++s) { for (s = 0; s < MB_FEATURE_TREE_PROBS; ++s) {
proba->segments[s] = VP8Get(br, "global-header") ? proba->segments_[s] = VP8Get(br, "global-header") ?
VP8GetValue(br, 8, "global-header") : 255u; VP8GetValue(br, 8, "global-header") : 255u;
} }
} }
} else { } else {
hdr->update_map = 0; hdr->update_map_ = 0;
} }
return !br->eof; return !br->eof_;
} }
// Paragraph 9.5 // Paragraph 9.5
// If we don't have all the necessary data in 'buf', this function returns // This function returns VP8_STATUS_SUSPENDED if we don't have all the
// VP8_STATUS_SUSPENDED in incremental decoding, VP8_STATUS_NOT_ENOUGH_DATA // necessary data in 'buf'.
// otherwise. // This case is not necessarily an error (for incremental decoding).
// In incremental decoding, this case is not necessarily an error. Still, no // Still, no bitreader is ever initialized to make it possible to read
// bitreader is ever initialized to make it possible to read unavailable memory. // unavailable memory.
// If we don't even have the partitions' sizes, then VP8_STATUS_NOT_ENOUGH_DATA // If we don't even have the partitions' sizes, than VP8_STATUS_NOT_ENOUGH_DATA
// is returned, and this is an unrecoverable error. // is returned, and this is an unrecoverable error.
// If the partitions were positioned ok, VP8_STATUS_OK is returned. // If the partitions were positioned ok, VP8_STATUS_OK is returned.
static VP8StatusCode ParsePartitions(VP8Decoder* const dec, static VP8StatusCode ParsePartitions(VP8Decoder* const dec,
const uint8_t* buf, size_t size) { const uint8_t* buf, size_t size) {
VP8BitReader* const br = &dec->br; VP8BitReader* const br = &dec->br_;
const uint8_t* sz = buf; const uint8_t* sz = buf;
const uint8_t* buf_end = buf + size; const uint8_t* buf_end = buf + size;
const uint8_t* part_start; const uint8_t* part_start;
@@ -210,8 +208,8 @@ static VP8StatusCode ParsePartitions(VP8Decoder* const dec,
size_t last_part; size_t last_part;
size_t p; size_t p;
dec->num_parts_minus_one = (1 << VP8GetValue(br, 2, "global-header")) - 1; dec->num_parts_minus_one_ = (1 << VP8GetValue(br, 2, "global-header")) - 1;
last_part = dec->num_parts_minus_one; last_part = dec->num_parts_minus_one_;
if (size < 3 * last_part) { if (size < 3 * last_part) {
// we can't even read the sizes with sz[]! That's a failure. // we can't even read the sizes with sz[]! That's a failure.
return VP8_STATUS_NOT_ENOUGH_DATA; return VP8_STATUS_NOT_ENOUGH_DATA;
@@ -221,42 +219,40 @@ static VP8StatusCode ParsePartitions(VP8Decoder* const dec,
for (p = 0; p < last_part; ++p) { for (p = 0; p < last_part; ++p) {
size_t psize = sz[0] | (sz[1] << 8) | (sz[2] << 16); size_t psize = sz[0] | (sz[1] << 8) | (sz[2] << 16);
if (psize > size_left) psize = size_left; if (psize > size_left) psize = size_left;
VP8InitBitReader(dec->parts + p, part_start, psize); VP8InitBitReader(dec->parts_ + p, part_start, psize);
part_start += psize; part_start += psize;
size_left -= psize; size_left -= psize;
sz += 3; sz += 3;
} }
VP8InitBitReader(dec->parts + last_part, part_start, size_left); VP8InitBitReader(dec->parts_ + last_part, part_start, size_left);
if (part_start < buf_end) return VP8_STATUS_OK; return (part_start < buf_end) ? VP8_STATUS_OK :
return dec->incremental VP8_STATUS_SUSPENDED; // Init is ok, but there's not enough data
? VP8_STATUS_SUSPENDED // Init is ok, but there's not enough data
: VP8_STATUS_NOT_ENOUGH_DATA;
} }
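
ParsePartitions() above reads the first (num_parts - 1) partition sizes as 3-byte little-endian values, clamps each to the bytes actually present, and hands the final partition whatever remains. A standalone sketch of that layout (hypothetical types, no bit-reader involved):

#include <stddef.h>
#include <stdint.h>

typedef struct { const uint8_t* start; size_t size; } Part;

/* Splits 'buf' into 'num_parts' partitions; the first num_parts-1 sizes are
   stored up-front as 3-byte little-endian values. Returns 0 on short input. */
static int SplitPartitions(const uint8_t* buf, size_t size,
                           int num_parts, Part parts[]) {
  const size_t table_size = 3 * (size_t)(num_parts - 1);
  const uint8_t* sz = buf;
  const uint8_t* data;
  size_t left;
  int p;
  if (size < table_size) return 0;   /* can't even read the size table */
  data = buf + table_size;
  left = size - table_size;
  for (p = 0; p + 1 < num_parts; ++p) {
    size_t psize = sz[0] | (sz[1] << 8) | ((size_t)sz[2] << 16);
    if (psize > left) psize = left;  /* clamp to the bytes actually present */
    parts[p].start = data;
    parts[p].size = psize;
    data += psize;
    left -= psize;
    sz += 3;
  }
  parts[p].start = data;             /* last partition gets whatever is left */
  parts[p].size = left;
  return 1;
}
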
// Paragraph 9.4 // Paragraph 9.4
static int ParseFilterHeader(VP8BitReader* br, VP8Decoder* const dec) { static int ParseFilterHeader(VP8BitReader* br, VP8Decoder* const dec) {
VP8FilterHeader* const hdr = &dec->filter_hdr; VP8FilterHeader* const hdr = &dec->filter_hdr_;
hdr->simple = VP8Get(br, "global-header"); hdr->simple_ = VP8Get(br, "global-header");
hdr->level = VP8GetValue(br, 6, "global-header"); hdr->level_ = VP8GetValue(br, 6, "global-header");
hdr->sharpness = VP8GetValue(br, 3, "global-header"); hdr->sharpness_ = VP8GetValue(br, 3, "global-header");
hdr->use_lf_delta = VP8Get(br, "global-header"); hdr->use_lf_delta_ = VP8Get(br, "global-header");
if (hdr->use_lf_delta) { if (hdr->use_lf_delta_) {
if (VP8Get(br, "global-header")) { // update lf-delta? if (VP8Get(br, "global-header")) { // update lf-delta?
int i; int i;
for (i = 0; i < NUM_REF_LF_DELTAS; ++i) { for (i = 0; i < NUM_REF_LF_DELTAS; ++i) {
if (VP8Get(br, "global-header")) { if (VP8Get(br, "global-header")) {
hdr->ref_lf_delta[i] = VP8GetSignedValue(br, 6, "global-header"); hdr->ref_lf_delta_[i] = VP8GetSignedValue(br, 6, "global-header");
} }
} }
for (i = 0; i < NUM_MODE_LF_DELTAS; ++i) { for (i = 0; i < NUM_MODE_LF_DELTAS; ++i) {
if (VP8Get(br, "global-header")) { if (VP8Get(br, "global-header")) {
hdr->mode_lf_delta[i] = VP8GetSignedValue(br, 6, "global-header"); hdr->mode_lf_delta_[i] = VP8GetSignedValue(br, 6, "global-header");
} }
} }
} }
} }
dec->filter_type = (hdr->level == 0) ? 0 : hdr->simple ? 1 : 2; dec->filter_type_ = (hdr->level_ == 0) ? 0 : hdr->simple_ ? 1 : 2;
return !br->eof; return !br->eof_;
} }
// Topmost call // Topmost call
@@ -286,16 +282,16 @@ int VP8GetHeaders(VP8Decoder* const dec, VP8Io* const io) {
// Paragraph 9.1 // Paragraph 9.1
{ {
const uint32_t bits = buf[0] | (buf[1] << 8) | (buf[2] << 16); const uint32_t bits = buf[0] | (buf[1] << 8) | (buf[2] << 16);
frm_hdr = &dec->frm_hdr; frm_hdr = &dec->frm_hdr_;
frm_hdr->key_frame = !(bits & 1); frm_hdr->key_frame_ = !(bits & 1);
frm_hdr->profile = (bits >> 1) & 7; frm_hdr->profile_ = (bits >> 1) & 7;
frm_hdr->show = (bits >> 4) & 1; frm_hdr->show_ = (bits >> 4) & 1;
frm_hdr->partition_length = (bits >> 5); frm_hdr->partition_length_ = (bits >> 5);
if (frm_hdr->profile > 3) { if (frm_hdr->profile_ > 3) {
return VP8SetError(dec, VP8_STATUS_BITSTREAM_ERROR, return VP8SetError(dec, VP8_STATUS_BITSTREAM_ERROR,
"Incorrect keyframe parameters."); "Incorrect keyframe parameters.");
} }
if (!frm_hdr->show) { if (!frm_hdr->show_) {
return VP8SetError(dec, VP8_STATUS_UNSUPPORTED_FEATURE, return VP8SetError(dec, VP8_STATUS_UNSUPPORTED_FEATURE,
"Frame not displayable."); "Frame not displayable.");
} }
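
The three-byte frame tag decoded above packs key_frame, profile, show_frame and the partition-0 length into 24 bits. A self-contained decoder of that layout, mirroring the shifts in the hunk:

#include <stdint.h>

typedef struct {
  int key_frame;              /* bit 0, inverted */
  int profile;                /* bits 1..3 */
  int show;                   /* bit 4 */
  uint32_t partition_length;  /* bits 5..23: size of partition #0 */
} FrameTag;

static FrameTag ParseFrameTag(const uint8_t buf[3]) {
  const uint32_t bits = buf[0] | (buf[1] << 8) | ((uint32_t)buf[2] << 16);
  FrameTag tag;
  tag.key_frame = !(bits & 1);
  tag.profile = (bits >> 1) & 7;
  tag.show = (bits >> 4) & 1;
  tag.partition_length = bits >> 5;
  return tag;
}
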
@@ -303,8 +299,8 @@ int VP8GetHeaders(VP8Decoder* const dec, VP8Io* const io) {
buf_size -= 3; buf_size -= 3;
} }
pic_hdr = &dec->pic_hdr; pic_hdr = &dec->pic_hdr_;
if (frm_hdr->key_frame) { if (frm_hdr->key_frame_) {
// Paragraph 9.2 // Paragraph 9.2
if (buf_size < 7) { if (buf_size < 7) {
return VP8SetError(dec, VP8_STATUS_NOT_ENOUGH_DATA, return VP8SetError(dec, VP8_STATUS_NOT_ENOUGH_DATA,
@@ -314,20 +310,20 @@ int VP8GetHeaders(VP8Decoder* const dec, VP8Io* const io) {
return VP8SetError(dec, VP8_STATUS_BITSTREAM_ERROR, return VP8SetError(dec, VP8_STATUS_BITSTREAM_ERROR,
"Bad code word"); "Bad code word");
} }
pic_hdr->width = ((buf[4] << 8) | buf[3]) & 0x3fff; pic_hdr->width_ = ((buf[4] << 8) | buf[3]) & 0x3fff;
pic_hdr->xscale = buf[4] >> 6; // ratio: 1, 5/4 5/3 or 2 pic_hdr->xscale_ = buf[4] >> 6; // ratio: 1, 5/4 5/3 or 2
pic_hdr->height = ((buf[6] << 8) | buf[5]) & 0x3fff; pic_hdr->height_ = ((buf[6] << 8) | buf[5]) & 0x3fff;
pic_hdr->yscale = buf[6] >> 6; pic_hdr->yscale_ = buf[6] >> 6;
buf += 7; buf += 7;
buf_size -= 7; buf_size -= 7;
dec->mb_w = (pic_hdr->width + 15) >> 4; dec->mb_w_ = (pic_hdr->width_ + 15) >> 4;
dec->mb_h = (pic_hdr->height + 15) >> 4; dec->mb_h_ = (pic_hdr->height_ + 15) >> 4;
// Setup default output area (can be later modified during io->setup()) // Setup default output area (can be later modified during io->setup())
io->width = pic_hdr->width; io->width = pic_hdr->width_;
io->height = pic_hdr->height; io->height = pic_hdr->height_;
// IMPORTANT! use some sane dimensions in crop* and scaled* fields. // IMPORTANT! use some sane dimensions in crop_* and scaled_* fields.
// So they can be used interchangeably without always testing for // So they can be used interchangeably without always testing for
// 'use_cropping'. // 'use_cropping'.
io->use_cropping = 0; io->use_cropping = 0;
@@ -342,27 +338,27 @@ int VP8GetHeaders(VP8Decoder* const dec, VP8Io* const io) {
io->mb_w = io->width; // for soundness io->mb_w = io->width; // for soundness
io->mb_h = io->height; // ditto io->mb_h = io->height; // ditto
VP8ResetProba(&dec->proba); VP8ResetProba(&dec->proba_);
ResetSegmentHeader(&dec->segment_hdr); ResetSegmentHeader(&dec->segment_hdr_);
} }
// Check if we have all the partition #0 available, and initialize dec->br // Check if we have all the partition #0 available, and initialize dec->br_
// to read this partition (and this partition only). // to read this partition (and this partition only).
if (frm_hdr->partition_length > buf_size) { if (frm_hdr->partition_length_ > buf_size) {
return VP8SetError(dec, VP8_STATUS_NOT_ENOUGH_DATA, return VP8SetError(dec, VP8_STATUS_NOT_ENOUGH_DATA,
"bad partition length"); "bad partition length");
} }
br = &dec->br; br = &dec->br_;
VP8InitBitReader(br, buf, frm_hdr->partition_length); VP8InitBitReader(br, buf, frm_hdr->partition_length_);
buf += frm_hdr->partition_length; buf += frm_hdr->partition_length_;
buf_size -= frm_hdr->partition_length; buf_size -= frm_hdr->partition_length_;
if (frm_hdr->key_frame) { if (frm_hdr->key_frame_) {
pic_hdr->colorspace = VP8Get(br, "global-header"); pic_hdr->colorspace_ = VP8Get(br, "global-header");
pic_hdr->clamp_type = VP8Get(br, "global-header"); pic_hdr->clamp_type_ = VP8Get(br, "global-header");
} }
if (!ParseSegmentHeader(br, &dec->segment_hdr, &dec->proba)) { if (!ParseSegmentHeader(br, &dec->segment_hdr_, &dec->proba_)) {
return VP8SetError(dec, VP8_STATUS_BITSTREAM_ERROR, return VP8SetError(dec, VP8_STATUS_BITSTREAM_ERROR,
"cannot parse segment header"); "cannot parse segment header");
} }
@@ -380,17 +376,17 @@ int VP8GetHeaders(VP8Decoder* const dec, VP8Io* const io) {
VP8ParseQuant(dec); VP8ParseQuant(dec);
// Frame buffer marking // Frame buffer marking
if (!frm_hdr->key_frame) { if (!frm_hdr->key_frame_) {
return VP8SetError(dec, VP8_STATUS_UNSUPPORTED_FEATURE, return VP8SetError(dec, VP8_STATUS_UNSUPPORTED_FEATURE,
"Not a key frame."); "Not a key frame.");
} }
VP8Get(br, "global-header"); // ignore the value of 'update_proba' VP8Get(br, "global-header"); // ignore the value of update_proba_
VP8ParseProba(br, dec); VP8ParseProba(br, dec);
// sanitized state // sanitized state
dec->ready = 1; dec->ready_ = 1;
return 1; return 1;
} }
@@ -443,17 +439,17 @@ static int GetLargeValue(VP8BitReader* const br, const uint8_t* const p) {
static int GetCoeffsFast(VP8BitReader* const br, static int GetCoeffsFast(VP8BitReader* const br,
const VP8BandProbas* const prob[], const VP8BandProbas* const prob[],
int ctx, const quant_t dq, int n, int16_t* out) { int ctx, const quant_t dq, int n, int16_t* out) {
const uint8_t* p = prob[n]->probas[ctx]; const uint8_t* p = prob[n]->probas_[ctx];
for (; n < 16; ++n) { for (; n < 16; ++n) {
if (!VP8GetBit(br, p[0], "coeffs")) { if (!VP8GetBit(br, p[0], "coeffs")) {
return n; // previous coeff was last non-zero coeff return n; // previous coeff was last non-zero coeff
} }
while (!VP8GetBit(br, p[1], "coeffs")) { // sequence of zero coeffs while (!VP8GetBit(br, p[1], "coeffs")) { // sequence of zero coeffs
p = prob[++n]->probas[0]; p = prob[++n]->probas_[0];
if (n == 16) return 16; if (n == 16) return 16;
} }
{ // non zero coeff { // non zero coeff
const VP8ProbaArray* const p_ctx = &prob[n + 1]->probas[0]; const VP8ProbaArray* const p_ctx = &prob[n + 1]->probas_[0];
int v; int v;
if (!VP8GetBit(br, p[2], "coeffs")) { if (!VP8GetBit(br, p[2], "coeffs")) {
v = 1; v = 1;
@@ -473,17 +469,17 @@ static int GetCoeffsFast(VP8BitReader* const br,
static int GetCoeffsAlt(VP8BitReader* const br, static int GetCoeffsAlt(VP8BitReader* const br,
const VP8BandProbas* const prob[], const VP8BandProbas* const prob[],
int ctx, const quant_t dq, int n, int16_t* out) { int ctx, const quant_t dq, int n, int16_t* out) {
const uint8_t* p = prob[n]->probas[ctx]; const uint8_t* p = prob[n]->probas_[ctx];
for (; n < 16; ++n) { for (; n < 16; ++n) {
if (!VP8GetBitAlt(br, p[0], "coeffs")) { if (!VP8GetBitAlt(br, p[0], "coeffs")) {
return n; // previous coeff was last non-zero coeff return n; // previous coeff was last non-zero coeff
} }
while (!VP8GetBitAlt(br, p[1], "coeffs")) { // sequence of zero coeffs while (!VP8GetBitAlt(br, p[1], "coeffs")) { // sequence of zero coeffs
p = prob[++n]->probas[0]; p = prob[++n]->probas_[0];
if (n == 16) return 16; if (n == 16) return 16;
} }
{ // non zero coeff { // non zero coeff
const VP8ProbaArray* const p_ctx = &prob[n + 1]->probas[0]; const VP8ProbaArray* const p_ctx = &prob[n + 1]->probas_[0];
int v; int v;
if (!VP8GetBitAlt(br, p[2], "coeffs")) { if (!VP8GetBitAlt(br, p[2], "coeffs")) {
v = 1; v = 1;
@@ -516,12 +512,12 @@ static WEBP_INLINE uint32_t NzCodeBits(uint32_t nz_coeffs, int nz, int dc_nz) {
static int ParseResiduals(VP8Decoder* const dec, static int ParseResiduals(VP8Decoder* const dec,
VP8MB* const mb, VP8BitReader* const token_br) { VP8MB* const mb, VP8BitReader* const token_br) {
const VP8BandProbas* (* const bands)[16 + 1] = dec->proba.bands_ptr; const VP8BandProbas* (* const bands)[16 + 1] = dec->proba_.bands_ptr_;
const VP8BandProbas* const * ac_proba; const VP8BandProbas* const * ac_proba;
VP8MBData* const block = dec->mb_data + dec->mb_x; VP8MBData* const block = dec->mb_data_ + dec->mb_x_;
const VP8QuantMatrix* const q = &dec->dqm[block->segment]; const VP8QuantMatrix* const q = &dec->dqm_[block->segment_];
int16_t* dst = block->coeffs; int16_t* dst = block->coeffs_;
VP8MB* const left_mb = dec->mb_info - 1; VP8MB* const left_mb = dec->mb_info_ - 1;
uint8_t tnz, lnz; uint8_t tnz, lnz;
uint32_t non_zero_y = 0; uint32_t non_zero_y = 0;
uint32_t non_zero_uv = 0; uint32_t non_zero_uv = 0;
@@ -530,11 +526,11 @@ static int ParseResiduals(VP8Decoder* const dec,
int first; int first;
memset(dst, 0, 384 * sizeof(*dst)); memset(dst, 0, 384 * sizeof(*dst));
if (!block->is_i4x4) { // parse DC if (!block->is_i4x4_) { // parse DC
int16_t dc[16] = { 0 }; int16_t dc[16] = { 0 };
const int ctx = mb->nz_dc + left_mb->nz_dc; const int ctx = mb->nz_dc_ + left_mb->nz_dc_;
const int nz = GetCoeffs(token_br, bands[1], ctx, q->y2_mat, 0, dc); const int nz = GetCoeffs(token_br, bands[1], ctx, q->y2_mat_, 0, dc);
mb->nz_dc = left_mb->nz_dc = (nz > 0); mb->nz_dc_ = left_mb->nz_dc_ = (nz > 0);
if (nz > 1) { // more than just the DC -> perform the full transform if (nz > 1) { // more than just the DC -> perform the full transform
VP8TransformWHT(dc, dst); VP8TransformWHT(dc, dst);
} else { // only DC is non-zero -> inlined simplified transform } else { // only DC is non-zero -> inlined simplified transform
@@ -549,14 +545,14 @@ static int ParseResiduals(VP8Decoder* const dec,
ac_proba = bands[3]; ac_proba = bands[3];
} }
tnz = mb->nz & 0x0f; tnz = mb->nz_ & 0x0f;
lnz = left_mb->nz & 0x0f; lnz = left_mb->nz_ & 0x0f;
for (y = 0; y < 4; ++y) { for (y = 0; y < 4; ++y) {
int l = lnz & 1; int l = lnz & 1;
uint32_t nz_coeffs = 0; uint32_t nz_coeffs = 0;
for (x = 0; x < 4; ++x) { for (x = 0; x < 4; ++x) {
const int ctx = l + (tnz & 1); const int ctx = l + (tnz & 1);
const int nz = GetCoeffs(token_br, ac_proba, ctx, q->y1_mat, first, dst); const int nz = GetCoeffs(token_br, ac_proba, ctx, q->y1_mat_, first, dst);
l = (nz > first); l = (nz > first);
tnz = (tnz >> 1) | (l << 7); tnz = (tnz >> 1) | (l << 7);
nz_coeffs = NzCodeBits(nz_coeffs, nz, dst[0] != 0); nz_coeffs = NzCodeBits(nz_coeffs, nz, dst[0] != 0);
@@ -571,13 +567,13 @@ static int ParseResiduals(VP8Decoder* const dec,
for (ch = 0; ch < 4; ch += 2) { for (ch = 0; ch < 4; ch += 2) {
uint32_t nz_coeffs = 0; uint32_t nz_coeffs = 0;
tnz = mb->nz >> (4 + ch); tnz = mb->nz_ >> (4 + ch);
lnz = left_mb->nz >> (4 + ch); lnz = left_mb->nz_ >> (4 + ch);
for (y = 0; y < 2; ++y) { for (y = 0; y < 2; ++y) {
int l = lnz & 1; int l = lnz & 1;
for (x = 0; x < 2; ++x) { for (x = 0; x < 2; ++x) {
const int ctx = l + (tnz & 1); const int ctx = l + (tnz & 1);
const int nz = GetCoeffs(token_br, bands[2], ctx, q->uv_mat, 0, dst); const int nz = GetCoeffs(token_br, bands[2], ctx, q->uv_mat_, 0, dst);
l = (nz > 0); l = (nz > 0);
tnz = (tnz >> 1) | (l << 3); tnz = (tnz >> 1) | (l << 3);
nz_coeffs = NzCodeBits(nz_coeffs, nz, dst[0] != 0); nz_coeffs = NzCodeBits(nz_coeffs, nz, dst[0] != 0);
@@ -591,16 +587,16 @@ static int ParseResiduals(VP8Decoder* const dec,
out_t_nz |= (tnz << 4) << ch; out_t_nz |= (tnz << 4) << ch;
out_l_nz |= (lnz & 0xf0) << ch; out_l_nz |= (lnz & 0xf0) << ch;
} }
mb->nz = out_t_nz; mb->nz_ = out_t_nz;
left_mb->nz = out_l_nz; left_mb->nz_ = out_l_nz;
block->non_zero_y = non_zero_y; block->non_zero_y_ = non_zero_y;
block->non_zero_uv = non_zero_uv; block->non_zero_uv_ = non_zero_uv;
// We look at the mode-code of each block and check if some blocks have less // We look at the mode-code of each block and check if some blocks have less
// than three non-zero coeffs (code < 2). This is to avoid dithering flat and // than three non-zero coeffs (code < 2). This is to avoid dithering flat and
// empty blocks. // empty blocks.
block->dither = (non_zero_uv & 0xaaaa) ? 0 : q->dither; block->dither_ = (non_zero_uv & 0xaaaa) ? 0 : q->dither_;
return !(non_zero_y | non_zero_uv); // will be used for further optimization return !(non_zero_y | non_zero_uv); // will be used for further optimization
} }
@@ -609,50 +605,50 @@ static int ParseResiduals(VP8Decoder* const dec,
// Main loop // Main loop
int VP8DecodeMB(VP8Decoder* const dec, VP8BitReader* const token_br) { int VP8DecodeMB(VP8Decoder* const dec, VP8BitReader* const token_br) {
VP8MB* const left = dec->mb_info - 1; VP8MB* const left = dec->mb_info_ - 1;
VP8MB* const mb = dec->mb_info + dec->mb_x; VP8MB* const mb = dec->mb_info_ + dec->mb_x_;
VP8MBData* const block = dec->mb_data + dec->mb_x; VP8MBData* const block = dec->mb_data_ + dec->mb_x_;
int skip = dec->use_skip_proba ? block->skip : 0; int skip = dec->use_skip_proba_ ? block->skip_ : 0;
if (!skip) { if (!skip) {
skip = ParseResiduals(dec, mb, token_br); skip = ParseResiduals(dec, mb, token_br);
} else { } else {
left->nz = mb->nz = 0; left->nz_ = mb->nz_ = 0;
if (!block->is_i4x4) { if (!block->is_i4x4_) {
left->nz_dc = mb->nz_dc = 0; left->nz_dc_ = mb->nz_dc_ = 0;
} }
block->non_zero_y = 0; block->non_zero_y_ = 0;
block->non_zero_uv = 0; block->non_zero_uv_ = 0;
block->dither = 0; block->dither_ = 0;
} }
if (dec->filter_type > 0) { // store filter info if (dec->filter_type_ > 0) { // store filter info
VP8FInfo* const finfo = dec->f_info + dec->mb_x; VP8FInfo* const finfo = dec->f_info_ + dec->mb_x_;
*finfo = dec->fstrengths[block->segment][block->is_i4x4]; *finfo = dec->fstrengths_[block->segment_][block->is_i4x4_];
finfo->f_inner |= !skip; finfo->f_inner_ |= !skip;
} }
return !token_br->eof; return !token_br->eof_;
} }
void VP8InitScanline(VP8Decoder* const dec) { void VP8InitScanline(VP8Decoder* const dec) {
VP8MB* const left = dec->mb_info - 1; VP8MB* const left = dec->mb_info_ - 1;
left->nz = 0; left->nz_ = 0;
left->nz_dc = 0; left->nz_dc_ = 0;
memset(dec->intra_l, B_DC_PRED, sizeof(dec->intra_l)); memset(dec->intra_l_, B_DC_PRED, sizeof(dec->intra_l_));
dec->mb_x = 0; dec->mb_x_ = 0;
} }
static int ParseFrame(VP8Decoder* const dec, VP8Io* io) { static int ParseFrame(VP8Decoder* const dec, VP8Io* io) {
for (dec->mb_y = 0; dec->mb_y < dec->br_mb_y; ++dec->mb_y) { for (dec->mb_y_ = 0; dec->mb_y_ < dec->br_mb_y_; ++dec->mb_y_) {
// Parse bitstream for this row. // Parse bitstream for this row.
VP8BitReader* const token_br = VP8BitReader* const token_br =
&dec->parts[dec->mb_y & dec->num_parts_minus_one]; &dec->parts_[dec->mb_y_ & dec->num_parts_minus_one_];
if (!VP8ParseIntraModeRow(&dec->br, dec)) { if (!VP8ParseIntraModeRow(&dec->br_, dec)) {
return VP8SetError(dec, VP8_STATUS_NOT_ENOUGH_DATA, return VP8SetError(dec, VP8_STATUS_NOT_ENOUGH_DATA,
"Premature end-of-partition0 encountered."); "Premature end-of-partition0 encountered.");
} }
for (; dec->mb_x < dec->mb_w; ++dec->mb_x) { for (; dec->mb_x_ < dec->mb_w_; ++dec->mb_x_) {
if (!VP8DecodeMB(dec, token_br)) { if (!VP8DecodeMB(dec, token_br)) {
return VP8SetError(dec, VP8_STATUS_NOT_ENOUGH_DATA, return VP8SetError(dec, VP8_STATUS_NOT_ENOUGH_DATA,
"Premature end-of-file encountered."); "Premature end-of-file encountered.");
@@ -665,8 +661,8 @@ static int ParseFrame(VP8Decoder* const dec, VP8Io* io) {
return VP8SetError(dec, VP8_STATUS_USER_ABORT, "Output aborted."); return VP8SetError(dec, VP8_STATUS_USER_ABORT, "Output aborted.");
} }
} }
if (dec->mt_method > 0) { if (dec->mt_method_ > 0) {
if (!WebPGetWorkerInterface()->Sync(&dec->worker)) return 0; if (!WebPGetWorkerInterface()->Sync(&dec->worker_)) return 0;
} }
return 1; return 1;
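
ParseFrame() above selects the token partition with a mask rather than a modulo: num_parts_minus_one is (1 << n) - 1 for a 2-bit n (see ParsePartitions), so the AND is equivalent to mb_y % num_parts. For instance:

#include <assert.h>

/* Round-robin partition index for a macroblock row. 'num_parts' is a
   power of two (1, 2, 4 or 8), so masking matches the modulo. */
static int TokenPartition(int mb_y, int num_parts) {
  assert((num_parts & (num_parts - 1)) == 0);
  return mb_y & (num_parts - 1);
}
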
@@ -683,12 +679,12 @@ int VP8Decode(VP8Decoder* const dec, VP8Io* const io) {
"NULL VP8Io parameter in VP8Decode()."); "NULL VP8Io parameter in VP8Decode().");
} }
if (!dec->ready) { if (!dec->ready_) {
if (!VP8GetHeaders(dec, io)) { if (!VP8GetHeaders(dec, io)) {
return 0; return 0;
} }
} }
assert(dec->ready); assert(dec->ready_);
// Finish setting up the decoding parameter. Will call io->setup(). // Finish setting up the decoding parameter. Will call io->setup().
ok = (VP8EnterCritical(dec, io) == VP8_STATUS_OK); ok = (VP8EnterCritical(dec, io) == VP8_STATUS_OK);
@@ -708,7 +704,7 @@ int VP8Decode(VP8Decoder* const dec, VP8Io* const io) {
return 0; return 0;
} }
dec->ready = 0; dec->ready_ = 0;
return ok; return ok;
} }
@@ -716,13 +712,13 @@ void VP8Clear(VP8Decoder* const dec) {
if (dec == NULL) { if (dec == NULL) {
return; return;
} }
WebPGetWorkerInterface()->End(&dec->worker); WebPGetWorkerInterface()->End(&dec->worker_);
WebPDeallocateAlphaMemory(dec); WebPDeallocateAlphaMemory(dec);
WebPSafeFree(dec->mem); WebPSafeFree(dec->mem_);
dec->mem = NULL; dec->mem_ = NULL;
dec->mem_size = 0; dec->mem_size_ = 0;
memset(&dec->br, 0, sizeof(dec->br)); memset(&dec->br_, 0, sizeof(dec->br_));
dec->ready = 0; dec->ready_ = 0;
} }
//------------------------------------------------------------------------------ //------------------------------------------------------------------------------


@@ -15,7 +15,6 @@
#define WEBP_DEC_VP8_DEC_H_ #define WEBP_DEC_VP8_DEC_H_
#include "src/webp/decode.h" #include "src/webp/decode.h"
#include "src/webp/types.h"
#ifdef __cplusplus #ifdef __cplusplus
extern "C" { extern "C" {
@@ -109,14 +108,16 @@ struct VP8Io {
}; };
// Internal, version-checked, entry point // Internal, version-checked, entry point
WEBP_NODISCARD int VP8InitIoInternal(VP8Io* const, int); int VP8InitIoInternal(VP8Io* const, int);
// Set the custom IO function pointers and user-data. The setter for IO hooks // Set the custom IO function pointers and user-data. The setter for IO hooks
// should be called before initiating incremental decoding. Returns true if // should be called before initiating incremental decoding. Returns true if
// WebPIDecoder object is successfully modified, false otherwise. // WebPIDecoder object is successfully modified, false otherwise.
WEBP_NODISCARD int WebPISetIOHooks(WebPIDecoder* const idec, VP8IoPutHook put, int WebPISetIOHooks(WebPIDecoder* const idec,
VP8IoSetupHook setup, VP8IoPutHook put,
VP8IoTeardownHook teardown, void* user_data); VP8IoSetupHook setup,
VP8IoTeardownHook teardown,
void* user_data);
// Main decoding object. This is an opaque structure. // Main decoding object. This is an opaque structure.
typedef struct VP8Decoder VP8Decoder; typedef struct VP8Decoder VP8Decoder;
@@ -127,17 +128,17 @@ VP8Decoder* VP8New(void);
// Must be called to make sure 'io' is initialized properly. // Must be called to make sure 'io' is initialized properly.
// Returns false in case of version mismatch. Upon such failure, no other // Returns false in case of version mismatch. Upon such failure, no other
// decoding function should be called (VP8Decode, VP8GetHeaders, ...) // decoding function should be called (VP8Decode, VP8GetHeaders, ...)
WEBP_NODISCARD static WEBP_INLINE int VP8InitIo(VP8Io* const io) { static WEBP_INLINE int VP8InitIo(VP8Io* const io) {
return VP8InitIoInternal(io, WEBP_DECODER_ABI_VERSION); return VP8InitIoInternal(io, WEBP_DECODER_ABI_VERSION);
} }
// Decode the VP8 frame header. Returns true if ok. // Decode the VP8 frame header. Returns true if ok.
// Note: 'io->data' must be pointing to the start of the VP8 frame header. // Note: 'io->data' must be pointing to the start of the VP8 frame header.
WEBP_NODISCARD int VP8GetHeaders(VP8Decoder* const dec, VP8Io* const io); int VP8GetHeaders(VP8Decoder* const dec, VP8Io* const io);
// Decode a picture. Will call VP8GetHeaders() if it wasn't done already. // Decode a picture. Will call VP8GetHeaders() if it wasn't done already.
// Returns false in case of error. // Returns false in case of error.
WEBP_NODISCARD int VP8Decode(VP8Decoder* const dec, VP8Io* const io); int VP8Decode(VP8Decoder* const dec, VP8Io* const io);
// Return current status of the decoder: // Return current status of the decoder:
VP8StatusCode VP8Status(VP8Decoder* const dec); VP8StatusCode VP8Status(VP8Decoder* const dec);


@@ -21,7 +21,6 @@
#include "src/utils/random_utils.h" #include "src/utils/random_utils.h"
#include "src/utils/thread_utils.h" #include "src/utils/thread_utils.h"
#include "src/dsp/dsp.h" #include "src/dsp/dsp.h"
#include "src/webp/types.h"
#ifdef __cplusplus #ifdef __cplusplus
extern "C" { extern "C" {
@@ -32,8 +31,8 @@ extern "C" {
// version numbers // version numbers
#define DEC_MAJ_VERSION 1 #define DEC_MAJ_VERSION 1
#define DEC_MIN_VERSION 5 #define DEC_MIN_VERSION 3
#define DEC_REV_VERSION 0 #define DEC_REV_VERSION 1
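
For reference, libwebp exposes these three macros packed into one value, eight bits per component (WebPGetDecoderVersion() returns (major << 16) | (minor << 8) | revision, so 1.3.1 reads back as 0x010301); a trivial restatement of that packing:

/* Version packed as 0xMMmmrr, e.g. 1.3.1 -> 0x010301. */
static int PackVersion(int major, int minor, int revision) {
  return (major << 16) | (minor << 8) | revision;
}
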
// YUV-cache parameters. Cache is 32-bytes wide (= one cacheline). // YUV-cache parameters. Cache is 32-bytes wide (= one cacheline).
// Constraints are: We need to store one 16x16 block of luma samples (y), // Constraints are: We need to store one 16x16 block of luma samples (y),
@@ -69,85 +68,85 @@ extern "C" {
// Headers // Headers
typedef struct { typedef struct {
uint8_t key_frame; uint8_t key_frame_;
uint8_t profile; uint8_t profile_;
uint8_t show; uint8_t show_;
uint32_t partition_length; uint32_t partition_length_;
} VP8FrameHeader; } VP8FrameHeader;
typedef struct { typedef struct {
uint16_t width; uint16_t width_;
uint16_t height; uint16_t height_;
uint8_t xscale; uint8_t xscale_;
uint8_t yscale; uint8_t yscale_;
uint8_t colorspace; // 0 = YCbCr uint8_t colorspace_; // 0 = YCbCr
uint8_t clamp_type; uint8_t clamp_type_;
} VP8PictureHeader; } VP8PictureHeader;
// segment features // segment features
typedef struct { typedef struct {
int use_segment; int use_segment_;
int update_map; // whether to update the segment map or not int update_map_; // whether to update the segment map or not
int absolute_delta; // absolute or delta values for quantizer and filter int absolute_delta_; // absolute or delta values for quantizer and filter
int8_t quantizer[NUM_MB_SEGMENTS]; // quantization changes int8_t quantizer_[NUM_MB_SEGMENTS]; // quantization changes
int8_t filter_strength[NUM_MB_SEGMENTS]; // filter strength for segments int8_t filter_strength_[NUM_MB_SEGMENTS]; // filter strength for segments
} VP8SegmentHeader; } VP8SegmentHeader;
// probas associated to one of the contexts // probas associated to one of the contexts
typedef uint8_t VP8ProbaArray[NUM_PROBAS]; typedef uint8_t VP8ProbaArray[NUM_PROBAS];
typedef struct { // all the probas associated to one band typedef struct { // all the probas associated to one band
VP8ProbaArray probas[NUM_CTX]; VP8ProbaArray probas_[NUM_CTX];
} VP8BandProbas; } VP8BandProbas;
// Struct collecting all frame-persistent probabilities. // Struct collecting all frame-persistent probabilities.
typedef struct { typedef struct {
uint8_t segments[MB_FEATURE_TREE_PROBS]; uint8_t segments_[MB_FEATURE_TREE_PROBS];
// Type: 0:Intra16-AC 1:Intra16-DC 2:Chroma 3:Intra4 // Type: 0:Intra16-AC 1:Intra16-DC 2:Chroma 3:Intra4
VP8BandProbas bands[NUM_TYPES][NUM_BANDS]; VP8BandProbas bands_[NUM_TYPES][NUM_BANDS];
const VP8BandProbas* bands_ptr[NUM_TYPES][16 + 1]; const VP8BandProbas* bands_ptr_[NUM_TYPES][16 + 1];
} VP8Proba; } VP8Proba;
// Filter parameters // Filter parameters
typedef struct { typedef struct {
int simple; // 0=complex, 1=simple int simple_; // 0=complex, 1=simple
int level; // [0..63] int level_; // [0..63]
int sharpness; // [0..7] int sharpness_; // [0..7]
int use_lf_delta; int use_lf_delta_;
int ref_lf_delta[NUM_REF_LF_DELTAS]; int ref_lf_delta_[NUM_REF_LF_DELTAS];
int mode_lf_delta[NUM_MODE_LF_DELTAS]; int mode_lf_delta_[NUM_MODE_LF_DELTAS];
} VP8FilterHeader; } VP8FilterHeader;
//------------------------------------------------------------------------------ //------------------------------------------------------------------------------
// Informations about the macroblocks. // Informations about the macroblocks.
typedef struct { // filter specs typedef struct { // filter specs
uint8_t f_limit; // filter limit in [3..189], or 0 if no filtering uint8_t f_limit_; // filter limit in [3..189], or 0 if no filtering
uint8_t f_ilevel; // inner limit in [1..63] uint8_t f_ilevel_; // inner limit in [1..63]
uint8_t f_inner; // do inner filtering? uint8_t f_inner_; // do inner filtering?
uint8_t hev_thresh; // high edge variance threshold in [0..2] uint8_t hev_thresh_; // high edge variance threshold in [0..2]
} VP8FInfo; } VP8FInfo;
typedef struct { // Top/Left Contexts used for syntax-parsing typedef struct { // Top/Left Contexts used for syntax-parsing
uint8_t nz; // non-zero AC/DC coeffs (4bit for luma + 4bit for chroma) uint8_t nz_; // non-zero AC/DC coeffs (4bit for luma + 4bit for chroma)
uint8_t nz_dc; // non-zero DC coeff (1bit) uint8_t nz_dc_; // non-zero DC coeff (1bit)
} VP8MB; } VP8MB;
// Dequantization matrices // Dequantization matrices
typedef int quant_t[2]; // [DC / AC]. Can be 'uint16_t[2]' too (~slower). typedef int quant_t[2]; // [DC / AC]. Can be 'uint16_t[2]' too (~slower).
typedef struct { typedef struct {
quant_t y1_mat, y2_mat, uv_mat; quant_t y1_mat_, y2_mat_, uv_mat_;
int uv_quant; // U/V quantizer value int uv_quant_; // U/V quantizer value
int dither; // dithering amplitude (0 = off, max=255) int dither_; // dithering amplitude (0 = off, max=255)
} VP8QuantMatrix; } VP8QuantMatrix;
// Data needed to reconstruct a macroblock // Data needed to reconstruct a macroblock
typedef struct { typedef struct {
int16_t coeffs[384]; // 384 coeffs = (16+4+4) * 4*4 int16_t coeffs_[384]; // 384 coeffs = (16+4+4) * 4*4
uint8_t is_i4x4; // true if intra4x4 uint8_t is_i4x4_; // true if intra4x4
uint8_t imodes[16]; // one 16x16 mode (#0) or sixteen 4x4 modes uint8_t imodes_[16]; // one 16x16 mode (#0) or sixteen 4x4 modes
uint8_t uvmode; // chroma prediction mode uint8_t uvmode_; // chroma prediction mode
// bit-wise info about the content of each sub-4x4 blocks (in decoding order). // bit-wise info about the content of each sub-4x4 blocks (in decoding order).
// Each of the 4x4 blocks for y/u/v is associated with a 2b code according to: // Each of the 4x4 blocks for y/u/v is associated with a 2b code according to:
// code=0 -> no coefficient // code=0 -> no coefficient
@@ -155,21 +154,21 @@ typedef struct {
// code=2 -> first three coefficients are non-zero // code=2 -> first three coefficients are non-zero
// code=3 -> more than three coefficients are non-zero // code=3 -> more than three coefficients are non-zero
// This allows to call specialized transform functions. // This allows to call specialized transform functions.
uint32_t non_zero_y; uint32_t non_zero_y_;
uint32_t non_zero_uv; uint32_t non_zero_uv_;
uint8_t dither; // local dithering strength (deduced from non_zero*) uint8_t dither_; // local dithering strength (deduced from non_zero_*)
uint8_t skip; uint8_t skip_;
uint8_t segment; uint8_t segment_;
} VP8MBData; } VP8MBData;
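
The non_zero_y / non_zero_uv fields above hold one 2-bit code per 4x4 sub-block, in decoding order, which is what lets the reconstruction pick a specialized transform per block. A sketch of packing and querying such codes (illustrative layout; not necessarily libwebp's exact bit order):

#include <stdint.h>

/* Store a 2-bit code (0..3) for sub-block 'k' (0..15). */
static uint32_t SetBlockCode(uint32_t bits, int k, unsigned code) {
  bits &= ~(3u << (2 * k));
  return bits | ((code & 3u) << (2 * k));
}

/* code=0: empty, 1: DC only, 2: first three coeffs, 3: more than three. */
static unsigned GetBlockCode(uint32_t bits, int k) {
  return (bits >> (2 * k)) & 3u;
}
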
// Persistent information needed by the parallel processing // Persistent information needed by the parallel processing
typedef struct { typedef struct {
int id; // cache row to process (in [0..2]) int id_; // cache row to process (in [0..2])
int mb_y; // macroblock position of the row int mb_y_; // macroblock position of the row
int filter_row; // true if row-filtering is needed int filter_row_; // true if row-filtering is needed
VP8FInfo* f_info; // filter strengths (swapped with dec->f_info) VP8FInfo* f_info_; // filter strengths (swapped with dec->f_info_)
VP8MBData* mb_data; // reconstruction data (swapped with dec->mb_data) VP8MBData* mb_data_; // reconstruction data (swapped with dec->mb_data_)
VP8Io io; // copy of the VP8Io to pass to put() VP8Io io_; // copy of the VP8Io to pass to put()
} VP8ThreadContext; } VP8ThreadContext;
// Saved top samples, per macroblock. Fits into a cache-line. // Saved top samples, per macroblock. Fits into a cache-line.
@ -181,89 +180,88 @@ typedef struct {
// VP8Decoder: the main opaque structure handed over to user // VP8Decoder: the main opaque structure handed over to user
struct VP8Decoder { struct VP8Decoder {
VP8StatusCode status; VP8StatusCode status_;
int ready; // true if ready to decode a picture with VP8Decode() int ready_; // true if ready to decode a picture with VP8Decode()
const char* error_msg; // set when status is not OK. const char* error_msg_; // set when status_ is not OK.
// Main data source // Main data source
VP8BitReader br; VP8BitReader br_;
int incremental; // if true, incremental decoding is expected
// headers // headers
VP8FrameHeader frm_hdr; VP8FrameHeader frm_hdr_;
VP8PictureHeader pic_hdr; VP8PictureHeader pic_hdr_;
VP8FilterHeader filter_hdr; VP8FilterHeader filter_hdr_;
VP8SegmentHeader segment_hdr; VP8SegmentHeader segment_hdr_;
// Worker // Worker
WebPWorker worker; WebPWorker worker_;
int mt_method; // multi-thread method: 0=off, 1=[parse+recon][filter] int mt_method_; // multi-thread method: 0=off, 1=[parse+recon][filter]
// 2=[parse][recon+filter] // 2=[parse][recon+filter]
int cache_id; // current cache row int cache_id_; // current cache row
int num_caches; // number of cached rows of 16 pixels (1, 2 or 3) int num_caches_; // number of cached rows of 16 pixels (1, 2 or 3)
VP8ThreadContext thread_ctx; // Thread context VP8ThreadContext thread_ctx_; // Thread context
// dimension, in macroblock units. // dimension, in macroblock units.
int mb_w, mb_h; int mb_w_, mb_h_;
// Macroblock to process/filter, depending on cropping and filter_type. // Macroblock to process/filter, depending on cropping and filter_type.
int tl_mb_x, tl_mb_y; // top-left MB that must be in-loop filtered int tl_mb_x_, tl_mb_y_; // top-left MB that must be in-loop filtered
int br_mb_x, br_mb_y; // last bottom-right MB that must be decoded int br_mb_x_, br_mb_y_; // last bottom-right MB that must be decoded
// number of partitions minus one. // number of partitions minus one.
uint32_t num_parts_minus_one; uint32_t num_parts_minus_one_;
// per-partition boolean decoders. // per-partition boolean decoders.
VP8BitReader parts[MAX_NUM_PARTITIONS]; VP8BitReader parts_[MAX_NUM_PARTITIONS];
// Dithering strength, deduced from decoding options // Dithering strength, deduced from decoding options
int dither; // whether to use dithering or not int dither_; // whether to use dithering or not
VP8Random dithering_rg; // random generator for dithering VP8Random dithering_rg_; // random generator for dithering
// dequantization (one set of DC/AC dequant factor per segment) // dequantization (one set of DC/AC dequant factor per segment)
VP8QuantMatrix dqm[NUM_MB_SEGMENTS]; VP8QuantMatrix dqm_[NUM_MB_SEGMENTS];
// probabilities // probabilities
VP8Proba proba; VP8Proba proba_;
int use_skip_proba; int use_skip_proba_;
uint8_t skip_p; uint8_t skip_p_;
// Boundary data cache and persistent buffers. // Boundary data cache and persistent buffers.
uint8_t* intra_t; // top intra modes values: 4 * mb_w uint8_t* intra_t_; // top intra modes values: 4 * mb_w_
uint8_t intra_l[4]; // left intra modes values uint8_t intra_l_[4]; // left intra modes values
VP8TopSamples* yuv_t; // top y/u/v samples VP8TopSamples* yuv_t_; // top y/u/v samples
VP8MB* mb_info; // contextual macroblock info (mb_w + 1) VP8MB* mb_info_; // contextual macroblock info (mb_w_ + 1)
VP8FInfo* f_info; // filter strength info VP8FInfo* f_info_; // filter strength info
uint8_t* yuv_b; // main block for Y/U/V (size = YUV_SIZE) uint8_t* yuv_b_; // main block for Y/U/V (size = YUV_SIZE)
uint8_t* cache_y; // macroblock row for storing unfiltered samples uint8_t* cache_y_; // macroblock row for storing unfiltered samples
uint8_t* cache_u; uint8_t* cache_u_;
uint8_t* cache_v; uint8_t* cache_v_;
int cache_y_stride; int cache_y_stride_;
int cache_uv_stride; int cache_uv_stride_;
// main memory chunk for the above data. Persistent. // main memory chunk for the above data. Persistent.
void* mem; void* mem_;
size_t mem_size; size_t mem_size_;
// Per-macroblock non-persistent info. // Per-macroblock non-persistent info.
int mb_x, mb_y; // current position, in macroblock units int mb_x_, mb_y_; // current position, in macroblock units
VP8MBData* mb_data; // parsed reconstruction data VP8MBData* mb_data_; // parsed reconstruction data
// Filtering side-info // Filtering side-info
int filter_type; // 0=off, 1=simple, 2=complex int filter_type_; // 0=off, 1=simple, 2=complex
VP8FInfo fstrengths[NUM_MB_SEGMENTS][2]; // precalculated per-segment/type VP8FInfo fstrengths_[NUM_MB_SEGMENTS][2]; // precalculated per-segment/type
// Alpha // Alpha
struct ALPHDecoder* alph_dec; // alpha-plane decoder object struct ALPHDecoder* alph_dec_; // alpha-plane decoder object
const uint8_t* alpha_data; // compressed alpha data (if present) const uint8_t* alpha_data_; // compressed alpha data (if present)
size_t alpha_data_size; size_t alpha_data_size_;
int is_alpha_decoded; // true if alpha_data is decoded in alpha_plane int is_alpha_decoded_; // true if alpha_data_ is decoded in alpha_plane_
uint8_t* alpha_plane_mem; // memory allocated for alpha_plane uint8_t* alpha_plane_mem_; // memory allocated for alpha_plane_
uint8_t* alpha_plane; // output. Persistent, contains the whole data. uint8_t* alpha_plane_; // output. Persistent, contains the whole data.
const uint8_t* alpha_prev_line; // last decoded alpha row (or NULL) const uint8_t* alpha_prev_line_; // last decoded alpha row (or NULL)
int alpha_dithering; // derived from decoding options (0=off, 100=full) int alpha_dithering_; // derived from decoding options (0=off, 100=full)
}; };
//------------------------------------------------------------------------------ //------------------------------------------------------------------------------
@ -283,7 +281,7 @@ int VP8ParseIntraModeRow(VP8BitReader* const br, VP8Decoder* const dec);
void VP8ParseQuant(VP8Decoder* const dec); void VP8ParseQuant(VP8Decoder* const dec);
// in frame.c // in frame.c
WEBP_NODISCARD int VP8InitFrame(VP8Decoder* const dec, VP8Io* const io); int VP8InitFrame(VP8Decoder* const dec, VP8Io* const io);
// Call io->setup() and finish setting up scan parameters. // Call io->setup() and finish setting up scan parameters.
// After this call returns, one must always call VP8ExitCritical() with the // After this call returns, one must always call VP8ExitCritical() with the
// same parameters. Both functions should be used in pair. Returns VP8_STATUS_OK // same parameters. Both functions should be used in pair. Returns VP8_STATUS_OK
@ -291,7 +289,7 @@ WEBP_NODISCARD int VP8InitFrame(VP8Decoder* const dec, VP8Io* const io);
VP8StatusCode VP8EnterCritical(VP8Decoder* const dec, VP8Io* const io); VP8StatusCode VP8EnterCritical(VP8Decoder* const dec, VP8Io* const io);
// Must always be called in pair with VP8EnterCritical(). // Must always be called in pair with VP8EnterCritical().
// Returns false in case of error. // Returns false in case of error.
WEBP_NODISCARD int VP8ExitCritical(VP8Decoder* const dec, VP8Io* const io); int VP8ExitCritical(VP8Decoder* const dec, VP8Io* const io);
// Return the multi-threading method to use (0=off), depending // Return the multi-threading method to use (0=off), depending
// on options and bitstream size. Only for lossy decoding. // on options and bitstream size. Only for lossy decoding.
int VP8GetThreadMethod(const WebPDecoderOptions* const options, int VP8GetThreadMethod(const WebPDecoderOptions* const options,
@ -301,12 +299,11 @@ int VP8GetThreadMethod(const WebPDecoderOptions* const options,
void VP8InitDithering(const WebPDecoderOptions* const options, void VP8InitDithering(const WebPDecoderOptions* const options,
VP8Decoder* const dec); VP8Decoder* const dec);
// Process the last decoded row (filtering + output). // Process the last decoded row (filtering + output).
WEBP_NODISCARD int VP8ProcessRow(VP8Decoder* const dec, VP8Io* const io); int VP8ProcessRow(VP8Decoder* const dec, VP8Io* const io);
// To be called at the start of a new scanline, to initialize predictors. // To be called at the start of a new scanline, to initialize predictors.
void VP8InitScanline(VP8Decoder* const dec); void VP8InitScanline(VP8Decoder* const dec);
// Decode one macroblock. Returns false if there is not enough data. // Decode one macroblock. Returns false if there is not enough data.
WEBP_NODISCARD int VP8DecodeMB(VP8Decoder* const dec, int VP8DecodeMB(VP8Decoder* const dec, VP8BitReader* const token_br);
VP8BitReader* const token_br);
// in alpha.c // in alpha.c
const uint8_t* VP8DecompressAlphaRows(VP8Decoder* const dec, const uint8_t* VP8DecompressAlphaRows(VP8Decoder* const dec,

File diff suppressed because it is too large

View File

@ -20,7 +20,6 @@
#include "src/utils/bit_reader_utils.h" #include "src/utils/bit_reader_utils.h"
#include "src/utils/color_cache_utils.h" #include "src/utils/color_cache_utils.h"
#include "src/utils/huffman_utils.h" #include "src/utils/huffman_utils.h"
#include "src/webp/types.h"
#ifdef __cplusplus #ifdef __cplusplus
extern "C" { extern "C" {
@ -34,58 +33,58 @@ typedef enum {
typedef struct VP8LTransform VP8LTransform; typedef struct VP8LTransform VP8LTransform;
struct VP8LTransform { struct VP8LTransform {
VP8LImageTransformType type; // transform type. VP8LImageTransformType type_; // transform type.
int bits; // subsampling bits defining transform window. int bits_; // subsampling bits defining transform window.
int xsize; // transform window X index. int xsize_; // transform window X index.
int ysize; // transform window Y index. int ysize_; // transform window Y index.
uint32_t* data; // transform data. uint32_t* data_; // transform data.
}; };
typedef struct { typedef struct {
int color_cache_size; int color_cache_size_;
VP8LColorCache color_cache; VP8LColorCache color_cache_;
VP8LColorCache saved_color_cache; // for incremental VP8LColorCache saved_color_cache_; // for incremental
int huffman_mask; int huffman_mask_;
int huffman_subsample_bits; int huffman_subsample_bits_;
int huffman_xsize; int huffman_xsize_;
uint32_t* huffman_image; uint32_t* huffman_image_;
int num_htree_groups; int num_htree_groups_;
HTreeGroup* htree_groups; HTreeGroup* htree_groups_;
HuffmanTables huffman_tables; HuffmanCode* huffman_tables_;
} VP8LMetadata; } VP8LMetadata;
typedef struct VP8LDecoder VP8LDecoder; typedef struct VP8LDecoder VP8LDecoder;
struct VP8LDecoder { struct VP8LDecoder {
VP8StatusCode status; VP8StatusCode status_;
VP8LDecodeState state; VP8LDecodeState state_;
VP8Io* io; VP8Io* io_;
const WebPDecBuffer* output; // shortcut to io->opaque->output const WebPDecBuffer* output_; // shortcut to io->opaque->output
uint32_t* pixels; // Internal data: either uint8_t* for alpha uint32_t* pixels_; // Internal data: either uint8_t* for alpha
// or uint32_t* for BGRA. // or uint32_t* for BGRA.
uint32_t* argb_cache; // Scratch buffer for temporary BGRA storage. uint32_t* argb_cache_; // Scratch buffer for temporary BGRA storage.
VP8LBitReader br; VP8LBitReader br_;
int incremental; // if true, incremental decoding is expected int incremental_; // if true, incremental decoding is expected
VP8LBitReader saved_br; // note: could be local variables too VP8LBitReader saved_br_; // note: could be local variables too
int saved_last_pixel; int saved_last_pixel_;
int width; int width_;
int height; int height_;
int last_row; // last input row decoded so far. int last_row_; // last input row decoded so far.
int last_pixel; // last pixel decoded so far. However, it may int last_pixel_; // last pixel decoded so far. However, it may
// not be transformed, scaled and // not be transformed, scaled and
// color-converted yet. // color-converted yet.
int last_out_row; // last row output so far. int last_out_row_; // last row output so far.
VP8LMetadata hdr; VP8LMetadata hdr_;
int next_transform; int next_transform_;
VP8LTransform transforms[NUM_TRANSFORMS]; VP8LTransform transforms_[NUM_TRANSFORMS];
// or'd bitset storing the transforms types. // or'd bitset storing the transforms types.
uint32_t transforms_seen; uint32_t transforms_seen_;
uint8_t* rescaler_memory; // Working memory for rescaling work. uint8_t* rescaler_memory; // Working memory for rescaling work.
WebPRescaler* rescaler; // Common rescaler for all channels. WebPRescaler* rescaler; // Common rescaler for all channels.
@ -100,42 +99,33 @@ struct ALPHDecoder; // Defined in dec/alphai.h.
// Decodes image header for alpha data stored using lossless compression. // Decodes image header for alpha data stored using lossless compression.
// Returns false in case of error. // Returns false in case of error.
WEBP_NODISCARD int VP8LDecodeAlphaHeader(struct ALPHDecoder* const alph_dec, int VP8LDecodeAlphaHeader(struct ALPHDecoder* const alph_dec,
const uint8_t* const data, const uint8_t* const data, size_t data_size);
size_t data_size);
// Decodes *at least* 'last_row' rows of alpha. If some of the initial rows are // Decodes *at least* 'last_row' rows of alpha. If some of the initial rows are
// already decoded in previous call(s), it will resume decoding from where it // already decoded in previous call(s), it will resume decoding from where it
// was paused. // was paused.
// Returns false in case of bitstream error. // Returns false in case of bitstream error.
WEBP_NODISCARD int VP8LDecodeAlphaImageStream( int VP8LDecodeAlphaImageStream(struct ALPHDecoder* const alph_dec,
struct ALPHDecoder* const alph_dec, int last_row); int last_row);
// Allocates and initializes a new lossless decoder instance. // Allocates and initializes a new lossless decoder instance.
WEBP_NODISCARD VP8LDecoder* VP8LNew(void); VP8LDecoder* VP8LNew(void);
// Decodes the image header. Returns false in case of error. // Decodes the image header. Returns false in case of error.
WEBP_NODISCARD int VP8LDecodeHeader(VP8LDecoder* const dec, VP8Io* const io); int VP8LDecodeHeader(VP8LDecoder* const dec, VP8Io* const io);
// Decodes an image. It's required to decode the lossless header before calling // Decodes an image. It's required to decode the lossless header before calling
// this function. Returns false in case of error, with updated dec->status. // this function. Returns false in case of error, with updated dec->status_.
WEBP_NODISCARD int VP8LDecodeImage(VP8LDecoder* const dec); int VP8LDecodeImage(VP8LDecoder* const dec);
// Resets the decoder to its initial state, reclaiming memory.
// Preserves the dec->status_ value.
void VP8LClear(VP8LDecoder* const dec);
// Clears and deallocate a lossless decoder instance. // Clears and deallocate a lossless decoder instance.
void VP8LDelete(VP8LDecoder* const dec); void VP8LDelete(VP8LDecoder* const dec);
// Helper function for reading the different Huffman codes and storing them in
// 'huffman_tables' and 'htree_groups'.
// If mapping is NULL 'num_htree_groups_max' must equal 'num_htree_groups'.
// If it is not NULL, it maps 'num_htree_groups_max' indices to the
// 'num_htree_groups' groups. If 'num_htree_groups_max' > 'num_htree_groups',
// some of those indices map to -1. This is used for non-balanced codes to
// limit memory usage.
WEBP_NODISCARD int ReadHuffmanCodesHelper(
int color_cache_bits, int num_htree_groups, int num_htree_groups_max,
const int* const mapping, VP8LDecoder* const dec,
HuffmanTables* const huffman_tables, HTreeGroup** const htree_groups);
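
The mapping convention documented above (num_htree_groups_max meta-indices mapped onto num_htree_groups actual groups, with -1 marking indices that never occur) can be pictured with a small standalone sketch; ResolveGroup below is hypothetical and not part of libwebp.

/* Sketch of the meta-index -> group mapping described above. Illustrative. */
#include <stdio.h>

static int ResolveGroup(const int* mapping, int num_max, int meta_index) {
  if (meta_index < 0 || meta_index >= num_max) return -1;
  return (mapping == NULL) ? meta_index       /* identity: num_max == num_used */
                           : mapping[meta_index];
}

int main(void) {
  /* 5 possible meta-indices, but only 2 groups are actually referenced. */
  const int mapping[5] = {0, -1, 1, -1, -1};
  int i;
  for (i = 0; i < 5; ++i) {
    printf("meta %d -> group %d\n", i, ResolveGroup(mapping, 5, i));
  }
  return 0;
}
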
//------------------------------------------------------------------------------ //------------------------------------------------------------------------------
#ifdef __cplusplus #ifdef __cplusplus

View File

@ -13,16 +13,11 @@
#include <stdlib.h> #include <stdlib.h>
#include "src/dec/common_dec.h"
#include "src/dec/vp8_dec.h"
#include "src/dec/vp8i_dec.h" #include "src/dec/vp8i_dec.h"
#include "src/dec/vp8li_dec.h" #include "src/dec/vp8li_dec.h"
#include "src/dec/webpi_dec.h" #include "src/dec/webpi_dec.h"
#include "src/utils/rescaler_utils.h"
#include "src/utils/utils.h" #include "src/utils/utils.h"
#include "src/webp/decode.h"
#include "src/webp/mux_types.h" // ALPHA_FLAG #include "src/webp/mux_types.h" // ALPHA_FLAG
#include "src/webp/types.h"
//------------------------------------------------------------------------------ //------------------------------------------------------------------------------
// RIFF layout is: // RIFF layout is:
@ -449,9 +444,8 @@ void WebPResetDecParams(WebPDecParams* const params) {
// "Into" decoding variants // "Into" decoding variants
// Main flow // Main flow
WEBP_NODISCARD static VP8StatusCode DecodeInto(const uint8_t* const data, static VP8StatusCode DecodeInto(const uint8_t* const data, size_t data_size,
size_t data_size, WebPDecParams* const params) {
WebPDecParams* const params) {
VP8StatusCode status; VP8StatusCode status;
VP8Io io; VP8Io io;
WebPHeaderStructure headers; WebPHeaderStructure headers;
@ -465,9 +459,7 @@ WEBP_NODISCARD static VP8StatusCode DecodeInto(const uint8_t* const data,
} }
assert(params != NULL); assert(params != NULL);
if (!VP8InitIo(&io)) { VP8InitIo(&io);
return VP8_STATUS_INVALID_PARAM;
}
io.data = headers.data + headers.offset; io.data = headers.data + headers.offset;
io.data_size = headers.data_size - headers.offset; io.data_size = headers.data_size - headers.offset;
WebPInitCustomIo(params, &io); // Plug the I/O functions. WebPInitCustomIo(params, &io); // Plug the I/O functions.
@ -477,23 +469,23 @@ WEBP_NODISCARD static VP8StatusCode DecodeInto(const uint8_t* const data,
if (dec == NULL) { if (dec == NULL) {
return VP8_STATUS_OUT_OF_MEMORY; return VP8_STATUS_OUT_OF_MEMORY;
} }
dec->alpha_data = headers.alpha_data; dec->alpha_data_ = headers.alpha_data;
dec->alpha_data_size = headers.alpha_data_size; dec->alpha_data_size_ = headers.alpha_data_size;
// Decode bitstream header, update io->width/io->height. // Decode bitstream header, update io->width/io->height.
if (!VP8GetHeaders(dec, &io)) { if (!VP8GetHeaders(dec, &io)) {
status = dec->status; // An error occurred. Grab error status. status = dec->status_; // An error occurred. Grab error status.
} else { } else {
// Allocate/check output buffers. // Allocate/check output buffers.
status = WebPAllocateDecBuffer(io.width, io.height, params->options, status = WebPAllocateDecBuffer(io.width, io.height, params->options,
params->output); params->output);
if (status == VP8_STATUS_OK) { // Decode if (status == VP8_STATUS_OK) { // Decode
// This change must be done before calling VP8Decode() // This change must be done before calling VP8Decode()
dec->mt_method = VP8GetThreadMethod(params->options, &headers, dec->mt_method_ = VP8GetThreadMethod(params->options, &headers,
io.width, io.height); io.width, io.height);
VP8InitDithering(params->options, dec); VP8InitDithering(params->options, dec);
if (!VP8Decode(dec, &io)) { if (!VP8Decode(dec, &io)) {
status = dec->status; status = dec->status_;
} }
} }
} }
@ -504,14 +496,14 @@ WEBP_NODISCARD static VP8StatusCode DecodeInto(const uint8_t* const data,
return VP8_STATUS_OUT_OF_MEMORY; return VP8_STATUS_OUT_OF_MEMORY;
} }
if (!VP8LDecodeHeader(dec, &io)) { if (!VP8LDecodeHeader(dec, &io)) {
status = dec->status; // An error occurred. Grab error status. status = dec->status_; // An error occurred. Grab error status.
} else { } else {
// Allocate/check output buffers. // Allocate/check output buffers.
status = WebPAllocateDecBuffer(io.width, io.height, params->options, status = WebPAllocateDecBuffer(io.width, io.height, params->options,
params->output); params->output);
if (status == VP8_STATUS_OK) { // Decode if (status == VP8_STATUS_OK) { // Decode
if (!VP8LDecodeImage(dec)) { if (!VP8LDecodeImage(dec)) {
status = dec->status; status = dec->status_;
} }
} }
} }
@ -531,16 +523,17 @@ WEBP_NODISCARD static VP8StatusCode DecodeInto(const uint8_t* const data,
} }
// Helpers // Helpers
WEBP_NODISCARD static uint8_t* DecodeIntoRGBABuffer(WEBP_CSP_MODE colorspace, static uint8_t* DecodeIntoRGBABuffer(WEBP_CSP_MODE colorspace,
const uint8_t* const data, const uint8_t* const data,
size_t data_size, size_t data_size,
uint8_t* const rgba, uint8_t* const rgba,
int stride, size_t size) { int stride, size_t size) {
WebPDecParams params; WebPDecParams params;
WebPDecBuffer buf; WebPDecBuffer buf;
if (rgba == NULL || !WebPInitDecBuffer(&buf)) { if (rgba == NULL) {
return NULL; return NULL;
} }
WebPInitDecBuffer(&buf);
WebPResetDecParams(&params); WebPResetDecParams(&params);
params.output = &buf; params.output = &buf;
buf.colorspace = colorspace; buf.colorspace = colorspace;
@ -585,7 +578,8 @@ uint8_t* WebPDecodeYUVInto(const uint8_t* data, size_t data_size,
uint8_t* v, size_t v_size, int v_stride) { uint8_t* v, size_t v_size, int v_stride) {
WebPDecParams params; WebPDecParams params;
WebPDecBuffer output; WebPDecBuffer output;
if (luma == NULL || !WebPInitDecBuffer(&output)) return NULL; if (luma == NULL) return NULL;
WebPInitDecBuffer(&output);
WebPResetDecParams(&params); WebPResetDecParams(&params);
params.output = &output; params.output = &output;
output.colorspace = MODE_YUV; output.colorspace = MODE_YUV;
@ -607,17 +601,13 @@ uint8_t* WebPDecodeYUVInto(const uint8_t* data, size_t data_size,
//------------------------------------------------------------------------------ //------------------------------------------------------------------------------
WEBP_NODISCARD static uint8_t* Decode(WEBP_CSP_MODE mode, static uint8_t* Decode(WEBP_CSP_MODE mode, const uint8_t* const data,
const uint8_t* const data, size_t data_size, int* const width, int* const height,
size_t data_size, int* const width, WebPDecBuffer* const keep_info) {
int* const height,
WebPDecBuffer* const keep_info) {
WebPDecParams params; WebPDecParams params;
WebPDecBuffer output; WebPDecBuffer output;
if (!WebPInitDecBuffer(&output)) { WebPInitDecBuffer(&output);
return NULL;
}
WebPResetDecParams(&params); WebPResetDecParams(&params);
params.output = &output; params.output = &output;
output.colorspace = mode; output.colorspace = mode;
@ -743,64 +733,7 @@ int WebPInitDecoderConfigInternal(WebPDecoderConfig* config,
} }
memset(config, 0, sizeof(*config)); memset(config, 0, sizeof(*config));
DefaultFeatures(&config->input); DefaultFeatures(&config->input);
if (!WebPInitDecBuffer(&config->output)) { WebPInitDecBuffer(&config->output);
return 0;
}
return 1;
}
static int WebPCheckCropDimensionsBasic(int x, int y, int w, int h) {
return !(x < 0 || y < 0 || w <= 0 || h <= 0);
}
int WebPValidateDecoderConfig(const WebPDecoderConfig* config) {
const WebPDecoderOptions* options;
if (config == NULL) return 0;
if (!IsValidColorspace(config->output.colorspace)) {
return 0;
}
options = &config->options;
// bypass_filtering, no_fancy_upsampling, use_cropping, use_scaling,
// use_threads, flip can be any integer and are interpreted as boolean.
// Check for cropping.
if (options->use_cropping && !WebPCheckCropDimensionsBasic(
options->crop_left, options->crop_top,
options->crop_width, options->crop_height)) {
return 0;
}
// Check for scaling.
if (options->use_scaling &&
(options->scaled_width < 0 || options->scaled_height < 0 ||
(options->scaled_width == 0 && options->scaled_height == 0))) {
return 0;
}
// In case the WebPBitstreamFeatures has been filled in, check further.
if (config->input.width > 0 || config->input.height > 0) {
int scaled_width = options->scaled_width;
int scaled_height = options->scaled_height;
if (options->use_cropping &&
!WebPCheckCropDimensions(config->input.width, config->input.height,
options->crop_left, options->crop_top,
options->crop_width, options->crop_height)) {
return 0;
}
if (options->use_scaling && !WebPRescalerGetScaledDimensions(
config->input.width, config->input.height,
&scaled_width, &scaled_height)) {
return 0;
}
}
// Check for dithering.
if (options->dithering_strength < 0 || options->dithering_strength > 100 ||
options->alpha_dithering_strength < 0 ||
options->alpha_dithering_strength > 100) {
return 0;
}
return 1; return 1;
} }
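
For reference, a hedged usage sketch of the public decoding API exercising the checks above. WebPValidateDecoderConfig() is the entry point added on the 'main' side of this compare; the 64x64 crop values are arbitrary example inputs. Link against libwebp.

/* Usage sketch (not from the diff): decode with cropping enabled and let
 * WebPValidateDecoderConfig() reject bad option combinations up front. */
#include <stdio.h>
#include "webp/decode.h"

static int DecodeTopLeftTile(const uint8_t* data, size_t data_size) {
  WebPDecoderConfig config;
  if (!WebPInitDecoderConfig(&config)) return 0;   /* also checks ABI version */
  config.output.colorspace = MODE_RGBA;
  config.options.use_cropping = 1;
  config.options.crop_left = 0;
  config.options.crop_top = 0;
  config.options.crop_width = 64;
  config.options.crop_height = 64;
  if (!WebPValidateDecoderConfig(&config)) {
    fprintf(stderr, "invalid decoder options\n");
    return 0;
  }
  if (WebPDecode(data, data_size, &config) != VP8_STATUS_OK) return 0;
  /* config.output.u.RGBA now holds the cropped 64x64 RGBA pixels. */
  WebPFreeDecBuffer(&config.output);
  return 1;
}
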
@ -839,9 +772,7 @@ VP8StatusCode WebPDecode(const uint8_t* data, size_t data_size,
if (WebPAvoidSlowMemory(params.output, &config->input)) { if (WebPAvoidSlowMemory(params.output, &config->input)) {
// decoding to slow memory: use a temporary in-mem buffer to decode into. // decoding to slow memory: use a temporary in-mem buffer to decode into.
WebPDecBuffer in_mem_buffer; WebPDecBuffer in_mem_buffer;
if (!WebPInitDecBuffer(&in_mem_buffer)) { WebPInitDecBuffer(&in_mem_buffer);
return VP8_STATUS_INVALID_PARAM;
}
in_mem_buffer.colorspace = config->output.colorspace; in_mem_buffer.colorspace = config->output.colorspace;
in_mem_buffer.width = config->input.width; in_mem_buffer.width = config->input.width;
in_mem_buffer.height = config->input.height; in_mem_buffer.height = config->input.height;
@ -863,8 +794,8 @@ VP8StatusCode WebPDecode(const uint8_t* data, size_t data_size,
int WebPCheckCropDimensions(int image_width, int image_height, int WebPCheckCropDimensions(int image_width, int image_height,
int x, int y, int w, int h) { int x, int y, int w, int h) {
return WebPCheckCropDimensionsBasic(x, y, w, h) && return !(x < 0 || y < 0 || w <= 0 || h <= 0 ||
!(x >= image_width || w > image_width || w > image_width - x || x >= image_width || w > image_width || w > image_width - x ||
y >= image_height || h > image_height || h > image_height - y); y >= image_height || h > image_height || h > image_height - y);
} }

View File

@ -20,7 +20,6 @@ extern "C" {
#include "src/utils/rescaler_utils.h" #include "src/utils/rescaler_utils.h"
#include "src/dec/vp8_dec.h" #include "src/dec/vp8_dec.h"
#include "src/webp/decode.h"
//------------------------------------------------------------------------------ //------------------------------------------------------------------------------
// WebPDecParams: Decoding output parameters. Transient internal object. // WebPDecParams: Decoding output parameters. Transient internal object.
@ -88,9 +87,8 @@ void WebPInitCustomIo(WebPDecParams* const params, VP8Io* const io);
// Setup crop_xxx fields, mb_w and mb_h in io. 'src_colorspace' refers // Setup crop_xxx fields, mb_w and mb_h in io. 'src_colorspace' refers
// to the *compressed* format, not the output one. // to the *compressed* format, not the output one.
WEBP_NODISCARD int WebPIoInitFromOptions( int WebPIoInitFromOptions(const WebPDecoderOptions* const options,
const WebPDecoderOptions* const options, VP8Io* const io, VP8Io* const io, WEBP_CSP_MODE src_colorspace);
WEBP_CSP_MODE src_colorspace);
//------------------------------------------------------------------------------ //------------------------------------------------------------------------------
// Internal functions regarding WebPDecBuffer memory (in buffer.c). // Internal functions regarding WebPDecBuffer memory (in buffer.c).

View File

@ -13,6 +13,6 @@ noinst_HEADERS =
noinst_HEADERS += ../webp/format_constants.h noinst_HEADERS += ../webp/format_constants.h
libwebpdemux_la_LIBADD = ../libwebp.la libwebpdemux_la_LIBADD = ../libwebp.la
libwebpdemux_la_LDFLAGS = -no-undefined -version-info 2:16:0 libwebpdemux_la_LDFLAGS = -no-undefined -version-info 2:13:0
libwebpdemuxincludedir = $(includedir)/webp libwebpdemuxincludedir = $(includedir)/webp
pkgconfig_DATA = libwebpdemux.pc pkgconfig_DATA = libwebpdemux.pc

View File

@ -20,7 +20,6 @@
#include "src/utils/utils.h" #include "src/utils/utils.h"
#include "src/webp/decode.h" #include "src/webp/decode.h"
#include "src/webp/demux.h" #include "src/webp/demux.h"
#include "src/webp/types.h"
#define NUM_CHANNELS 4 #define NUM_CHANNELS 4
@ -39,18 +38,18 @@ static void BlendPixelRowPremult(uint32_t* const src, const uint32_t* const dst,
int num_pixels); int num_pixels);
struct WebPAnimDecoder { struct WebPAnimDecoder {
WebPDemuxer* demux; // Demuxer created from given WebP bitstream. WebPDemuxer* demux_; // Demuxer created from given WebP bitstream.
WebPDecoderConfig config; // Decoder config. WebPDecoderConfig config_; // Decoder config.
// Note: we use a pointer to a function blending multiple pixels at a time to // Note: we use a pointer to a function blending multiple pixels at a time to
// allow possible inlining of per-pixel blending function. // allow possible inlining of per-pixel blending function.
BlendRowFunc blend_func; // Pointer to the chosen blend row function. BlendRowFunc blend_func_; // Pointer to the chosen blend row function.
WebPAnimInfo info; // Global info about the animation. WebPAnimInfo info_; // Global info about the animation.
uint8_t* curr_frame; // Current canvas (not disposed). uint8_t* curr_frame_; // Current canvas (not disposed).
uint8_t* prev_frame_disposed; // Previous canvas (properly disposed). uint8_t* prev_frame_disposed_; // Previous canvas (properly disposed).
int prev_frame_timestamp; // Previous frame timestamp (milliseconds). int prev_frame_timestamp_; // Previous frame timestamp (milliseconds).
WebPIterator prev_iter; // Iterator object for previous frame. WebPIterator prev_iter_; // Iterator object for previous frame.
int prev_frame_was_keyframe; // True if previous frame was a keyframe. int prev_frame_was_keyframe_; // True if previous frame was a keyframe.
int next_frame; // Index of the next frame to be decoded int next_frame_; // Index of the next frame to be decoded
// (starting from 1). // (starting from 1).
}; };
@ -69,11 +68,10 @@ int WebPAnimDecoderOptionsInitInternal(WebPAnimDecoderOptions* dec_options,
return 1; return 1;
} }
WEBP_NODISCARD static int ApplyDecoderOptions( static int ApplyDecoderOptions(const WebPAnimDecoderOptions* const dec_options,
const WebPAnimDecoderOptions* const dec_options, WebPAnimDecoder* const dec) {
WebPAnimDecoder* const dec) {
WEBP_CSP_MODE mode; WEBP_CSP_MODE mode;
WebPDecoderConfig* config = &dec->config; WebPDecoderConfig* config = &dec->config_;
assert(dec_options != NULL); assert(dec_options != NULL);
mode = dec_options->color_mode; mode = dec_options->color_mode;
@ -81,12 +79,10 @@ WEBP_NODISCARD static int ApplyDecoderOptions(
mode != MODE_rgbA && mode != MODE_bgrA) { mode != MODE_rgbA && mode != MODE_bgrA) {
return 0; return 0;
} }
dec->blend_func = (mode == MODE_RGBA || mode == MODE_BGRA) dec->blend_func_ = (mode == MODE_RGBA || mode == MODE_BGRA)
? &BlendPixelRowNonPremult ? &BlendPixelRowNonPremult
: &BlendPixelRowPremult; : &BlendPixelRowPremult;
if (!WebPInitDecoderConfig(config)) { WebPInitDecoderConfig(config);
return 0;
}
config->output.colorspace = mode; config->output.colorspace = mode;
config->output.is_external_memory = 1; config->output.is_external_memory = 1;
config->options.use_threads = dec_options->use_threads; config->options.use_threads = dec_options->use_threads;
@ -123,22 +119,22 @@ WebPAnimDecoder* WebPAnimDecoderNewInternal(
} }
if (!ApplyDecoderOptions(&options, dec)) goto Error; if (!ApplyDecoderOptions(&options, dec)) goto Error;
dec->demux = WebPDemux(webp_data); dec->demux_ = WebPDemux(webp_data);
if (dec->demux == NULL) goto Error; if (dec->demux_ == NULL) goto Error;
dec->info.canvas_width = WebPDemuxGetI(dec->demux, WEBP_FF_CANVAS_WIDTH); dec->info_.canvas_width = WebPDemuxGetI(dec->demux_, WEBP_FF_CANVAS_WIDTH);
dec->info.canvas_height = WebPDemuxGetI(dec->demux, WEBP_FF_CANVAS_HEIGHT); dec->info_.canvas_height = WebPDemuxGetI(dec->demux_, WEBP_FF_CANVAS_HEIGHT);
dec->info.loop_count = WebPDemuxGetI(dec->demux, WEBP_FF_LOOP_COUNT); dec->info_.loop_count = WebPDemuxGetI(dec->demux_, WEBP_FF_LOOP_COUNT);
dec->info.bgcolor = WebPDemuxGetI(dec->demux, WEBP_FF_BACKGROUND_COLOR); dec->info_.bgcolor = WebPDemuxGetI(dec->demux_, WEBP_FF_BACKGROUND_COLOR);
dec->info.frame_count = WebPDemuxGetI(dec->demux, WEBP_FF_FRAME_COUNT); dec->info_.frame_count = WebPDemuxGetI(dec->demux_, WEBP_FF_FRAME_COUNT);
// Note: calloc() because we fill frame with zeroes as well. // Note: calloc() because we fill frame with zeroes as well.
dec->curr_frame = (uint8_t*)WebPSafeCalloc( dec->curr_frame_ = (uint8_t*)WebPSafeCalloc(
dec->info.canvas_width * NUM_CHANNELS, dec->info.canvas_height); dec->info_.canvas_width * NUM_CHANNELS, dec->info_.canvas_height);
if (dec->curr_frame == NULL) goto Error; if (dec->curr_frame_ == NULL) goto Error;
dec->prev_frame_disposed = (uint8_t*)WebPSafeCalloc( dec->prev_frame_disposed_ = (uint8_t*)WebPSafeCalloc(
dec->info.canvas_width * NUM_CHANNELS, dec->info.canvas_height); dec->info_.canvas_width * NUM_CHANNELS, dec->info_.canvas_height);
if (dec->prev_frame_disposed == NULL) goto Error; if (dec->prev_frame_disposed_ == NULL) goto Error;
WebPAnimDecoderReset(dec); WebPAnimDecoderReset(dec);
return dec; return dec;
@ -150,7 +146,7 @@ WebPAnimDecoder* WebPAnimDecoderNewInternal(
int WebPAnimDecoderGetInfo(const WebPAnimDecoder* dec, WebPAnimInfo* info) { int WebPAnimDecoderGetInfo(const WebPAnimDecoder* dec, WebPAnimInfo* info) {
if (dec == NULL || info == NULL) return 0; if (dec == NULL || info == NULL) return 0;
*info = dec->info; *info = dec->info_;
return 1; return 1;
} }
@ -161,8 +157,8 @@ static int IsFullFrame(int width, int height, int canvas_width,
} }
// Clear the canvas to transparent. // Clear the canvas to transparent.
WEBP_NODISCARD static int ZeroFillCanvas(uint8_t* buf, uint32_t canvas_width, static int ZeroFillCanvas(uint8_t* buf, uint32_t canvas_width,
uint32_t canvas_height) { uint32_t canvas_height) {
const uint64_t size = const uint64_t size =
(uint64_t)canvas_width * canvas_height * NUM_CHANNELS * sizeof(*buf); (uint64_t)canvas_width * canvas_height * NUM_CHANNELS * sizeof(*buf);
if (!CheckSizeOverflow(size)) return 0; if (!CheckSizeOverflow(size)) return 0;
@ -183,8 +179,8 @@ static void ZeroFillFrameRect(uint8_t* buf, int buf_stride, int x_offset,
} }
// Copy width * height pixels from 'src' to 'dst'. // Copy width * height pixels from 'src' to 'dst'.
WEBP_NODISCARD static int CopyCanvas(const uint8_t* src, uint8_t* dst, static int CopyCanvas(const uint8_t* src, uint8_t* dst,
uint32_t width, uint32_t height) { uint32_t width, uint32_t height) {
const uint64_t size = (uint64_t)width * height * NUM_CHANNELS; const uint64_t size = (uint64_t)width * height * NUM_CHANNELS;
if (!CheckSizeOverflow(size)) return 0; if (!CheckSizeOverflow(size)) return 0;
assert(src != NULL && dst != NULL); assert(src != NULL && dst != NULL);
@ -338,25 +334,25 @@ int WebPAnimDecoderGetNext(WebPAnimDecoder* dec,
if (dec == NULL || buf_ptr == NULL || timestamp_ptr == NULL) return 0; if (dec == NULL || buf_ptr == NULL || timestamp_ptr == NULL) return 0;
if (!WebPAnimDecoderHasMoreFrames(dec)) return 0; if (!WebPAnimDecoderHasMoreFrames(dec)) return 0;
width = dec->info.canvas_width; width = dec->info_.canvas_width;
height = dec->info.canvas_height; height = dec->info_.canvas_height;
blend_row = dec->blend_func; blend_row = dec->blend_func_;
// Get compressed frame. // Get compressed frame.
if (!WebPDemuxGetFrame(dec->demux, dec->next_frame, &iter)) { if (!WebPDemuxGetFrame(dec->demux_, dec->next_frame_, &iter)) {
return 0; return 0;
} }
timestamp = dec->prev_frame_timestamp + iter.duration; timestamp = dec->prev_frame_timestamp_ + iter.duration;
// Initialize. // Initialize.
is_key_frame = IsKeyFrame(&iter, &dec->prev_iter, is_key_frame = IsKeyFrame(&iter, &dec->prev_iter_,
dec->prev_frame_was_keyframe, width, height); dec->prev_frame_was_keyframe_, width, height);
if (is_key_frame) { if (is_key_frame) {
if (!ZeroFillCanvas(dec->curr_frame, width, height)) { if (!ZeroFillCanvas(dec->curr_frame_, width, height)) {
goto Error; goto Error;
} }
} else { } else {
if (!CopyCanvas(dec->prev_frame_disposed, dec->curr_frame, if (!CopyCanvas(dec->prev_frame_disposed_, dec->curr_frame_,
width, height)) { width, height)) {
goto Error; goto Error;
} }
@ -370,12 +366,12 @@ int WebPAnimDecoderGetNext(WebPAnimDecoder* dec,
const uint64_t out_offset = (uint64_t)iter.y_offset * stride + const uint64_t out_offset = (uint64_t)iter.y_offset * stride +
(uint64_t)iter.x_offset * NUM_CHANNELS; // 53b (uint64_t)iter.x_offset * NUM_CHANNELS; // 53b
const uint64_t size = (uint64_t)iter.height * stride; // at most 25 + 27b const uint64_t size = (uint64_t)iter.height * stride; // at most 25 + 27b
WebPDecoderConfig* const config = &dec->config; WebPDecoderConfig* const config = &dec->config_;
WebPRGBABuffer* const buf = &config->output.u.RGBA; WebPRGBABuffer* const buf = &config->output.u.RGBA;
if ((size_t)size != size) goto Error; if ((size_t)size != size) goto Error;
buf->stride = (int)stride; buf->stride = (int)stride;
buf->size = (size_t)size; buf->size = (size_t)size;
buf->rgba = dec->curr_frame + out_offset; buf->rgba = dec->curr_frame_ + out_offset;
if (WebPDecode(in, in_size, config) != VP8_STATUS_OK) { if (WebPDecode(in, in_size, config) != VP8_STATUS_OK) {
goto Error; goto Error;
@ -388,18 +384,18 @@ int WebPAnimDecoderGetNext(WebPAnimDecoder* dec,
// that pixel in the previous frame if the blending method is WEBP_MUX_BLEND. // that pixel in the previous frame if the blending method is WEBP_MUX_BLEND.
if (iter.frame_num > 1 && iter.blend_method == WEBP_MUX_BLEND && if (iter.frame_num > 1 && iter.blend_method == WEBP_MUX_BLEND &&
!is_key_frame) { !is_key_frame) {
if (dec->prev_iter.dispose_method == WEBP_MUX_DISPOSE_NONE) { if (dec->prev_iter_.dispose_method == WEBP_MUX_DISPOSE_NONE) {
int y; int y;
// Blend transparent pixels with pixels in previous canvas. // Blend transparent pixels with pixels in previous canvas.
for (y = 0; y < iter.height; ++y) { for (y = 0; y < iter.height; ++y) {
const size_t offset = const size_t offset =
(iter.y_offset + y) * width + iter.x_offset; (iter.y_offset + y) * width + iter.x_offset;
blend_row((uint32_t*)dec->curr_frame + offset, blend_row((uint32_t*)dec->curr_frame_ + offset,
(uint32_t*)dec->prev_frame_disposed + offset, iter.width); (uint32_t*)dec->prev_frame_disposed_ + offset, iter.width);
} }
} else { } else {
int y; int y;
assert(dec->prev_iter.dispose_method == WEBP_MUX_DISPOSE_BACKGROUND); assert(dec->prev_iter_.dispose_method == WEBP_MUX_DISPOSE_BACKGROUND);
// We need to blend a transparent pixel with its value just after // We need to blend a transparent pixel with its value just after
// initialization. That is, blend it with: // initialization. That is, blend it with:
// * Fully transparent pixel if it belongs to prevRect <-- No-op. // * Fully transparent pixel if it belongs to prevRect <-- No-op.
@ -407,39 +403,37 @@ int WebPAnimDecoderGetNext(WebPAnimDecoder* dec,
for (y = 0; y < iter.height; ++y) { for (y = 0; y < iter.height; ++y) {
const int canvas_y = iter.y_offset + y; const int canvas_y = iter.y_offset + y;
int left1, width1, left2, width2; int left1, width1, left2, width2;
FindBlendRangeAtRow(&iter, &dec->prev_iter, canvas_y, &left1, &width1, FindBlendRangeAtRow(&iter, &dec->prev_iter_, canvas_y, &left1, &width1,
&left2, &width2); &left2, &width2);
if (width1 > 0) { if (width1 > 0) {
const size_t offset1 = canvas_y * width + left1; const size_t offset1 = canvas_y * width + left1;
blend_row((uint32_t*)dec->curr_frame + offset1, blend_row((uint32_t*)dec->curr_frame_ + offset1,
(uint32_t*)dec->prev_frame_disposed + offset1, width1); (uint32_t*)dec->prev_frame_disposed_ + offset1, width1);
} }
if (width2 > 0) { if (width2 > 0) {
const size_t offset2 = canvas_y * width + left2; const size_t offset2 = canvas_y * width + left2;
blend_row((uint32_t*)dec->curr_frame + offset2, blend_row((uint32_t*)dec->curr_frame_ + offset2,
(uint32_t*)dec->prev_frame_disposed + offset2, width2); (uint32_t*)dec->prev_frame_disposed_ + offset2, width2);
} }
} }
} }
} }
// Update info of the previous frame and dispose it for the next iteration. // Update info of the previous frame and dispose it for the next iteration.
dec->prev_frame_timestamp = timestamp; dec->prev_frame_timestamp_ = timestamp;
WebPDemuxReleaseIterator(&dec->prev_iter); WebPDemuxReleaseIterator(&dec->prev_iter_);
dec->prev_iter = iter; dec->prev_iter_ = iter;
dec->prev_frame_was_keyframe = is_key_frame; dec->prev_frame_was_keyframe_ = is_key_frame;
if (!CopyCanvas(dec->curr_frame, dec->prev_frame_disposed, width, height)) { CopyCanvas(dec->curr_frame_, dec->prev_frame_disposed_, width, height);
goto Error; if (dec->prev_iter_.dispose_method == WEBP_MUX_DISPOSE_BACKGROUND) {
ZeroFillFrameRect(dec->prev_frame_disposed_, width * NUM_CHANNELS,
dec->prev_iter_.x_offset, dec->prev_iter_.y_offset,
dec->prev_iter_.width, dec->prev_iter_.height);
} }
if (dec->prev_iter.dispose_method == WEBP_MUX_DISPOSE_BACKGROUND) { ++dec->next_frame_;
ZeroFillFrameRect(dec->prev_frame_disposed, width * NUM_CHANNELS,
dec->prev_iter.x_offset, dec->prev_iter.y_offset,
dec->prev_iter.width, dec->prev_iter.height);
}
++dec->next_frame;
// All OK, fill in the values. // All OK, fill in the values.
*buf_ptr = dec->curr_frame; *buf_ptr = dec->curr_frame_;
*timestamp_ptr = timestamp; *timestamp_ptr = timestamp;
return 1; return 1;
@ -450,30 +444,30 @@ int WebPAnimDecoderGetNext(WebPAnimDecoder* dec,
int WebPAnimDecoderHasMoreFrames(const WebPAnimDecoder* dec) { int WebPAnimDecoderHasMoreFrames(const WebPAnimDecoder* dec) {
if (dec == NULL) return 0; if (dec == NULL) return 0;
return (dec->next_frame <= (int)dec->info.frame_count); return (dec->next_frame_ <= (int)dec->info_.frame_count);
} }
void WebPAnimDecoderReset(WebPAnimDecoder* dec) { void WebPAnimDecoderReset(WebPAnimDecoder* dec) {
if (dec != NULL) { if (dec != NULL) {
dec->prev_frame_timestamp = 0; dec->prev_frame_timestamp_ = 0;
WebPDemuxReleaseIterator(&dec->prev_iter); WebPDemuxReleaseIterator(&dec->prev_iter_);
memset(&dec->prev_iter, 0, sizeof(dec->prev_iter)); memset(&dec->prev_iter_, 0, sizeof(dec->prev_iter_));
dec->prev_frame_was_keyframe = 0; dec->prev_frame_was_keyframe_ = 0;
dec->next_frame = 1; dec->next_frame_ = 1;
} }
} }
const WebPDemuxer* WebPAnimDecoderGetDemuxer(const WebPAnimDecoder* dec) { const WebPDemuxer* WebPAnimDecoderGetDemuxer(const WebPAnimDecoder* dec) {
if (dec == NULL) return NULL; if (dec == NULL) return NULL;
return dec->demux; return dec->demux_;
} }
void WebPAnimDecoderDelete(WebPAnimDecoder* dec) { void WebPAnimDecoderDelete(WebPAnimDecoder* dec) {
if (dec != NULL) { if (dec != NULL) {
WebPDemuxReleaseIterator(&dec->prev_iter); WebPDemuxReleaseIterator(&dec->prev_iter_);
WebPDemuxDelete(dec->demux); WebPDemuxDelete(dec->demux_);
WebPSafeFree(dec->curr_frame); WebPSafeFree(dec->curr_frame_);
WebPSafeFree(dec->prev_frame_disposed); WebPSafeFree(dec->prev_frame_disposed_);
WebPSafeFree(dec); WebPSafeFree(dec);
} }
} }
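
For context, a minimal usage sketch of the public WebPAnimDecoder API whose internals appear above. Passing NULL options is assumed to select the defaults (MODE_RGBA output); error handling is kept to a minimum and the code links against libwebpdemux.

/* Usage sketch (not from the diff): iterate over all frames of an animation. */
#include <stdio.h>
#include "webp/demux.h"

static void PlayAnimation(const uint8_t* file_data, size_t file_size) {
  WebPData webp_data = { file_data, file_size };
  WebPAnimDecoder* const dec = WebPAnimDecoderNew(&webp_data, NULL);
  WebPAnimInfo info;
  if (dec == NULL || !WebPAnimDecoderGetInfo(dec, &info)) {
    WebPAnimDecoderDelete(dec);
    return;
  }
  while (WebPAnimDecoderHasMoreFrames(dec)) {
    uint8_t* frame_rgba;
    int timestamp_ms;
    if (!WebPAnimDecoderGetNext(dec, &frame_rgba, &timestamp_ms)) break;
    printf("frame at %d ms (%ux%u canvas)\n",
           timestamp_ms, info.canvas_width, info.canvas_height);
    /* frame_rgba stays valid until the next GetNext()/Reset()/Delete() call. */
  }
  WebPAnimDecoderDelete(dec);
}
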

View File

@ -24,53 +24,53 @@
#include "src/webp/format_constants.h" #include "src/webp/format_constants.h"
#define DMUX_MAJ_VERSION 1 #define DMUX_MAJ_VERSION 1
#define DMUX_MIN_VERSION 5 #define DMUX_MIN_VERSION 3
#define DMUX_REV_VERSION 0 #define DMUX_REV_VERSION 1
typedef struct { typedef struct {
size_t start; // start location of the data size_t start_; // start location of the data
size_t end; // end location size_t end_; // end location
size_t riff_end; // riff chunk end location, can be > end. size_t riff_end_; // riff chunk end location, can be > end_.
size_t buf_size; // size of the buffer size_t buf_size_; // size of the buffer
const uint8_t* buf; const uint8_t* buf_;
} MemBuffer; } MemBuffer;
typedef struct { typedef struct {
size_t offset; size_t offset_;
size_t size; size_t size_;
} ChunkData; } ChunkData;
typedef struct Frame { typedef struct Frame {
int x_offset, y_offset; int x_offset_, y_offset_;
int width, height; int width_, height_;
int has_alpha; int has_alpha_;
int duration; int duration_;
WebPMuxAnimDispose dispose_method; WebPMuxAnimDispose dispose_method_;
WebPMuxAnimBlend blend_method; WebPMuxAnimBlend blend_method_;
int frame_num; int frame_num_;
int complete; // img_components contains a full image. int complete_; // img_components_ contains a full image.
ChunkData img_components[2]; // 0=VP8{,L} 1=ALPH ChunkData img_components_[2]; // 0=VP8{,L} 1=ALPH
struct Frame* next; struct Frame* next_;
} Frame; } Frame;
typedef struct Chunk { typedef struct Chunk {
ChunkData data; ChunkData data_;
struct Chunk* next; struct Chunk* next_;
} Chunk; } Chunk;
struct WebPDemuxer { struct WebPDemuxer {
MemBuffer mem; MemBuffer mem_;
WebPDemuxState state; WebPDemuxState state_;
int is_ext_format; int is_ext_format_;
uint32_t feature_flags; uint32_t feature_flags_;
int canvas_width, canvas_height; int canvas_width_, canvas_height_;
int loop_count; int loop_count_;
uint32_t bgcolor; uint32_t bgcolor_;
int num_frames; int num_frames_;
Frame* frames; Frame* frames_;
Frame** frames_tail; Frame** frames_tail_;
Chunk* chunks; // non-image chunks Chunk* chunks_; // non-image chunks
Chunk** chunks_tail; Chunk** chunks_tail_;
}; };
typedef enum { typedef enum {
@ -108,10 +108,10 @@ int WebPGetDemuxVersion(void) {
static int RemapMemBuffer(MemBuffer* const mem, static int RemapMemBuffer(MemBuffer* const mem,
const uint8_t* data, size_t size) { const uint8_t* data, size_t size) {
if (size < mem->buf_size) return 0; // can't remap to a shorter buffer! if (size < mem->buf_size_) return 0; // can't remap to a shorter buffer!
mem->buf = data; mem->buf_ = data;
mem->end = mem->buf_size = size; mem->end_ = mem->buf_size_ = size;
return 1; return 1;
} }
@ -123,49 +123,49 @@ static int InitMemBuffer(MemBuffer* const mem,
// Return the remaining data size available in 'mem'. // Return the remaining data size available in 'mem'.
static WEBP_INLINE size_t MemDataSize(const MemBuffer* const mem) { static WEBP_INLINE size_t MemDataSize(const MemBuffer* const mem) {
return (mem->end - mem->start); return (mem->end_ - mem->start_);
} }
// Return true if 'size' exceeds the end of the RIFF chunk. // Return true if 'size' exceeds the end of the RIFF chunk.
static WEBP_INLINE int SizeIsInvalid(const MemBuffer* const mem, size_t size) { static WEBP_INLINE int SizeIsInvalid(const MemBuffer* const mem, size_t size) {
return (size > mem->riff_end - mem->start); return (size > mem->riff_end_ - mem->start_);
} }
static WEBP_INLINE void Skip(MemBuffer* const mem, size_t size) { static WEBP_INLINE void Skip(MemBuffer* const mem, size_t size) {
mem->start += size; mem->start_ += size;
} }
static WEBP_INLINE void Rewind(MemBuffer* const mem, size_t size) { static WEBP_INLINE void Rewind(MemBuffer* const mem, size_t size) {
mem->start -= size; mem->start_ -= size;
} }
static WEBP_INLINE const uint8_t* GetBuffer(MemBuffer* const mem) { static WEBP_INLINE const uint8_t* GetBuffer(MemBuffer* const mem) {
return mem->buf + mem->start; return mem->buf_ + mem->start_;
} }
// Read from 'mem' and skip the read bytes. // Read from 'mem' and skip the read bytes.
static WEBP_INLINE uint8_t ReadByte(MemBuffer* const mem) { static WEBP_INLINE uint8_t ReadByte(MemBuffer* const mem) {
const uint8_t byte = mem->buf[mem->start]; const uint8_t byte = mem->buf_[mem->start_];
Skip(mem, 1); Skip(mem, 1);
return byte; return byte;
} }
static WEBP_INLINE int ReadLE16s(MemBuffer* const mem) { static WEBP_INLINE int ReadLE16s(MemBuffer* const mem) {
const uint8_t* const data = mem->buf + mem->start; const uint8_t* const data = mem->buf_ + mem->start_;
const int val = GetLE16(data); const int val = GetLE16(data);
Skip(mem, 2); Skip(mem, 2);
return val; return val;
} }
static WEBP_INLINE int ReadLE24s(MemBuffer* const mem) { static WEBP_INLINE int ReadLE24s(MemBuffer* const mem) {
const uint8_t* const data = mem->buf + mem->start; const uint8_t* const data = mem->buf_ + mem->start_;
const int val = GetLE24(data); const int val = GetLE24(data);
Skip(mem, 3); Skip(mem, 3);
return val; return val;
} }
static WEBP_INLINE uint32_t ReadLE32(MemBuffer* const mem) { static WEBP_INLINE uint32_t ReadLE32(MemBuffer* const mem) {
const uint8_t* const data = mem->buf + mem->start; const uint8_t* const data = mem->buf_ + mem->start_;
const uint32_t val = GetLE32(data); const uint32_t val = GetLE32(data);
Skip(mem, 4); Skip(mem, 4);
return val; return val;
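
The readers above consume little-endian 16/24/32-bit values from the memory window and then advance the start offset. A standalone sketch of the byte-order part; the 'My' prefix marks these as illustrations, not libwebp's GetLE16/24/32 helpers.

/* Sketch of little-endian reads like the ones above. Illustrative only. */
#include <stdint.h>
#include <stdio.h>

static uint32_t MyGetLE16(const uint8_t* d) { return d[0] | (d[1] << 8); }
static uint32_t MyGetLE24(const uint8_t* d) {
  return MyGetLE16(d) | ((uint32_t)d[2] << 16);
}
static uint32_t MyGetLE32(const uint8_t* d) {
  return MyGetLE16(d) | (MyGetLE16(d + 2) << 16);
}

int main(void) {
  const uint8_t buf[4] = {0x78, 0x56, 0x34, 0x12};
  printf("0x%08x\n", (unsigned)MyGetLE32(buf));  /* prints 0x12345678 */
  return 0;
}
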
@ -175,20 +175,20 @@ static WEBP_INLINE uint32_t ReadLE32(MemBuffer* const mem) {
// Secondary chunk parsing // Secondary chunk parsing
static void AddChunk(WebPDemuxer* const dmux, Chunk* const chunk) { static void AddChunk(WebPDemuxer* const dmux, Chunk* const chunk) {
*dmux->chunks_tail = chunk; *dmux->chunks_tail_ = chunk;
chunk->next = NULL; chunk->next_ = NULL;
dmux->chunks_tail = &chunk->next; dmux->chunks_tail_ = &chunk->next_;
} }
// Add a frame to the end of the list, ensuring the last frame is complete. // Add a frame to the end of the list, ensuring the last frame is complete.
// Returns true on success, false otherwise. // Returns true on success, false otherwise.
static int AddFrame(WebPDemuxer* const dmux, Frame* const frame) { static int AddFrame(WebPDemuxer* const dmux, Frame* const frame) {
const Frame* const last_frame = *dmux->frames_tail; const Frame* const last_frame = *dmux->frames_tail_;
if (last_frame != NULL && !last_frame->complete) return 0; if (last_frame != NULL && !last_frame->complete_) return 0;
*dmux->frames_tail = frame; *dmux->frames_tail_ = frame;
frame->next = NULL; frame->next_ = NULL;
dmux->frames_tail = &frame->next; dmux->frames_tail_ = &frame->next_;
return 1; return 1;
} }
@ -196,13 +196,13 @@ static void SetFrameInfo(size_t start_offset, size_t size,
int frame_num, int complete, int frame_num, int complete,
const WebPBitstreamFeatures* const features, const WebPBitstreamFeatures* const features,
Frame* const frame) { Frame* const frame) {
frame->img_components[0].offset = start_offset; frame->img_components_[0].offset_ = start_offset;
frame->img_components[0].size = size; frame->img_components_[0].size_ = size;
frame->width = features->width; frame->width_ = features->width;
frame->height = features->height; frame->height_ = features->height;
frame->has_alpha |= features->has_alpha; frame->has_alpha_ |= features->has_alpha;
frame->frame_num = frame_num; frame->frame_num_ = frame_num;
frame->complete = complete; frame->complete_ = complete;
} }
// Store image bearing chunks to 'frame'. 'min_size' is an optional size // Store image bearing chunks to 'frame'. 'min_size' is an optional size
@ -218,7 +218,7 @@ static ParseStatus StoreFrame(int frame_num, uint32_t min_size,
if (done) return PARSE_NEED_MORE_DATA; if (done) return PARSE_NEED_MORE_DATA;
do { do {
const size_t chunk_start_offset = mem->start; const size_t chunk_start_offset = mem->start_;
const uint32_t fourcc = ReadLE32(mem); const uint32_t fourcc = ReadLE32(mem);
const uint32_t payload_size = ReadLE32(mem); const uint32_t payload_size = ReadLE32(mem);
uint32_t payload_size_padded; uint32_t payload_size_padded;
@ -238,10 +238,10 @@ static ParseStatus StoreFrame(int frame_num, uint32_t min_size,
case MKFOURCC('A', 'L', 'P', 'H'): case MKFOURCC('A', 'L', 'P', 'H'):
if (alpha_chunks == 0) { if (alpha_chunks == 0) {
++alpha_chunks; ++alpha_chunks;
frame->img_components[1].offset = chunk_start_offset; frame->img_components_[1].offset_ = chunk_start_offset;
frame->img_components[1].size = chunk_size; frame->img_components_[1].size_ = chunk_size;
frame->has_alpha = 1; frame->has_alpha_ = 1;
frame->frame_num = frame_num; frame->frame_num_ = frame_num;
Skip(mem, payload_available); Skip(mem, payload_available);
} else { } else {
goto Done; goto Done;
@ -256,7 +256,7 @@ static ParseStatus StoreFrame(int frame_num, uint32_t min_size,
// is incomplete. // is incomplete.
WebPBitstreamFeatures features; WebPBitstreamFeatures features;
const VP8StatusCode vp8_status = const VP8StatusCode vp8_status =
WebPGetFeatures(mem->buf + chunk_start_offset, chunk_size, WebPGetFeatures(mem->buf_ + chunk_start_offset, chunk_size,
&features); &features);
if (status == PARSE_NEED_MORE_DATA && if (status == PARSE_NEED_MORE_DATA &&
vp8_status == VP8_STATUS_NOT_ENOUGH_DATA) { vp8_status == VP8_STATUS_NOT_ENOUGH_DATA) {
@ -281,7 +281,7 @@ static ParseStatus StoreFrame(int frame_num, uint32_t min_size,
break; break;
} }
if (mem->start == mem->riff_end) { if (mem->start_ == mem->riff_end_) {
done = 1; done = 1;
} else if (MemDataSize(mem) < CHUNK_HEADER_SIZE) { } else if (MemDataSize(mem) < CHUNK_HEADER_SIZE) {
status = PARSE_NEED_MORE_DATA; status = PARSE_NEED_MORE_DATA;
@ -310,42 +310,42 @@ static ParseStatus NewFrame(const MemBuffer* const mem,
// 'frame_chunk_size' is the previously validated, padded chunk size. // 'frame_chunk_size' is the previously validated, padded chunk size.
static ParseStatus ParseAnimationFrame( static ParseStatus ParseAnimationFrame(
WebPDemuxer* const dmux, uint32_t frame_chunk_size) { WebPDemuxer* const dmux, uint32_t frame_chunk_size) {
const int is_animation = !!(dmux->feature_flags & ANIMATION_FLAG); const int is_animation = !!(dmux->feature_flags_ & ANIMATION_FLAG);
const uint32_t anmf_payload_size = frame_chunk_size - ANMF_CHUNK_SIZE; const uint32_t anmf_payload_size = frame_chunk_size - ANMF_CHUNK_SIZE;
int added_frame = 0; int added_frame = 0;
int bits; int bits;
MemBuffer* const mem = &dmux->mem; MemBuffer* const mem = &dmux->mem_;
Frame* frame; Frame* frame;
size_t start_offset; size_t start_offset;
ParseStatus status = ParseStatus status =
NewFrame(mem, ANMF_CHUNK_SIZE, frame_chunk_size, &frame); NewFrame(mem, ANMF_CHUNK_SIZE, frame_chunk_size, &frame);
if (status != PARSE_OK) return status; if (status != PARSE_OK) return status;
frame->x_offset = 2 * ReadLE24s(mem); frame->x_offset_ = 2 * ReadLE24s(mem);
frame->y_offset = 2 * ReadLE24s(mem); frame->y_offset_ = 2 * ReadLE24s(mem);
frame->width = 1 + ReadLE24s(mem); frame->width_ = 1 + ReadLE24s(mem);
frame->height = 1 + ReadLE24s(mem); frame->height_ = 1 + ReadLE24s(mem);
frame->duration = ReadLE24s(mem); frame->duration_ = ReadLE24s(mem);
bits = ReadByte(mem); bits = ReadByte(mem);
frame->dispose_method = frame->dispose_method_ =
(bits & 1) ? WEBP_MUX_DISPOSE_BACKGROUND : WEBP_MUX_DISPOSE_NONE; (bits & 1) ? WEBP_MUX_DISPOSE_BACKGROUND : WEBP_MUX_DISPOSE_NONE;
frame->blend_method = (bits & 2) ? WEBP_MUX_NO_BLEND : WEBP_MUX_BLEND; frame->blend_method_ = (bits & 2) ? WEBP_MUX_NO_BLEND : WEBP_MUX_BLEND;
if (frame->width * (uint64_t)frame->height >= MAX_IMAGE_AREA) { if (frame->width_ * (uint64_t)frame->height_ >= MAX_IMAGE_AREA) {
WebPSafeFree(frame); WebPSafeFree(frame);
return PARSE_ERROR; return PARSE_ERROR;
} }
// Store a frame only if the animation flag is set and some data for // Store a frame only if the animation flag is set and some data for
// this frame is available. // this frame is available.
start_offset = mem->start; start_offset = mem->start_;
status = StoreFrame(dmux->num_frames + 1, anmf_payload_size, mem, frame); status = StoreFrame(dmux->num_frames_ + 1, anmf_payload_size, mem, frame);
if (status != PARSE_ERROR && mem->start - start_offset > anmf_payload_size) { if (status != PARSE_ERROR && mem->start_ - start_offset > anmf_payload_size) {
status = PARSE_ERROR; status = PARSE_ERROR;
} }
if (status != PARSE_ERROR && is_animation && frame->frame_num > 0) { if (status != PARSE_ERROR && is_animation && frame->frame_num_ > 0) {
added_frame = AddFrame(dmux, frame); added_frame = AddFrame(dmux, frame);
if (added_frame) { if (added_frame) {
++dmux->num_frames; ++dmux->num_frames_;
} else { } else {
status = PARSE_ERROR; status = PARSE_ERROR;
} }
@ -364,8 +364,8 @@ static int StoreChunk(WebPDemuxer* const dmux,
Chunk* const chunk = (Chunk*)WebPSafeCalloc(1ULL, sizeof(*chunk)); Chunk* const chunk = (Chunk*)WebPSafeCalloc(1ULL, sizeof(*chunk));
if (chunk == NULL) return 0; if (chunk == NULL) return 0;
chunk->data.offset = start_offset; chunk->data_.offset_ = start_offset;
chunk->data.size = size; chunk->data_.size_ = size;
AddChunk(dmux, chunk); AddChunk(dmux, chunk);
return 1; return 1;
} }
@ -389,9 +389,9 @@ static ParseStatus ReadHeader(MemBuffer* const mem) {
if (riff_size > MAX_CHUNK_PAYLOAD) return PARSE_ERROR; if (riff_size > MAX_CHUNK_PAYLOAD) return PARSE_ERROR;
// There's no point in reading past the end of the RIFF chunk // There's no point in reading past the end of the RIFF chunk
mem->riff_end = riff_size + CHUNK_HEADER_SIZE; mem->riff_end_ = riff_size + CHUNK_HEADER_SIZE;
if (mem->buf_size > mem->riff_end) { if (mem->buf_size_ > mem->riff_end_) {
mem->buf_size = mem->end = mem->riff_end; mem->buf_size_ = mem->end_ = mem->riff_end_;
} }
Skip(mem, RIFF_HEADER_SIZE); Skip(mem, RIFF_HEADER_SIZE);
@ -400,12 +400,12 @@ static ParseStatus ReadHeader(MemBuffer* const mem) {
static ParseStatus ParseSingleImage(WebPDemuxer* const dmux) { static ParseStatus ParseSingleImage(WebPDemuxer* const dmux) {
const size_t min_size = CHUNK_HEADER_SIZE; const size_t min_size = CHUNK_HEADER_SIZE;
MemBuffer* const mem = &dmux->mem; MemBuffer* const mem = &dmux->mem_;
Frame* frame; Frame* frame;
ParseStatus status; ParseStatus status;
int image_added = 0; int image_added = 0;
if (dmux->frames != NULL) return PARSE_ERROR; if (dmux->frames_ != NULL) return PARSE_ERROR;
if (SizeIsInvalid(mem, min_size)) return PARSE_ERROR; if (SizeIsInvalid(mem, min_size)) return PARSE_ERROR;
if (MemDataSize(mem) < min_size) return PARSE_NEED_MORE_DATA; if (MemDataSize(mem) < min_size) return PARSE_NEED_MORE_DATA;
@ -414,29 +414,29 @@ static ParseStatus ParseSingleImage(WebPDemuxer* const dmux) {
// For the single image case we allow parsing of a partial frame, so no // For the single image case we allow parsing of a partial frame, so no
// minimum size is imposed here. // minimum size is imposed here.
status = StoreFrame(1, 0, &dmux->mem, frame); status = StoreFrame(1, 0, &dmux->mem_, frame);
if (status != PARSE_ERROR) { if (status != PARSE_ERROR) {
const int has_alpha = !!(dmux->feature_flags & ALPHA_FLAG); const int has_alpha = !!(dmux->feature_flags_ & ALPHA_FLAG);
// Clear any alpha when the alpha flag is missing. // Clear any alpha when the alpha flag is missing.
if (!has_alpha && frame->img_components[1].size > 0) { if (!has_alpha && frame->img_components_[1].size_ > 0) {
frame->img_components[1].offset = 0; frame->img_components_[1].offset_ = 0;
frame->img_components[1].size = 0; frame->img_components_[1].size_ = 0;
frame->has_alpha = 0; frame->has_alpha_ = 0;
} }
// Use the frame width/height as the canvas values for non-vp8x files. // Use the frame width/height as the canvas values for non-vp8x files.
// Also, set ALPHA_FLAG if this is a lossless image with alpha. // Also, set ALPHA_FLAG if this is a lossless image with alpha.
if (!dmux->is_ext_format && frame->width > 0 && frame->height > 0) { if (!dmux->is_ext_format_ && frame->width_ > 0 && frame->height_ > 0) {
dmux->state = WEBP_DEMUX_PARSED_HEADER; dmux->state_ = WEBP_DEMUX_PARSED_HEADER;
dmux->canvas_width = frame->width; dmux->canvas_width_ = frame->width_;
dmux->canvas_height = frame->height; dmux->canvas_height_ = frame->height_;
dmux->feature_flags |= frame->has_alpha ? ALPHA_FLAG : 0; dmux->feature_flags_ |= frame->has_alpha_ ? ALPHA_FLAG : 0;
} }
if (!AddFrame(dmux, frame)) { if (!AddFrame(dmux, frame)) {
status = PARSE_ERROR; // last frame was left incomplete status = PARSE_ERROR; // last frame was left incomplete
} else { } else {
image_added = 1; image_added = 1;
dmux->num_frames = 1; dmux->num_frames_ = 1;
} }
} }
@ -445,14 +445,14 @@ static ParseStatus ParseSingleImage(WebPDemuxer* const dmux) {
} }
static ParseStatus ParseVP8XChunks(WebPDemuxer* const dmux) { static ParseStatus ParseVP8XChunks(WebPDemuxer* const dmux) {
const int is_animation = !!(dmux->feature_flags & ANIMATION_FLAG); const int is_animation = !!(dmux->feature_flags_ & ANIMATION_FLAG);
MemBuffer* const mem = &dmux->mem; MemBuffer* const mem = &dmux->mem_;
int anim_chunks = 0; int anim_chunks = 0;
ParseStatus status = PARSE_OK; ParseStatus status = PARSE_OK;
do { do {
int store_chunk = 1; int store_chunk = 1;
const size_t chunk_start_offset = mem->start; const size_t chunk_start_offset = mem->start_;
const uint32_t fourcc = ReadLE32(mem); const uint32_t fourcc = ReadLE32(mem);
const uint32_t chunk_size = ReadLE32(mem); const uint32_t chunk_size = ReadLE32(mem);
uint32_t chunk_size_padded; uint32_t chunk_size_padded;
@ -483,8 +483,8 @@ static ParseStatus ParseVP8XChunks(WebPDemuxer* const dmux) {
status = PARSE_NEED_MORE_DATA; status = PARSE_NEED_MORE_DATA;
} else if (anim_chunks == 0) { } else if (anim_chunks == 0) {
++anim_chunks; ++anim_chunks;
dmux->bgcolor = ReadLE32(mem); dmux->bgcolor_ = ReadLE32(mem);
dmux->loop_count = ReadLE16s(mem); dmux->loop_count_ = ReadLE16s(mem);
Skip(mem, chunk_size_padded - ANIM_CHUNK_SIZE); Skip(mem, chunk_size_padded - ANIM_CHUNK_SIZE);
} else { } else {
store_chunk = 0; store_chunk = 0;
@ -498,15 +498,15 @@ static ParseStatus ParseVP8XChunks(WebPDemuxer* const dmux) {
break; break;
} }
case MKFOURCC('I', 'C', 'C', 'P'): { case MKFOURCC('I', 'C', 'C', 'P'): {
store_chunk = !!(dmux->feature_flags & ICCP_FLAG); store_chunk = !!(dmux->feature_flags_ & ICCP_FLAG);
goto Skip; goto Skip;
} }
case MKFOURCC('E', 'X', 'I', 'F'): { case MKFOURCC('E', 'X', 'I', 'F'): {
store_chunk = !!(dmux->feature_flags & EXIF_FLAG); store_chunk = !!(dmux->feature_flags_ & EXIF_FLAG);
goto Skip; goto Skip;
} }
case MKFOURCC('X', 'M', 'P', ' '): { case MKFOURCC('X', 'M', 'P', ' '): {
store_chunk = !!(dmux->feature_flags & XMP_FLAG); store_chunk = !!(dmux->feature_flags_ & XMP_FLAG);
goto Skip; goto Skip;
} }
Skip: Skip:
@ -527,7 +527,7 @@ static ParseStatus ParseVP8XChunks(WebPDemuxer* const dmux) {
} }
} }
if (mem->start == mem->riff_end) { if (mem->start_ == mem->riff_end_) {
break; break;
} else if (MemDataSize(mem) < CHUNK_HEADER_SIZE) { } else if (MemDataSize(mem) < CHUNK_HEADER_SIZE) {
status = PARSE_NEED_MORE_DATA; status = PARSE_NEED_MORE_DATA;
@ -538,12 +538,12 @@ static ParseStatus ParseVP8XChunks(WebPDemuxer* const dmux) {
} }
static ParseStatus ParseVP8X(WebPDemuxer* const dmux) { static ParseStatus ParseVP8X(WebPDemuxer* const dmux) {
MemBuffer* const mem = &dmux->mem; MemBuffer* const mem = &dmux->mem_;
uint32_t vp8x_size; uint32_t vp8x_size;
if (MemDataSize(mem) < CHUNK_HEADER_SIZE) return PARSE_NEED_MORE_DATA; if (MemDataSize(mem) < CHUNK_HEADER_SIZE) return PARSE_NEED_MORE_DATA;
dmux->is_ext_format = 1; dmux->is_ext_format_ = 1;
Skip(mem, TAG_SIZE); // VP8X Skip(mem, TAG_SIZE); // VP8X
vp8x_size = ReadLE32(mem); vp8x_size = ReadLE32(mem);
if (vp8x_size > MAX_CHUNK_PAYLOAD) return PARSE_ERROR; if (vp8x_size > MAX_CHUNK_PAYLOAD) return PARSE_ERROR;
@ -552,15 +552,15 @@ static ParseStatus ParseVP8X(WebPDemuxer* const dmux) {
if (SizeIsInvalid(mem, vp8x_size)) return PARSE_ERROR; if (SizeIsInvalid(mem, vp8x_size)) return PARSE_ERROR;
if (MemDataSize(mem) < vp8x_size) return PARSE_NEED_MORE_DATA; if (MemDataSize(mem) < vp8x_size) return PARSE_NEED_MORE_DATA;
dmux->feature_flags = ReadByte(mem); dmux->feature_flags_ = ReadByte(mem);
Skip(mem, 3); // Reserved. Skip(mem, 3); // Reserved.
dmux->canvas_width = 1 + ReadLE24s(mem); dmux->canvas_width_ = 1 + ReadLE24s(mem);
dmux->canvas_height = 1 + ReadLE24s(mem); dmux->canvas_height_ = 1 + ReadLE24s(mem);
if (dmux->canvas_width * (uint64_t)dmux->canvas_height >= MAX_IMAGE_AREA) { if (dmux->canvas_width_ * (uint64_t)dmux->canvas_height_ >= MAX_IMAGE_AREA) {
return PARSE_ERROR; // image final dimension is too large return PARSE_ERROR; // image final dimension is too large
} }
Skip(mem, vp8x_size - VP8X_CHUNK_SIZE); // skip any trailing data. Skip(mem, vp8x_size - VP8X_CHUNK_SIZE); // skip any trailing data.
dmux->state = WEBP_DEMUX_PARSED_HEADER; dmux->state_ = WEBP_DEMUX_PARSED_HEADER;
if (SizeIsInvalid(mem, CHUNK_HEADER_SIZE)) return PARSE_ERROR; if (SizeIsInvalid(mem, CHUNK_HEADER_SIZE)) return PARSE_ERROR;
if (MemDataSize(mem) < CHUNK_HEADER_SIZE) return PARSE_NEED_MORE_DATA; if (MemDataSize(mem) < CHUNK_HEADER_SIZE) return PARSE_NEED_MORE_DATA;
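
The VP8X payload parsed above is 10 bytes: one feature-flag byte, three reserved bytes, then the canvas width and height, each stored minus one as a 24-bit little-endian value. A hedged sketch of the same decode outside the demuxer (helper names are made up):

#include <stdint.h>

static int DecodeVp8x(const uint8_t p[10],
                      uint32_t* flags, int* canvas_w, int* canvas_h) {
#define rd24(q) ((q)[0] | ((q)[1] << 8) | ((q)[2] << 16))
  *flags    = p[0];               /* feature flags; p[1..3] are reserved */
  *canvas_w = 1 + rd24(p + 4);
  *canvas_h = 1 + rd24(p + 7);
#undef rd24
  /* Mirror the demuxer's overflow guard on the total pixel count
   * (a cap comparable to MAX_IMAGE_AREA). */
  return ((uint64_t)*canvas_w * (uint64_t)*canvas_h < (1ULL << 32));
}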
@ -572,13 +572,13 @@ static ParseStatus ParseVP8X(WebPDemuxer* const dmux) {
// Format validation // Format validation
static int IsValidSimpleFormat(const WebPDemuxer* const dmux) { static int IsValidSimpleFormat(const WebPDemuxer* const dmux) {
const Frame* const frame = dmux->frames; const Frame* const frame = dmux->frames_;
if (dmux->state == WEBP_DEMUX_PARSING_HEADER) return 1; if (dmux->state_ == WEBP_DEMUX_PARSING_HEADER) return 1;
if (dmux->canvas_width <= 0 || dmux->canvas_height <= 0) return 0; if (dmux->canvas_width_ <= 0 || dmux->canvas_height_ <= 0) return 0;
if (dmux->state == WEBP_DEMUX_DONE && frame == NULL) return 0; if (dmux->state_ == WEBP_DEMUX_DONE && frame == NULL) return 0;
if (frame->width <= 0 || frame->height <= 0) return 0; if (frame->width_ <= 0 || frame->height_ <= 0) return 0;
return 1; return 1;
} }
@ -587,65 +587,65 @@ static int IsValidSimpleFormat(const WebPDemuxer* const dmux) {
static int CheckFrameBounds(const Frame* const frame, int exact, static int CheckFrameBounds(const Frame* const frame, int exact,
int canvas_width, int canvas_height) { int canvas_width, int canvas_height) {
if (exact) { if (exact) {
if (frame->x_offset != 0 || frame->y_offset != 0) { if (frame->x_offset_ != 0 || frame->y_offset_ != 0) {
return 0; return 0;
} }
if (frame->width != canvas_width || frame->height != canvas_height) { if (frame->width_ != canvas_width || frame->height_ != canvas_height) {
return 0; return 0;
} }
} else { } else {
if (frame->x_offset < 0 || frame->y_offset < 0) return 0; if (frame->x_offset_ < 0 || frame->y_offset_ < 0) return 0;
if (frame->width + frame->x_offset > canvas_width) return 0; if (frame->width_ + frame->x_offset_ > canvas_width) return 0;
if (frame->height + frame->y_offset > canvas_height) return 0; if (frame->height_ + frame->y_offset_ > canvas_height) return 0;
} }
return 1; return 1;
} }
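
As a quick sanity check of the bounds logic above: on a 100x100 canvas, an animated frame placed at (96, 96) with size 8x8 is rejected because 96 + 8 > 100, while a still image must match the canvas exactly. The same predicate written over plain ints, for illustration only:

static int FrameFits(int x, int y, int w, int h, int canvas_w, int canvas_h) {
  if (x < 0 || y < 0) return 0;
  if (w + x > canvas_w) return 0;
  if (h + y > canvas_h) return 0;
  return 1;
}
/* FrameFits(96, 96, 8, 8, 100, 100) == 0 : the frame overhangs the canvas. */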
static int IsValidExtendedFormat(const WebPDemuxer* const dmux) { static int IsValidExtendedFormat(const WebPDemuxer* const dmux) {
const int is_animation = !!(dmux->feature_flags & ANIMATION_FLAG); const int is_animation = !!(dmux->feature_flags_ & ANIMATION_FLAG);
const Frame* f = dmux->frames; const Frame* f = dmux->frames_;
if (dmux->state == WEBP_DEMUX_PARSING_HEADER) return 1; if (dmux->state_ == WEBP_DEMUX_PARSING_HEADER) return 1;
if (dmux->canvas_width <= 0 || dmux->canvas_height <= 0) return 0; if (dmux->canvas_width_ <= 0 || dmux->canvas_height_ <= 0) return 0;
if (dmux->loop_count < 0) return 0; if (dmux->loop_count_ < 0) return 0;
if (dmux->state == WEBP_DEMUX_DONE && dmux->frames == NULL) return 0; if (dmux->state_ == WEBP_DEMUX_DONE && dmux->frames_ == NULL) return 0;
if (dmux->feature_flags & ~ALL_VALID_FLAGS) return 0; // invalid bitstream if (dmux->feature_flags_ & ~ALL_VALID_FLAGS) return 0; // invalid bitstream
while (f != NULL) { while (f != NULL) {
const int cur_frame_set = f->frame_num; const int cur_frame_set = f->frame_num_;
// Check frame properties. // Check frame properties.
for (; f != NULL && f->frame_num == cur_frame_set; f = f->next) { for (; f != NULL && f->frame_num_ == cur_frame_set; f = f->next_) {
const ChunkData* const image = f->img_components; const ChunkData* const image = f->img_components_;
const ChunkData* const alpha = f->img_components + 1; const ChunkData* const alpha = f->img_components_ + 1;
if (!is_animation && f->frame_num > 1) return 0; if (!is_animation && f->frame_num_ > 1) return 0;
if (f->complete) { if (f->complete_) {
if (alpha->size == 0 && image->size == 0) return 0; if (alpha->size_ == 0 && image->size_ == 0) return 0;
// Ensure alpha precedes image bitstream. // Ensure alpha precedes image bitstream.
if (alpha->size > 0 && alpha->offset > image->offset) { if (alpha->size_ > 0 && alpha->offset_ > image->offset_) {
return 0; return 0;
} }
if (f->width <= 0 || f->height <= 0) return 0; if (f->width_ <= 0 || f->height_ <= 0) return 0;
} else { } else {
// There shouldn't be a partial frame in a complete file. // There shouldn't be a partial frame in a complete file.
if (dmux->state == WEBP_DEMUX_DONE) return 0; if (dmux->state_ == WEBP_DEMUX_DONE) return 0;
// Ensure alpha precedes image bitstream. // Ensure alpha precedes image bitstream.
if (alpha->size > 0 && image->size > 0 && if (alpha->size_ > 0 && image->size_ > 0 &&
alpha->offset > image->offset) { alpha->offset_ > image->offset_) {
return 0; return 0;
} }
// There shouldn't be any frames after an incomplete one. // There shouldn't be any frames after an incomplete one.
if (f->next != NULL) return 0; if (f->next_ != NULL) return 0;
} }
if (f->width > 0 && f->height > 0 && if (f->width_ > 0 && f->height_ > 0 &&
!CheckFrameBounds(f, !is_animation, !CheckFrameBounds(f, !is_animation,
dmux->canvas_width, dmux->canvas_height)) { dmux->canvas_width_, dmux->canvas_height_)) {
return 0; return 0;
} }
} }
@ -657,21 +657,21 @@ static int IsValidExtendedFormat(const WebPDemuxer* const dmux) {
// WebPDemuxer object // WebPDemuxer object
static void InitDemux(WebPDemuxer* const dmux, const MemBuffer* const mem) { static void InitDemux(WebPDemuxer* const dmux, const MemBuffer* const mem) {
dmux->state = WEBP_DEMUX_PARSING_HEADER; dmux->state_ = WEBP_DEMUX_PARSING_HEADER;
dmux->loop_count = 1; dmux->loop_count_ = 1;
dmux->bgcolor = 0xFFFFFFFF; // White background by default. dmux->bgcolor_ = 0xFFFFFFFF; // White background by default.
dmux->canvas_width = -1; dmux->canvas_width_ = -1;
dmux->canvas_height = -1; dmux->canvas_height_ = -1;
dmux->frames_tail = &dmux->frames; dmux->frames_tail_ = &dmux->frames_;
dmux->chunks_tail = &dmux->chunks; dmux->chunks_tail_ = &dmux->chunks_;
dmux->mem = *mem; dmux->mem_ = *mem;
} }
static ParseStatus CreateRawImageDemuxer(MemBuffer* const mem, static ParseStatus CreateRawImageDemuxer(MemBuffer* const mem,
WebPDemuxer** demuxer) { WebPDemuxer** demuxer) {
WebPBitstreamFeatures features; WebPBitstreamFeatures features;
const VP8StatusCode status = const VP8StatusCode status =
WebPGetFeatures(mem->buf, mem->buf_size, &features); WebPGetFeatures(mem->buf_, mem->buf_size_, &features);
*demuxer = NULL; *demuxer = NULL;
if (status != VP8_STATUS_OK) { if (status != VP8_STATUS_OK) {
return (status == VP8_STATUS_NOT_ENOUGH_DATA) ? PARSE_NEED_MORE_DATA return (status == VP8_STATUS_NOT_ENOUGH_DATA) ? PARSE_NEED_MORE_DATA
@ -683,14 +683,14 @@ static ParseStatus CreateRawImageDemuxer(MemBuffer* const mem,
Frame* const frame = (Frame*)WebPSafeCalloc(1ULL, sizeof(*frame)); Frame* const frame = (Frame*)WebPSafeCalloc(1ULL, sizeof(*frame));
if (dmux == NULL || frame == NULL) goto Error; if (dmux == NULL || frame == NULL) goto Error;
InitDemux(dmux, mem); InitDemux(dmux, mem);
SetFrameInfo(0, mem->buf_size, 1 /*frame_num*/, 1 /*complete*/, &features, SetFrameInfo(0, mem->buf_size_, 1 /*frame_num*/, 1 /*complete*/, &features,
frame); frame);
if (!AddFrame(dmux, frame)) goto Error; if (!AddFrame(dmux, frame)) goto Error;
dmux->state = WEBP_DEMUX_DONE; dmux->state_ = WEBP_DEMUX_DONE;
dmux->canvas_width = frame->width; dmux->canvas_width_ = frame->width_;
dmux->canvas_height = frame->height; dmux->canvas_height_ = frame->height_;
dmux->feature_flags |= frame->has_alpha ? ALPHA_FLAG : 0; dmux->feature_flags_ |= frame->has_alpha_ ? ALPHA_FLAG : 0;
dmux->num_frames = 1; dmux->num_frames_ = 1;
assert(IsValidSimpleFormat(dmux)); assert(IsValidSimpleFormat(dmux));
*demuxer = dmux; *demuxer = dmux;
return PARSE_OK; return PARSE_OK;
@ -734,7 +734,7 @@ WebPDemuxer* WebPDemuxInternal(const WebPData* data, int allow_partial,
return NULL; return NULL;
} }
partial = (mem.buf_size < mem.riff_end); partial = (mem.buf_size_ < mem.riff_end_);
if (!allow_partial && partial) return NULL; if (!allow_partial && partial) return NULL;
dmux = (WebPDemuxer*)WebPSafeCalloc(1ULL, sizeof(*dmux)); dmux = (WebPDemuxer*)WebPSafeCalloc(1ULL, sizeof(*dmux));
@ -743,16 +743,16 @@ WebPDemuxer* WebPDemuxInternal(const WebPData* data, int allow_partial,
status = PARSE_ERROR; status = PARSE_ERROR;
for (parser = kMasterChunks; parser->parse != NULL; ++parser) { for (parser = kMasterChunks; parser->parse != NULL; ++parser) {
if (!memcmp(parser->id, GetBuffer(&dmux->mem), TAG_SIZE)) { if (!memcmp(parser->id, GetBuffer(&dmux->mem_), TAG_SIZE)) {
status = parser->parse(dmux); status = parser->parse(dmux);
if (status == PARSE_OK) dmux->state = WEBP_DEMUX_DONE; if (status == PARSE_OK) dmux->state_ = WEBP_DEMUX_DONE;
if (status == PARSE_NEED_MORE_DATA && !partial) status = PARSE_ERROR; if (status == PARSE_NEED_MORE_DATA && !partial) status = PARSE_ERROR;
if (status != PARSE_ERROR && !parser->valid(dmux)) status = PARSE_ERROR; if (status != PARSE_ERROR && !parser->valid(dmux)) status = PARSE_ERROR;
if (status == PARSE_ERROR) dmux->state = WEBP_DEMUX_PARSE_ERROR; if (status == PARSE_ERROR) dmux->state_ = WEBP_DEMUX_PARSE_ERROR;
break; break;
} }
} }
if (state != NULL) *state = dmux->state; if (state != NULL) *state = dmux->state_;
if (status == PARSE_ERROR) { if (status == PARSE_ERROR) {
WebPDemuxDelete(dmux); WebPDemuxDelete(dmux);
@ -766,14 +766,14 @@ void WebPDemuxDelete(WebPDemuxer* dmux) {
Frame* f; Frame* f;
if (dmux == NULL) return; if (dmux == NULL) return;
for (f = dmux->frames; f != NULL;) { for (f = dmux->frames_; f != NULL;) {
Frame* const cur_frame = f; Frame* const cur_frame = f;
f = f->next; f = f->next_;
WebPSafeFree(cur_frame); WebPSafeFree(cur_frame);
} }
for (c = dmux->chunks; c != NULL;) { for (c = dmux->chunks_; c != NULL;) {
Chunk* const cur_chunk = c; Chunk* const cur_chunk = c;
c = c->next; c = c->next_;
WebPSafeFree(cur_chunk); WebPSafeFree(cur_chunk);
} }
WebPSafeFree(dmux); WebPSafeFree(dmux);
@ -785,12 +785,12 @@ uint32_t WebPDemuxGetI(const WebPDemuxer* dmux, WebPFormatFeature feature) {
if (dmux == NULL) return 0; if (dmux == NULL) return 0;
switch (feature) { switch (feature) {
case WEBP_FF_FORMAT_FLAGS: return dmux->feature_flags; case WEBP_FF_FORMAT_FLAGS: return dmux->feature_flags_;
case WEBP_FF_CANVAS_WIDTH: return (uint32_t)dmux->canvas_width; case WEBP_FF_CANVAS_WIDTH: return (uint32_t)dmux->canvas_width_;
case WEBP_FF_CANVAS_HEIGHT: return (uint32_t)dmux->canvas_height; case WEBP_FF_CANVAS_HEIGHT: return (uint32_t)dmux->canvas_height_;
case WEBP_FF_LOOP_COUNT: return (uint32_t)dmux->loop_count; case WEBP_FF_LOOP_COUNT: return (uint32_t)dmux->loop_count_;
case WEBP_FF_BACKGROUND_COLOR: return dmux->bgcolor; case WEBP_FF_BACKGROUND_COLOR: return dmux->bgcolor_;
case WEBP_FF_FRAME_COUNT: return (uint32_t)dmux->num_frames; case WEBP_FF_FRAME_COUNT: return (uint32_t)dmux->num_frames_;
} }
return 0; return 0;
} }
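
Callers query these values through the public API rather than touching the struct. A short usage sketch (error handling kept minimal), assuming data/data_size already hold a complete WebP file:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include "webp/demux.h"

static void PrintCanvasInfo(const uint8_t* data, size_t data_size) {
  WebPData webp_data = { data, data_size };
  WebPDemuxer* demux = WebPDemux(&webp_data);
  if (demux == NULL) return;
  printf("canvas: %ux%u, %u frame(s), loop count %u\n",
         WebPDemuxGetI(demux, WEBP_FF_CANVAS_WIDTH),
         WebPDemuxGetI(demux, WEBP_FF_CANVAS_HEIGHT),
         WebPDemuxGetI(demux, WEBP_FF_FRAME_COUNT),
         WebPDemuxGetI(demux, WEBP_FF_LOOP_COUNT));
  WebPDemuxDelete(demux);
}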
@ -800,8 +800,8 @@ uint32_t WebPDemuxGetI(const WebPDemuxer* dmux, WebPFormatFeature feature) {
static const Frame* GetFrame(const WebPDemuxer* const dmux, int frame_num) { static const Frame* GetFrame(const WebPDemuxer* const dmux, int frame_num) {
const Frame* f; const Frame* f;
for (f = dmux->frames; f != NULL; f = f->next) { for (f = dmux->frames_; f != NULL; f = f->next_) {
if (frame_num == f->frame_num) break; if (frame_num == f->frame_num_) break;
} }
return f; return f;
} }
@ -811,19 +811,19 @@ static const uint8_t* GetFramePayload(const uint8_t* const mem_buf,
size_t* const data_size) { size_t* const data_size) {
*data_size = 0; *data_size = 0;
if (frame != NULL) { if (frame != NULL) {
const ChunkData* const image = frame->img_components; const ChunkData* const image = frame->img_components_;
const ChunkData* const alpha = frame->img_components + 1; const ChunkData* const alpha = frame->img_components_ + 1;
size_t start_offset = image->offset; size_t start_offset = image->offset_;
*data_size = image->size; *data_size = image->size_;
// If alpha exists, it precedes the image; update the size, allowing for // If alpha exists, it precedes the image; update the size, allowing for
// intervening chunks. // intervening chunks.
if (alpha->size > 0) { if (alpha->size_ > 0) {
const size_t inter_size = (image->offset > 0) const size_t inter_size = (image->offset_ > 0)
? image->offset - (alpha->offset + alpha->size) ? image->offset_ - (alpha->offset_ + alpha->size_)
: 0; : 0;
start_offset = alpha->offset; start_offset = alpha->offset_;
*data_size += alpha->size + inter_size; *data_size += alpha->size_ + inter_size;
} }
return mem_buf + start_offset; return mem_buf + start_offset;
} }
@ -834,23 +834,23 @@ static const uint8_t* GetFramePayload(const uint8_t* const mem_buf,
static int SynthesizeFrame(const WebPDemuxer* const dmux, static int SynthesizeFrame(const WebPDemuxer* const dmux,
const Frame* const frame, const Frame* const frame,
WebPIterator* const iter) { WebPIterator* const iter) {
const uint8_t* const mem_buf = dmux->mem.buf; const uint8_t* const mem_buf = dmux->mem_.buf_;
size_t payload_size = 0; size_t payload_size = 0;
const uint8_t* const payload = GetFramePayload(mem_buf, frame, &payload_size); const uint8_t* const payload = GetFramePayload(mem_buf, frame, &payload_size);
if (payload == NULL) return 0; if (payload == NULL) return 0;
assert(frame != NULL); assert(frame != NULL);
iter->frame_num = frame->frame_num; iter->frame_num = frame->frame_num_;
iter->num_frames = dmux->num_frames; iter->num_frames = dmux->num_frames_;
iter->x_offset = frame->x_offset; iter->x_offset = frame->x_offset_;
iter->y_offset = frame->y_offset; iter->y_offset = frame->y_offset_;
iter->width = frame->width; iter->width = frame->width_;
iter->height = frame->height; iter->height = frame->height_;
iter->has_alpha = frame->has_alpha; iter->has_alpha = frame->has_alpha_;
iter->duration = frame->duration; iter->duration = frame->duration_;
iter->dispose_method = frame->dispose_method; iter->dispose_method = frame->dispose_method_;
iter->blend_method = frame->blend_method; iter->blend_method = frame->blend_method_;
iter->complete = frame->complete; iter->complete = frame->complete_;
iter->fragment.bytes = payload; iter->fragment.bytes = payload;
iter->fragment.size = payload_size; iter->fragment.size = payload_size;
return 1; return 1;
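
SynthesizeFrame is what ultimately fills the public WebPIterator on either branch; the usual consumption pattern looks like the sketch below (a complete, non-partial file is assumed):

#include <stdio.h>
#include "webp/demux.h"

/* Walk every frame of an animation and report its placement and timing. */
static void DumpFrames(WebPDemuxer* demux) {
  WebPIterator iter;
  if (WebPDemuxGetFrame(demux, 1, &iter)) {   /* frames are 1-based; 0 = last */
    do {
      printf("frame %d/%d: %dx%d at (%d,%d), %d ms, %zu payload bytes\n",
             iter.frame_num, iter.num_frames, iter.width, iter.height,
             iter.x_offset, iter.y_offset, iter.duration, iter.fragment.size);
      /* iter.fragment.bytes points at the VP8/VP8L (and ALPH) payload. */
    } while (WebPDemuxNextFrame(&iter));
    WebPDemuxReleaseIterator(&iter);
  }
}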
@ -860,8 +860,8 @@ static int SetFrame(int frame_num, WebPIterator* const iter) {
const Frame* frame; const Frame* frame;
const WebPDemuxer* const dmux = (WebPDemuxer*)iter->private_; const WebPDemuxer* const dmux = (WebPDemuxer*)iter->private_;
if (dmux == NULL || frame_num < 0) return 0; if (dmux == NULL || frame_num < 0) return 0;
if (frame_num > dmux->num_frames) return 0; if (frame_num > dmux->num_frames_) return 0;
if (frame_num == 0) frame_num = dmux->num_frames; if (frame_num == 0) frame_num = dmux->num_frames_;
frame = GetFrame(dmux, frame_num); frame = GetFrame(dmux, frame_num);
if (frame == NULL) return 0; if (frame == NULL) return 0;
@ -896,11 +896,11 @@ void WebPDemuxReleaseIterator(WebPIterator* iter) {
// Chunk iteration // Chunk iteration
static int ChunkCount(const WebPDemuxer* const dmux, const char fourcc[4]) { static int ChunkCount(const WebPDemuxer* const dmux, const char fourcc[4]) {
const uint8_t* const mem_buf = dmux->mem.buf; const uint8_t* const mem_buf = dmux->mem_.buf_;
const Chunk* c; const Chunk* c;
int count = 0; int count = 0;
for (c = dmux->chunks; c != NULL; c = c->next) { for (c = dmux->chunks_; c != NULL; c = c->next_) {
const uint8_t* const header = mem_buf + c->data.offset; const uint8_t* const header = mem_buf + c->data_.offset_;
if (!memcmp(header, fourcc, TAG_SIZE)) ++count; if (!memcmp(header, fourcc, TAG_SIZE)) ++count;
} }
return count; return count;
@ -908,11 +908,11 @@ static int ChunkCount(const WebPDemuxer* const dmux, const char fourcc[4]) {
static const Chunk* GetChunk(const WebPDemuxer* const dmux, static const Chunk* GetChunk(const WebPDemuxer* const dmux,
const char fourcc[4], int chunk_num) { const char fourcc[4], int chunk_num) {
const uint8_t* const mem_buf = dmux->mem.buf; const uint8_t* const mem_buf = dmux->mem_.buf_;
const Chunk* c; const Chunk* c;
int count = 0; int count = 0;
for (c = dmux->chunks; c != NULL; c = c->next) { for (c = dmux->chunks_; c != NULL; c = c->next_) {
const uint8_t* const header = mem_buf + c->data.offset; const uint8_t* const header = mem_buf + c->data_.offset_;
if (!memcmp(header, fourcc, TAG_SIZE)) ++count; if (!memcmp(header, fourcc, TAG_SIZE)) ++count;
if (count == chunk_num) break; if (count == chunk_num) break;
} }
@ -930,10 +930,10 @@ static int SetChunk(const char fourcc[4], int chunk_num,
if (chunk_num == 0) chunk_num = count; if (chunk_num == 0) chunk_num = count;
if (chunk_num <= count) { if (chunk_num <= count) {
const uint8_t* const mem_buf = dmux->mem.buf; const uint8_t* const mem_buf = dmux->mem_.buf_;
const Chunk* const chunk = GetChunk(dmux, fourcc, chunk_num); const Chunk* const chunk = GetChunk(dmux, fourcc, chunk_num);
iter->chunk.bytes = mem_buf + chunk->data.offset + CHUNK_HEADER_SIZE; iter->chunk.bytes = mem_buf + chunk->data_.offset_ + CHUNK_HEADER_SIZE;
iter->chunk.size = chunk->data.size - CHUNK_HEADER_SIZE; iter->chunk.size = chunk->data_.size_ - CHUNK_HEADER_SIZE;
iter->num_chunks = count; iter->num_chunks = count;
iter->chunk_num = chunk_num; iter->chunk_num = chunk_num;
return 1; return 1;
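
The chunk lookup above backs the public metadata accessors; retrieving an ICC profile, for example, follows the hedged pattern below:

#include "webp/demux.h"

/* Returns 1 and fills *profile with the ICCP payload (valid for as long as
 * the demuxer lives), or 0 if the file carries no ICC chunk. */
static int GetIccProfile(WebPDemuxer* demux, WebPData* profile) {
  WebPChunkIterator chunk_iter;
  if (!WebPDemuxGetChunk(demux, "ICCP", 1, &chunk_iter)) return 0;
  *profile = chunk_iter.chunk;           /* bytes exclude the 8-byte header */
  WebPDemuxReleaseChunkIterator(&chunk_iter);
  return 1;
}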

View File

@ -6,8 +6,8 @@
LANGUAGE LANG_ENGLISH, SUBLANG_ENGLISH_US LANGUAGE LANG_ENGLISH, SUBLANG_ENGLISH_US
VS_VERSION_INFO VERSIONINFO VS_VERSION_INFO VERSIONINFO
FILEVERSION 1,0,5,0 FILEVERSION 1,0,3,1
PRODUCTVERSION 1,0,5,0 PRODUCTVERSION 1,0,3,1
FILEFLAGSMASK 0x3fL FILEFLAGSMASK 0x3fL
#ifdef _DEBUG #ifdef _DEBUG
FILEFLAGS 0x1L FILEFLAGS 0x1L
@ -24,12 +24,12 @@ BEGIN
BEGIN BEGIN
VALUE "CompanyName", "Google, Inc." VALUE "CompanyName", "Google, Inc."
VALUE "FileDescription", "libwebpdemux DLL" VALUE "FileDescription", "libwebpdemux DLL"
VALUE "FileVersion", "1.5.0" VALUE "FileVersion", "1.3.1"
VALUE "InternalName", "libwebpdemux.dll" VALUE "InternalName", "libwebpdemux.dll"
VALUE "LegalCopyright", "Copyright (C) 2024" VALUE "LegalCopyright", "Copyright (C) 2023"
VALUE "OriginalFilename", "libwebpdemux.dll" VALUE "OriginalFilename", "libwebpdemux.dll"
VALUE "ProductName", "WebP Image Demuxer" VALUE "ProductName", "WebP Image Demuxer"
VALUE "ProductVersion", "1.5.0" VALUE "ProductVersion", "1.3.1"
END END
END END
BLOCK "VarFileInfo" BLOCK "VarFileInfo"

View File

@ -5,8 +5,6 @@ noinst_LTLIBRARIES += libwebpdsp_sse2.la
noinst_LTLIBRARIES += libwebpdspdecode_sse2.la noinst_LTLIBRARIES += libwebpdspdecode_sse2.la
noinst_LTLIBRARIES += libwebpdsp_sse41.la noinst_LTLIBRARIES += libwebpdsp_sse41.la
noinst_LTLIBRARIES += libwebpdspdecode_sse41.la noinst_LTLIBRARIES += libwebpdspdecode_sse41.la
noinst_LTLIBRARIES += libwebpdsp_avx2.la
noinst_LTLIBRARIES += libwebpdspdecode_avx2.la
noinst_LTLIBRARIES += libwebpdsp_neon.la noinst_LTLIBRARIES += libwebpdsp_neon.la
noinst_LTLIBRARIES += libwebpdspdecode_neon.la noinst_LTLIBRARIES += libwebpdspdecode_neon.la
noinst_LTLIBRARIES += libwebpdsp_msa.la noinst_LTLIBRARIES += libwebpdsp_msa.la
@ -46,11 +44,6 @@ ENC_SOURCES += lossless_enc.c
ENC_SOURCES += quant.h ENC_SOURCES += quant.h
ENC_SOURCES += ssim.c ENC_SOURCES += ssim.c
libwebpdspdecode_avx2_la_SOURCES =
libwebpdspdecode_avx2_la_SOURCES += lossless_avx2.c
libwebpdspdecode_avx2_la_CPPFLAGS = $(libwebpdsp_la_CPPFLAGS)
libwebpdspdecode_avx2_la_CFLAGS = $(AM_CFLAGS) $(AVX2_FLAGS)
libwebpdspdecode_sse41_la_SOURCES = libwebpdspdecode_sse41_la_SOURCES =
libwebpdspdecode_sse41_la_SOURCES += alpha_processing_sse41.c libwebpdspdecode_sse41_la_SOURCES += alpha_processing_sse41.c
libwebpdspdecode_sse41_la_SOURCES += dec_sse41.c libwebpdspdecode_sse41_la_SOURCES += dec_sse41.c
@ -130,12 +123,6 @@ libwebpdsp_sse41_la_CPPFLAGS = $(libwebpdsp_la_CPPFLAGS)
libwebpdsp_sse41_la_CFLAGS = $(AM_CFLAGS) $(SSE41_FLAGS) libwebpdsp_sse41_la_CFLAGS = $(AM_CFLAGS) $(SSE41_FLAGS)
libwebpdsp_sse41_la_LIBADD = libwebpdspdecode_sse41.la libwebpdsp_sse41_la_LIBADD = libwebpdspdecode_sse41.la
libwebpdsp_avx2_la_SOURCES =
libwebpdsp_avx2_la_SOURCES += lossless_enc_avx2.c
libwebpdsp_avx2_la_CPPFLAGS = $(libwebpdsp_la_CPPFLAGS)
libwebpdsp_avx2_la_CFLAGS = $(AM_CFLAGS) $(AVX2_FLAGS)
libwebpdsp_avx2_la_LIBADD = libwebpdspdecode_avx2.la
libwebpdsp_neon_la_SOURCES = libwebpdsp_neon_la_SOURCES =
libwebpdsp_neon_la_SOURCES += cost_neon.c libwebpdsp_neon_la_SOURCES += cost_neon.c
libwebpdsp_neon_la_SOURCES += enc_neon.c libwebpdsp_neon_la_SOURCES += enc_neon.c
@ -180,7 +167,6 @@ libwebpdsp_la_LDFLAGS = -lm
libwebpdsp_la_LIBADD = libwebpdsp_la_LIBADD =
libwebpdsp_la_LIBADD += libwebpdsp_sse2.la libwebpdsp_la_LIBADD += libwebpdsp_sse2.la
libwebpdsp_la_LIBADD += libwebpdsp_sse41.la libwebpdsp_la_LIBADD += libwebpdsp_sse41.la
libwebpdsp_la_LIBADD += libwebpdsp_avx2.la
libwebpdsp_la_LIBADD += libwebpdsp_neon.la libwebpdsp_la_LIBADD += libwebpdsp_neon.la
libwebpdsp_la_LIBADD += libwebpdsp_msa.la libwebpdsp_la_LIBADD += libwebpdsp_msa.la
libwebpdsp_la_LIBADD += libwebpdsp_mips32.la libwebpdsp_la_LIBADD += libwebpdsp_mips32.la
@ -194,7 +180,6 @@ if BUILD_LIBWEBPDECODER
libwebpdspdecode_la_LIBADD = libwebpdspdecode_la_LIBADD =
libwebpdspdecode_la_LIBADD += libwebpdspdecode_sse2.la libwebpdspdecode_la_LIBADD += libwebpdspdecode_sse2.la
libwebpdspdecode_la_LIBADD += libwebpdspdecode_sse41.la libwebpdspdecode_la_LIBADD += libwebpdspdecode_sse41.la
libwebpdspdecode_la_LIBADD += libwebpdspdecode_avx2.la
libwebpdspdecode_la_LIBADD += libwebpdspdecode_neon.la libwebpdspdecode_la_LIBADD += libwebpdspdecode_neon.la
libwebpdspdecode_la_LIBADD += libwebpdspdecode_msa.la libwebpdspdecode_la_LIBADD += libwebpdspdecode_msa.la
libwebpdspdecode_la_LIBADD += libwebpdspdecode_mips32.la libwebpdspdecode_la_LIBADD += libwebpdspdecode_mips32.la

View File

@ -16,8 +16,6 @@
#if defined(WEBP_USE_SSE2) #if defined(WEBP_USE_SSE2)
#include <emmintrin.h> #include <emmintrin.h>
#include "src/dsp/cpu.h"
//------------------------------------------------------------------------------ //------------------------------------------------------------------------------
static int DispatchAlpha_SSE2(const uint8_t* WEBP_RESTRICT alpha, static int DispatchAlpha_SSE2(const uint8_t* WEBP_RESTRICT alpha,
@ -28,44 +26,38 @@ static int DispatchAlpha_SSE2(const uint8_t* WEBP_RESTRICT alpha,
uint32_t alpha_and = 0xff; uint32_t alpha_and = 0xff;
int i, j; int i, j;
const __m128i zero = _mm_setzero_si128(); const __m128i zero = _mm_setzero_si128();
const __m128i alpha_mask = _mm_set1_epi32((int)0xff); // to preserve A const __m128i rgb_mask = _mm_set1_epi32((int)0xffffff00); // to preserve RGB
const __m128i all_0xff = _mm_set1_epi8(0xff); const __m128i all_0xff = _mm_set_epi32(0, 0, ~0, ~0);
__m128i all_alphas16 = all_0xff; __m128i all_alphas = all_0xff;
__m128i all_alphas8 = all_0xff;
// We must be able to access 3 extra bytes after the last written byte // We must be able to access 3 extra bytes after the last written byte
// 'dst[4 * width - 4]', because we don't know if alpha is the first or the // 'dst[4 * width - 4]', because we don't know if alpha is the first or the
// last byte of the quadruplet. // last byte of the quadruplet.
const int limit = (width - 1) & ~7;
for (j = 0; j < height; ++j) { for (j = 0; j < height; ++j) {
char* ptr = (char*)dst; __m128i* out = (__m128i*)dst;
for (i = 0; i + 16 <= width - 1; i += 16) { for (i = 0; i < limit; i += 8) {
// load 16 alpha bytes
const __m128i a0 = _mm_loadu_si128((const __m128i*)&alpha[i]);
const __m128i a1_lo = _mm_unpacklo_epi8(a0, zero);
const __m128i a1_hi = _mm_unpackhi_epi8(a0, zero);
const __m128i a2_lo_lo = _mm_unpacklo_epi16(a1_lo, zero);
const __m128i a2_lo_hi = _mm_unpackhi_epi16(a1_lo, zero);
const __m128i a2_hi_lo = _mm_unpacklo_epi16(a1_hi, zero);
const __m128i a2_hi_hi = _mm_unpackhi_epi16(a1_hi, zero);
_mm_maskmoveu_si128(a2_lo_lo, alpha_mask, ptr + 0);
_mm_maskmoveu_si128(a2_lo_hi, alpha_mask, ptr + 16);
_mm_maskmoveu_si128(a2_hi_lo, alpha_mask, ptr + 32);
_mm_maskmoveu_si128(a2_hi_hi, alpha_mask, ptr + 48);
// accumulate 16 alpha 'and' in parallel
all_alphas16 = _mm_and_si128(all_alphas16, a0);
ptr += 64;
}
if (i + 8 <= width - 1) {
// load 8 alpha bytes // load 8 alpha bytes
const __m128i a0 = _mm_loadl_epi64((const __m128i*)&alpha[i]); const __m128i a0 = _mm_loadl_epi64((const __m128i*)&alpha[i]);
const __m128i a1 = _mm_unpacklo_epi8(a0, zero); const __m128i a1 = _mm_unpacklo_epi8(a0, zero);
const __m128i a2_lo = _mm_unpacklo_epi16(a1, zero); const __m128i a2_lo = _mm_unpacklo_epi16(a1, zero);
const __m128i a2_hi = _mm_unpackhi_epi16(a1, zero); const __m128i a2_hi = _mm_unpackhi_epi16(a1, zero);
_mm_maskmoveu_si128(a2_lo, alpha_mask, ptr); // load 8 dst pixels (32 bytes)
_mm_maskmoveu_si128(a2_hi, alpha_mask, ptr + 16); const __m128i b0_lo = _mm_loadu_si128(out + 0);
// accumulate 8 alpha 'and' in parallel const __m128i b0_hi = _mm_loadu_si128(out + 1);
all_alphas8 = _mm_and_si128(all_alphas8, a0); // mask dst alpha values
i += 8; const __m128i b1_lo = _mm_and_si128(b0_lo, rgb_mask);
const __m128i b1_hi = _mm_and_si128(b0_hi, rgb_mask);
// combine
const __m128i b2_lo = _mm_or_si128(b1_lo, a2_lo);
const __m128i b2_hi = _mm_or_si128(b1_hi, a2_hi);
// store
_mm_storeu_si128(out + 0, b2_lo);
_mm_storeu_si128(out + 1, b2_hi);
// accumulate eight alpha 'and' in parallel
all_alphas = _mm_and_si128(all_alphas, a0);
out += 2;
} }
for (; i < width; ++i) { for (; i < width; ++i) {
const uint32_t alpha_value = alpha[i]; const uint32_t alpha_value = alpha[i];
@ -76,9 +68,8 @@ static int DispatchAlpha_SSE2(const uint8_t* WEBP_RESTRICT alpha,
dst += dst_stride; dst += dst_stride;
} }
// Combine the eight alpha 'and' into an 8-bit mask. // Combine the eight alpha 'and' into an 8-bit mask.
alpha_and &= _mm_movemask_epi8(_mm_cmpeq_epi8(all_alphas8, all_0xff)) & 0xff; alpha_and &= _mm_movemask_epi8(_mm_cmpeq_epi8(all_alphas, all_0xff));
return (alpha_and != 0xff || return (alpha_and != 0xff);
_mm_movemask_epi8(_mm_cmpeq_epi8(all_alphas16, all_0xff)) != 0xffff);
} }
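
Both versions of the SSE2 loop above implement the same contract. A scalar sketch of that contract, modelled on the generic C path and included here only as a reference, not a drop-in:

#include <stdint.h>

/* Copy one alpha plane into the alpha byte of a 4-byte-per-pixel buffer.
 * Returns 1 if any alpha value is not 0xff, i.e. real transparency exists. */
static int DispatchAlphaScalar(const uint8_t* alpha, int alpha_stride,
                               int width, int height,
                               uint8_t* dst, int dst_stride) {
  uint32_t alpha_and = 0xff;   /* AND of every alpha sample seen */
  int i, j;
  for (j = 0; j < height; ++j) {
    for (i = 0; i < width; ++i) {
      const uint32_t alpha_value = alpha[i];
      dst[4 * i] = (uint8_t)alpha_value;   /* alpha byte of pixel i */
      alpha_and &= alpha_value;
    }
    alpha += alpha_stride;
    dst += dst_stride;
  }
  return (alpha_and != 0xff);
}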
static void DispatchAlphaToGreen_SSE2(const uint8_t* WEBP_RESTRICT alpha, static void DispatchAlphaToGreen_SSE2(const uint8_t* WEBP_RESTRICT alpha,
@ -153,46 +144,6 @@ static int ExtractAlpha_SSE2(const uint8_t* WEBP_RESTRICT argb, int argb_stride,
return (alpha_and == 0xff); return (alpha_and == 0xff);
} }
static void ExtractGreen_SSE2(const uint32_t* WEBP_RESTRICT argb,
uint8_t* WEBP_RESTRICT alpha, int size) {
int i;
const __m128i mask = _mm_set1_epi32(0xff);
const __m128i* src = (const __m128i*)argb;
for (i = 0; i + 16 <= size; i += 16, src += 4) {
const __m128i a0 = _mm_loadu_si128(src + 0);
const __m128i a1 = _mm_loadu_si128(src + 1);
const __m128i a2 = _mm_loadu_si128(src + 2);
const __m128i a3 = _mm_loadu_si128(src + 3);
const __m128i b0 = _mm_srli_epi32(a0, 8);
const __m128i b1 = _mm_srli_epi32(a1, 8);
const __m128i b2 = _mm_srli_epi32(a2, 8);
const __m128i b3 = _mm_srli_epi32(a3, 8);
const __m128i c0 = _mm_and_si128(b0, mask);
const __m128i c1 = _mm_and_si128(b1, mask);
const __m128i c2 = _mm_and_si128(b2, mask);
const __m128i c3 = _mm_and_si128(b3, mask);
const __m128i d0 = _mm_packs_epi32(c0, c1);
const __m128i d1 = _mm_packs_epi32(c2, c3);
const __m128i e = _mm_packus_epi16(d0, d1);
// store
_mm_storeu_si128((__m128i*)&alpha[i], e);
}
if (i + 8 <= size) {
const __m128i a0 = _mm_loadu_si128(src + 0);
const __m128i a1 = _mm_loadu_si128(src + 1);
const __m128i b0 = _mm_srli_epi32(a0, 8);
const __m128i b1 = _mm_srli_epi32(a1, 8);
const __m128i c0 = _mm_and_si128(b0, mask);
const __m128i c1 = _mm_and_si128(b1, mask);
const __m128i d = _mm_packs_epi32(c0, c1);
const __m128i e = _mm_packus_epi16(d, d);
_mm_storel_epi64((__m128i*)&alpha[i], e);
i += 8;
}
for (; i < size; ++i) alpha[i] = argb[i] >> 8;
}
//------------------------------------------------------------------------------ //------------------------------------------------------------------------------
// Non-dither premultiplied modes // Non-dither premultiplied modes
@ -403,7 +354,6 @@ WEBP_TSAN_IGNORE_FUNCTION void WebPInitAlphaProcessingSSE2(void) {
WebPDispatchAlpha = DispatchAlpha_SSE2; WebPDispatchAlpha = DispatchAlpha_SSE2;
WebPDispatchAlphaToGreen = DispatchAlphaToGreen_SSE2; WebPDispatchAlphaToGreen = DispatchAlphaToGreen_SSE2;
WebPExtractAlpha = ExtractAlpha_SSE2; WebPExtractAlpha = ExtractAlpha_SSE2;
WebPExtractGreen = ExtractGreen_SSE2;
WebPHasAlpha8b = HasAlpha8b_SSE2; WebPHasAlpha8b = HasAlpha8b_SSE2;
WebPHasAlpha32b = HasAlpha32b_SSE2; WebPHasAlpha32b = HasAlpha32b_SSE2;

View File

@ -354,8 +354,8 @@ static int GetResidualCost_C(int ctx0, const VP8Residual* const res) {
return cost; return cost;
} }
static void SetResidualCoeffs_C(const int16_t* WEBP_RESTRICT const coeffs, static void SetResidualCoeffs_C(const int16_t* const coeffs,
VP8Residual* WEBP_RESTRICT const res) { VP8Residual* const res) {
int n; int n;
res->last = -1; res->last = -1;
assert(res->first == 0 || coeffs[0] == 0); assert(res->first == 0 || coeffs[0] == 0);
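
Only the signature changes in this hunk (WEBP_RESTRICT is added on main); the body, common to both branches, records the index of the last non-zero coefficient. A hedged scalar sketch of that behaviour, with an illustrative stand-in for VP8Residual:

#include <stdint.h>

typedef struct { int first, last; const int16_t* coeffs; } Residual;  /* stand-in */

static void SetResidualCoeffsSketch(const int16_t coeffs[16], Residual* res) {
  int n;
  res->last = -1;
  for (n = 15; n >= 0; --n) {
    if (coeffs[n] != 0) {   /* highest non-zero index, or -1 if all zero */
      res->last = n;
      break;
    }
  }
  res->coeffs = coeffs;
}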

View File

@ -96,8 +96,8 @@ static int GetResidualCost_MIPS32(int ctx0, const VP8Residual* const res) {
return cost; return cost;
} }
static void SetResidualCoeffs_MIPS32(const int16_t* WEBP_RESTRICT const coeffs, static void SetResidualCoeffs_MIPS32(const int16_t* const coeffs,
VP8Residual* WEBP_RESTRICT const res) { VP8Residual* const res) {
const int16_t* p_coeffs = (int16_t*)coeffs; const int16_t* p_coeffs = (int16_t*)coeffs;
int temp0, temp1, temp2, n, n1; int temp0, temp1, temp2, n, n1;
assert(res->first == 0 || coeffs[0] == 0); assert(res->first == 0 || coeffs[0] == 0);

View File

@ -19,8 +19,8 @@
static const uint8_t position[16] = { 1, 2, 3, 4, 5, 6, 7, 8, static const uint8_t position[16] = { 1, 2, 3, 4, 5, 6, 7, 8,
9, 10, 11, 12, 13, 14, 15, 16 }; 9, 10, 11, 12, 13, 14, 15, 16 };
static void SetResidualCoeffs_NEON(const int16_t* WEBP_RESTRICT const coeffs, static void SetResidualCoeffs_NEON(const int16_t* const coeffs,
VP8Residual* WEBP_RESTRICT const res) { VP8Residual* const res) {
const int16x8_t minus_one = vdupq_n_s16(-1); const int16x8_t minus_one = vdupq_n_s16(-1);
const int16x8_t coeffs_0 = vld1q_s16(coeffs); const int16x8_t coeffs_0 = vld1q_s16(coeffs);
const int16x8_t coeffs_1 = vld1q_s16(coeffs + 8); const int16x8_t coeffs_1 = vld1q_s16(coeffs + 8);

View File

@ -22,8 +22,8 @@
//------------------------------------------------------------------------------ //------------------------------------------------------------------------------
static void SetResidualCoeffs_SSE2(const int16_t* WEBP_RESTRICT const coeffs, static void SetResidualCoeffs_SSE2(const int16_t* const coeffs,
VP8Residual* WEBP_RESTRICT const res) { VP8Residual* const res) {
const __m128i c0 = _mm_loadu_si128((const __m128i*)(coeffs + 0)); const __m128i c0 = _mm_loadu_si128((const __m128i*)(coeffs + 0));
const __m128i c1 = _mm_loadu_si128((const __m128i*)(coeffs + 8)); const __m128i c1 = _mm_loadu_si128((const __m128i*)(coeffs + 8));
// Use SSE2 to compare 16 values with a single instruction. // Use SSE2 to compare 16 values with a single instruction.

View File

@ -36,6 +36,18 @@ static WEBP_INLINE void GetCPUInfo(int cpu_info[4], int info_type) {
: "=a"(cpu_info[0]), "=D"(cpu_info[1]), "=c"(cpu_info[2]), "=d"(cpu_info[3]) : "=a"(cpu_info[0]), "=D"(cpu_info[1]), "=c"(cpu_info[2]), "=d"(cpu_info[3])
: "a"(info_type), "c"(0)); : "a"(info_type), "c"(0));
} }
#elif defined(__x86_64__) && \
(defined(__code_model_medium__) || defined(__code_model_large__)) && \
defined(__PIC__)
static WEBP_INLINE void GetCPUInfo(int cpu_info[4], int info_type) {
__asm__ volatile (
"xchg{q}\t{%%rbx}, %q1\n"
"cpuid\n"
"xchg{q}\t{%%rbx}, %q1\n"
: "=a"(cpu_info[0]), "=&r"(cpu_info[1]), "=c"(cpu_info[2]),
"=d"(cpu_info[3])
: "a"(info_type), "c"(0));
}
#elif defined(__i386__) || defined(__x86_64__) #elif defined(__i386__) || defined(__x86_64__)
static WEBP_INLINE void GetCPUInfo(int cpu_info[4], int info_type) { static WEBP_INLINE void GetCPUInfo(int cpu_info[4], int info_type) {
__asm__ volatile ( __asm__ volatile (
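
The extra variant added on main preserves %rbx across CPUID, which the medium and large code models reserve under PIC. However the bits are gathered, the dsp init code consumes them through the VP8GetCPUInfo function pointer; a hedged usage sketch (the header location is assumed):

#include "src/dsp/cpu.h"   /* assumed home of VP8GetCPUInfo / CPUFeature */

/* Pick a dispatch path at init time; kSSE2 is one of the CPUFeature values
 * probed through VP8GetCPUInfo, which may be NULL on unknown platforms. */
static void InitMyKernels(void) {
  const int has_sse2 = (VP8GetCPUInfo != NULL) && VP8GetCPUInfo(kSSE2);
  if (has_sse2) {
    /* install SSE2 versions here */
  } else {
    /* fall back to the plain C versions */
  }
}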

View File

@ -56,11 +56,6 @@
(defined(_M_X64) || defined(_M_IX86)) (defined(_M_X64) || defined(_M_IX86))
#define WEBP_MSC_SSE41 // Visual C++ SSE4.1 targets #define WEBP_MSC_SSE41 // Visual C++ SSE4.1 targets
#endif #endif
#if defined(_MSC_VER) && _MSC_VER >= 1700 && \
(defined(_M_X64) || defined(_M_IX86))
#define WEBP_MSC_AVX2 // Visual C++ AVX2 targets
#endif
#endif #endif
// WEBP_HAVE_* are used to indicate the presence of the instruction set in dsp // WEBP_HAVE_* are used to indicate the presence of the instruction set in dsp
@ -85,16 +80,6 @@
#define WEBP_HAVE_SSE41 #define WEBP_HAVE_SSE41
#endif #endif
#if (defined(__AVX2__) || defined(WEBP_MSC_AVX2)) && \
(!defined(HAVE_CONFIG_H) || defined(WEBP_HAVE_AVX2))
#define WEBP_USE_AVX2
#endif
#if defined(WEBP_USE_AVX2) && !defined(WEBP_HAVE_AVX2)
#define WEBP_HAVE_AVX2
#endif
#undef WEBP_MSC_AVX2
#undef WEBP_MSC_SSE41 #undef WEBP_MSC_SSE41
#undef WEBP_MSC_SSE2 #undef WEBP_MSC_SSE2

View File

@ -37,19 +37,19 @@ static WEBP_INLINE uint8_t clip_8b(int v) {
STORE(3, y, DC - (d)); \ STORE(3, y, DC - (d)); \
} while (0) } while (0)
#define MUL1(a) ((((a) * 20091) >> 16) + (a))
#define MUL2(a) (((a) * 35468) >> 16)
#if !WEBP_NEON_OMIT_C_CODE #if !WEBP_NEON_OMIT_C_CODE
static void TransformOne_C(const int16_t* WEBP_RESTRICT in, static void TransformOne_C(const int16_t* in, uint8_t* dst) {
uint8_t* WEBP_RESTRICT dst) {
int C[4 * 4], *tmp; int C[4 * 4], *tmp;
int i; int i;
tmp = C; tmp = C;
for (i = 0; i < 4; ++i) { // vertical pass for (i = 0; i < 4; ++i) { // vertical pass
const int a = in[0] + in[8]; // [-4096, 4094] const int a = in[0] + in[8]; // [-4096, 4094]
const int b = in[0] - in[8]; // [-4095, 4095] const int b = in[0] - in[8]; // [-4095, 4095]
const int c = WEBP_TRANSFORM_AC3_MUL2(in[4]) - const int c = MUL2(in[4]) - MUL1(in[12]); // [-3783, 3783]
WEBP_TRANSFORM_AC3_MUL1(in[12]); // [-3783, 3783] const int d = MUL1(in[4]) + MUL2(in[12]); // [-3785, 3781]
const int d = WEBP_TRANSFORM_AC3_MUL1(in[4]) +
WEBP_TRANSFORM_AC3_MUL2(in[12]); // [-3785, 3781]
tmp[0] = a + d; // [-7881, 7875] tmp[0] = a + d; // [-7881, 7875]
tmp[1] = b + c; // [-7878, 7878] tmp[1] = b + c; // [-7878, 7878]
tmp[2] = b - c; // [-7878, 7878] tmp[2] = b - c; // [-7878, 7878]
@ -69,10 +69,8 @@ static void TransformOne_C(const int16_t* WEBP_RESTRICT in,
const int dc = tmp[0] + 4; const int dc = tmp[0] + 4;
const int a = dc + tmp[8]; const int a = dc + tmp[8];
const int b = dc - tmp[8]; const int b = dc - tmp[8];
const int c = const int c = MUL2(tmp[4]) - MUL1(tmp[12]);
WEBP_TRANSFORM_AC3_MUL2(tmp[4]) - WEBP_TRANSFORM_AC3_MUL1(tmp[12]); const int d = MUL1(tmp[4]) + MUL2(tmp[12]);
const int d =
WEBP_TRANSFORM_AC3_MUL1(tmp[4]) + WEBP_TRANSFORM_AC3_MUL2(tmp[12]);
STORE(0, 0, a + d); STORE(0, 0, a + d);
STORE(1, 0, b + c); STORE(1, 0, b + c);
STORE(2, 0, b - c); STORE(2, 0, b - c);
@ -83,22 +81,22 @@ static void TransformOne_C(const int16_t* WEBP_RESTRICT in,
} }
// Simplified transform when only in[0], in[1] and in[4] are non-zero // Simplified transform when only in[0], in[1] and in[4] are non-zero
static void TransformAC3_C(const int16_t* WEBP_RESTRICT in, static void TransformAC3_C(const int16_t* in, uint8_t* dst) {
uint8_t* WEBP_RESTRICT dst) {
const int a = in[0] + 4; const int a = in[0] + 4;
const int c4 = WEBP_TRANSFORM_AC3_MUL2(in[4]); const int c4 = MUL2(in[4]);
const int d4 = WEBP_TRANSFORM_AC3_MUL1(in[4]); const int d4 = MUL1(in[4]);
const int c1 = WEBP_TRANSFORM_AC3_MUL2(in[1]); const int c1 = MUL2(in[1]);
const int d1 = WEBP_TRANSFORM_AC3_MUL1(in[1]); const int d1 = MUL1(in[1]);
STORE2(0, a + d4, d1, c1); STORE2(0, a + d4, d1, c1);
STORE2(1, a + c4, d1, c1); STORE2(1, a + c4, d1, c1);
STORE2(2, a - c4, d1, c1); STORE2(2, a - c4, d1, c1);
STORE2(3, a - d4, d1, c1); STORE2(3, a - d4, d1, c1);
} }
#undef MUL1
#undef MUL2
#undef STORE2 #undef STORE2
static void TransformTwo_C(const int16_t* WEBP_RESTRICT in, static void TransformTwo_C(const int16_t* in, uint8_t* dst, int do_two) {
uint8_t* WEBP_RESTRICT dst, int do_two) {
TransformOne_C(in, dst); TransformOne_C(in, dst);
if (do_two) { if (do_two) {
TransformOne_C(in + 16, dst + 4); TransformOne_C(in + 16, dst + 4);
@ -106,15 +104,13 @@ static void TransformTwo_C(const int16_t* WEBP_RESTRICT in,
} }
#endif // !WEBP_NEON_OMIT_C_CODE #endif // !WEBP_NEON_OMIT_C_CODE
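
Whether spelled MUL1/MUL2 (release branch) or WEBP_TRANSFORM_AC3_MUL1/MUL2 (main), the constants are the usual 16-bit fixed-point approximations used by the VP8 inverse transform: 20091/65536 is roughly sqrt(2)*cos(pi/8) - 1 and 35468/65536 is roughly sqrt(2)*sin(pi/8). A small self-check sketch:

#include <math.h>
#include <stdio.h>

int main(void) {
  const double pi = acos(-1.0);
  const double c1 = sqrt(2.0) * cos(pi / 8.0) - 1.0;  /* ~0.30656 */
  const double c2 = sqrt(2.0) * sin(pi / 8.0);        /* ~0.54120 */
  printf("20091/65536 = %.5f vs %.5f\n", 20091.0 / 65536.0, c1);
  printf("35468/65536 = %.5f vs %.5f\n", 35468.0 / 65536.0, c2);
  /* MUL1(a) = ((a * 20091) >> 16) + a  ~= a * sqrt(2) * cos(pi/8)
   * MUL2(a) = ((a * 35468) >> 16)      ~= a * sqrt(2) * sin(pi/8) */
  return 0;
}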
static void TransformUV_C(const int16_t* WEBP_RESTRICT in, static void TransformUV_C(const int16_t* in, uint8_t* dst) {
uint8_t* WEBP_RESTRICT dst) {
VP8Transform(in + 0 * 16, dst, 1); VP8Transform(in + 0 * 16, dst, 1);
VP8Transform(in + 2 * 16, dst + 4 * BPS, 1); VP8Transform(in + 2 * 16, dst + 4 * BPS, 1);
} }
#if !WEBP_NEON_OMIT_C_CODE #if !WEBP_NEON_OMIT_C_CODE
static void TransformDC_C(const int16_t* WEBP_RESTRICT in, static void TransformDC_C(const int16_t* in, uint8_t* dst) {
uint8_t* WEBP_RESTRICT dst) {
const int DC = in[0] + 4; const int DC = in[0] + 4;
int i, j; int i, j;
for (j = 0; j < 4; ++j) { for (j = 0; j < 4; ++j) {
@ -125,8 +121,7 @@ static void TransformDC_C(const int16_t* WEBP_RESTRICT in,
} }
#endif // !WEBP_NEON_OMIT_C_CODE #endif // !WEBP_NEON_OMIT_C_CODE
static void TransformDCUV_C(const int16_t* WEBP_RESTRICT in, static void TransformDCUV_C(const int16_t* in, uint8_t* dst) {
uint8_t* WEBP_RESTRICT dst) {
if (in[0 * 16]) VP8TransformDC(in + 0 * 16, dst); if (in[0 * 16]) VP8TransformDC(in + 0 * 16, dst);
if (in[1 * 16]) VP8TransformDC(in + 1 * 16, dst + 4); if (in[1 * 16]) VP8TransformDC(in + 1 * 16, dst + 4);
if (in[2 * 16]) VP8TransformDC(in + 2 * 16, dst + 4 * BPS); if (in[2 * 16]) VP8TransformDC(in + 2 * 16, dst + 4 * BPS);
@ -139,8 +134,7 @@ static void TransformDCUV_C(const int16_t* WEBP_RESTRICT in,
// Paragraph 14.3 // Paragraph 14.3
#if !WEBP_NEON_OMIT_C_CODE #if !WEBP_NEON_OMIT_C_CODE
static void TransformWHT_C(const int16_t* WEBP_RESTRICT in, static void TransformWHT_C(const int16_t* in, int16_t* out) {
int16_t* WEBP_RESTRICT out) {
int tmp[16]; int tmp[16];
int i; int i;
for (i = 0; i < 4; ++i) { for (i = 0; i < 4; ++i) {
@ -168,7 +162,7 @@ static void TransformWHT_C(const int16_t* WEBP_RESTRICT in,
} }
#endif // !WEBP_NEON_OMIT_C_CODE #endif // !WEBP_NEON_OMIT_C_CODE
VP8WHT VP8TransformWHT; void (*VP8TransformWHT)(const int16_t* in, int16_t* out);
//------------------------------------------------------------------------------ //------------------------------------------------------------------------------
// Intra predictions // Intra predictions
@ -668,32 +662,32 @@ static void HFilter16i_C(uint8_t* p, int stride,
#if !WEBP_NEON_OMIT_C_CODE #if !WEBP_NEON_OMIT_C_CODE
// 8-pixels wide variant, for chroma filtering // 8-pixels wide variant, for chroma filtering
static void VFilter8_C(uint8_t* WEBP_RESTRICT u, uint8_t* WEBP_RESTRICT v, static void VFilter8_C(uint8_t* u, uint8_t* v, int stride,
int stride, int thresh, int ithresh, int hev_thresh) { int thresh, int ithresh, int hev_thresh) {
FilterLoop26_C(u, stride, 1, 8, thresh, ithresh, hev_thresh); FilterLoop26_C(u, stride, 1, 8, thresh, ithresh, hev_thresh);
FilterLoop26_C(v, stride, 1, 8, thresh, ithresh, hev_thresh); FilterLoop26_C(v, stride, 1, 8, thresh, ithresh, hev_thresh);
} }
#endif // !WEBP_NEON_OMIT_C_CODE #endif // !WEBP_NEON_OMIT_C_CODE
#if !WEBP_NEON_OMIT_C_CODE || WEBP_NEON_WORK_AROUND_GCC #if !WEBP_NEON_OMIT_C_CODE || WEBP_NEON_WORK_AROUND_GCC
static void HFilter8_C(uint8_t* WEBP_RESTRICT u, uint8_t* WEBP_RESTRICT v, static void HFilter8_C(uint8_t* u, uint8_t* v, int stride,
int stride, int thresh, int ithresh, int hev_thresh) { int thresh, int ithresh, int hev_thresh) {
FilterLoop26_C(u, 1, stride, 8, thresh, ithresh, hev_thresh); FilterLoop26_C(u, 1, stride, 8, thresh, ithresh, hev_thresh);
FilterLoop26_C(v, 1, stride, 8, thresh, ithresh, hev_thresh); FilterLoop26_C(v, 1, stride, 8, thresh, ithresh, hev_thresh);
} }
#endif // !WEBP_NEON_OMIT_C_CODE || WEBP_NEON_WORK_AROUND_GCC #endif // !WEBP_NEON_OMIT_C_CODE || WEBP_NEON_WORK_AROUND_GCC
#if !WEBP_NEON_OMIT_C_CODE #if !WEBP_NEON_OMIT_C_CODE
static void VFilter8i_C(uint8_t* WEBP_RESTRICT u, uint8_t* WEBP_RESTRICT v, static void VFilter8i_C(uint8_t* u, uint8_t* v, int stride,
int stride, int thresh, int ithresh, int hev_thresh) { int thresh, int ithresh, int hev_thresh) {
FilterLoop24_C(u + 4 * stride, stride, 1, 8, thresh, ithresh, hev_thresh); FilterLoop24_C(u + 4 * stride, stride, 1, 8, thresh, ithresh, hev_thresh);
FilterLoop24_C(v + 4 * stride, stride, 1, 8, thresh, ithresh, hev_thresh); FilterLoop24_C(v + 4 * stride, stride, 1, 8, thresh, ithresh, hev_thresh);
} }
#endif // !WEBP_NEON_OMIT_C_CODE #endif // !WEBP_NEON_OMIT_C_CODE
#if !WEBP_NEON_OMIT_C_CODE || WEBP_NEON_WORK_AROUND_GCC #if !WEBP_NEON_OMIT_C_CODE || WEBP_NEON_WORK_AROUND_GCC
static void HFilter8i_C(uint8_t* WEBP_RESTRICT u, uint8_t* WEBP_RESTRICT v, static void HFilter8i_C(uint8_t* u, uint8_t* v, int stride,
int stride, int thresh, int ithresh, int hev_thresh) { int thresh, int ithresh, int hev_thresh) {
FilterLoop24_C(u + 4, 1, stride, 8, thresh, ithresh, hev_thresh); FilterLoop24_C(u + 4, 1, stride, 8, thresh, ithresh, hev_thresh);
FilterLoop24_C(v + 4, 1, stride, 8, thresh, ithresh, hev_thresh); FilterLoop24_C(v + 4, 1, stride, 8, thresh, ithresh, hev_thresh);
} }
@ -701,8 +695,8 @@ static void HFilter8i_C(uint8_t* WEBP_RESTRICT u, uint8_t* WEBP_RESTRICT v,
//------------------------------------------------------------------------------ //------------------------------------------------------------------------------
static void DitherCombine8x8_C(const uint8_t* WEBP_RESTRICT dither, static void DitherCombine8x8_C(const uint8_t* dither, uint8_t* dst,
uint8_t* WEBP_RESTRICT dst, int dst_stride) { int dst_stride) {
int i, j; int i, j;
for (j = 0; j < 8; ++j) { for (j = 0; j < 8; ++j) {
for (i = 0; i < 8; ++i) { for (i = 0; i < 8; ++i) {
@ -737,8 +731,8 @@ VP8SimpleFilterFunc VP8SimpleHFilter16;
VP8SimpleFilterFunc VP8SimpleVFilter16i; VP8SimpleFilterFunc VP8SimpleVFilter16i;
VP8SimpleFilterFunc VP8SimpleHFilter16i; VP8SimpleFilterFunc VP8SimpleHFilter16i;
void (*VP8DitherCombine8x8)(const uint8_t* WEBP_RESTRICT dither, void (*VP8DitherCombine8x8)(const uint8_t* dither, uint8_t* dst,
uint8_t* WEBP_RESTRICT dst, int dst_stride); int dst_stride);
extern VP8CPUInfo VP8GetCPUInfo; extern VP8CPUInfo VP8GetCPUInfo;
extern void VP8DspInitSSE2(void); extern void VP8DspInitSSE2(void);

View File

@ -18,8 +18,8 @@
#include "src/dsp/mips_macro.h" #include "src/dsp/mips_macro.h"
static const int kC1 = WEBP_TRANSFORM_AC3_C1; static const int kC1 = 20091 + (1 << 16);
static const int kC2 = WEBP_TRANSFORM_AC3_C2; static const int kC2 = 35468;
static WEBP_INLINE int abs_mips32(int x) { static WEBP_INLINE int abs_mips32(int x) {
const int sign = x >> 31; const int sign = x >> 31;
@ -133,26 +133,26 @@ static void HFilter16(uint8_t* p, int stride,
} }
// 8-pixels wide variant, for chroma filtering // 8-pixels wide variant, for chroma filtering
static void VFilter8(uint8_t* WEBP_RESTRICT u, uint8_t* WEBP_RESTRICT v, static void VFilter8(uint8_t* u, uint8_t* v, int stride,
int stride, int thresh, int ithresh, int hev_thresh) { int thresh, int ithresh, int hev_thresh) {
FilterLoop26(u, stride, 1, 8, thresh, ithresh, hev_thresh); FilterLoop26(u, stride, 1, 8, thresh, ithresh, hev_thresh);
FilterLoop26(v, stride, 1, 8, thresh, ithresh, hev_thresh); FilterLoop26(v, stride, 1, 8, thresh, ithresh, hev_thresh);
} }
static void HFilter8(uint8_t* WEBP_RESTRICT u, uint8_t* WEBP_RESTRICT v, static void HFilter8(uint8_t* u, uint8_t* v, int stride,
int stride, int thresh, int ithresh, int hev_thresh) { int thresh, int ithresh, int hev_thresh) {
FilterLoop26(u, 1, stride, 8, thresh, ithresh, hev_thresh); FilterLoop26(u, 1, stride, 8, thresh, ithresh, hev_thresh);
FilterLoop26(v, 1, stride, 8, thresh, ithresh, hev_thresh); FilterLoop26(v, 1, stride, 8, thresh, ithresh, hev_thresh);
} }
static void VFilter8i(uint8_t* WEBP_RESTRICT u, uint8_t* WEBP_RESTRICT v, static void VFilter8i(uint8_t* u, uint8_t* v, int stride,
int stride, int thresh, int ithresh, int hev_thresh) { int thresh, int ithresh, int hev_thresh) {
FilterLoop24(u + 4 * stride, stride, 1, 8, thresh, ithresh, hev_thresh); FilterLoop24(u + 4 * stride, stride, 1, 8, thresh, ithresh, hev_thresh);
FilterLoop24(v + 4 * stride, stride, 1, 8, thresh, ithresh, hev_thresh); FilterLoop24(v + 4 * stride, stride, 1, 8, thresh, ithresh, hev_thresh);
} }
static void HFilter8i(uint8_t* WEBP_RESTRICT u, uint8_t* WEBP_RESTRICT v, static void HFilter8i(uint8_t* u, uint8_t* v, int stride,
int stride, int thresh, int ithresh, int hev_thresh) { int thresh, int ithresh, int hev_thresh) {
FilterLoop24(u + 4, 1, stride, 8, thresh, ithresh, hev_thresh); FilterLoop24(u + 4, 1, stride, 8, thresh, ithresh, hev_thresh);
FilterLoop24(v + 4, 1, stride, 8, thresh, ithresh, hev_thresh); FilterLoop24(v + 4, 1, stride, 8, thresh, ithresh, hev_thresh);
} }
@ -215,12 +215,11 @@ static void SimpleHFilter16i(uint8_t* p, int stride, int thresh) {
} }
} }
static void TransformOne(const int16_t* WEBP_RESTRICT in, static void TransformOne(const int16_t* in, uint8_t* dst) {
uint8_t* WEBP_RESTRICT dst) {
int temp0, temp1, temp2, temp3, temp4; int temp0, temp1, temp2, temp3, temp4;
int temp5, temp6, temp7, temp8, temp9; int temp5, temp6, temp7, temp8, temp9;
int temp10, temp11, temp12, temp13, temp14; int temp10, temp11, temp12, temp13, temp14;
int temp15, temp16, temp17, temp18, temp19; int temp15, temp16, temp17, temp18;
int16_t* p_in = (int16_t*)in; int16_t* p_in = (int16_t*)in;
// loops unrolled and merged to avoid usage of tmp buffer // loops unrolled and merged to avoid usage of tmp buffer
@ -234,14 +233,16 @@ static void TransformOne(const int16_t* WEBP_RESTRICT in,
"addu %[temp16], %[temp0], %[temp8] \n\t" "addu %[temp16], %[temp0], %[temp8] \n\t"
"subu %[temp0], %[temp0], %[temp8] \n\t" "subu %[temp0], %[temp0], %[temp8] \n\t"
"mul %[temp8], %[temp4], %[kC2] \n\t" "mul %[temp8], %[temp4], %[kC2] \n\t"
MUL_SHIFT_C1(temp17, temp12) "mul %[temp17], %[temp12], %[kC1] \n\t"
MUL_SHIFT_C1_IO(temp4, temp19) "mul %[temp4], %[temp4], %[kC1] \n\t"
"mul %[temp12], %[temp12], %[kC2] \n\t" "mul %[temp12], %[temp12], %[kC2] \n\t"
"lh %[temp1], 2(%[in]) \n\t" "lh %[temp1], 2(%[in]) \n\t"
"lh %[temp5], 10(%[in]) \n\t" "lh %[temp5], 10(%[in]) \n\t"
"lh %[temp9], 18(%[in]) \n\t" "lh %[temp9], 18(%[in]) \n\t"
"lh %[temp13], 26(%[in]) \n\t" "lh %[temp13], 26(%[in]) \n\t"
"sra %[temp8], %[temp8], 16 \n\t" "sra %[temp8], %[temp8], 16 \n\t"
"sra %[temp17], %[temp17], 16 \n\t"
"sra %[temp4], %[temp4], 16 \n\t"
"sra %[temp12], %[temp12], 16 \n\t" "sra %[temp12], %[temp12], 16 \n\t"
"lh %[temp2], 4(%[in]) \n\t" "lh %[temp2], 4(%[in]) \n\t"
"lh %[temp6], 12(%[in]) \n\t" "lh %[temp6], 12(%[in]) \n\t"
@ -260,43 +261,49 @@ static void TransformOne(const int16_t* WEBP_RESTRICT in,
"addu %[temp12], %[temp0], %[temp17] \n\t" "addu %[temp12], %[temp0], %[temp17] \n\t"
"subu %[temp0], %[temp0], %[temp17] \n\t" "subu %[temp0], %[temp0], %[temp17] \n\t"
"mul %[temp9], %[temp5], %[kC2] \n\t" "mul %[temp9], %[temp5], %[kC2] \n\t"
MUL_SHIFT_C1(temp17, temp13) "mul %[temp17], %[temp13], %[kC1] \n\t"
MUL_SHIFT_C1_IO(temp5, temp19) "mul %[temp5], %[temp5], %[kC1] \n\t"
"mul %[temp13], %[temp13], %[kC2] \n\t" "mul %[temp13], %[temp13], %[kC2] \n\t"
"sra %[temp9], %[temp9], 16 \n\t" "sra %[temp9], %[temp9], 16 \n\t"
"sra %[temp17], %[temp17], 16 \n\t"
"subu %[temp17], %[temp9], %[temp17] \n\t" "subu %[temp17], %[temp9], %[temp17] \n\t"
"sra %[temp5], %[temp5], 16 \n\t"
"sra %[temp13], %[temp13], 16 \n\t" "sra %[temp13], %[temp13], 16 \n\t"
"addu %[temp5], %[temp5], %[temp13] \n\t" "addu %[temp5], %[temp5], %[temp13] \n\t"
"addu %[temp13], %[temp1], %[temp17] \n\t" "addu %[temp13], %[temp1], %[temp17] \n\t"
"subu %[temp1], %[temp1], %[temp17] \n\t" "subu %[temp1], %[temp1], %[temp17] \n\t"
MUL_SHIFT_C1(temp17, temp14) "mul %[temp17], %[temp14], %[kC1] \n\t"
"mul %[temp14], %[temp14], %[kC2] \n\t" "mul %[temp14], %[temp14], %[kC2] \n\t"
"addu %[temp9], %[temp16], %[temp5] \n\t" "addu %[temp9], %[temp16], %[temp5] \n\t"
"subu %[temp5], %[temp16], %[temp5] \n\t" "subu %[temp5], %[temp16], %[temp5] \n\t"
"addu %[temp16], %[temp2], %[temp10] \n\t" "addu %[temp16], %[temp2], %[temp10] \n\t"
"subu %[temp2], %[temp2], %[temp10] \n\t" "subu %[temp2], %[temp2], %[temp10] \n\t"
"mul %[temp10], %[temp6], %[kC2] \n\t" "mul %[temp10], %[temp6], %[kC2] \n\t"
MUL_SHIFT_C1_IO(temp6, temp19) "mul %[temp6], %[temp6], %[kC1] \n\t"
"sra %[temp17], %[temp17], 16 \n\t"
"sra %[temp14], %[temp14], 16 \n\t" "sra %[temp14], %[temp14], 16 \n\t"
"sra %[temp10], %[temp10], 16 \n\t" "sra %[temp10], %[temp10], 16 \n\t"
"sra %[temp6], %[temp6], 16 \n\t"
"subu %[temp17], %[temp10], %[temp17] \n\t" "subu %[temp17], %[temp10], %[temp17] \n\t"
"addu %[temp6], %[temp6], %[temp14] \n\t" "addu %[temp6], %[temp6], %[temp14] \n\t"
"addu %[temp10], %[temp16], %[temp6] \n\t" "addu %[temp10], %[temp16], %[temp6] \n\t"
"subu %[temp6], %[temp16], %[temp6] \n\t" "subu %[temp6], %[temp16], %[temp6] \n\t"
"addu %[temp14], %[temp2], %[temp17] \n\t" "addu %[temp14], %[temp2], %[temp17] \n\t"
"subu %[temp2], %[temp2], %[temp17] \n\t" "subu %[temp2], %[temp2], %[temp17] \n\t"
MUL_SHIFT_C1(temp17, temp15) "mul %[temp17], %[temp15], %[kC1] \n\t"
"mul %[temp15], %[temp15], %[kC2] \n\t" "mul %[temp15], %[temp15], %[kC2] \n\t"
"addu %[temp16], %[temp3], %[temp11] \n\t" "addu %[temp16], %[temp3], %[temp11] \n\t"
"subu %[temp3], %[temp3], %[temp11] \n\t" "subu %[temp3], %[temp3], %[temp11] \n\t"
"mul %[temp11], %[temp7], %[kC2] \n\t" "mul %[temp11], %[temp7], %[kC2] \n\t"
MUL_SHIFT_C1_IO(temp7, temp19) "mul %[temp7], %[temp7], %[kC1] \n\t"
"addiu %[temp8], %[temp8], 4 \n\t" "addiu %[temp8], %[temp8], 4 \n\t"
"addiu %[temp12], %[temp12], 4 \n\t" "addiu %[temp12], %[temp12], 4 \n\t"
"addiu %[temp0], %[temp0], 4 \n\t" "addiu %[temp0], %[temp0], 4 \n\t"
"addiu %[temp4], %[temp4], 4 \n\t" "addiu %[temp4], %[temp4], 4 \n\t"
"sra %[temp17], %[temp17], 16 \n\t"
"sra %[temp15], %[temp15], 16 \n\t" "sra %[temp15], %[temp15], 16 \n\t"
"sra %[temp11], %[temp11], 16 \n\t" "sra %[temp11], %[temp11], 16 \n\t"
"sra %[temp7], %[temp7], 16 \n\t"
"subu %[temp17], %[temp11], %[temp17] \n\t" "subu %[temp17], %[temp11], %[temp17] \n\t"
"addu %[temp7], %[temp7], %[temp15] \n\t" "addu %[temp7], %[temp7], %[temp15] \n\t"
"addu %[temp15], %[temp3], %[temp17] \n\t" "addu %[temp15], %[temp3], %[temp17] \n\t"
@ -306,40 +313,48 @@ static void TransformOne(const int16_t* WEBP_RESTRICT in,
"addu %[temp16], %[temp8], %[temp10] \n\t" "addu %[temp16], %[temp8], %[temp10] \n\t"
"subu %[temp8], %[temp8], %[temp10] \n\t" "subu %[temp8], %[temp8], %[temp10] \n\t"
"mul %[temp10], %[temp9], %[kC2] \n\t" "mul %[temp10], %[temp9], %[kC2] \n\t"
MUL_SHIFT_C1(temp17, temp11) "mul %[temp17], %[temp11], %[kC1] \n\t"
MUL_SHIFT_C1_IO(temp9, temp19) "mul %[temp9], %[temp9], %[kC1] \n\t"
"mul %[temp11], %[temp11], %[kC2] \n\t" "mul %[temp11], %[temp11], %[kC2] \n\t"
"sra %[temp10], %[temp10], 16 \n\t" "sra %[temp10], %[temp10], 16 \n\t"
"sra %[temp17], %[temp17], 16 \n\t"
"sra %[temp9], %[temp9], 16 \n\t"
"sra %[temp11], %[temp11], 16 \n\t" "sra %[temp11], %[temp11], 16 \n\t"
"subu %[temp17], %[temp10], %[temp17] \n\t" "subu %[temp17], %[temp10], %[temp17] \n\t"
"addu %[temp11], %[temp9], %[temp11] \n\t" "addu %[temp11], %[temp9], %[temp11] \n\t"
"addu %[temp10], %[temp12], %[temp14] \n\t" "addu %[temp10], %[temp12], %[temp14] \n\t"
"subu %[temp12], %[temp12], %[temp14] \n\t" "subu %[temp12], %[temp12], %[temp14] \n\t"
"mul %[temp14], %[temp13], %[kC2] \n\t" "mul %[temp14], %[temp13], %[kC2] \n\t"
MUL_SHIFT_C1(temp9, temp15) "mul %[temp9], %[temp15], %[kC1] \n\t"
MUL_SHIFT_C1_IO(temp13, temp19) "mul %[temp13], %[temp13], %[kC1] \n\t"
"mul %[temp15], %[temp15], %[kC2] \n\t" "mul %[temp15], %[temp15], %[kC2] \n\t"
"sra %[temp14], %[temp14], 16 \n\t" "sra %[temp14], %[temp14], 16 \n\t"
"sra %[temp9], %[temp9], 16 \n\t"
"sra %[temp13], %[temp13], 16 \n\t"
"sra %[temp15], %[temp15], 16 \n\t" "sra %[temp15], %[temp15], 16 \n\t"
"subu %[temp9], %[temp14], %[temp9] \n\t" "subu %[temp9], %[temp14], %[temp9] \n\t"
"addu %[temp15], %[temp13], %[temp15] \n\t" "addu %[temp15], %[temp13], %[temp15] \n\t"
"addu %[temp14], %[temp0], %[temp2] \n\t" "addu %[temp14], %[temp0], %[temp2] \n\t"
"subu %[temp0], %[temp0], %[temp2] \n\t" "subu %[temp0], %[temp0], %[temp2] \n\t"
"mul %[temp2], %[temp1], %[kC2] \n\t" "mul %[temp2], %[temp1], %[kC2] \n\t"
MUL_SHIFT_C1(temp13, temp3) "mul %[temp13], %[temp3], %[kC1] \n\t"
MUL_SHIFT_C1_IO(temp1, temp19) "mul %[temp1], %[temp1], %[kC1] \n\t"
"mul %[temp3], %[temp3], %[kC2] \n\t" "mul %[temp3], %[temp3], %[kC2] \n\t"
"sra %[temp2], %[temp2], 16 \n\t" "sra %[temp2], %[temp2], 16 \n\t"
"sra %[temp13], %[temp13], 16 \n\t"
"sra %[temp1], %[temp1], 16 \n\t"
"sra %[temp3], %[temp3], 16 \n\t" "sra %[temp3], %[temp3], 16 \n\t"
"subu %[temp13], %[temp2], %[temp13] \n\t" "subu %[temp13], %[temp2], %[temp13] \n\t"
"addu %[temp3], %[temp1], %[temp3] \n\t" "addu %[temp3], %[temp1], %[temp3] \n\t"
"addu %[temp2], %[temp4], %[temp6] \n\t" "addu %[temp2], %[temp4], %[temp6] \n\t"
"subu %[temp4], %[temp4], %[temp6] \n\t" "subu %[temp4], %[temp4], %[temp6] \n\t"
"mul %[temp6], %[temp5], %[kC2] \n\t" "mul %[temp6], %[temp5], %[kC2] \n\t"
MUL_SHIFT_C1(temp1, temp7) "mul %[temp1], %[temp7], %[kC1] \n\t"
MUL_SHIFT_C1_IO(temp5, temp19) "mul %[temp5], %[temp5], %[kC1] \n\t"
"mul %[temp7], %[temp7], %[kC2] \n\t" "mul %[temp7], %[temp7], %[kC2] \n\t"
"sra %[temp6], %[temp6], 16 \n\t" "sra %[temp6], %[temp6], 16 \n\t"
"sra %[temp1], %[temp1], 16 \n\t"
"sra %[temp5], %[temp5], 16 \n\t"
"sra %[temp7], %[temp7], 16 \n\t" "sra %[temp7], %[temp7], 16 \n\t"
"subu %[temp1], %[temp6], %[temp1] \n\t" "subu %[temp1], %[temp6], %[temp1] \n\t"
"addu %[temp7], %[temp5], %[temp7] \n\t" "addu %[temp7], %[temp5], %[temp7] \n\t"
@ -527,14 +542,13 @@ static void TransformOne(const int16_t* WEBP_RESTRICT in,
[temp9]"=&r"(temp9), [temp10]"=&r"(temp10), [temp11]"=&r"(temp11), [temp9]"=&r"(temp9), [temp10]"=&r"(temp10), [temp11]"=&r"(temp11),
[temp12]"=&r"(temp12), [temp13]"=&r"(temp13), [temp14]"=&r"(temp14), [temp12]"=&r"(temp12), [temp13]"=&r"(temp13), [temp14]"=&r"(temp14),
[temp15]"=&r"(temp15), [temp16]"=&r"(temp16), [temp17]"=&r"(temp17), [temp15]"=&r"(temp15), [temp16]"=&r"(temp16), [temp17]"=&r"(temp17),
[temp18]"=&r"(temp18), [temp19]"=&r"(temp19) [temp18]"=&r"(temp18)
: [in]"r"(p_in), [kC1]"r"(kC1), [kC2]"r"(kC2), [dst]"r"(dst) : [in]"r"(p_in), [kC1]"r"(kC1), [kC2]"r"(kC2), [dst]"r"(dst)
: "memory", "hi", "lo" : "memory", "hi", "lo"
); );
} }
static void TransformTwo(const int16_t* WEBP_RESTRICT in, static void TransformTwo(const int16_t* in, uint8_t* dst, int do_two) {
uint8_t* WEBP_RESTRICT dst, int do_two) {
TransformOne(in, dst); TransformOne(in, dst);
if (do_two) { if (do_two) {
TransformOne(in + 16, dst + 4); TransformOne(in + 16, dst + 4);
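(Aside on the MUL_SHIFT_C1 / MUL_SHIFT_C1_IO macros introduced above: judging only from the right-hand column, each call replaces a mul by kC1 that was paired with a later sra by 16, and the _IO form works in place using the new temp19 scratch register. The sketch below captures only that net arithmetic with hypothetical C definitions; the real macros emit MIPS assembly and may compute the product differently.)

#include <stdio.h>

static const int kC1 = 20091 + (1 << 16);  /* constant as used in the old column */

/* Hypothetical C equivalents of the net effect visible in this hunk: each
 * removed "mul ..., kC1" was paired with a removed "sra ..., 16".  The new
 * temp19 operand is a scratch register for the in-place (_IO) variant. */
#define MUL_SHIFT_C1(out, in)        ((out) = ((in) * kC1) >> 16)
#define MUL_SHIFT_C1_IO(io, scratch) ((scratch) = (io), (io) = ((scratch) * kC1) >> 16)

int main(void) {
  int a = 217, b, s;
  MUL_SHIFT_C1(b, a);       /* b = (a * kC1) >> 16 = a + ((a * 20091) >> 16) */
  MUL_SHIFT_C1_IO(a, s);    /* same computation, in place */
  printf("%d %d\n", b, a);  /* both print 283 */
  return 0;
}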


@ -18,11 +18,12 @@
#include "src/dsp/mips_macro.h" #include "src/dsp/mips_macro.h"
static const int kC1 = WEBP_TRANSFORM_AC3_C1; static const int kC1 = 20091 + (1 << 16);
static const int kC2 = WEBP_TRANSFORM_AC3_C2; static const int kC2 = 35468;
static void TransformDC(const int16_t* WEBP_RESTRICT in, #define MUL(a, b) (((a) * (b)) >> 16)
uint8_t* WEBP_RESTRICT dst) {
static void TransformDC(const int16_t* in, uint8_t* dst) {
int temp1, temp2, temp3, temp4, temp5, temp6, temp7, temp8, temp9, temp10; int temp1, temp2, temp3, temp4, temp5, temp6, temp7, temp8, temp9, temp10;
__asm__ volatile ( __asm__ volatile (
@ -46,13 +47,12 @@ static void TransformDC(const int16_t* WEBP_RESTRICT in,
); );
} }
static void TransformAC3(const int16_t* WEBP_RESTRICT in, static void TransformAC3(const int16_t* in, uint8_t* dst) {
uint8_t* WEBP_RESTRICT dst) {
const int a = in[0] + 4; const int a = in[0] + 4;
int c4 = WEBP_TRANSFORM_AC3_MUL2(in[4]); int c4 = MUL(in[4], kC2);
const int d4 = WEBP_TRANSFORM_AC3_MUL1(in[4]); const int d4 = MUL(in[4], kC1);
const int c1 = WEBP_TRANSFORM_AC3_MUL2(in[1]); const int c1 = MUL(in[1], kC2);
const int d1 = WEBP_TRANSFORM_AC3_MUL1(in[1]); const int d1 = MUL(in[1], kC1);
int temp1, temp2, temp3, temp4, temp5, temp6, temp7, temp8, temp9; int temp1, temp2, temp3, temp4, temp5, temp6, temp7, temp8, temp9;
int temp10, temp11, temp12, temp13, temp14, temp15, temp16, temp17, temp18; int temp10, temp11, temp12, temp13, temp14, temp15, temp16, temp17, temp18;
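(The shared WEBP_TRANSFORM_AC3_* names on the left replace per-file copies of the same fixed-point helpers; the removed definitions in this diff (kC1, kC2 and MUL here, MULT1/MULT2 in the MSA file further down) pin down the arithmetic they stand for. A sketch with hypothetical local names, since the exact wording of the shared header is not shown here:)

/* Reconstructed from the right-hand (old) column only; the real
 * WEBP_TRANSFORM_AC3_* definitions may be written differently. */
#define AC3_C1 20091                                /* sqrt(2)*cos(pi/8) - 1, in Q16 */
#define AC3_C2 35468                                /* sqrt(2)*sin(pi/8),     in Q16 */
#define AC3_MUL1(a) ((((a) * AC3_C1) >> 16) + (a))  /* ~ a * 1.30656 */
#define AC3_MUL2(a) (((a) * AC3_C2) >> 16)          /* ~ a * 0.54120 */

(For small coefficients AC3_MUL1 matches the (a * (20091 + (1 << 16))) >> 16 form of the old MIPS code; the split form avoids the 32-bit overflow the combined constant can hit for large inputs.)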
@ -83,8 +83,7 @@ static void TransformAC3(const int16_t* WEBP_RESTRICT in,
); );
} }
static void TransformOne(const int16_t* WEBP_RESTRICT in, static void TransformOne(const int16_t* in, uint8_t* dst) {
uint8_t* WEBP_RESTRICT dst) {
int temp1, temp2, temp3, temp4, temp5, temp6, temp7, temp8, temp9; int temp1, temp2, temp3, temp4, temp5, temp6, temp7, temp8, temp9;
int temp10, temp11, temp12, temp13, temp14, temp15, temp16, temp17, temp18; int temp10, temp11, temp12, temp13, temp14, temp15, temp16, temp17, temp18;
@ -151,8 +150,7 @@ static void TransformOne(const int16_t* WEBP_RESTRICT in,
); );
} }
static void TransformTwo(const int16_t* WEBP_RESTRICT in, static void TransformTwo(const int16_t* in, uint8_t* dst, int do_two) {
uint8_t* WEBP_RESTRICT dst, int do_two) {
TransformOne(in, dst); TransformOne(in, dst);
if (do_two) { if (do_two) {
TransformOne(in + 16, dst + 4); TransformOne(in + 16, dst + 4);
@ -438,14 +436,14 @@ static void HFilter16(uint8_t* p, int stride,
} }
// 8-pixels wide variant, for chroma filtering // 8-pixels wide variant, for chroma filtering
static void VFilter8(uint8_t* WEBP_RESTRICT u, uint8_t* WEBP_RESTRICT v, static void VFilter8(uint8_t* u, uint8_t* v, int stride,
int stride, int thresh, int ithresh, int hev_thresh) { int thresh, int ithresh, int hev_thresh) {
FilterLoop26(u, stride, 1, 8, thresh, ithresh, hev_thresh); FilterLoop26(u, stride, 1, 8, thresh, ithresh, hev_thresh);
FilterLoop26(v, stride, 1, 8, thresh, ithresh, hev_thresh); FilterLoop26(v, stride, 1, 8, thresh, ithresh, hev_thresh);
} }
static void HFilter8(uint8_t* WEBP_RESTRICT u, uint8_t* WEBP_RESTRICT v, static void HFilter8(uint8_t* u, uint8_t* v, int stride,
int stride, int thresh, int ithresh, int hev_thresh) { int thresh, int ithresh, int hev_thresh) {
FilterLoop26(u, 1, stride, 8, thresh, ithresh, hev_thresh); FilterLoop26(u, 1, stride, 8, thresh, ithresh, hev_thresh);
FilterLoop26(v, 1, stride, 8, thresh, ithresh, hev_thresh); FilterLoop26(v, 1, stride, 8, thresh, ithresh, hev_thresh);
} }
@ -469,18 +467,20 @@ static void HFilter16i(uint8_t* p, int stride,
} }
} }
static void VFilter8i(uint8_t* WEBP_RESTRICT u, uint8_t* WEBP_RESTRICT v, static void VFilter8i(uint8_t* u, uint8_t* v, int stride,
int stride, int thresh, int ithresh, int hev_thresh) { int thresh, int ithresh, int hev_thresh) {
FilterLoop24(u + 4 * stride, stride, 1, 8, thresh, ithresh, hev_thresh); FilterLoop24(u + 4 * stride, stride, 1, 8, thresh, ithresh, hev_thresh);
FilterLoop24(v + 4 * stride, stride, 1, 8, thresh, ithresh, hev_thresh); FilterLoop24(v + 4 * stride, stride, 1, 8, thresh, ithresh, hev_thresh);
} }
static void HFilter8i(uint8_t* WEBP_RESTRICT u, uint8_t* WEBP_RESTRICT v, static void HFilter8i(uint8_t* u, uint8_t* v, int stride,
int stride, int thresh, int ithresh, int hev_thresh) { int thresh, int ithresh, int hev_thresh) {
FilterLoop24(u + 4, 1, stride, 8, thresh, ithresh, hev_thresh); FilterLoop24(u + 4, 1, stride, 8, thresh, ithresh, hev_thresh);
FilterLoop24(v + 4, 1, stride, 8, thresh, ithresh, hev_thresh); FilterLoop24(v + 4, 1, stride, 8, thresh, ithresh, hev_thresh);
} }
#undef MUL
//------------------------------------------------------------------------------ //------------------------------------------------------------------------------
// Simple In-loop filtering (Paragraph 15.2) // Simple In-loop filtering (Paragraph 15.2)


@ -37,9 +37,10 @@
d1_m = d_tmp1_m + d_tmp2_m; \ d1_m = d_tmp1_m + d_tmp2_m; \
BUTTERFLY_4(a1_m, b1_m, c1_m, d1_m, out0, out1, out2, out3); \ BUTTERFLY_4(a1_m, b1_m, c1_m, d1_m, out0, out1, out2, out3); \
} }
#define MULT1(a) ((((a) * 20091) >> 16) + (a))
#define MULT2(a) (((a) * 35468) >> 16)
static void TransformOne(const int16_t* WEBP_RESTRICT in, static void TransformOne(const int16_t* in, uint8_t* dst) {
uint8_t* WEBP_RESTRICT dst) {
v8i16 input0, input1; v8i16 input0, input1;
v4i32 in0, in1, in2, in3, hz0, hz1, hz2, hz3, vt0, vt1, vt2, vt3; v4i32 in0, in1, in2, in3, hz0, hz1, hz2, hz3, vt0, vt1, vt2, vt3;
v4i32 res0, res1, res2, res3; v4i32 res0, res1, res2, res3;
@ -66,16 +67,14 @@ static void TransformOne(const int16_t* WEBP_RESTRICT in,
ST4x4_UB(res0, res0, 3, 2, 1, 0, dst, BPS); ST4x4_UB(res0, res0, 3, 2, 1, 0, dst, BPS);
} }
static void TransformTwo(const int16_t* WEBP_RESTRICT in, static void TransformTwo(const int16_t* in, uint8_t* dst, int do_two) {
uint8_t* WEBP_RESTRICT dst, int do_two) {
TransformOne(in, dst); TransformOne(in, dst);
if (do_two) { if (do_two) {
TransformOne(in + 16, dst + 4); TransformOne(in + 16, dst + 4);
} }
} }
static void TransformWHT(const int16_t* WEBP_RESTRICT in, static void TransformWHT(const int16_t* in, int16_t* out) {
int16_t* WEBP_RESTRICT out) {
v8i16 input0, input1; v8i16 input0, input1;
const v8i16 mask0 = { 0, 1, 2, 3, 8, 9, 10, 11 }; const v8i16 mask0 = { 0, 1, 2, 3, 8, 9, 10, 11 };
const v8i16 mask1 = { 4, 5, 6, 7, 12, 13, 14, 15 }; const v8i16 mask1 = { 4, 5, 6, 7, 12, 13, 14, 15 };
@ -117,20 +116,18 @@ static void TransformWHT(const int16_t* WEBP_RESTRICT in,
out[240] = __msa_copy_s_h(out1, 7); out[240] = __msa_copy_s_h(out1, 7);
} }
static void TransformDC(const int16_t* WEBP_RESTRICT in, static void TransformDC(const int16_t* in, uint8_t* dst) {
uint8_t* WEBP_RESTRICT dst) {
const int DC = (in[0] + 4) >> 3; const int DC = (in[0] + 4) >> 3;
const v8i16 tmp0 = __msa_fill_h(DC); const v8i16 tmp0 = __msa_fill_h(DC);
ADDBLK_ST4x4_UB(tmp0, tmp0, tmp0, tmp0, dst, BPS); ADDBLK_ST4x4_UB(tmp0, tmp0, tmp0, tmp0, dst, BPS);
} }
static void TransformAC3(const int16_t* WEBP_RESTRICT in, static void TransformAC3(const int16_t* in, uint8_t* dst) {
uint8_t* WEBP_RESTRICT dst) {
const int a = in[0] + 4; const int a = in[0] + 4;
const int c4 = WEBP_TRANSFORM_AC3_MUL2(in[4]); const int c4 = MULT2(in[4]);
const int d4 = WEBP_TRANSFORM_AC3_MUL1(in[4]); const int d4 = MULT1(in[4]);
const int in2 = WEBP_TRANSFORM_AC3_MUL2(in[1]); const int in2 = MULT2(in[1]);
const int in3 = WEBP_TRANSFORM_AC3_MUL1(in[1]); const int in3 = MULT1(in[1]);
v4i32 tmp0 = { 0 }; v4i32 tmp0 = { 0 };
v4i32 out0 = __msa_fill_w(a + d4); v4i32 out0 = __msa_fill_w(a + d4);
v4i32 out1 = __msa_fill_w(a + c4); v4i32 out1 = __msa_fill_w(a + c4);
@ -480,8 +477,8 @@ static void HFilter16i(uint8_t* src_y, int stride,
} }
// 8-pixels wide variants, for chroma filtering // 8-pixels wide variants, for chroma filtering
static void VFilter8(uint8_t* WEBP_RESTRICT src_u, uint8_t* WEBP_RESTRICT src_v, static void VFilter8(uint8_t* src_u, uint8_t* src_v, int stride,
int stride, int b_limit_in, int limit_in, int thresh_in) { int b_limit_in, int limit_in, int thresh_in) {
uint8_t* ptmp_src_u = src_u - 4 * stride; uint8_t* ptmp_src_u = src_u - 4 * stride;
uint8_t* ptmp_src_v = src_v - 4 * stride; uint8_t* ptmp_src_v = src_v - 4 * stride;
uint64_t p2_d, p1_d, p0_d, q0_d, q1_d, q2_d; uint64_t p2_d, p1_d, p0_d, q0_d, q1_d, q2_d;
@ -525,8 +522,8 @@ static void VFilter8(uint8_t* WEBP_RESTRICT src_u, uint8_t* WEBP_RESTRICT src_v,
SD(q2_d, ptmp_src_v); SD(q2_d, ptmp_src_v);
} }
static void HFilter8(uint8_t* WEBP_RESTRICT src_u, uint8_t* WEBP_RESTRICT src_v, static void HFilter8(uint8_t* src_u, uint8_t* src_v, int stride,
int stride, int b_limit_in, int limit_in, int thresh_in) { int b_limit_in, int limit_in, int thresh_in) {
uint8_t* ptmp_src_u = src_u - 4; uint8_t* ptmp_src_u = src_u - 4;
uint8_t* ptmp_src_v = src_v - 4; uint8_t* ptmp_src_v = src_v - 4;
v16u8 p3, p2, p1, p0, q3, q2, q1, q0, mask, hev; v16u8 p3, p2, p1, p0, q3, q2, q1, q0, mask, hev;
@ -561,8 +558,7 @@ static void HFilter8(uint8_t* WEBP_RESTRICT src_u, uint8_t* WEBP_RESTRICT src_v,
ST6x4_UB(tmp7, 0, tmp5, 4, ptmp_src_v, stride); ST6x4_UB(tmp7, 0, tmp5, 4, ptmp_src_v, stride);
} }
static void VFilter8i(uint8_t* WEBP_RESTRICT src_u, static void VFilter8i(uint8_t* src_u, uint8_t* src_v, int stride,
uint8_t* WEBP_RESTRICT src_v, int stride,
int b_limit_in, int limit_in, int thresh_in) { int b_limit_in, int limit_in, int thresh_in) {
uint64_t p1_d, p0_d, q0_d, q1_d; uint64_t p1_d, p0_d, q0_d, q1_d;
v16u8 p3, p2, p1, p0, q3, q2, q1, q0, mask, hev; v16u8 p3, p2, p1, p0, q3, q2, q1, q0, mask, hev;
@ -593,8 +589,7 @@ static void VFilter8i(uint8_t* WEBP_RESTRICT src_u,
SD4(q1_d, q0_d, p0_d, p1_d, src_v, -stride); SD4(q1_d, q0_d, p0_d, p1_d, src_v, -stride);
} }
static void HFilter8i(uint8_t* WEBP_RESTRICT src_u, static void HFilter8i(uint8_t* src_u, uint8_t* src_v, int stride,
uint8_t* WEBP_RESTRICT src_v, int stride,
int b_limit_in, int limit_in, int thresh_in) { int b_limit_in, int limit_in, int thresh_in) {
v16u8 p3, p2, p1, p0, q3, q2, q1, q0, mask, hev; v16u8 p3, p2, p1, p0, q3, q2, q1, q0, mask, hev;
v16u8 row0, row1, row2, row3, row4, row5, row6, row7, row8; v16u8 row0, row1, row2, row3, row4, row5, row6, row7, row8;


@ -916,8 +916,8 @@ static void HFilter16i_NEON(uint8_t* p, int stride,
#endif // !WORK_AROUND_GCC #endif // !WORK_AROUND_GCC
// 8-pixels wide variant, for chroma filtering // 8-pixels wide variant, for chroma filtering
static void VFilter8_NEON(uint8_t* WEBP_RESTRICT u, uint8_t* WEBP_RESTRICT v, static void VFilter8_NEON(uint8_t* u, uint8_t* v, int stride,
int stride, int thresh, int ithresh, int hev_thresh) { int thresh, int ithresh, int hev_thresh) {
uint8x16_t p3, p2, p1, p0, q0, q1, q2, q3; uint8x16_t p3, p2, p1, p0, q0, q1, q2, q3;
Load8x8x2_NEON(u, v, stride, &p3, &p2, &p1, &p0, &q0, &q1, &q2, &q3); Load8x8x2_NEON(u, v, stride, &p3, &p2, &p1, &p0, &q0, &q1, &q2, &q3);
{ {
@ -932,8 +932,7 @@ static void VFilter8_NEON(uint8_t* WEBP_RESTRICT u, uint8_t* WEBP_RESTRICT v,
Store8x2x2_NEON(oq1, oq2, u + 2 * stride, v + 2 * stride, stride); Store8x2x2_NEON(oq1, oq2, u + 2 * stride, v + 2 * stride, stride);
} }
} }
static void VFilter8i_NEON(uint8_t* WEBP_RESTRICT u, uint8_t* WEBP_RESTRICT v, static void VFilter8i_NEON(uint8_t* u, uint8_t* v, int stride,
int stride,
int thresh, int ithresh, int hev_thresh) { int thresh, int ithresh, int hev_thresh) {
uint8x16_t p3, p2, p1, p0, q0, q1, q2, q3; uint8x16_t p3, p2, p1, p0, q0, q1, q2, q3;
u += 4 * stride; u += 4 * stride;
@ -950,8 +949,8 @@ static void VFilter8i_NEON(uint8_t* WEBP_RESTRICT u, uint8_t* WEBP_RESTRICT v,
} }
#if !defined(WORK_AROUND_GCC) #if !defined(WORK_AROUND_GCC)
static void HFilter8_NEON(uint8_t* WEBP_RESTRICT u, uint8_t* WEBP_RESTRICT v, static void HFilter8_NEON(uint8_t* u, uint8_t* v, int stride,
int stride, int thresh, int ithresh, int hev_thresh) { int thresh, int ithresh, int hev_thresh) {
uint8x16_t p3, p2, p1, p0, q0, q1, q2, q3; uint8x16_t p3, p2, p1, p0, q0, q1, q2, q3;
Load8x8x2T_NEON(u, v, stride, &p3, &p2, &p1, &p0, &q0, &q1, &q2, &q3); Load8x8x2T_NEON(u, v, stride, &p3, &p2, &p1, &p0, &q0, &q1, &q2, &q3);
{ {
@ -965,8 +964,7 @@ static void HFilter8_NEON(uint8_t* WEBP_RESTRICT u, uint8_t* WEBP_RESTRICT v,
} }
} }
static void HFilter8i_NEON(uint8_t* WEBP_RESTRICT u, uint8_t* WEBP_RESTRICT v, static void HFilter8i_NEON(uint8_t* u, uint8_t* v, int stride,
int stride,
int thresh, int ithresh, int hev_thresh) { int thresh, int ithresh, int hev_thresh) {
uint8x16_t p3, p2, p1, p0, q0, q1, q2, q3; uint8x16_t p3, p2, p1, p0, q0, q1, q2, q3;
u += 4; u += 4;
@ -1002,9 +1000,8 @@ static void HFilter8i_NEON(uint8_t* WEBP_RESTRICT u, uint8_t* WEBP_RESTRICT v,
// libwebp adds 1 << 16 to cospi8sqrt2minus1 (kC1). However, this causes the // libwebp adds 1 << 16 to cospi8sqrt2minus1 (kC1). However, this causes the
// same issue with kC1 and vqdmulh that we work around by down shifting kC2 // same issue with kC1 and vqdmulh that we work around by down shifting kC2
static const int16_t kC1 = WEBP_TRANSFORM_AC3_C1; static const int16_t kC1 = 20091;
static const int16_t kC2 = static const int16_t kC2 = 17734; // half of kC2, actually. See comment above.
WEBP_TRANSFORM_AC3_C2 / 2; // half of kC2, actually. See comment above.
#if defined(WEBP_USE_INTRINSICS) #if defined(WEBP_USE_INTRINSICS)
static WEBP_INLINE void Transpose8x2_NEON(const int16x8_t in0, static WEBP_INLINE void Transpose8x2_NEON(const int16x8_t in0,
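(Regarding the kC1/kC2 comment a few lines up: vqdmulh is a doubling multiply returning the high half, roughly (a * b * 2) >> 16 with saturation, and the canonical kC2 = 35468 does not fit in a signed 16-bit lane. Its half, 17734, does fit, and the doubling inside vqdmulh restores the lost factor of two. A scalar sketch of that identity, assuming those VQDMULH semantics; kC1 needs separate handling in the kernels and is not modeled here:)

#include <assert.h>

/* Scalar model of vqdmulh_s16 with saturation omitted: (a * b * 2) >> 16. */
static int qdmulh_model(int a, int b) { return (a * b * 2) >> 16; }

int main(void) {
  int a;
  for (a = -4096; a <= 4095; ++a) {
    /* (a * 17734) * 2 is exactly a * 35468, so the >> 16 results agree. */
    assert(qdmulh_model(a, 35468 / 2) == (a * 35468) >> 16);
  }
  return 0;
}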
@ -1043,8 +1040,7 @@ static WEBP_INLINE void TransformPass_NEON(int16x8x2_t* const rows) {
Transpose8x2_NEON(E0, E1, rows); Transpose8x2_NEON(E0, E1, rows);
} }
static void TransformOne_NEON(const int16_t* WEBP_RESTRICT in, static void TransformOne_NEON(const int16_t* in, uint8_t* dst) {
uint8_t* WEBP_RESTRICT dst) {
int16x8x2_t rows; int16x8x2_t rows;
INIT_VECTOR2(rows, vld1q_s16(in + 0), vld1q_s16(in + 8)); INIT_VECTOR2(rows, vld1q_s16(in + 0), vld1q_s16(in + 8));
TransformPass_NEON(&rows); TransformPass_NEON(&rows);
@ -1054,8 +1050,7 @@ static void TransformOne_NEON(const int16_t* WEBP_RESTRICT in,
#else #else
static void TransformOne_NEON(const int16_t* WEBP_RESTRICT in, static void TransformOne_NEON(const int16_t* in, uint8_t* dst) {
uint8_t* WEBP_RESTRICT dst) {
const int kBPS = BPS; const int kBPS = BPS;
// kC1, kC2. Padded because vld1.16 loads 8 bytes // kC1, kC2. Padded because vld1.16 loads 8 bytes
const int16_t constants[4] = { kC1, kC2, 0, 0 }; const int16_t constants[4] = { kC1, kC2, 0, 0 };
@ -1188,16 +1183,14 @@ static void TransformOne_NEON(const int16_t* WEBP_RESTRICT in,
#endif // WEBP_USE_INTRINSICS #endif // WEBP_USE_INTRINSICS
static void TransformTwo_NEON(const int16_t* WEBP_RESTRICT in, static void TransformTwo_NEON(const int16_t* in, uint8_t* dst, int do_two) {
uint8_t* WEBP_RESTRICT dst, int do_two) {
TransformOne_NEON(in, dst); TransformOne_NEON(in, dst);
if (do_two) { if (do_two) {
TransformOne_NEON(in + 16, dst + 4); TransformOne_NEON(in + 16, dst + 4);
} }
} }
static void TransformDC_NEON(const int16_t* WEBP_RESTRICT in, static void TransformDC_NEON(const int16_t* in, uint8_t* dst) {
uint8_t* WEBP_RESTRICT dst) {
const int16x8_t DC = vdupq_n_s16(in[0]); const int16x8_t DC = vdupq_n_s16(in[0]);
Add4x4_NEON(DC, DC, dst); Add4x4_NEON(DC, DC, dst);
} }
@ -1211,8 +1204,7 @@ static void TransformDC_NEON(const int16_t* WEBP_RESTRICT in,
*dst = vgetq_lane_s32(rows.val[3], col); (dst) += 16; \ *dst = vgetq_lane_s32(rows.val[3], col); (dst) += 16; \
} while (0) } while (0)
static void TransformWHT_NEON(const int16_t* WEBP_RESTRICT in, static void TransformWHT_NEON(const int16_t* in, int16_t* out) {
int16_t* WEBP_RESTRICT out) {
int32x4x4_t tmp; int32x4x4_t tmp;
{ {
@ -1263,13 +1255,15 @@ static void TransformWHT_NEON(const int16_t* WEBP_RESTRICT in,
//------------------------------------------------------------------------------ //------------------------------------------------------------------------------
static void TransformAC3_NEON(const int16_t* WEBP_RESTRICT in, #define MUL(a, b) (((a) * (b)) >> 16)
uint8_t* WEBP_RESTRICT dst) { static void TransformAC3_NEON(const int16_t* in, uint8_t* dst) {
static const int kC1_full = 20091 + (1 << 16);
static const int kC2_full = 35468;
const int16x4_t A = vld1_dup_s16(in); const int16x4_t A = vld1_dup_s16(in);
const int16x4_t c4 = vdup_n_s16(WEBP_TRANSFORM_AC3_MUL2(in[4])); const int16x4_t c4 = vdup_n_s16(MUL(in[4], kC2_full));
const int16x4_t d4 = vdup_n_s16(WEBP_TRANSFORM_AC3_MUL1(in[4])); const int16x4_t d4 = vdup_n_s16(MUL(in[4], kC1_full));
const int c1 = WEBP_TRANSFORM_AC3_MUL2(in[1]); const int c1 = MUL(in[1], kC2_full);
const int d1 = WEBP_TRANSFORM_AC3_MUL1(in[1]); const int d1 = MUL(in[1], kC1_full);
const uint64_t cd = (uint64_t)( d1 & 0xffff) << 0 | const uint64_t cd = (uint64_t)( d1 & 0xffff) << 0 |
(uint64_t)( c1 & 0xffff) << 16 | (uint64_t)( c1 & 0xffff) << 16 |
(uint64_t)(-c1 & 0xffff) << 32 | (uint64_t)(-c1 & 0xffff) << 32 |
@ -1280,6 +1274,7 @@ static void TransformAC3_NEON(const int16_t* WEBP_RESTRICT in,
const int16x8_t m2_m3 = vcombine_s16(vqsub_s16(B, c4), vqsub_s16(B, d4)); const int16x8_t m2_m3 = vcombine_s16(vqsub_s16(B, c4), vqsub_s16(B, d4));
Add4x4_NEON(m0_m1, m2_m3, dst); Add4x4_NEON(m0_m1, m2_m3, dst);
} }
#undef MUL
//------------------------------------------------------------------------------ //------------------------------------------------------------------------------
// 4x4 // 4x4
@ -1308,19 +1303,18 @@ static void DC4_NEON(uint8_t* dst) { // DC
static WEBP_INLINE void TrueMotion_NEON(uint8_t* dst, int size) { static WEBP_INLINE void TrueMotion_NEON(uint8_t* dst, int size) {
const uint8x8_t TL = vld1_dup_u8(dst - BPS - 1); // top-left pixel 'A[-1]' const uint8x8_t TL = vld1_dup_u8(dst - BPS - 1); // top-left pixel 'A[-1]'
const uint8x8_t T = vld1_u8(dst - BPS); // top row 'A[0..3]' const uint8x8_t T = vld1_u8(dst - BPS); // top row 'A[0..3]'
const uint16x8_t d = vsubl_u8(T, TL); // A[c] - A[-1] const int16x8_t d = vreinterpretq_s16_u16(vsubl_u8(T, TL)); // A[c] - A[-1]
int y; int y;
for (y = 0; y < size; y += 4) { for (y = 0; y < size; y += 4) {
// left edge // left edge
const uint8x8_t L0 = vld1_dup_u8(dst + 0 * BPS - 1); const int16x8_t L0 = ConvertU8ToS16_NEON(vld1_dup_u8(dst + 0 * BPS - 1));
const uint8x8_t L1 = vld1_dup_u8(dst + 1 * BPS - 1); const int16x8_t L1 = ConvertU8ToS16_NEON(vld1_dup_u8(dst + 1 * BPS - 1));
const uint8x8_t L2 = vld1_dup_u8(dst + 2 * BPS - 1); const int16x8_t L2 = ConvertU8ToS16_NEON(vld1_dup_u8(dst + 2 * BPS - 1));
const uint8x8_t L3 = vld1_dup_u8(dst + 3 * BPS - 1); const int16x8_t L3 = ConvertU8ToS16_NEON(vld1_dup_u8(dst + 3 * BPS - 1));
// L[r] + A[c] - A[-1] const int16x8_t r0 = vaddq_s16(L0, d); // L[r] + A[c] - A[-1]
const int16x8_t r0 = vreinterpretq_s16_u16(vaddw_u8(d, L0)); const int16x8_t r1 = vaddq_s16(L1, d);
const int16x8_t r1 = vreinterpretq_s16_u16(vaddw_u8(d, L1)); const int16x8_t r2 = vaddq_s16(L2, d);
const int16x8_t r2 = vreinterpretq_s16_u16(vaddw_u8(d, L2)); const int16x8_t r3 = vaddq_s16(L3, d);
const int16x8_t r3 = vreinterpretq_s16_u16(vaddw_u8(d, L3));
// Saturate and store the result. // Saturate and store the result.
const uint32x2_t r0_u32 = vreinterpret_u32_u8(vqmovun_s16(r0)); const uint32x2_t r0_u32 = vreinterpret_u32_u8(vqmovun_s16(r0));
const uint32x2_t r1_u32 = vreinterpret_u32_u8(vqmovun_s16(r1)); const uint32x2_t r1_u32 = vreinterpret_u32_u8(vqmovun_s16(r1));
@ -1581,24 +1575,23 @@ static void TM16_NEON(uint8_t* dst) {
const uint8x8_t TL = vld1_dup_u8(dst - BPS - 1); // top-left pixel 'A[-1]' const uint8x8_t TL = vld1_dup_u8(dst - BPS - 1); // top-left pixel 'A[-1]'
const uint8x16_t T = vld1q_u8(dst - BPS); // top row 'A[0..15]' const uint8x16_t T = vld1q_u8(dst - BPS); // top row 'A[0..15]'
// A[c] - A[-1] // A[c] - A[-1]
const uint16x8_t d_lo = vsubl_u8(vget_low_u8(T), TL); const int16x8_t d_lo = vreinterpretq_s16_u16(vsubl_u8(vget_low_u8(T), TL));
const uint16x8_t d_hi = vsubl_u8(vget_high_u8(T), TL); const int16x8_t d_hi = vreinterpretq_s16_u16(vsubl_u8(vget_high_u8(T), TL));
int y; int y;
for (y = 0; y < 16; y += 4) { for (y = 0; y < 16; y += 4) {
// left edge // left edge
const uint8x8_t L0 = vld1_dup_u8(dst + 0 * BPS - 1); const int16x8_t L0 = ConvertU8ToS16_NEON(vld1_dup_u8(dst + 0 * BPS - 1));
const uint8x8_t L1 = vld1_dup_u8(dst + 1 * BPS - 1); const int16x8_t L1 = ConvertU8ToS16_NEON(vld1_dup_u8(dst + 1 * BPS - 1));
const uint8x8_t L2 = vld1_dup_u8(dst + 2 * BPS - 1); const int16x8_t L2 = ConvertU8ToS16_NEON(vld1_dup_u8(dst + 2 * BPS - 1));
const uint8x8_t L3 = vld1_dup_u8(dst + 3 * BPS - 1); const int16x8_t L3 = ConvertU8ToS16_NEON(vld1_dup_u8(dst + 3 * BPS - 1));
// L[r] + A[c] - A[-1] const int16x8_t r0_lo = vaddq_s16(L0, d_lo); // L[r] + A[c] - A[-1]
const int16x8_t r0_lo = vreinterpretq_s16_u16(vaddw_u8(d_lo, L0)); const int16x8_t r1_lo = vaddq_s16(L1, d_lo);
const int16x8_t r1_lo = vreinterpretq_s16_u16(vaddw_u8(d_lo, L1)); const int16x8_t r2_lo = vaddq_s16(L2, d_lo);
const int16x8_t r2_lo = vreinterpretq_s16_u16(vaddw_u8(d_lo, L2)); const int16x8_t r3_lo = vaddq_s16(L3, d_lo);
const int16x8_t r3_lo = vreinterpretq_s16_u16(vaddw_u8(d_lo, L3)); const int16x8_t r0_hi = vaddq_s16(L0, d_hi);
const int16x8_t r0_hi = vreinterpretq_s16_u16(vaddw_u8(d_hi, L0)); const int16x8_t r1_hi = vaddq_s16(L1, d_hi);
const int16x8_t r1_hi = vreinterpretq_s16_u16(vaddw_u8(d_hi, L1)); const int16x8_t r2_hi = vaddq_s16(L2, d_hi);
const int16x8_t r2_hi = vreinterpretq_s16_u16(vaddw_u8(d_hi, L2)); const int16x8_t r3_hi = vaddq_s16(L3, d_hi);
const int16x8_t r3_hi = vreinterpretq_s16_u16(vaddw_u8(d_hi, L3));
// Saturate and store the result. // Saturate and store the result.
const uint8x16_t row0 = vcombine_u8(vqmovun_s16(r0_lo), vqmovun_s16(r0_hi)); const uint8x16_t row0 = vcombine_u8(vqmovun_s16(r0_lo), vqmovun_s16(r0_hi));
const uint8x16_t row1 = vcombine_u8(vqmovun_s16(r1_lo), vqmovun_s16(r1_hi)); const uint8x16_t row1 = vcombine_u8(vqmovun_s16(r1_lo), vqmovun_s16(r1_hi));


@ -30,8 +30,7 @@
//------------------------------------------------------------------------------ //------------------------------------------------------------------------------
// Transforms (Paragraph 14.4) // Transforms (Paragraph 14.4)
static void Transform_SSE2(const int16_t* WEBP_RESTRICT in, static void Transform_SSE2(const int16_t* in, uint8_t* dst, int do_two) {
uint8_t* WEBP_RESTRICT dst, int do_two) {
// This implementation makes use of 16-bit fixed point versions of two // This implementation makes use of 16-bit fixed point versions of two
// multiply constants: // multiply constants:
// K1 = sqrt(2) * cos (pi/8) ~= 85627 / 2^16 // K1 = sqrt(2) * cos (pi/8) ~= 85627 / 2^16
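(The K1 value quoted above, and the 35468 used for the companion constant throughout this diff, are the Q16 roundings of sqrt(2)*cos(pi/8) and sqrt(2)*sin(pi/8). A quick self-contained check:)

#include <math.h>
#include <stdio.h>

#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif

int main(void) {
  const double k1 = sqrt(2.0) * cos(M_PI / 8.0);   /* ~1.30656 */
  const double k2 = sqrt(2.0) * sin(M_PI / 8.0);   /* ~0.54120 */
  printf("K1 * 2^16 = %.0f (85627 = 20091 + (1 << 16))\n", floor(k1 * 65536.0 + 0.5));
  printf("K2 * 2^16 = %.0f (35468)\n", floor(k2 * 65536.0 + 0.5));
  return 0;
}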
@ -197,14 +196,15 @@ static void Transform_SSE2(const int16_t* WEBP_RESTRICT in,
} }
#if (USE_TRANSFORM_AC3 == 1) #if (USE_TRANSFORM_AC3 == 1)
#define MUL(a, b) (((a) * (b)) >> 16)
static void TransformAC3_SSE2(const int16_t* WEBP_RESTRICT in, static void TransformAC3(const int16_t* in, uint8_t* dst) {
uint8_t* WEBP_RESTRICT dst) { static const int kC1 = 20091 + (1 << 16);
static const int kC2 = 35468;
const __m128i A = _mm_set1_epi16(in[0] + 4); const __m128i A = _mm_set1_epi16(in[0] + 4);
const __m128i c4 = _mm_set1_epi16(WEBP_TRANSFORM_AC3_MUL2(in[4])); const __m128i c4 = _mm_set1_epi16(MUL(in[4], kC2));
const __m128i d4 = _mm_set1_epi16(WEBP_TRANSFORM_AC3_MUL1(in[4])); const __m128i d4 = _mm_set1_epi16(MUL(in[4], kC1));
const int c1 = WEBP_TRANSFORM_AC3_MUL2(in[1]); const int c1 = MUL(in[1], kC2);
const int d1 = WEBP_TRANSFORM_AC3_MUL1(in[1]); const int d1 = MUL(in[1], kC1);
const __m128i CD = _mm_set_epi16(0, 0, 0, 0, -d1, -c1, c1, d1); const __m128i CD = _mm_set_epi16(0, 0, 0, 0, -d1, -c1, c1, d1);
const __m128i B = _mm_adds_epi16(A, CD); const __m128i B = _mm_adds_epi16(A, CD);
const __m128i m0 = _mm_adds_epi16(B, d4); const __m128i m0 = _mm_adds_epi16(B, d4);
@ -238,7 +238,7 @@ static void TransformAC3_SSE2(const int16_t* WEBP_RESTRICT in,
WebPInt32ToMem(dst + 2 * BPS, _mm_cvtsi128_si32(dst2)); WebPInt32ToMem(dst + 2 * BPS, _mm_cvtsi128_si32(dst2));
WebPInt32ToMem(dst + 3 * BPS, _mm_cvtsi128_si32(dst3)); WebPInt32ToMem(dst + 3 * BPS, _mm_cvtsi128_si32(dst3));
} }
#undef MUL
#endif // USE_TRANSFORM_AC3 #endif // USE_TRANSFORM_AC3
//------------------------------------------------------------------------------ //------------------------------------------------------------------------------
@ -259,15 +259,15 @@ static WEBP_INLINE void SignedShift8b_SSE2(__m128i* const x) {
*x = _mm_packs_epi16(lo_1, hi_1); *x = _mm_packs_epi16(lo_1, hi_1);
} }
#define FLIP_SIGN_BIT2(a, b) do { \ #define FLIP_SIGN_BIT2(a, b) { \
(a) = _mm_xor_si128(a, sign_bit); \ (a) = _mm_xor_si128(a, sign_bit); \
(b) = _mm_xor_si128(b, sign_bit); \ (b) = _mm_xor_si128(b, sign_bit); \
} while (0) }
#define FLIP_SIGN_BIT4(a, b, c, d) do { \ #define FLIP_SIGN_BIT4(a, b, c, d) { \
FLIP_SIGN_BIT2(a, b); \ FLIP_SIGN_BIT2(a, b); \
FLIP_SIGN_BIT2(c, d); \ FLIP_SIGN_BIT2(c, d); \
} while (0) }
// input/output is uint8_t // input/output is uint8_t
static WEBP_INLINE void GetNotHEV_SSE2(const __m128i* const p1, static WEBP_INLINE void GetNotHEV_SSE2(const __m128i* const p1,
@ -645,12 +645,12 @@ static void SimpleHFilter16i_SSE2(uint8_t* p, int stride, int thresh) {
(m) = _mm_max_epu8(m, MM_ABS(p2, p1)); \ (m) = _mm_max_epu8(m, MM_ABS(p2, p1)); \
} while (0) } while (0)
#define LOAD_H_EDGES4(p, stride, e1, e2, e3, e4) do { \ #define LOAD_H_EDGES4(p, stride, e1, e2, e3, e4) { \
(e1) = _mm_loadu_si128((__m128i*)&(p)[0 * (stride)]); \ (e1) = _mm_loadu_si128((__m128i*)&(p)[0 * (stride)]); \
(e2) = _mm_loadu_si128((__m128i*)&(p)[1 * (stride)]); \ (e2) = _mm_loadu_si128((__m128i*)&(p)[1 * (stride)]); \
(e3) = _mm_loadu_si128((__m128i*)&(p)[2 * (stride)]); \ (e3) = _mm_loadu_si128((__m128i*)&(p)[2 * (stride)]); \
(e4) = _mm_loadu_si128((__m128i*)&(p)[3 * (stride)]); \ (e4) = _mm_loadu_si128((__m128i*)&(p)[3 * (stride)]); \
} while (0) }
#define LOADUV_H_EDGE(p, u, v, stride) do { \ #define LOADUV_H_EDGE(p, u, v, stride) do { \
const __m128i U = _mm_loadl_epi64((__m128i*)&(u)[(stride)]); \ const __m128i U = _mm_loadl_epi64((__m128i*)&(u)[(stride)]); \
@ -658,18 +658,18 @@ static void SimpleHFilter16i_SSE2(uint8_t* p, int stride, int thresh) {
(p) = _mm_unpacklo_epi64(U, V); \ (p) = _mm_unpacklo_epi64(U, V); \
} while (0) } while (0)
#define LOADUV_H_EDGES4(u, v, stride, e1, e2, e3, e4) do { \ #define LOADUV_H_EDGES4(u, v, stride, e1, e2, e3, e4) { \
LOADUV_H_EDGE(e1, u, v, 0 * (stride)); \ LOADUV_H_EDGE(e1, u, v, 0 * (stride)); \
LOADUV_H_EDGE(e2, u, v, 1 * (stride)); \ LOADUV_H_EDGE(e2, u, v, 1 * (stride)); \
LOADUV_H_EDGE(e3, u, v, 2 * (stride)); \ LOADUV_H_EDGE(e3, u, v, 2 * (stride)); \
LOADUV_H_EDGE(e4, u, v, 3 * (stride)); \ LOADUV_H_EDGE(e4, u, v, 3 * (stride)); \
} while (0) }
#define STOREUV(p, u, v, stride) do { \ #define STOREUV(p, u, v, stride) { \
_mm_storel_epi64((__m128i*)&(u)[(stride)], p); \ _mm_storel_epi64((__m128i*)&(u)[(stride)], p); \
(p) = _mm_srli_si128(p, 8); \ (p) = _mm_srli_si128(p, 8); \
_mm_storel_epi64((__m128i*)&(v)[(stride)], p); \ _mm_storel_epi64((__m128i*)&(v)[(stride)], p); \
} while (0) }
static WEBP_INLINE void ComplexMask_SSE2(const __m128i* const p1, static WEBP_INLINE void ComplexMask_SSE2(const __m128i* const p1,
const __m128i* const p0, const __m128i* const p0,
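(The left-hand column wraps these multi-statement macros in do { ... } while (0); the older right-hand column used bare braces. The wrapper makes an invocation plus its trailing semicolon parse as exactly one statement, so it composes safely with unbraced if/else. A minimal illustration with hypothetical names:)

#include <stdio.h>

/* Brace-only: 'SWAP_BRACES(x, y);' after an unbraced 'if' leaves a stray ';'
 * that terminates the 'if', so a following 'else' fails to compile. */
#define SWAP_BRACES(a, b)   { int t_ = (a); (a) = (b); (b) = t_; }
/* do/while(0): the invocation plus its ';' is a single statement. */
#define SWAP_DOWHILE(a, b)  do { int t_ = (a); (a) = (b); (b) = t_; } while (0)

int main(void) {
  int x = 2, y = 1, already_sorted = 0;
  if (x > y)
    SWAP_DOWHILE(x, y);   /* with SWAP_BRACES this if/else would not compile */
  else
    already_sorted = 1;
  printf("x=%d y=%d sorted=%d\n", x, y, already_sorted);
  return 0;
}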
@ -794,8 +794,8 @@ static void HFilter16i_SSE2(uint8_t* p, int stride,
} }
// 8-pixels wide variant, for chroma filtering // 8-pixels wide variant, for chroma filtering
static void VFilter8_SSE2(uint8_t* WEBP_RESTRICT u, uint8_t* WEBP_RESTRICT v, static void VFilter8_SSE2(uint8_t* u, uint8_t* v, int stride,
int stride, int thresh, int ithresh, int hev_thresh) { int thresh, int ithresh, int hev_thresh) {
__m128i mask; __m128i mask;
__m128i t1, p2, p1, p0, q0, q1, q2; __m128i t1, p2, p1, p0, q0, q1, q2;
@ -819,8 +819,8 @@ static void VFilter8_SSE2(uint8_t* WEBP_RESTRICT u, uint8_t* WEBP_RESTRICT v,
STOREUV(q2, u, v, 2 * stride); STOREUV(q2, u, v, 2 * stride);
} }
static void HFilter8_SSE2(uint8_t* WEBP_RESTRICT u, uint8_t* WEBP_RESTRICT v, static void HFilter8_SSE2(uint8_t* u, uint8_t* v, int stride,
int stride, int thresh, int ithresh, int hev_thresh) { int thresh, int ithresh, int hev_thresh) {
__m128i mask; __m128i mask;
__m128i p3, p2, p1, p0, q0, q1, q2, q3; __m128i p3, p2, p1, p0, q0, q1, q2, q3;
@ -839,8 +839,7 @@ static void HFilter8_SSE2(uint8_t* WEBP_RESTRICT u, uint8_t* WEBP_RESTRICT v,
Store16x4_SSE2(&q0, &q1, &q2, &q3, u, v, stride); Store16x4_SSE2(&q0, &q1, &q2, &q3, u, v, stride);
} }
static void VFilter8i_SSE2(uint8_t* WEBP_RESTRICT u, uint8_t* WEBP_RESTRICT v, static void VFilter8i_SSE2(uint8_t* u, uint8_t* v, int stride,
int stride,
int thresh, int ithresh, int hev_thresh) { int thresh, int ithresh, int hev_thresh) {
__m128i mask; __m128i mask;
__m128i t1, t2, p1, p0, q0, q1; __m128i t1, t2, p1, p0, q0, q1;
@ -866,8 +865,7 @@ static void VFilter8i_SSE2(uint8_t* WEBP_RESTRICT u, uint8_t* WEBP_RESTRICT v,
STOREUV(q1, u, v, 1 * stride); STOREUV(q1, u, v, 1 * stride);
} }
static void HFilter8i_SSE2(uint8_t* WEBP_RESTRICT u, uint8_t* WEBP_RESTRICT v, static void HFilter8i_SSE2(uint8_t* u, uint8_t* v, int stride,
int stride,
int thresh, int ithresh, int hev_thresh) { int thresh, int ithresh, int hev_thresh) {
__m128i mask; __m128i mask;
__m128i t1, t2, p1, p0, q0, q1; __m128i t1, t2, p1, p0, q0, q1;

Some files were not shown because too many files have changed in this diff.