Compare commits

..

121 Commits

Author SHA1 Message Date
46e18c0a25 vwebp: fix incorrect clipping w/NO_BLEND
when the previous frame does not specify dispose-to-background, only the
current frame's rectangle should be cleared

related to bug #245

(cherry picked from commit 469ba2cdfd)

Change-Id: I2fc4f5be99057e0bf87d8fedec57b06859b070bd
2015-10-23 13:10:17 -07:00
fcfde90b9c update issue tracker url
code.google.com -> bugs.chromium.org

(cherry picked from commit 4b9186b2eb)

Change-Id: I0dc99a85c29657415401160df3c7dd0423f96457
2015-10-20 22:44:52 -07:00
8c3fb330e5 update AUTHORS
missed in 0.4.3

Change-Id: I2b488307dfec592264467847adcbc3de0a6c83f3
2015-10-19 15:41:24 -07:00
808d4a686e update NEWS
Change-Id: I301be57ed6d925164f3827a79c6215ab79f120c7
2015-10-19 15:41:24 -07:00
62864042c0 bump version to 0.4.4
libwebp{,decoder} - 0.4.4
libwebp libtool - 5.4.0
libwebpdecoder libtool - 1.4.0

mux/demux - 0.2.2 (unchanged)
libtool - 1.2.0 (unchanged)

Change-Id: I7d421dc47ad4d25a17450ce1b04562c5d58c596b
2015-10-19 15:41:23 -07:00
b8b314ab39 doc/webp-container-spec: update repo browser link
gerrit.chromium.org is deprecated, use chromium.googlesource.com.

(cherry picked from commit f0486968ba)

Change-Id: Iaa6d6d18798dbd8cce908988287387f5cb8e8e64
2015-10-19 15:41:23 -07:00
c3953e37c9 fix typo: constitutes -> constitute
(cherry picked from commit 5fe1fe37a5)

Change-Id: I5b20ef41f4a810e11a4499b46b5e7dc93247beed
2015-10-19 15:41:23 -07:00
cd377e291c Use __has_builtin to check clang support
Older versions of Xcode with clang reporting versions 4.[012] and 5.0
did not include support for __builtin_bswap16. Checking in this manner
avoids using brittle version checks.

Matches a change to libvpx:
https://chromium-review.googlesource.com/305573
to fix:
https://code.google.com/p/webm/issues/detail?id=1082

(cherry picked from commit d26d9def80)

Change-Id: I23ea466ee1b53b12cd3fb45f65a2186c8dda95a1
2015-10-19 15:41:22 -07:00
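
A minimal sketch of the detection pattern described above, assuming an
illustrative macro name (LOCAL_HAVE_BUILTIN_BSWAP16 is not the libwebp
spelling): prefer __has_builtin() where the compiler provides it, and fall
back to a portable swap otherwise.

  #if defined(__has_builtin)
  # if __has_builtin(__builtin_bswap16)
  #  define LOCAL_HAVE_BUILTIN_BSWAP16 1
  # endif
  #endif

  static unsigned short BSwap16Sketch(unsigned short x) {
  #if defined(LOCAL_HAVE_BUILTIN_BSWAP16)
    return __builtin_bswap16(x);                   // compiler builtin
  #else
    return (unsigned short)((x >> 8) | (x << 8));  // portable fallback
  #endif
  }
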
e2e89806f7 wicdec: fix alpha detection w/64bpp BGRA/RGBA
(cherry picked from commit badfcbaa1e)

Change-Id: Ia712cf736e490d482a52b63d8e2816d0b7035cd0
2015-10-19 15:41:22 -07:00
5c3fe77dd8 iosbuild: fix linking with Xcode 7 / iOS SDK 9
-fembed-bitcode is the default, a framework built without this flag will
fail to link against an application using it.

BUG=267

(cherry picked from commit db1321a6a2)

Change-Id: I83461cb058b1866ac99b3f0bdfa890933e88ed26
2015-10-19 15:41:22 -07:00
f9f5498b6c VP8LAllocateHistogramSet: align histogram[] entries
fixes issue #262: a SIGBUS when accessing a misaligned double in
VP8LHistogram

(cherry picked from commit cd82440ec7)

Change-Id: Ic78cc5366d7e43d892c375b6a69dce2379db931b
2015-10-19 15:41:21 -07:00
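
The general technique behind this fix, sketched under the assumption that
several histogram entries are carved out of one large allocation (the helper
name is illustrative, not the libwebp code): round each entry's size up to
the alignment of double so no entry starts at a misaligned offset.

  #include <stddef.h>

  // Round 'size' up to a multiple of sizeof(double) (a power of two here),
  // so entries packed back-to-back keep their doubles naturally aligned.
  static size_t AlignedHistogramSize(size_t size) {
    const size_t align = sizeof(double);
    return (size + align - 1) & ~(align - 1);
  }
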
3026db2ee5 Loosen the buffer size checks for Y/U/V/A too.
(follow-up to 15ca5014)

(cherry picked from commit 017f8cccec)

Change-Id: Ia122e96f616bd6317c24b69c9534cb7919b8a4a4
2015-10-19 15:41:21 -07:00
d089362d07 loosen the padding check on buffer size
Strictly speaking, the last (or first) row doesn't require padding.

cf https://code.google.com/p/webp/issues/detail?id=258

(cherry picked from commit 15ca5014f1)

Change-Id: Ie9ec8eb776fec1f5cea4cf9e21e81901fd79bf33
2015-10-19 15:41:21 -07:00
53d22c5b3e dec_neon: add whitespace around stringizing operator
prevents unintentional side-effects (though unlikely in this case) with
future compilers, cf:
eebaf97 dsp/mips: add whitespace around stringizing operator

(cherry picked from commit d623a8706f)

Change-Id: I0537091fcc97b4f54d0a156c3c83a28c51456b17
2015-10-19 15:41:21 -07:00
8bcc4d4523 dsp/mips: add whitespace around stringizing operator
fixes compile with gcc 5.1
BUG=259

(cherry picked from commit eebaf97f5a)

Change-Id: Ideb39c6290ab8569b1b6cc835bea11c822d0286c
2015-10-19 15:41:20 -07:00
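
An illustrative before/after of the spelling change made in the two commits
above (the macro and operand names are made up): the stringized argument is
separated from the neighbouring string literals by whitespace.

  // Before: the literals and #reg touch each other.
  #define LOAD_BAD(reg)  "lw  %["#reg"], 0(%[src])      \n\t"
  // After: whitespace around the stringizing operator keeps the tokens apart.
  #define LOAD_OK(reg)   "lw  %[" #reg "], 0(%[src])    \n\t"
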
d49c44f450 Container spec: clarify ordering of ALPH chunk.
Reported by user: https://code.google.com/p/webp/issues/detail?id=255

(cherry picked from commit 585d93dbba)

Change-Id: I9c027ea828d5a367b317744fad7607a16ed52fa5
2015-10-19 15:41:20 -07:00
382de22c84 msvc: fix pointer type warning in BitsLog2Floor
_BitScanReverse() takes an unsigned long*
http://msdn.microsoft.com/en-us/library/fbxyd7zd.aspx

fixes:
C4057: 'function': 'unsigned long *' differs in indirection to slightly
different base types from 'uint32_t *'

fixes issue #253

(cherry picked from commit 0250dfcc19)

Change-Id: I0101ef7be18c7ed188b35e9b17e7f71290953786
2015-10-19 15:41:20 -07:00
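
A sketch of the warning-free shape under MSVC (close in spirit to, but not
copied from, the libwebp code): give _BitScanReverse() the unsigned long it
expects and convert afterwards.

  #if defined(_MSC_VER)
  #include <intrin.h>
  #include <stdint.h>
  #pragma intrinsic(_BitScanReverse)

  // floor(log2(n)) for n > 0; 'unsigned long' matches _BitScanReverse() and
  // avoids the C4057 pointer-indirection warning.
  static int BitsLog2FloorSketch(uint32_t n) {
    unsigned long first_set_bit;
    _BitScanReverse(&first_set_bit, n);
    return (int)first_set_bit;
  }
  #endif
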
84ecd9d85c FlattenSimilarBlocks should only be tried when blending is possible.
This is because FlattenSimilarBlocks() replaces some opaque pixels with
transparent ones. This results in an equivalent output only if blending
is turned on for the current frame.

(cherry picked from commit 5cccdadf2e)

Change-Id: I05612c952fdbd4b3a6e0ac9f3a7d49822f0cfb9b
2015-10-19 15:41:19 -07:00
f55ebbba82 backport rescaler fix
backported from: 7df9389, 5ff0079

Change-Id: I11b4d97c3c483431528be9ccbd9895baac8c6a63
2015-10-19 15:41:13 -07:00
2ff633c938 fix mips2 build target
tested with mips1 and mips2; this should cover 3/4 as well.
fixes an ftbfs reported on the debian issue tracker:
https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=785000

(cherry picked from commit bf46d0acff)

Change-Id: I2458487c92bd638589fdfec5adb4f22102a5960c
2015-10-15 21:12:28 -07:00
326b5fb863 update ChangeLog
Change-Id: I794a5d58005bb0934ce0de06483567e8ed6bd8eb
2015-03-10 23:06:09 -07:00
a661e50bcb Disable NEON code on Native Client
The NEON assembler in libwebp has not yet been ported
to Native Client. This change disables it.
Related issue:
https://code.google.com/p/nativeclient/issues/detail?id=3205

(cherry picked from commit ac4f5784a0)

Change-Id: I200291db7aa79d40c1f10cff7622c9b8599e6886
2015-03-10 20:29:08 -07:00
fcd94e925a update ChangeLog
Change-Id: I60c273c650a305fe36564ccc5fb1c8d7ea18118f
2015-03-04 11:30:23 -08:00
569fe5789e update NEWS
Change-Id: Iade773fc3b9961fcd9b4112a3972cfc68e3670f2
2015-03-03 19:19:50 -08:00
bd852f5d81 bump version to 0.4.3
libwebp{,decoder} - 0.4.3
libwebp libtool - 5.3.0
libwebpdecoder libtool - 1.3.0

mux/demux - 0.2.2 (unchanged)
libtool - 1.2.0 (unchanged)

Change-Id: Ie8c35ffc20c1bfd782bdafd99da6c6b1373022c1
2015-03-03 19:05:40 -08:00
2d58b64f51 WebPPictureRescale: add a note about 0 width/height
(cherry picked from commit 0f773693bf)

Change-Id: I3890bb3fd32a148d7dd24c714546160c6c59d4ea
2015-03-03 17:53:49 -08:00
a0d8ca576f examples/Android.mk: add webpmux_example target
renamed from 'webpmux' to avoid name clash with the library module name

(cherry picked from commit 6cef0e4fa4)

Change-Id: I33bbdbdcb25a6f35bd85c9a0dbbb93b2428b05f3
2015-03-03 17:53:49 -08:00
34b1d29e3c Android.mk: add webpmux target
(cherry picked from commit 53c16ff047)

Change-Id: I60fc898fd804e23f08d760694192c5d04adcae91
2015-03-03 17:53:49 -08:00
75619881e6 Android.mk: add webpdemux target
(cherry picked from commit 21852a00a1)

Change-Id: I2fbbefbee59a96c52f5addcfc5bfe1216caad5cc
2015-03-03 17:53:48 -08:00
a98757650a Android.mk: add webpdecoder{,_static} targets
webpdecoder_static is reused to create libwebpdecoder.so and
libwebp.{a,so}

(cherry picked from commit 8697a3bcc8)

Change-Id: I940293cb755040c0ea45dc13f22624de8f355867
2015-03-03 17:53:48 -08:00
a6d4859725 Android.mk: split source lists per-directory
will allow reuse in future targets

(cherry picked from commit 4a67049113)

Conflicts:
	Android.mk

Change-Id: Iededc19d954226e62f2d2383a2b80f268d613647
2015-03-03 17:53:48 -08:00
77544d5f5b fix iOS arm64 build with Xcode 6.3
the standard vtbl functions are available there [1][2].
based on a patch from: aaroncrespo
fixes issue #243.

[1]
http://adcdownload.apple.com//Developer_Tools/Xcode_6.3_beta/Xcode_6.3_beta_Release_Notes.pdf
[2] Apple LLVM Compiler Version 6.1
- Xcode 6.3 updates the Apple LLVM compiler to version 6.1.0.
[...]
Support for the arm64 architecture has been significantly revised to
align with ARM's implementation, where the most visible impact is that a
few of the vector intrinsics have changed to match ARM's specifications.

(cherry picked from commit 602a00f93f)

Change-Id: I79a0016f44b9dbe36d0373f7f00a50ab3c2ca447
2015-03-03 17:53:47 -08:00
6dea15784d doc/webp-container-spec: note MSB order for chunk diagrams
addresses question in issue #241

(cherry picked from commit b510fbfe3b)

Change-Id: Iff6a172d5822f6ec8b9bc0951a1c9cd3f98c9251
2015-03-03 17:53:47 -08:00
f7cd57b23d doc/webp-container-spec: cosmetics
partially normalize indent, vertical whitespace and capitalization with
the copy used on developers.google.com/speed/webp

(cherry picked from commit e7d3df2314)

Change-Id: I8044418eeb9eaf5bd5c799675c74f6f845d503d6
2015-03-03 17:53:47 -08:00
1d6b250b07 vwebp: clear canvas at the beginning of each loop
this is in line with the recommendation in the spec, cf.,
5603947 webp-container-spec: clarify background clear on loop

(cherry picked from commit 1579de3cae)

Change-Id: Id3910395b05a1a1f2804be841b61f97bd4bac593
2015-03-03 17:53:46 -08:00
f97b3f86bf webp-container-spec: clarify background clear on loop
at the beginning of the loop there's an implicit clear of the entire
canvas to the background (or application defined) color. this avoids
adding the final composited frame to the first.

(cherry picked from commit 560394798f)

Change-Id: Ia3a52cf4482c6176334a5c9c99a0ddd07d1776e7
2015-03-03 17:53:46 -08:00
4ba83c1759 vwebp: remove unnecessary static Help() prototype
all uses occur after its declaration

(cherry picked from commit 2c906c407c)

Change-Id: I775642ce6d1dec3bc6da2fa0d5d87490992c7e6c
2015-03-03 17:53:46 -08:00
d34e8e3d18 vwebp/animation: display last frame on end-of-loop
previously the first frame would be redisplayed, which might be
unexpected if the final frame was meant to be a composite, for example.

(cherry picked from commit 0f017b56f3)

Change-Id: I4da795623c71501e2fa426e8fba8fb2ffcbab58a
2015-03-03 17:53:45 -08:00
bbbc524fb4 dec/vp8: clear 'dither_' on skipped blocks
DitherRow() only checks this value, not 'skip_', so previously it was
uninitialized for these blocks.

(cherry picked from commit 66935fb9ee)

Change-Id: I0f698b81854ee9d91edacb51c1e3bdab9cba96f2
2015-03-03 17:53:45 -08:00
0339fa26eb lossless_neon: enable subtract green for aarch64
similar to:
1ba61b0 enable NEON intrinsics in aarch64 builds

vtbl1_u8 is available everywhere but Xcode-based iOS arm64 builds, use
vtbl1q_u8 there.

performance varies based on the input, 1-3% on encode was observed

(cherry picked from commit 416e1cea9b)

Change-Id: Ifec35b37eb856acfcf69ed7f16fa078cd40b7034
2015-03-03 17:53:45 -08:00
5a0c2207f4 Regression fix for lossless decoding
Reported here: https://code.google.com/p/webp/issues/detail?id=239

At the beginning of method 'DecodeImageData', pixels up to
'dec->last_pixel_' are assumed to be already cached. So, at the end of
previous call to that method also, that assumption should hold true.

Hence, we should cache all pixels up to 'src' regardless of 'src_last'.

This affects lossless incremental decoding only, as that is when
src_last and src_end differ.
Note: alpha decoding is implicitly incremental, as alpha decoding of
only the rows 'y_end - y_start' happens during the FinishRow() call. So, this
bug affects alpha decoding in the non-incremental decoding flow as well.

This bug was introduced in: https://gerrit.chromium.org/gerrit/#/c/59716.

(cherry picked from commit 783a8cda24)

Change-Id: Ide6edfeb2609b02aff701e1bd9fd776da0a16be0
2015-03-03 17:53:44 -08:00
6e3a31d595 wicdec: (msvs) quiet some /analyze warnings
add additional return checks and asserts to avoid:
C6102: Using 'XXX' from failed function call ...

(cherry picked from commit 9b228b5416)

Change-Id: I51f5fa630324e0cd7b2d9fceefecb4f4021474b1
2015-03-03 17:53:44 -08:00
b49a578135 dwebp/WritePNG: mark png variables volatile
these are used on both sides of the setjmp().

(cherry picked from commit 7a191398ca)

Change-Id: I4a789bfb3a5d56946a22286c5a140008d90e1ba2
2015-03-03 17:53:43 -08:00
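
Background for the volatile markers: C guarantees nothing about non-volatile
locals that are changed between setjmp() and the corresponding longjmp(), so
marking the libpng handles volatile keeps them usable in the error path. A
hedged sketch of that shape (not the actual dwebp WritePNG, with the
pixel-writing loop elided):

  #include <setjmp.h>
  #include <stdio.h>
  #include <png.h>

  static int WritePNGSketch(FILE* out_file) {
    volatile png_structp png =
        png_create_write_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL);
    volatile png_infop info = NULL;
    if (png == NULL) return 0;
    info = png_create_info_struct(png);
    if (info == NULL || setjmp(png_jmpbuf(png))) {
      // libpng errors land here via longjmp(); 'png'/'info' are still valid.
      png_destroy_write_struct((png_structpp)&png, (png_infopp)&info);
      return 0;
    }
    png_init_io(png, out_file);
    // ... png_set_IHDR(), row writes, png_write_end() ...
    png_destroy_write_struct((png_structpp)&png, (png_infopp)&info);
    return 1;
  }
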
0a4391a196 dwebp: include setjmp.h w/WEBP_HAVE_PNG
setjmp() is used in WritePNG().

(cherry picked from commit 775dfad297)

Change-Id: Iadd836272fc7d368d635c891507ce2a08c4d3dec
2015-03-03 17:53:43 -08:00
90f1ec58a9 dwebp: correct sign in format strings
width / height are unsigned; fixes a warning with msvs /analyze:
C6340: Mismatch on sign: 'const unsigned int' passed as _Param_(4) when
some signed type is required in call to 'fprintf'.

(cherry picked from commit 47d26be760)

Change-Id: I5f1fad4c93745baf17d70178a5e66579ccd2b155
2015-03-03 17:53:43 -08:00
b61ce861f3 VP8LEncodeStream: add an assert
check enc->argb_ to quiet an msvs /analyze warning:
C6387: 'enc->argb_+y*width' could be '0':  this does not adhere to the
specification for the function 'memcpy'.

(cherry picked from commit f0e0677b87)

Change-Id: I87544e92ee0d3ea38942a475c30c6d552f9877b7
2015-03-03 17:53:42 -08:00
df1081bb82 dsp/cpu: (msvs) add include for __cpuidex
and only use it on x86 / x64 where it's available.
has the side-effect of quieting a msvs /analyze warning:
C6001: Using uninitialized memory 'cpu_info'.

(cherry picked from commit 0de5f33e31)

Change-Id: Iae51be3b22b2ee949cfc473eeea9fd9fb6b3c2cb
2015-03-03 17:53:42 -08:00
39aa055529 dsp/cpu: (msvs) avoid immintrin.h on _M_ARM
_xgetbv() isn't relevant there anyway

broken since:
279e661 Merge "dsp/cpu: add include for _xgetbv() w/MSVS"

(cherry picked from commit 4fbe9cf202)

Change-Id: Iaa7bc0c5be9c06bfffab39e194c64c09bf5b5a27
2015-03-03 17:53:42 -08:00
f814f429ca dsp/cpu: add include for _xgetbv() w/MSVS
explicitly add immintrin.h instead of transitively picking it up via
windows.h, presumably. Makes the code easier to move around.

(cherry picked from commit b6c0428e8c)

Change-Id: If70d5143ac94fc331da763ce034358858e460e06
2015-03-03 17:53:41 -08:00
8508ab99a7 cpu: fix AVX2 detection for gcc/clang targets
ecx needs to be set to 0; the visual studio builds were already doing
this.

https://software.intel.com/en-us/articles/how-to-detect-new-instruction-support-in-the-4th-generation-intel-core-processor-family

(cherry picked from commit d7c4b02a57)

Change-Id: I95efb115b4d50bbdb6b14fca2aa63d0a24974e55
2015-03-03 17:53:41 -08:00
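
Background for the fix: AVX2 is reported in CPUID leaf 7, which is
sub-leaved, so ECX must be set to 0 before the instruction or the returned
bits are undefined. A hedged sketch using the gcc/clang <cpuid.h> helpers
rather than raw inline asm (the real libwebp code differs, and a complete
runtime check also verifies OS AVX state support via XGETBV):

  #include <cpuid.h>   // gcc/clang: __get_cpuid_max(), __cpuid_count()

  static int HasAVX2Sketch(void) {
    unsigned int eax, ebx, ecx, edx;
    if (__get_cpuid_max(0, NULL) < 7) return 0;   // leaf 7 not supported
    // __cpuid_count() sets ECX (the sub-leaf) explicitly -- here to 0.
    __cpuid_count(7, 0, eax, ebx, ecx, edx);
    return (ebx >> 5) & 1;                        // EBX bit 5: AVX2
  }
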
5769623b6f fix handling of zero-sized partition #0 corner case
reported in https://code.google.com/p/webp/issues/detail?id=237

An empty partition #0 should be indicative of a bitstream error.
The previous code was correct, only an assert was triggered in debug mode.
But we might as well handle the case properly right away...

(cherry picked from commit 205c7f26af)

Change-Id: I4dc31a46191fa9e65659c9a5bf5de9605e93f2f5
2015-03-03 17:53:40 -08:00
b2e71a9080 make the 'last_cpuinfo_used' variable names unique
allows the sources to be #include'd in some hackish builds (don't do
that!)

(cherry picked from commit 67f601cd46)

Conflicts:
	src/dsp/alpha_processing.c
	src/dsp/argb.c
	src/dsp/dec.c
	src/dsp/enc.c
	src/dsp/lossless.c
	src/dsp/upsampling.c
	src/dsp/yuv.c

Change-Id: I0c7a43acbebd0e2d5068845e6daa8ce47361cd91
2015-03-02 18:43:41 -08:00
1273e84517 add -Wformat-nonliteral and -Wformat-security
can be useful; not sure whether they are a subset of the flags we
already use...

(cherry picked from commit 80d950d94e)

Change-Id: Iec742a99427a791d9527368302a1136df2ff96cd
2015-03-02 18:43:35 -08:00
3ae78eb757 multi-thread fix: lock each entry points with a static var
we compare the current VP8GetCPUInfo pointer to the last one used.
This is less code overall and each implementation is still
testable separately (by just changing VP8GetCPUInfo, but not from
separate threads!)

(cherry picked from commit a437694a17)

Conflicts:
	src/dsp/alpha_processing.c
	src/dsp/argb.c
	src/dsp/dec.c
	src/dsp/enc.c
	src/dsp/lossless.c
	src/dsp/upsampling.c
	src/dsp/yuv.c

Change-Id: Ia13fa8ffc4561a884508f6ab71ed0d1b9f1ce59b
2015-03-02 18:43:31 -08:00
5c1eeda922 webp-container-spec: remove references to fragments
this portion of the format was never finalized

(cherry picked from commit a66e66c79d)

Change-Id: I80aa1b27457a0e52b047c7284df2f58b181ca5d8
2015-03-02 18:43:26 -08:00
c5ceea4899 enc_neon: fix building with non-Xcode clang (iOS)
check for __apple_build_version__ to distinguish the two; a version
check could work as Apple bumped Xcode's to 5.x/6.x, but it's unclear
how upstream will deal with their versioning as they go 3.6+, so avoid
it for now.

(cherry picked from commit a3946b8956)

Change-Id: I67cda67c4f68e262a92d805a63cc1496374be063
2015-03-02 18:43:20 -08:00
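
A sketch of the distinction drawn in this commit (the LOCAL_* macro name is
made up): Apple's Xcode clang defines __apple_build_version__ and numbers
itself differently from upstream clang, so that macro is a steadier signal
than comparing __clang_major__/__clang_minor__.

  #if defined(__clang__) && defined(__apple_build_version__)
  # define LOCAL_APPLE_CLANG 1     // Xcode toolchain (Apple version numbers)
  #elif defined(__clang__)
  # define LOCAL_APPLE_CLANG 0     // upstream clang (open-source numbering)
  #endif
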
d0859d69de iosbuild: add x86_64 simulator support
based on the patch here:
https://github.com/pixelkind/webp-ios-build

(cherry picked from commit a96ccf8fde)

Change-Id: Iaa346b751e5f18e8cf13a8e5c4064b0c2a3f5f6c
2015-03-02 18:43:14 -08:00
046732ca65 WebPEncode: Support encoding same pic twice (even if modified)
This wasn't working for this specific scenario:
- Encode an RGBA 'pic' (with trivial alpha) using lossy encoding.
(so that pic->a == NULL after import happens).
- Modify the 'pic->argb' so that it has non-trivial alpha.
- Encode the same 'pic' again.
This used to fail to encode alpha data as pic->a == NULL.

(cherry picked from commit e4f4dddba3)

Change-Id: Ieaaa7bd09825c42f54fbd99e6781d98f0b19cc0c
2015-03-02 18:43:08 -08:00
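
A compact sketch of the scenario from this commit using the public encoding
API (error handling trimmed; the writer is assumed to be initialized by the
caller, and the alpha tweak is just an arbitrary way of making the alpha
non-trivial before the second call):

  #include "webp/encode.h"

  int EncodeTwiceSketch(const uint8_t* rgba, int width, int height,
                        WebPMemoryWriter* writer) {
    WebPConfig config;
    WebPPicture pic;
    if (!WebPConfigInit(&config) || !WebPPictureInit(&pic)) return 0;
    pic.width = width;
    pic.height = height;
    pic.use_argb = 1;
    pic.writer = WebPMemoryWrite;
    pic.custom_ptr = writer;
    if (!WebPPictureImportRGBA(&pic, rgba, width * 4)) return 0;
    if (!WebPEncode(&config, &pic)) return 0;          // 1st (lossy) encode
    // Give the picture non-trivial alpha, then encode the same 'pic' again:
    pic.argb[0] = (0x7fu << 24) | (pic.argb[0] & 0x00ffffffu);
    if (!WebPEncode(&config, &pic)) return 0;          // must keep alpha now
    WebPPictureFree(&pic);
    return 1;
  }
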
4426f50179 webp/types.h: use inline for clang++/-std=c++11
at least clang 3.[45] in C++ mode with -std=c++11 defines __STRICT_ANSI__;
this change sets WEBP_INLINE to inline for c++ / non-strict-ansi / > c99

fixes crbug.com/428383

(cherry picked from commit 6638710b9e)

Change-Id: Ief2b934353c336a75865c73c90cc3dc5e4f83913
2015-03-02 18:43:02 -08:00
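
The gist of that selection, as a hedged sketch rather than the exact
webp/types.h text: keep 'inline' for C++, for non-strict-ANSI builds, and for
C99 or newer; only fall back to nothing when none of those hold.

  #ifndef WEBP_INLINE
  # if defined(__cplusplus) || !defined(__STRICT_ANSI__) || \
       (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L)
  #  define WEBP_INLINE inline
  # else
  #  define WEBP_INLINE
  # endif
  #endif
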
e297fc7171 gif2webp: Use the default hint instead of WEBP_HINT_GRAPH.
This is much faster and the compression is slightly better too.

(cherry picked from commit c94ed49efd)

Change-Id: Ibf0d10eea83bfabfcc44ee497074767462ff41b1
2015-03-02 18:42:54 -08:00
855fe4354b Makefile.vc: add a 'legacy' RTLIBCFG option
disables buffer security checks (/GS-) and any machine optimizations
(e.g., sse2)

fixes issue #228

(cherry picked from commit 34c20c06c8)

Change-Id: I81fa483dc1654199b2017626320383d2d63317dc
2015-03-02 18:42:48 -08:00
b7eb6d55c7 gif2webp: Support GIF_DISPOSE_RESTORE_PREVIOUS
Tweaked the gif2webp_util API to support this.

Requested in: https://code.google.com/p/webp/issues/detail?id=144

(cherry picked from commit 65e5eb8a62)

Change-Id: I0e8c4edc39227355cd8d3acc55795186e25d0c3a
2015-03-02 18:42:40 -08:00
5691bdd9da gif2webp: Handle frames with odd offsets + disposal to background.
Snapping odd offsets in GIF to even offsets in WebP was causing an extra
row/column to be disposed in such cases.

The code is rewritten to maintain the previous and current canvases (it used
to maintain the previous canvas and the current frame). We also recompute the
change rectangles, as those from the GIF may no longer apply.
Also, this renders methods like ReduceTransparency() and ConvertToKeyFrame()
redundant, as the internally maintained current canvas is always independent
of previous canvases.

Disposal method choice: we pick the disposal method that results in the smallest
change rectangle.

(cherry picked from commit e4c829efe9)

Conflicts:
	examples/gif2webp_util.c

Change-Id: Ic31186d98fe1a2a790a89d1571b17e3abd127e79
2015-03-02 18:42:36 -08:00
8301da1380 stopwatch.h: fix includes
WEBP_INLINE -> webp/types.h
memcpy -> string.h

(cherry picked from commit 54edbf65ff)

Change-Id: Iab2ea8b553dc98be75eede751de62ab0292d1f97
2015-03-02 18:42:28 -08:00
6a2209aa36 update ChangeLog
Change-Id: I32a22786f99d0c239761a362485a4a91c783851b
2014-10-17 16:15:30 +02:00
36cad6abe8 bit_reader.h: cosmetics: fix a typo
Change-Id: I1ba09124700b3120f18eb3705eb5ba805feb2ca0
(cherry picked from commit 79b5bdbfde)
2014-10-17 16:02:03 +02:00
e2ecae62f0 enc_mips32: workaround gcc-4.9 bug
avoids an ICE with NDK r10b + NDK_TOOLCHAIN_VERSION=4.9

In function 'SSE16x16':
enc_mips32.c (684) internal compiler error: Segmentation fault

Change-Id: I1a3d33c0a9534c97633ab93bcdf9bf59d3a7e473
(cherry picked from commit 0ce27e715e)
2014-10-15 19:58:03 +02:00
243e68df60 update ChangeLog
Change-Id: Ib9504997d00c9f72cff30aa699fe1998b6be940d
2014-10-14 20:43:43 +02:00
eec5f5f121 enc/vp8enci.h: update version number
0.4.1 -> 0.4.2
missed in: 857578a bump version to 0.4.2

Change-Id: Iaa62ce5af5935243748e51105008f252443e7d27
2014-10-14 20:40:02 +02:00
0c1b98d28c update NEWS
Change-Id: Ib9b48e3611b77214bc875524249366ff62451c1b
2014-10-14 12:02:17 +02:00
69b0fc9492 update AUTHORS
Change-Id: I750dc65205705b94f6a3fd467e6746c482c983cb
2014-10-14 12:02:17 +02:00
857578a811 bump version to 0.4.2
libwebp{,decoder} - 0.4.2
libwebp libtool - 5.2.0
libwebpdecoder libtool - 1.2.0

mux/demux - 0.2.2
libtool - 1.2.0

Change-Id: If593a198f802fd68c7dbbdbe0fc2612dbc44e2df
2014-10-14 12:02:17 +02:00
9129deb5b1 restore encode API compatibility
protect WebPPictureSmartARGBToYUVA with an ABI check

Change-Id: Iaec4a9f8f590f27c4c72129b90068690efc84eb7
2014-10-14 12:02:17 +02:00
f17b95e992 AssignSegments: quiet -Warray-bounds warning
the number of segments is validated previously, but an explicit check
is needed to avoid a warning under gcc-4.9

(cherry picked from commit c8a87bb62d)

Change-Id: Ifa7c0dd7f3f075b3860fa8ec176d2c98ff54fcea
2014-10-14 12:02:16 +02:00
9c56c8a12e enc_neon: initialize vectors w/vdup_n_u32
replaces {} initialization gnu-ism

(cherry picked from commit 7534d71640)

Change-Id: I5a7b2d4246f0205e4bfb7f4b77d720c47d8674ec
2014-10-14 12:02:16 +02:00
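
For illustration, the kind of substitution involved (variable names
invented): the brace form relies on a GNU extension for vector types, while
the vdup intrinsics splat a scalar portably.

  #include <arm_neon.h>

  void InitVectorsSketch(void) {
    // GNU-style: uint32x2_t zero = { 0, 0 };  (non-portable initializer)
    const uint32x2_t zero  = vdup_n_u32(0);    // splat 0 into both lanes
    const uint32x4_t zeroq = vdupq_n_u32(0);   // 128-bit variant
    (void)zero;
    (void)zeroq;
  }
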
a008902320 iosbuild: cleanup
- s/declare -r/readonly/
- remove unnecessary build variables
- echo configure line

(cherry picked from commit 041956f6a3)

Change-Id: I4489d8413934dcec57d60abf136a72d08fa0fd99
2014-10-14 12:02:16 +02:00
cc6de53b3b iosbuild: output autoconf req. on failure
(cherry picked from commit 767eb40238)

Change-Id: I2a6d80cf22f5b58e80345a411c48f047fecdbb47
2014-10-14 12:02:16 +02:00
740d765aea iosbuild: make iOS 6 the minimum requirement
iOS 5 support isn't available in the Xcode 6 install; iOS 6 covers
phones starting at the 3GS, so it should be a reasonable baseline

(cherry picked from commit 9f7d9e6dda)

Change-Id: Ie5603c9e30cb52114b372509e183febbf679a69a
2014-10-14 12:02:16 +02:00
403023f500 iosbuild.sh: only install .h files in Headers
cp * -> cp *.h; avoids picking up autoconf files

(cherry picked from commit 38128cb9df)

Change-Id: I57c04562d554431ddf4605af65077f32d90ac58e
2014-10-14 12:02:16 +02:00
b65727b55d Premultiply with alpha during U/V downsampling
This prevents the 'alpha-leak' reported in issue #220

Speed-diff is kept minimal.

(cherry picked from commit c792d4129a)

Change-Id: I1976de5e6de7cfcec89a54df9233c1a6586a5846
2014-10-14 12:02:16 +02:00
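
The idea behind the fix, reduced to one channel (a simplified sketch, not the
libwebp implementation): without weighting by alpha, fully transparent pixels
contribute their arbitrary RGB to the averaged U/V samples of visible
neighbours; premultiplying first gives them zero weight.

  #include <stdint.h>

  // Scale a colour channel by its alpha (with rounding): alpha == 0 pixels
  // then contribute nothing when neighbours are averaged for U/V.
  static uint8_t PremultiplySketch(uint8_t channel, uint8_t alpha) {
    return (uint8_t)((channel * alpha + 127) / 255);
  }
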
8de0debcec gif2webp: Background color correction
For some GIF images, the first frame is missing the corresponding
graphic control extension. For such cases, we were never calling
GetBackgroundColor(), and the default background color value (white) was
being used incorrectly.
So, we call GetBackgroundColor() when we encounter the first image
descriptor instead, to make sure that it is always called.

(cherry picked from commit 0cc811d7d6)

Change-Id: I00fc8e943d8a0c1578dcd718f3e74dec7de4ed61
2014-10-14 12:02:16 +02:00
f8b7d94daa Amend the lossless spec according to issue #205, #206 and #224
http://code.google.com/p/webp/issues/detail?id=205 <- Select()
http://code.google.com/p/webp/issues/detail?id=206 <- out-of-bound colormap index
http://code.google.com/p/webp/issues/detail?id=224 <- version number MUST be 0

(cherry picked from commit d7167ff7ce)

Change-Id: I56a575529862dfc8ad189ddcfc47ef59a58f273d
2014-10-14 12:02:16 +02:00
9102a7b63d Add a WebPExtractAlpha function to dsp
This is the opposite of WebPDispatchAlpha

+ Implement the SSE2 version

(cherry picked from commit cddd334050)

Conflicts:
	src/dsp/alpha_processing_sse2.c

Change-Id: I0c297309255f508c5261da8aad01f7e57f924d6c
2014-10-14 12:02:16 +02:00
e407b5d516 webpmux: simplify InitializeConfig()
put WebPMuxConfig on the stack in main() rather than allocating it in
InitializeConfig(); removes a level of indirection there.

(cherry picked from commit c0a462cac2)

Change-Id: I81d386f7472ebbd322dd3fdbfda9d78dbeb62a66
2014-10-14 12:02:16 +02:00
3e70e64153 webpmux: fix indent
+ remove unnecessary cast

(cherry picked from commit 6986bb5e12)

Change-Id: I2070fbe6aeda49f5790c69390e5b539a2c1a5616
2014-10-14 12:02:16 +02:00
be38f1af47 webpmux: fix exit status on numeric value parse error
in most cases 'ok' is set via a goto macro

(cherry picked from commit f89e1690df)

Change-Id: I17c832446bf3e716d3bcd323dbcc72bec544029c
2014-10-14 12:02:16 +02:00
94dadcb1a1 webpmux: fix loop_count range check
explicitly check [0, 65535]; the use of 'long' was removed in a prior
commit

(cherry picked from commit 0e23c487da)

Change-Id: I70d5bf286908459b5d4d619c657853f0e833b6ea
2014-10-14 12:02:16 +02:00
40b3a618ec examples: warn on invalid numeric parameters
add ExUtilGet[U]Int / ExUtilGetFloat which print an error message on
parse failure.
fixes issue #219.

(cherry picked from commit 96d43a873a)

Conflicts:
	examples/cwebp.c
	examples/dwebp.c

Change-Id: Ie537f5aebd138925bf1a48289b6b5e261b3af2ca
2014-10-14 12:02:16 +02:00
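
A sketch of what such a checked parser can look like (modelled on the helper
names in the commit message, but not the actual examples/ code): strtol()
with explicit end-pointer and errno checks, plus a warning on failure instead
of a silent 0.

  #include <errno.h>
  #include <stdio.h>
  #include <stdlib.h>

  static int ExUtilGetIntSketch(const char* str, int base, int* const error) {
    char* end = NULL;
    long v;
    errno = 0;
    v = strtol(str, &end, base);
    if (end == str || *end != '\0' || errno == ERANGE) {
      fprintf(stderr, "Error! '%s' is not a valid integer.\n", str);
      *error = 1;
      return 0;
    }
    return (int)v;
  }
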
b7d209a448 gif2webp: Handle frames with missing graphic control extension
According to the GIF spec (http://www.w3.org/Graphics/GIF/spec-gif89a.txt),
this block is optional, and its scope is only the first graphic rendering block
that follows.

The spec doesn't mention what default values of frame dimensions, offsets,
duration and transparent index to use in such a case, though. So, we use the
defaults used by GIF reader in Chromium:
https://code.google.com/p/chromium/codesearch#chromium/src/third_party/WebKit/Source/platform/image-decoders/gif/GIFImageReader.h&l=186

(cherry picked from commit d51f3e4069)

Change-Id: Iecc6967847192483770e85ac15fe2835cd01ce7b
2014-10-14 12:02:16 +02:00
bf0eb74829 configure: simplify libpng-config invocation
use --ldflags over --prefix + --libs combination

based on comment in issue #180.

(cherry picked from commit be70b86c57)

Change-Id: If2ca06053d5237b6722ddf4117917e5f3c06ab59
2014-10-14 12:02:15 +02:00
3740f7d4c6 Rectify bug in lossless incremental decoding.
Handle the corner case when the VP8LDecodeImage() method is called with
invalid header data. The lossless decoding doesn't support incremental mode
yet. Return the error status as a BITSTREAM error in case not all pixels are
decoded with the provided bit-stream. Also added asserts in the
VP8LDecodeImage() method to validate the decoder header with
appropriate/valid data for the huffman trees (htree_groups_ etc).

(cherry picked from commit e0a9932161)

Change-Id: Ibac9fcfc4bd0a2c5f624bb9d4a2b9f6459aa19ea
2014-10-14 12:02:06 +02:00
3ab0a377d2 make VP8LSetBitPos() set br->eos_ flag
ReadSymbol() finishes with a VP8LSetBitPos() call only and could miss an eos_ during the decode loop.

Things are faster because of inlining too.

(cherry picked from commit d3242aee16)

Change-Id: I2d2a275f38834ba005bc767d45c5de72d032103e
2014-10-14 10:03:15 +02:00
2e4312b14f Lossless decoding: fix eos_ flag condition
eos_ needs to be set only when superfluous bits have actually
been requested.
Earlier, we were assuming premature end-of-stream to be an error.
Now, more precisely, we mark an error when we have encountered end-of-stream
*and* we attempt to read more bits after that.

This handles cases where the image data requires no bits to be read.
(cherry picked from commit a9decb5584)

Change-Id: I628e2c39c64f10c443fb51f86b1f5919cc9fd299
2014-10-14 10:03:14 +02:00
e6609ac6b9 fix erroneous dec->status_ setting
We only need to set BITSTREAM_ERROR if !ok.

(cherry picked from commit 3fea6a28da)

Conflicts:
    src/dec/vp8l.c

Change-Id: I5bd13e64797e8bc509477edb29158abb39cb0ee1
2014-10-14 10:02:30 +02:00
5692eae1f3 add a fallback to ALPHA_NO_COMPRESSION
if ALPHA_LOSSLESS_COMPRESSION produces too big a file (very rare!),
we fall back to no compression automatically.

(cherry picked from commit 187d379db6)

Change-Id: I5f3f509c635ce43a5e7c23f5d0f0c8329a5f24b7
2014-10-14 09:57:41 +02:00
6ecd5bf682 ExUtilReadFromStdin: (windows) open stdin in bin mode
fixes input/decode from stdin in the examples

(cherry picked from commit a6140194ff)

Change-Id: Ie8052da758a9ef64477501b709408236d258da82
2014-10-14 09:57:41 +02:00
4206ac6bbf webpmux: (windows) open stdout in binary mode
prevents corrupt output. related to issue #217

(cherry picked from commit e80eab1fbc)

Change-Id: I6f0dac8131127717ba72b0709fb35d421ab41acb
2014-10-14 09:57:41 +02:00
d40e885931 cwebp: (windows) open stdout in binary mode
prevents corrupt output. fixes issue #217

(cherry picked from commit e9bfb1166d)

Change-Id: If90afb441636144300da66d64f0e7f78505b4060
2014-10-14 09:57:41 +02:00
4aaf463449 example_util: add ExUtilSetBinaryMode
use it in dwebp when dealing with 'stdout'

(cherry picked from commit 5927e15bc7)

Change-Id: I8b8a0b0de9e73731e913ac3c83b5e2b33c693175
2014-10-14 09:57:41 +02:00
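
Common background for the four stdin/stdout commits above: on Windows the
standard streams default to text mode, which rewrites line endings and treats
0x1A as end-of-file, corrupting binary image data. A hedged sketch of the
helper shape (similar in spirit to the ExUtilSetBinaryMode mentioned above,
but not copied from it):

  #include <stdio.h>
  #if defined(_WIN32)
  #include <fcntl.h>   // _O_BINARY
  #include <io.h>      // _setmode(), _fileno()
  #endif

  // Switch a standard stream to binary mode on Windows; no-op elsewhere.
  static void SetBinaryModeSketch(FILE* file) {
  #if defined(_WIN32)
    _setmode(_fileno(file), _O_BINARY);
  #else
    (void)file;
  #endif
  }
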
4c82ff76c1 webpmux man page: Clarify some title, descriptions and examples
Based on the feedback here:
https://groups.google.com/a/webmproject.org/d/msg/webp-discuss/qY6rWJLqRTY/pF8oSj8DOGYJ

(cherry picked from commit 30f3b75b33)

Change-Id: I9119ea8e211ffb269026010e3c590385bc6a9f17
2014-10-14 09:57:41 +02:00
23d4fb3362 dsp/lossless: workaround gcc-4.9 bug on arm
force Sub3() to not be inlined; otherwise the code in Select() will be
incorrect.
https://android-review.googlesource.com/#/c/102511

(cherry picked from commit 637b388809)

Change-Id: I90ae58bf3e6cc92ca9897f69974733d562e29aaf
2014-10-13 19:03:49 +02:00
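
The workaround amounts to pinning the helper out of line so the affected gcc
never folds it into Select(). An illustrative sketch (the guard condition,
macro name and function name are simplified; the real code keys on the exact
compiler version):

  #include <stdlib.h>   // abs()

  #if defined(__arm__) && defined(__GNUC__) && !defined(__clang__)
  # define LOCAL_NO_INLINE __attribute__((noinline))
  #else
  # define LOCAL_NO_INLINE
  #endif

  // Kept out of line to sidestep the gcc-4.9 ARM inlining bug noted above.
  static LOCAL_NO_INLINE int Sub3Sketch(int a, int b, int c) {
    const int pbc = b - c;
    const int pa = a - c;
    return abs(pbc) - abs(pa);
  }
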
5af7719047 dsp.h: collect gcc/clang version test macros
endian_inl.h already relies on dsp.h, grab the definitions from there.

(cherry picked from commit 8323a9038d)

Change-Id: I445f7d0631723043c55da1070498f89965bec7b1
2014-10-13 19:03:49 +02:00
90d112466b enc_neon: enable QuantizeBlock for aarch64
vtbl4_u8 is available everywhere except iOS arm64: use vtbl2q_u8 there
with a corresponding change in the load.

(cherry picked from commit 953acd56a4)

Change-Id: Ib84212dda3c7875348282726c29e3b79b78b0eac
2014-10-13 19:03:48 +02:00
ee78e7801d SmartRGBYUV: fix odd-width problem with pixel replication
the rightmost pixel was missing a copy, which could lead to an invalid read.

Also added a lower dimension bound of 4, below which we use the regular
conversion. This is to prevent corner cases, in addition to not being overkill.

(cherry picked from commit 2523aa73cb)

Change-Id: Iac12e7a3d74590f12fe8eeb1830b9891e61439f6
2014-10-13 19:03:48 +02:00
c9ac2041e9 fix some MSVC64 warning about float conversion
(cherry picked from commit ee52dc4e54)

Change-Id: I27ab27fc15033d27d0505729f6275fb542c8d473
2014-10-13 19:03:48 +02:00
f4497a1ef5 cpu: check for _MSC_VER before using msvc inline asm
_M_IX86 will be defined in mingw builds after including windows.h. As
the gcc inline asm comes first, this missing check would only have caused
an error if the code was reorganized.

(cherry picked from commit 3fca851a20)

Change-Id: I395679bcfc43e94d308d1ceb0c0fbf932b2c378c
2014-10-13 19:03:48 +02:00
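
The guard ordering being described, as a minimal sketch (macro names
invented): the GNU inline-asm path is selected first, and the MSVC-only path
additionally requires _MSC_VER, because _M_IX86 on its own can also appear in
mingw builds once windows.h has been included.

  #if defined(__GNUC__) && defined(__i386__)
  # define LOCAL_CPU_INFO_VIA_GNU_ASM 1    // gcc/clang/mingw: GNU inline asm
  #elif defined(_MSC_VER) && defined(_M_IX86)
  # define LOCAL_CPU_INFO_VIA_MSVC_ASM 1   // MSVC proper only
  #endif
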
e2159fdff7 faster RGB->YUV conversion function (~7% speedup)
with a special case for dithering==0., it gets somewhat faster on x86
thanks to inlining.

Also, fewer macros.

(cherry picked from commit e2a83d7109)

Change-Id: Ic2f2bf6718310743bb40cef2104fa759a073e6d5
2014-10-13 19:03:48 +02:00
21abaa05e3 Add smart RGB->YUV conversion option -pre 4
New function: WebPPictureSmartARGBToYUVA()
This implements smart RGB->YUV conversion.

This is rather undocumented for now, and is triggered using the '-pre 4'
preprocessing option.

This is slow-ish and uses quite some memory, but should be improvable.
It is somewhat of a usable beta version.

(cherry picked from commit 3fc4c539aa)

Change-Id: Ia50a8c30134e4cab8a7d3eb70aef13ce1f6187a1
2014-10-13 19:03:48 +02:00
1a161e20a4 configure: add work around for gcc-4.9 aarch64 bug
add -frename-registers to avoid:
src/dsp/dec_neon.c:...:1: internal compiler error: in
simplify_const_unary_operation, at simplify-rtx.c:1539

https://gcc.gnu.org/bugzilla/show_bug.cgi?id=62040
https://gcc.gnu.org/bugzilla/show_bug.cgi?id=61622
https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=757738

(cherry picked from commit e2b8cec05b)

Change-Id: I52fb3a29ac30b82b27da05378bdb495ddebb97d7
2014-10-13 19:03:48 +02:00
55b10de73f MIPS: mips32r2: added optimization for BSwap32
gcc < 4.8.3 doesn't translate bswap optimally;
use the optimized version always.

(cherry picked from commit 98c54107df)

Change-Id: I979ea26ad6dc0166d3d2f39c4148eb8adfb7ddec
2014-10-13 19:03:48 +02:00
76d2192206 Update PATENTS to reflect s/VP8/WebM/g
Sync with http://www.webmproject.org/license/additional/

modified:   PATENTS

(cherry picked from commit dab702b357)

Change-Id: I9f7af36fdcf57a82311363a043707b181832fc18
2014-10-13 18:19:08 +02:00
29a9db1f7c MIPS: detect mips32r6 and disable mips32r1 code
(cherry picked from commit b7e5a5c451)

Change-Id: Id1325c789a990c9a8704e84e99a22d580303eb8a
2014-10-13 18:19:04 +02:00
245c4a6737 Correctly use the AC_CANONICAL_* macros
http://www.gnu.org/software/autoconf/manual/autoconf.html#Using-System-Type

Signed-off-by: Timothy Gu <timothygu99@gmail.com>
(cherry picked from commit 63c2fc02ce)

Change-Id: I40a13e84f5266ed20bc4db098502b1610ab71206
2014-10-13 18:18:58 +02:00
40aa8b69b0 cosmetics
fix some indent/whitespace, remove a few duplicate includes, extra
semi-colons

(cherry picked from commit e300c9d819)

Change-Id: If937182b40a21e0f2028496e7b4b06c6e8a41352
2014-10-13 18:18:50 +02:00
2ddcca5efe cosmetics: remove some extraneous 'extern's
(cherry picked from commit f7b4c48bba)

Change-Id: Ib3f0cff37120c51633387dd1c46592c53ab0ba6d
2014-10-13 18:18:44 +02:00
f40dd7c6de vp8enci.h: cosmetics: fix '*' placement
associate with the type

(cherry picked from commit b47fb00ac0)

Change-Id: Icf94f11bf79f6ccee3150e27b228755f8f3f0f37
2014-10-13 18:18:35 +02:00
4610c9c55a bit_writer: cosmetics: rename kFlush() -> Flush()
(cherry picked from commit 4c6dde37b9)

Change-Id: I8907927974188bee85ffade1d75d2e50817aa115
2014-10-13 18:18:31 +02:00
fc3c1750dc dsp: detect mips64 & disable mips32 code
(cherry picked from commit 0524d9e5e8)

Change-Id: Icf68dafd5cf0614ca25b36a0252caa1784ac8059
2014-10-13 18:18:25 +02:00
c1a7955d59 cwebp.1: restore quality description
based on:
d3485d9 cwebp.1: fix quality description placement

Change-Id: I8bd16db7f39cc9ff816cc02c04a455493e550c26
2014-10-13 18:18:24 +02:00
57a7e73d27 correct alpha_dithering_strength ABI check
the ABI wasn't bumped with this addition, but it's more correct to say
it was added with 0x0205 rather than 0x0204

Change-Id: I2ba12a33b612fac16bdfeb8272e76b0ea84f3938
2014-10-13 18:18:22 +02:00
6c83157524 correct WebPMemoryWriterClear ABI check
this function was introduced in 0x0204; fix checks related to this to be
> 0x0203 instead of 0x0202, pointed out on ffmpeg-devel.

Change-Id: I52cd2b98304baf1eb9a83094e2374f2120a1546b
2014-10-13 18:18:19 +02:00
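
For context, such gating is done at compile time against the ABI version
macro exported by the public headers; a minimal sketch (the LOCAL_HAVE_*
macro is invented):

  #include "webp/encode.h"

  #if WEBP_ENCODER_ABI_VERSION > 0x0203
  // WebPMemoryWriterClear() is available from ABI version 0x0204 onward.
  # define LOCAL_HAVE_MEMORY_WRITER_CLEAR 1
  #endif
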
251 changed files with 16876 additions and 45373 deletions

.gitignore

@ -19,10 +19,8 @@
/stamp-h1
Makefile
Makefile.in
examples/anim_diff
examples/[cdv]webp
examples/gif2webp
examples/img2webp
examples/webpmux
src/webp/config.h*
src/webp/stamp-h1
@ -32,10 +30,3 @@ src/webp/stamp-h1
*.pdb
/iosbuild
/WebP.framework
CMakeCache.txt
CMakeFiles/
cmake_install.cmake
.gradle
/build
extras/get_disto
extras/webp_quality

.mailmap

@ -6,7 +6,3 @@ Vikas Arora <vikasa@google.com>
<vikasa@google.com> <vikasa@gmail.com>
<vikasa@google.com> <vikaas.arora@gmail.com>
<slobodan.prijic@imgtec.com> <Slobodan.Prijic@imgtec.com>
<vrabaud@google.com> <vincent.rabaud@gmail.com>
Tamar Levy <tamar.levy@intel.com>
<qrczak@google.com> <qrczak>
Hui Su <huisu@google.com>

AUTHORS

@ -2,37 +2,25 @@ Contributors:
- Charles Munger (clm at google dot com)
- Christian Duvivier (cduvivier at google dot com)
- Djordje Pesut (djordje dot pesut at imgtec dot com)
- Hui Su (huisu at google dot com)
- James Zern (jzern at google dot com)
- Jan Engelhardt (jengelh at medozas dot de)
- Jehan (jehan at girinstud dot io)
- Johann (johann dot koenig at duck dot com)
- Jovan Zelincevic (jovan dot zelincevic at imgtec dot com)
- Jyrki Alakuijala (jyrki at google dot com)
- Lode Vandevenne (lode at google dot com)
- levytamar82 (tamar dot levy at intel dot com)
- Lou Quillio (louquillio at google dot com)
- Mans Rullgard (mans at mansr dot com)
- Marcin Kowalczyk (qrczak at google dot com)
- Martin Olsson (mnemo at minimum dot se)
- Mikołaj Zalewski (mikolajz at google dot com)
- Mislav Bradac (mislavm at google dot com)
- Nico Weber (thakis at chromium dot org)
- Noel Chromium (noel at chromium dot org)
- Owen Rodley (orodley at google dot com)
- Parag Salasakar (img dot mips1 at gmail dot com)
- Pascal Massimino (pascal dot massimino at gmail dot com)
- Paweł Hajdan, Jr (phajdan dot jr at chromium dot org)
- Pierre Joye (pierre dot php at gmail dot com)
- Sam Clegg (sbc at chromium dot org)
- Scott Hancher (seh at google dot com)
- Scott LaVarnway (slavarnway at google dot com)
- Scott Talbot (s at chikachow dot org)
- Slobodan Prijic (slobodan dot prijic at imgtec dot com)
- Somnath Banerjee (somnath dot banerjee at gmail dot com)
- Sriraman Tallam (tmsriram at google dot com)
- Tamar Levy (tamar dot levy at intel dot com)
- Timothy Gu (timothygu99 at gmail dot com)
- Urvang Joshi (urvang at google dot com)
- Vikas Arora (vikasa at google dot com)
- Vincent Rabaud (vrabaud at google dot com)
- Yang Zhang (yang dot zhang at arm dot com)

Android.mk

@ -1,7 +1,6 @@
LOCAL_PATH := $(call my-dir)
WEBP_CFLAGS := -Wall -DANDROID -DHAVE_MALLOC_H -DHAVE_PTHREAD -DWEBP_USE_THREAD
WEBP_CFLAGS += -fvisibility=hidden
ifeq ($(APP_OPTIM),release)
WEBP_CFLAGS += -finline-functions -ffast-math \
@ -16,138 +15,94 @@ ifneq ($(findstring armeabi-v7a, $(TARGET_ARCH_ABI)),)
# instructions to be generated for armv7a code. Instead target the neon code
# specifically.
NEON := c.neon
USE_CPUFEATURES := yes
else
NEON := c
endif
dec_srcs := \
src/dec/alpha_dec.c \
src/dec/buffer_dec.c \
src/dec/frame_dec.c \
src/dec/idec_dec.c \
src/dec/io_dec.c \
src/dec/quant_dec.c \
src/dec/tree_dec.c \
src/dec/vp8_dec.c \
src/dec/vp8l_dec.c \
src/dec/webp_dec.c \
src/dec/alpha.c \
src/dec/buffer.c \
src/dec/frame.c \
src/dec/idec.c \
src/dec/io.c \
src/dec/quant.c \
src/dec/tree.c \
src/dec/vp8.c \
src/dec/vp8l.c \
src/dec/webp.c \
demux_srcs := \
src/demux/anim_decode.c \
src/demux/demux.c \
dsp_dec_srcs := \
src/dsp/alpha_processing.c \
src/dsp/alpha_processing_mips_dsp_r2.c \
src/dsp/alpha_processing_neon.$(NEON) \
src/dsp/alpha_processing_sse2.c \
src/dsp/alpha_processing_sse41.c \
src/dsp/argb.c \
src/dsp/argb_mips_dsp_r2.c \
src/dsp/argb_sse2.c \
src/dsp/cpu.c \
src/dsp/dec.c \
src/dsp/dec_clip_tables.c \
src/dsp/dec_mips32.c \
src/dsp/dec_mips_dsp_r2.c \
src/dsp/dec_msa.c \
src/dsp/dec_neon.$(NEON) \
src/dsp/dec_sse2.c \
src/dsp/dec_sse41.c \
src/dsp/filters.c \
src/dsp/filters_mips_dsp_r2.c \
src/dsp/filters_msa.c \
src/dsp/filters_neon.$(NEON) \
src/dsp/filters_sse2.c \
src/dsp/lossless.c \
src/dsp/lossless_mips_dsp_r2.c \
src/dsp/lossless_msa.c \
src/dsp/lossless_mips32.c \
src/dsp/lossless_neon.$(NEON) \
src/dsp/lossless_sse2.c \
src/dsp/rescaler.c \
src/dsp/rescaler_mips32.c \
src/dsp/rescaler_mips_dsp_r2.c \
src/dsp/rescaler_msa.c \
src/dsp/rescaler_neon.$(NEON) \
src/dsp/rescaler_sse2.c \
src/dsp/upsampling.c \
src/dsp/upsampling_mips_dsp_r2.c \
src/dsp/upsampling_msa.c \
src/dsp/upsampling_neon.$(NEON) \
src/dsp/upsampling_sse2.c \
src/dsp/yuv.c \
src/dsp/yuv_mips32.c \
src/dsp/yuv_mips_dsp_r2.c \
src/dsp/yuv_sse2.c \
dsp_enc_srcs := \
src/dsp/cost.c \
src/dsp/cost_mips32.c \
src/dsp/cost_mips_dsp_r2.c \
src/dsp/cost_sse2.c \
src/dsp/enc.c \
src/dsp/enc_avx2.c \
src/dsp/enc_mips32.c \
src/dsp/enc_mips_dsp_r2.c \
src/dsp/enc_msa.c \
src/dsp/enc_neon.$(NEON) \
src/dsp/enc_sse2.c \
src/dsp/enc_sse41.c \
src/dsp/lossless_enc.c \
src/dsp/lossless_enc_mips32.c \
src/dsp/lossless_enc_mips_dsp_r2.c \
src/dsp/lossless_enc_msa.c \
src/dsp/lossless_enc_neon.$(NEON) \
src/dsp/lossless_enc_sse2.c \
src/dsp/lossless_enc_sse41.c \
enc_srcs := \
src/enc/alpha_enc.c \
src/enc/analysis_enc.c \
src/enc/backward_references_enc.c \
src/enc/config_enc.c \
src/enc/cost_enc.c \
src/enc/delta_palettization_enc.c \
src/enc/filter_enc.c \
src/enc/frame_enc.c \
src/enc/histogram_enc.c \
src/enc/iterator_enc.c \
src/enc/near_lossless_enc.c \
src/enc/picture_enc.c \
src/enc/picture_csp_enc.c \
src/enc/picture_psnr_enc.c \
src/enc/picture_rescale_enc.c \
src/enc/picture_tools_enc.c \
src/enc/predictor_enc.c \
src/enc/quant_enc.c \
src/enc/syntax_enc.c \
src/enc/token_enc.c \
src/enc/tree_enc.c \
src/enc/vp8l_enc.c \
src/enc/webp_enc.c \
src/enc/alpha.c \
src/enc/analysis.c \
src/enc/backward_references.c \
src/enc/config.c \
src/enc/cost.c \
src/enc/filter.c \
src/enc/frame.c \
src/enc/histogram.c \
src/enc/iterator.c \
src/enc/picture.c \
src/enc/picture_csp.c \
src/enc/picture_psnr.c \
src/enc/picture_rescale.c \
src/enc/picture_tools.c \
src/enc/quant.c \
src/enc/syntax.c \
src/enc/token.c \
src/enc/tree.c \
src/enc/vp8l.c \
src/enc/webpenc.c \
mux_srcs := \
src/mux/anim_encode.c \
src/mux/muxedit.c \
src/mux/muxinternal.c \
src/mux/muxread.c \
utils_dec_srcs := \
src/utils/bit_reader_utils.c \
src/utils/color_cache_utils.c \
src/utils/filters_utils.c \
src/utils/huffman_utils.c \
src/utils/quant_levels_dec_utils.c \
src/utils/random_utils.c \
src/utils/rescaler_utils.c \
src/utils/thread_utils.c \
src/utils/bit_reader.c \
src/utils/color_cache.c \
src/utils/filters.c \
src/utils/huffman.c \
src/utils/quant_levels_dec.c \
src/utils/random.c \
src/utils/rescaler.c \
src/utils/thread.c \
src/utils/utils.c \
utils_enc_srcs := \
src/utils/bit_writer_utils.c \
src/utils/huffman_encode_utils.c \
src/utils/quant_levels_utils.c \
src/utils/bit_writer.c \
src/utils/huffman_encode.c \
src/utils/quant_levels.c \
################################################################################
# libwebpdecoder
@ -165,9 +120,7 @@ LOCAL_C_INCLUDES += $(LOCAL_PATH)/src
# prefer arm over thumb mode for performance gains
LOCAL_ARM_MODE := arm
ifeq ($(USE_CPUFEATURES),yes)
LOCAL_STATIC_LIBRARIES := cpufeatures
endif
LOCAL_STATIC_LIBRARIES := cpufeatures
LOCAL_MODULE := webpdecoder_static
@ -257,10 +210,6 @@ endif
################################################################################
WEBP_SRC_PATH := $(LOCAL_PATH)
include $(WEBP_SRC_PATH)/imageio/Android.mk
include $(WEBP_SRC_PATH)/examples/Android.mk
include $(LOCAL_PATH)/examples/Android.mk
ifeq ($(USE_CPUFEATURES),yes)
$(call import-module,android/cpufeatures)
endif
$(call import-module,android/cpufeatures)

CMakeLists.txt

@ -1,189 +0,0 @@
cmake_minimum_required(VERSION 2.8.7)
project(libwebp C)
# Options for coder / decoder executables.
option(WEBP_BUILD_CWEBP "Build the cwebp command line tool." OFF)
option(WEBP_BUILD_DWEBP "Build the dwebp command line tool." OFF)
option(WEBP_BUILD_GIF2WEBP "Build the gif2webp conversion tool." OFF)
option(WEBP_BUILD_IMG2WEBP "Build the img2webp animation tool." OFF)
option(WEBP_EXPERIMENTAL_FEATURES "Build with experimental features." OFF)
option(WEBP_ENABLE_SWAP_16BIT_CSP "Enable byte swap for 16 bit colorspaces." OFF)
set(WEBP_DEP_LIBRARIES)
set(WEBP_DEP_INCLUDE_DIRS)
if(NOT CMAKE_BUILD_TYPE)
set(CMAKE_BUILD_TYPE "Release" CACHE
"Build type: Release, Debug or RelWithDebInfo" STRING FORCE
)
endif()
include(cmake/config.h.cmake)
################################################################################
# Options.
if(WEBP_ENABLE_SWAP_16BIT_CSP)
add_definitions(-DWEBP_SWAP_16BIT_CSP)
endif()
################################################################################
# Android only.
if(ANDROID)
include_directories(${ANDROID_NDK}/sources/android/cpufeatures)
add_library(cpufeatures STATIC
${ANDROID_NDK}/sources/android/cpufeatures/cpu-features.c
)
target_link_libraries(cpufeatures dl)
set(WEBP_DEP_LIBRARIES ${WEBP_DEP_LIBRARIES} cpufeatures)
set(WEBP_DEP_INCLUDE_DIRS ${WEBP_DEP_INCLUDE_DIRS}
${ANDROID_NDK}/sources/android/cpufeatures
)
endif()
################################################################################
# WebP source files.
# Read the Makefile.am to get the source files.
function(parse_Makefile_am FOLDER VAR)
file(READ ${FOLDER}/Makefile.am MAKEFILE_AM)
string(REGEX MATCHALL "_SOURCES \\+= [^\n]*"
FILES_PER_LINE ${MAKEFILE_AM}
)
set(SRCS ${${VAR}})
foreach(FILES ${FILES_PER_LINE})
string(SUBSTRING ${FILES} 12 -1 FILES)
string(REGEX MATCHALL "[0-9a-z\\._]+"
FILES ${FILES}
)
foreach(FILE ${FILES})
list(APPEND SRCS ${FOLDER}/${FILE})
endforeach()
endforeach()
set(${VAR} ${SRCS} PARENT_SCOPE)
endfunction()
set(WEBP_SRCS)
parse_Makefile_am(${CMAKE_CURRENT_SOURCE_DIR}/src/dec "WEBP_SRCS")
parse_Makefile_am(${CMAKE_CURRENT_SOURCE_DIR}/src/demux "WEBP_SRCS")
parse_Makefile_am(${CMAKE_CURRENT_SOURCE_DIR}/src/dsp "WEBP_SRCS")
parse_Makefile_am(${CMAKE_CURRENT_SOURCE_DIR}/src/enc "WEBP_SRCS")
parse_Makefile_am(${CMAKE_CURRENT_SOURCE_DIR}/src/utils "WEBP_SRCS")
# Remove the files specific to SIMD we don't use.
foreach(FILE ${WEBP_SIMD_FILES_NOT_TO_INCLUDE})
list(REMOVE_ITEM WEBP_SRCS ${FILE})
endforeach()
# Build the library.
add_definitions(-Wall)
include_directories(${CMAKE_CURRENT_SOURCE_DIR}/src/ ${WEBP_DEP_INCLUDE_DIRS})
add_library(webp ${WEBP_SRCS})
target_link_libraries(webp ${WEBP_DEP_LIBRARIES})
# Change the compile flags for SIMD files we use.
list(LENGTH WEBP_SIMD_FILES_TO_INCLUDE WEBP_SIMD_FILES_TO_INCLUDE_LENGTH)
math(EXPR WEBP_SIMD_FILES_TO_INCLUDE_RANGE
"${WEBP_SIMD_FILES_TO_INCLUDE_LENGTH}-1"
)
foreach(I_FILE RANGE ${WEBP_SIMD_FILES_TO_INCLUDE_RANGE})
list(GET WEBP_SIMD_FILES_TO_INCLUDE ${I_FILE} FILE)
list(GET WEBP_SIMD_FLAGS_TO_INCLUDE ${I_FILE} SIMD_COMPILE_FLAG)
set_source_files_properties(${FILE} PROPERTIES
COMPILE_FLAGS ${SIMD_COMPILE_FLAG}
)
endforeach()
# Build the executables if asked for.
if(WEBP_BUILD_CWEBP OR WEBP_BUILD_DWEBP OR
WEBP_BUILD_GIF2WEBP OR WEBP_BUILD_IMG2WEBP)
# Example utility library.
set(exampleutil_SRCS
${CMAKE_CURRENT_SOURCE_DIR}/examples/stopwatch.h
${CMAKE_CURRENT_SOURCE_DIR}/examples/example_util.c
${CMAKE_CURRENT_SOURCE_DIR}/examples/example_util.h)
add_library(exampleutil ${exampleutil_SRCS})
target_link_libraries(exampleutil webp ${WEBP_DEP_LIBRARIES})
set(imageioutil_SRCS
${CMAKE_CURRENT_SOURCE_DIR}/imageio/imageio_util.c
${CMAKE_CURRENT_SOURCE_DIR}/imageio/imageio_util.h)
add_library(imageioutil ${imageioutil_SRCS})
target_link_libraries(imageioutil ${WEBP_DEP_LIBRARIES})
# Image-decoding utility library.
set(imagedec_SRCS
${CMAKE_CURRENT_SOURCE_DIR}/examples/gifdec.c
${CMAKE_CURRENT_SOURCE_DIR}/examples/gifdec.h
${CMAKE_CURRENT_SOURCE_DIR}/imageio/image_dec.c
${CMAKE_CURRENT_SOURCE_DIR}/imageio/image_dec.h
${CMAKE_CURRENT_SOURCE_DIR}/imageio/jpegdec.c
${CMAKE_CURRENT_SOURCE_DIR}/imageio/jpegdec.h
${CMAKE_CURRENT_SOURCE_DIR}/imageio/metadata.c
${CMAKE_CURRENT_SOURCE_DIR}/imageio/metadata.h
${CMAKE_CURRENT_SOURCE_DIR}/imageio/pngdec.c
${CMAKE_CURRENT_SOURCE_DIR}/imageio/pngdec.h
${CMAKE_CURRENT_SOURCE_DIR}/imageio/tiffdec.c
${CMAKE_CURRENT_SOURCE_DIR}/imageio/tiffdec.h
${CMAKE_CURRENT_SOURCE_DIR}/imageio/webpdec.c
${CMAKE_CURRENT_SOURCE_DIR}/imageio/webpdec.h
${CMAKE_CURRENT_SOURCE_DIR}/imageio/wicdec.c
${CMAKE_CURRENT_SOURCE_DIR}/imageio/wicdec.h)
add_library(imagedec ${imagedec_SRCS})
target_link_libraries(imagedec webp ${WEBP_DEP_LIBRARIES}
${WEBP_DEP_IMG_LIBRARIES})
# Image-encoding utility library.
set(imageenc_SRCS
${CMAKE_CURRENT_SOURCE_DIR}/imageio/image_enc.c
${CMAKE_CURRENT_SOURCE_DIR}/imageio/image_enc.h)
add_library(imageenc ${imageenc_SRCS})
target_link_libraries(imageenc webp imageioutil
${WEBP_DEP_LIBRARIES} ${WEBP_DEP_IMG_LIBRARIES})
endif()
if(WEBP_BUILD_DWEBP)
# dwebp
include_directories(${WEBP_DEP_IMG_INCLUDE_DIRS})
add_executable(dwebp
${CMAKE_CURRENT_SOURCE_DIR}/examples/dwebp.c
${CMAKE_CURRENT_SOURCE_DIR}/examples/stopwatch.h)
target_link_libraries(dwebp imagedec imageenc webp
exampleutil imageioutil
${WEBP_DEP_LIBRARIES} ${WEBP_DEP_IMG_LIBRARIES}
)
endif()
if(WEBP_BUILD_CWEBP)
# cwebp
include_directories(${WEBP_DEP_IMG_INCLUDE_DIRS})
add_executable(cwebp
${CMAKE_CURRENT_SOURCE_DIR}/examples/cwebp.c
${CMAKE_CURRENT_SOURCE_DIR}/examples/stopwatch.h)
target_link_libraries(cwebp imagedec webp exampleutil imageioutil
${WEBP_DEP_LIBRARIES} ${WEBP_DEP_IMG_LIBRARIES}
)
endif()
if(WEBP_BUILD_GIF2WEBP)
# gif2webp
include_directories(${WEBP_DEP_IMG_INCLUDE_DIRS})
set(GIF2WEBP_SRCS ${CMAKE_CURRENT_SOURCE_DIR}/examples/gif2webp.c)
parse_Makefile_am(${CMAKE_CURRENT_SOURCE_DIR}/src/mux "GIF2WEBP_SRCS")
add_executable(gif2webp ${GIF2WEBP_SRCS})
target_link_libraries(gif2webp imagedec webp exampleutil imageioutil
${WEBP_DEP_LIBRARIES} ${WEBP_DEP_IMG_LIBRARIES}
)
endif()
if(WEBP_BUILD_IMG2WEBP)
# img2webp
include_directories(${WEBP_DEP_IMG_INCLUDE_DIRS})
set(IMG2WEBP_SRCS ${CMAKE_CURRENT_SOURCE_DIR}/examples/img2webp.c)
parse_Makefile_am(${CMAKE_CURRENT_SOURCE_DIR}/src/mux "IMG2WEBP_SRCS")
add_executable(img2webp ${IMG2WEBP_SRCS})
target_link_libraries(img2webp imagedec webp exampleutil imageioutil
${WEBP_DEP_LIBRARIES} ${WEBP_DEP_IMG_LIBRARIES}
)
endif()

ChangeLog

File diff suppressed because it is too large

Makefile.am

@ -1,9 +1,3 @@
ACLOCAL_AMFLAGS = -I m4
SUBDIRS = src imageio man
SUBDIRS = src examples man
EXTRA_DIST = COPYING autogen.sh
if WANT_EXTRAS
SUBDIRS += extras
endif
SUBDIRS += examples

Makefile.vc

@ -11,8 +11,6 @@ LIBWEBPDEMUX_BASENAME = libwebpdemux
ARCH = x86
!ELSE IF ! [ cl 2>&1 | find "x64" > NUL ]
ARCH = x64
!ELSE IF ! [ cl 2>&1 | find "ARM" > NUL ]
ARCH = ARM
!ELSE
!ERROR Unable to auto-detect toolchain architecture! \
If cl.exe is in your PATH rerun nmake with ARCH=<arch>.
@ -31,20 +29,13 @@ CCNODBG = cl.exe $(NOLOGO) /O2 /DNDEBUG
CCDEBUG = cl.exe $(NOLOGO) /Od /Gm /Zi /D_DEBUG /RTC1
CFLAGS = /Isrc $(NOLOGO) /W3 /EHsc /c
CFLAGS = $(CFLAGS) /DWIN32 /D_CRT_SECURE_NO_WARNINGS /DWIN32_LEAN_AND_MEAN
CFLAGS = $(CFLAGS) /DHAVE_WINCODEC_H /DWEBP_USE_THREAD
LDFLAGS = /LARGEADDRESSAWARE /MANIFEST /NXCOMPAT /DYNAMICBASE
LDFLAGS = $(LDFLAGS) $(PLATFORM_LDFLAGS)
LNKDLL = link.exe /DLL $(NOLOGO)
LNKEXE = link.exe $(NOLOGO)
LNKLIB = lib.exe $(NOLOGO)
MT = mt.exe $(NOLOGO)
RCNODBG = rc.exe $(NOLOGO) /l"0x0409" # 0x409 = U.S. English
RCDEBUG = $(RCNODBG) /D_DEBUG
!IF "$(ARCH)" == "ARM"
CFLAGS = $(CFLAGS) /DWINAPI_FAMILY=WINAPI_FAMILY_PHONE_APP /DWEBP_USE_THREAD
!ELSE
CFLAGS = $(CFLAGS) /DHAVE_WINCODEC_H /DWEBP_USE_THREAD
!ENDIF
CFGSET = FALSE
!IF "$(OBJDIR)" == ""
@ -84,8 +75,6 @@ OUTPUT_DIRS = $(DIRBIN) $(DIRINC) $(DIRLIB) \
$(DIROBJ)\dsp \
$(DIROBJ)\enc \
$(DIROBJ)\examples \
$(DIROBJ)\extras \
$(DIROBJ)\imageio \
$(DIROBJ)\mux \
$(DIROBJ)\utils \
@ -103,11 +92,9 @@ LIBWEBPMUX_BASENAME = $(LIBWEBPMUX_BASENAME)_debug
LIBWEBPDEMUX_BASENAME = $(LIBWEBPDEMUX_BASENAME)_debug
!ELSE IF "$(CFG)" == "release-dynamic"
CC = $(CCNODBG)
RC = $(RCNODBG)
DLLBUILD = TRUE
!ELSE IF "$(CFG)" == "debug-dynamic"
CC = $(CCDEBUG)
RC = $(RCDEBUG)
RTLIB = $(RTLIBD)
DLLBUILD = TRUE
LIBWEBPDECODER_BASENAME = $(LIBWEBPDECODER_BASENAME)_debug
@ -124,7 +111,9 @@ LIBWEBP = $(DIRLIB)\$(LIBWEBP_BASENAME).lib
LIBWEBPMUX = $(DIRLIB)\$(LIBWEBPMUX_BASENAME).lib
LIBWEBPDEMUX = $(DIRLIB)\$(LIBWEBPDEMUX_BASENAME).lib
!ELSE IF "$(DLLBUILD)" == "TRUE"
DLLC = webp_dll.c
DLLINC = webp_dll.h
DLL_OBJS = $(DIROBJ)\$(DLLC:.c=.obj)
CC = $(CC) /I$(DIROBJ) /FI$(DLLINC) $(RTLIB) /DWEBP_DLL
LIBWEBPDECODER = $(DIRLIB)\$(LIBWEBPDECODER_BASENAME)_dll.lib
LIBWEBP = $(DIRLIB)\$(LIBWEBP_BASENAME)_dll.lib
@ -153,8 +142,6 @@ CFGSET = TRUE
!MESSAGE . features enabled.
!MESSAGE - (empty) - build libwebp-based targets for CFG
!MESSAGE - all - build (de)mux-based targets for CFG
!MESSAGE - gif2webp - requires libgif & >= VS2013
!MESSAGE - anim_diff - requires libgif & >= VS2013
!MESSAGE
!MESSAGE RTLIBCFG controls the runtime library linkage - 'static' or 'dynamic'.
!MESSAGE 'legacy' will produce a Windows 2000 compatible library.
@ -175,160 +162,100 @@ CFGSET = TRUE
#
DEC_OBJS = \
$(DIROBJ)\dec\alpha_dec.obj \
$(DIROBJ)\dec\buffer_dec.obj \
$(DIROBJ)\dec\frame_dec.obj \
$(DIROBJ)\dec\idec_dec.obj \
$(DIROBJ)\dec\io_dec.obj \
$(DIROBJ)\dec\quant_dec.obj \
$(DIROBJ)\dec\tree_dec.obj \
$(DIROBJ)\dec\vp8_dec.obj \
$(DIROBJ)\dec\vp8l_dec.obj \
$(DIROBJ)\dec\webp_dec.obj \
$(DIROBJ)\dec\alpha.obj \
$(DIROBJ)\dec\buffer.obj \
$(DIROBJ)\dec\frame.obj \
$(DIROBJ)\dec\idec.obj \
$(DIROBJ)\dec\io.obj \
$(DIROBJ)\dec\quant.obj \
$(DIROBJ)\dec\tree.obj \
$(DIROBJ)\dec\vp8.obj \
$(DIROBJ)\dec\vp8l.obj \
$(DIROBJ)\dec\webp.obj \
DEMUX_OBJS = \
$(DIROBJ)\demux\anim_decode.obj \
$(DIROBJ)\demux\demux.obj \
DSP_DEC_OBJS = \
$(DIROBJ)\dsp\alpha_processing.obj \
$(DIROBJ)\dsp\alpha_processing_mips_dsp_r2.obj \
$(DIROBJ)\dsp\alpha_processing_neon.obj \
$(DIROBJ)\dsp\alpha_processing_sse2.obj \
$(DIROBJ)\dsp\alpha_processing_sse41.obj \
$(DIROBJ)\dsp\cpu.obj \
$(DIROBJ)\dsp\dec.obj \
$(DIROBJ)\dsp\dec_clip_tables.obj \
$(DIROBJ)\dsp\dec_mips32.obj \
$(DIROBJ)\dsp\dec_mips_dsp_r2.obj \
$(DIROBJ)\dsp\dec_msa.obj \
$(DIROBJ)\dsp\dec_neon.obj \
$(DIROBJ)\dsp\dec_sse2.obj \
$(DIROBJ)\dsp\dec_sse41.obj \
$(DIROBJ)\dsp\filters.obj \
$(DIROBJ)\dsp\filters_mips_dsp_r2.obj \
$(DIROBJ)\dsp\filters_msa.obj \
$(DIROBJ)\dsp\filters_neon.obj \
$(DIROBJ)\dsp\filters_sse2.obj \
$(DIROBJ)\dsp\lossless.obj \
$(DIROBJ)\dsp\lossless_mips_dsp_r2.obj \
$(DIROBJ)\dsp\lossless_msa.obj \
$(DIROBJ)\dsp\lossless_mips32.obj \
$(DIROBJ)\dsp\lossless_neon.obj \
$(DIROBJ)\dsp\lossless_sse2.obj \
$(DIROBJ)\dsp\rescaler.obj \
$(DIROBJ)\dsp\rescaler_mips32.obj \
$(DIROBJ)\dsp\rescaler_mips_dsp_r2.obj \
$(DIROBJ)\dsp\rescaler_msa.obj \
$(DIROBJ)\dsp\rescaler_neon.obj \
$(DIROBJ)\dsp\rescaler_sse2.obj \
$(DIROBJ)\dsp\upsampling.obj \
$(DIROBJ)\dsp\upsampling_mips_dsp_r2.obj \
$(DIROBJ)\dsp\upsampling_msa.obj \
$(DIROBJ)\dsp\upsampling_neon.obj \
$(DIROBJ)\dsp\upsampling_sse2.obj \
$(DIROBJ)\dsp\yuv.obj \
$(DIROBJ)\dsp\yuv_mips32.obj \
$(DIROBJ)\dsp\yuv_mips_dsp_r2.obj \
$(DIROBJ)\dsp\yuv_sse2.obj \
DSP_ENC_OBJS = \
$(DIROBJ)\dsp\argb.obj \
$(DIROBJ)\dsp\argb_mips_dsp_r2.obj \
$(DIROBJ)\dsp\argb_sse2.obj \
$(DIROBJ)\dsp\cost.obj \
$(DIROBJ)\dsp\cost_mips32.obj \
$(DIROBJ)\dsp\cost_mips_dsp_r2.obj \
$(DIROBJ)\dsp\cost_sse2.obj \
$(DIROBJ)\dsp\enc.obj \
$(DIROBJ)\dsp\enc_avx2.obj \
$(DIROBJ)\dsp\enc_mips32.obj \
$(DIROBJ)\dsp\enc_mips_dsp_r2.obj \
$(DIROBJ)\dsp\enc_msa.obj \
$(DIROBJ)\dsp\enc_neon.obj \
$(DIROBJ)\dsp\enc_sse2.obj \
$(DIROBJ)\dsp\enc_sse41.obj \
$(DIROBJ)\dsp\lossless_enc.obj \
$(DIROBJ)\dsp\lossless_enc_mips32.obj \
$(DIROBJ)\dsp\lossless_enc_mips_dsp_r2.obj \
$(DIROBJ)\dsp\lossless_enc_msa.obj \
$(DIROBJ)\dsp\lossless_enc_neon.obj \
$(DIROBJ)\dsp\lossless_enc_sse2.obj \
$(DIROBJ)\dsp\lossless_enc_sse41.obj \
EX_ANIM_UTIL_OBJS = \
$(DIROBJ)\examples\anim_util.obj \
IMAGEIO_DEC_OBJS = \
$(DIROBJ)\imageio\image_dec.obj \
$(DIROBJ)\imageio\jpegdec.obj \
$(DIROBJ)\imageio\metadata.obj \
$(DIROBJ)\imageio\pngdec.obj \
$(DIROBJ)\imageio\tiffdec.obj \
$(DIROBJ)\imageio\webpdec.obj \
$(DIROBJ)\imageio\wicdec.obj \
IMAGEIO_ENC_OBJS = \
$(DIROBJ)\imageio\image_enc.obj \
EX_GIF_DEC_OBJS = \
$(DIROBJ)\examples\gifdec.obj \
EX_FORMAT_DEC_OBJS = \
$(DIROBJ)\examples\jpegdec.obj \
$(DIROBJ)\examples\metadata.obj \
$(DIROBJ)\examples\pngdec.obj \
$(DIROBJ)\examples\tiffdec.obj \
$(DIROBJ)\examples\webpdec.obj \
$(DIROBJ)\examples\wicdec.obj \
EX_UTIL_OBJS = \
$(DIROBJ)\examples\example_util.obj \
ENC_OBJS = \
$(DIROBJ)\enc\alpha_enc.obj \
$(DIROBJ)\enc\analysis_enc.obj \
$(DIROBJ)\enc\backward_references_enc.obj \
$(DIROBJ)\enc\config_enc.obj \
$(DIROBJ)\enc\cost_enc.obj \
$(DIROBJ)\enc\delta_palettization_enc.obj \
$(DIROBJ)\enc\filter_enc.obj \
$(DIROBJ)\enc\frame_enc.obj \
$(DIROBJ)\enc\histogram_enc.obj \
$(DIROBJ)\enc\iterator_enc.obj \
$(DIROBJ)\enc\near_lossless_enc.obj \
$(DIROBJ)\enc\picture_enc.obj \
$(DIROBJ)\enc\picture_csp_enc.obj \
$(DIROBJ)\enc\picture_psnr_enc.obj \
$(DIROBJ)\enc\picture_rescale_enc.obj \
$(DIROBJ)\enc\picture_tools_enc.obj \
$(DIROBJ)\enc\predictor_enc.obj \
$(DIROBJ)\enc\quant_enc.obj \
$(DIROBJ)\enc\syntax_enc.obj \
$(DIROBJ)\enc\token_enc.obj \
$(DIROBJ)\enc\tree_enc.obj \
$(DIROBJ)\enc\vp8l_enc.obj \
$(DIROBJ)\enc\webp_enc.obj \
EXTRAS_OBJS = \
$(DIROBJ)\extras\extras.obj \
$(DIROBJ)\extras\quality_estimate.obj \
IMAGEIO_UTIL_OBJS = \
$(DIROBJ)\imageio\imageio_util.obj \
$(DIROBJ)\enc\alpha.obj \
$(DIROBJ)\enc\analysis.obj \
$(DIROBJ)\enc\backward_references.obj \
$(DIROBJ)\enc\config.obj \
$(DIROBJ)\enc\cost.obj \
$(DIROBJ)\enc\filter.obj \
$(DIROBJ)\enc\frame.obj \
$(DIROBJ)\enc\histogram.obj \
$(DIROBJ)\enc\iterator.obj \
$(DIROBJ)\enc\picture.obj \
$(DIROBJ)\enc\picture_csp.obj \
$(DIROBJ)\enc\picture_psnr.obj \
$(DIROBJ)\enc\picture_rescale.obj \
$(DIROBJ)\enc\picture_tools.obj \
$(DIROBJ)\enc\quant.obj \
$(DIROBJ)\enc\syntax.obj \
$(DIROBJ)\enc\token.obj \
$(DIROBJ)\enc\tree.obj \
$(DIROBJ)\enc\vp8l.obj \
$(DIROBJ)\enc\webpenc.obj \
MUX_OBJS = \
$(DIROBJ)\mux\anim_encode.obj \
$(DIROBJ)\mux\muxedit.obj \
$(DIROBJ)\mux\muxinternal.obj \
$(DIROBJ)\mux\muxread.obj \
UTILS_DEC_OBJS = \
$(DIROBJ)\utils\bit_reader_utils.obj \
$(DIROBJ)\utils\color_cache_utils.obj \
$(DIROBJ)\utils\filters_utils.obj \
$(DIROBJ)\utils\huffman_utils.obj \
$(DIROBJ)\utils\quant_levels_dec_utils.obj \
$(DIROBJ)\utils\rescaler_utils.obj \
$(DIROBJ)\utils\random_utils.obj \
$(DIROBJ)\utils\thread_utils.obj \
$(DIROBJ)\utils\bit_reader.obj \
$(DIROBJ)\utils\color_cache.obj \
$(DIROBJ)\utils\filters.obj \
$(DIROBJ)\utils\huffman.obj \
$(DIROBJ)\utils\quant_levels_dec.obj \
$(DIROBJ)\utils\rescaler.obj \
$(DIROBJ)\utils\random.obj \
$(DIROBJ)\utils\thread.obj \
$(DIROBJ)\utils\utils.obj \
UTILS_ENC_OBJS = \
$(DIROBJ)\utils\bit_writer_utils.obj \
$(DIROBJ)\utils\huffman_encode_utils.obj \
$(DIROBJ)\utils\quant_levels_utils.obj \
$(DIROBJ)\utils\bit_writer.obj \
$(DIROBJ)\utils\huffman_encode.obj \
$(DIROBJ)\utils\quant_levels.obj \
LIBWEBPDECODER_OBJS = $(DEC_OBJS) $(DSP_DEC_OBJS) $(UTILS_DEC_OBJS)
LIBWEBP_OBJS = $(LIBWEBPDECODER_OBJS) $(ENC_OBJS) $(DSP_ENC_OBJS) \
@ -337,51 +264,19 @@ LIBWEBPMUX_OBJS = $(MUX_OBJS) $(LIBWEBPMUX_OBJS)
LIBWEBPDEMUX_OBJS = $(DEMUX_OBJS) $(LIBWEBPDEMUX_OBJS)
OUT_LIBS = $(LIBWEBPDECODER) $(LIBWEBP)
!IF "$(ARCH)" == "ARM"
ex: $(OUT_LIBS)
all: ex
!ELSE
OUT_EXAMPLES = $(DIRBIN)\cwebp.exe $(DIRBIN)\dwebp.exe
EXTRA_EXAMPLES = $(DIRBIN)\vwebp.exe $(DIRBIN)\webpmux.exe \
$(DIRBIN)\img2webp.exe $(DIRBIN)\get_disto.exe \
$(DIRBIN)\webp_quality.exe
EXTRA_EXAMPLES = $(DIRBIN)\vwebp.exe $(DIRBIN)\webpmux.exe
ex: $(OUT_LIBS) $(OUT_EXAMPLES)
all: ex $(EXTRA_EXAMPLES)
# NB: gif2webp.exe and anim_diff.exe are excluded from 'all' as libgif requires
# C99 support which is only available from VS2013 onward.
gif2webp: $(DIRBIN)\gif2webp.exe
anim_diff: $(DIRBIN)\anim_diff.exe
$(DIRBIN)\anim_diff.exe: $(DIROBJ)\examples\anim_diff.obj $(EX_ANIM_UTIL_OBJS)
$(DIRBIN)\anim_diff.exe: $(EX_UTIL_OBJS) $(IMAGEIO_UTIL_OBJS)
$(DIRBIN)\anim_diff.exe: $(EX_GIF_DEC_OBJS) $(LIBWEBPDEMUX) $(LIBWEBP)
$(DIRBIN)\cwebp.exe: $(DIROBJ)\examples\cwebp.obj $(IMAGEIO_DEC_OBJS)
$(DIRBIN)\cwebp.exe: $(IMAGEIO_UTIL_OBJS)
$(DIRBIN)\dwebp.exe: $(DIROBJ)\examples\dwebp.obj $(IMAGEIO_DEC_OBJS)
$(DIRBIN)\dwebp.exe: $(IMAGEIO_ENC_OBJS)
$(DIRBIN)\dwebp.exe: $(IMAGEIO_UTIL_OBJS)
$(DIRBIN)\gif2webp.exe: $(DIROBJ)\examples\gif2webp.obj $(EX_GIF_DEC_OBJS)
$(DIRBIN)\gif2webp.exe: $(EX_UTIL_OBJS) $(IMAGEIO_UTIL_OBJS) $(LIBWEBPMUX)
$(DIRBIN)\gif2webp.exe: $(LIBWEBP)
$(DIRBIN)\vwebp.exe: $(DIROBJ)\examples\vwebp.obj $(EX_UTIL_OBJS)
$(DIRBIN)\vwebp.exe: $(IMAGEIO_UTIL_OBJS) $(LIBWEBPDEMUX) $(LIBWEBP)
$(DIRBIN)\cwebp.exe: $(DIROBJ)\examples\cwebp.obj $(EX_FORMAT_DEC_OBJS)
$(DIRBIN)\dwebp.exe: $(DIROBJ)\examples\dwebp.obj
$(DIRBIN)\vwebp.exe: $(DIROBJ)\examples\vwebp.obj
$(DIRBIN)\vwebp.exe: $(EX_UTIL_OBJS) $(LIBWEBPDEMUX) $(LIBWEBP)
$(DIRBIN)\webpmux.exe: $(DIROBJ)\examples\webpmux.obj $(LIBWEBPMUX)
$(DIRBIN)\webpmux.exe: $(EX_UTIL_OBJS) $(IMAGEIO_UTIL_OBJS) $(LIBWEBP)
$(DIRBIN)\img2webp.exe: $(DIROBJ)\examples\img2webp.obj $(LIBWEBPMUX)
$(DIRBIN)\img2webp.exe: $(IMAGEIO_DEC_OBJS)
$(DIRBIN)\img2webp.exe: $(EX_UTIL_OBJS) $(IMAGEIO_UTIL_OBJS) $(LIBWEBP)
$(DIRBIN)\get_disto.exe: $(DIROBJ)\extras\get_disto.obj
$(DIRBIN)\get_disto.exe: $(IMAGEIO_DEC_OBJS) $(IMAGEIO_UTIL_OBJS) $(LIBWEBP)
$(DIRBIN)\webp_quality.exe: $(DIROBJ)\extras\webp_quality.obj
$(DIRBIN)\webp_quality.exe: $(IMAGEIO_UTIL_OBJS)
$(DIRBIN)\webp_quality.exe: $(EXTRAS_OBJS) $(LIBWEBP)
$(DIRBIN)\webpmux.exe: $(EX_UTIL_OBJS) $(LIBWEBP)
$(OUT_EXAMPLES): $(EX_UTIL_OBJS) $(LIBWEBP)
$(EX_UTIL_OBJS) $(IMAGEIO_UTIL_OBJS): $(OUTPUT_DIRS)
$(IMAGEIO_DEC_OBJS) $(IMAGEIO_ENC_OBJS) $(EXTRAS_OBJS): $(OUTPUT_DIRS)
!ENDIF # ARCH == ARM
$(EX_UTIL_OBJS) $(EX_FORMAT_DEC_OBJS): $(OUTPUT_DIRS)
experimental:
$(MAKE) /f Makefile.vc \
@ -397,29 +292,20 @@ $(LIBWEBP_OBJS) $(LIBWEBPMUX_OBJS) $(LIBWEBPDEMUX_OBJS): $(OUTPUT_DIRS)
!IF "$(DLLBUILD)" == "TRUE"
$(LIBWEBP_OBJS) $(LIBWEBPMUX_OBJS) $(LIBWEBPDEMUX_OBJS): \
$(DIROBJ)\$(DLLINC)
$(DIROBJ)\$(DLLINC) $(DIROBJ)\$(DLLC)
{$(DIROBJ)}.c{$(DIROBJ)}.obj:
$(CC) $(CFLAGS) /Fd$(LIBWEBP_PDBNAME) /Fo$@ $<
{src}.rc{$(DIROBJ)}.res:
$(RC) /fo$@ $<
{src\demux}.rc{$(DIROBJ)\demux}.res:
$(RC) /fo$@ $<
{src\mux}.rc{$(DIROBJ)\mux}.res:
$(RC) /fo$@ $<
$(LIBWEBP): $(DIROBJ)\$(LIBWEBP_BASENAME:_debug=).res
$(LIBWEBPDECODER): $(DIROBJ)\$(LIBWEBPDECODER_BASENAME:_debug=).res
$(LIBWEBPMUX): $(LIBWEBP) $(DIROBJ)\mux\$(LIBWEBPMUX_BASENAME:_debug=).res
$(LIBWEBPDEMUX): $(LIBWEBP) $(DIROBJ)\demux\$(LIBWEBPDEMUX_BASENAME:_debug=).res
$(LIBWEBPMUX): $(LIBWEBP)
$(LIBWEBPDEMUX): $(LIBWEBP)
$(LIBWEBPDECODER) $(LIBWEBP) $(LIBWEBPMUX) $(LIBWEBPDEMUX):
$(LNKDLL) /out:$(DIRBIN)\$(@B:_dll=.dll) /implib:$@ $(LFLAGS) $**
-xcopy $(DIROBJ)\*.pdb $(DIRLIB) /y
clean::
@-erase /s $(DIROBJ)\$(DLLINC) 2> NUL
@-erase /s $(DIROBJ)\$(DLLC) $(DIROBJ)\$(DLLINC) 2> NUL
!ELSE
$(LIBWEBPDECODER) $(LIBWEBP) $(LIBWEBPMUX) $(LIBWEBPDEMUX):
$(LNKLIB) /out:$@ $**
@ -436,31 +322,25 @@ $(DIROBJ)\$(DLLINC):
@echo #define WEBP_EXTERN(type) __declspec(dllexport) type >> $@
@echo #endif /* WEBP_DLL_H_ */ >> $@
# expose a WebPFree() function for use in managed code
$(DIROBJ)\$(DLLC): $(DIROBJ)\$(DLLINC)
@echo #include ^<stdlib.h^> > $@
@echo #include "webp_dll.h" >> $@
@echo // This function should be used in place of free() for memory >> $@
@echo // returned by the WebP API. >> $@
@echo WEBP_EXTERN(void) WebPFree(void* ptr) { >> $@
@echo free(ptr); >> $@
@echo } >> $@
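The generated webp_dll.c above only wraps free(); the point is that a buffer allocated inside the DLL should be released by the same C runtime that allocated it. An illustrative caller-side sketch (not part of the build; the function name DecodeAndRelease is mine) pairing a decode call with the exported WebPFree():

#include <stdio.h>
#include "webp/decode.h"

extern void WebPFree(void* ptr);   /* exported by the generated webp_dll.c */

static void DecodeAndRelease(const uint8_t* data, size_t size) {
  int width = 0, height = 0;
  uint8_t* const rgba = WebPDecodeRGBA(data, size, &width, &height);
  if (rgba == NULL) return;
  printf("decoded %dx%d\n", width, height);
  WebPFree(rgba);   /* not free(): the buffer was allocated inside the DLL */
}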
.SUFFIXES: .c .obj .res .exe
# File-specific flag builds. Note batch rules take precedence over wildcards,
# so for now name each file individually.
$(DIROBJ)\dsp\enc_avx2.obj: src\dsp\enc_avx2.c
$(CC) $(CFLAGS) $(AVX2_FLAGS) /Fd$(LIBWEBP_PDBNAME) /Fo$(DIROBJ)\dsp\ \
src\dsp\$(@B).c
$(DIROBJ)\examples\anim_diff.obj: examples\anim_diff.c
$(CC) $(CFLAGS) /DWEBP_HAVE_GIF /Fd$(LIBWEBP_PDBNAME) \
/Fo$(DIROBJ)\examples\ examples\$(@B).c
$(DIROBJ)\examples\anim_util.obj: examples\anim_util.c
$(CC) $(CFLAGS) /DWEBP_HAVE_GIF /Fd$(LIBWEBP_PDBNAME) \
/Fo$(DIROBJ)\examples\ examples\$(@B).c
$(DIROBJ)\examples\gif2webp.obj: examples\gif2webp.c
$(CC) $(CFLAGS) /DWEBP_HAVE_GIF /Fd$(LIBWEBP_PDBNAME) \
/Fo$(DIROBJ)\examples\ examples\$(@B).c
$(DIROBJ)\examples\gifdec.obj: examples\gifdec.c
$(CC) $(CFLAGS) /DWEBP_HAVE_GIF /Fd$(LIBWEBP_PDBNAME) \
/Fo$(DIROBJ)\examples\ examples\$(@B).c
# Batch rules
{examples}.c{$(DIROBJ)\examples}.obj::
$(CC) $(CFLAGS) /Fd$(DIROBJ)\examples\ /Fo$(DIROBJ)\examples\ $<
{extras}.c{$(DIROBJ)\extras}.obj::
$(CC) $(CFLAGS) /Fd$(DIROBJ)\extras\ /Fo$(DIROBJ)\extras\ $<
{imageio}.c{$(DIROBJ)\imageio}.obj::
$(CC) $(CFLAGS) /Fd$(DIROBJ)\imageio\ /Fo$(DIROBJ)\imageio\ $<
{src\dec}.c{$(DIROBJ)\dec}.obj::
$(CC) $(CFLAGS) /Fd$(LIBWEBP_PDBNAME) /Fo$(DIROBJ)\dec\ $<
{src\demux}.c{$(DIROBJ)\demux}.obj::
@ -480,12 +360,6 @@ $(DIROBJ)\examples\gifdec.obj: examples\gifdec.c
$(MT) -manifest $@.manifest -outputresource:$@;1
del $@.manifest
{$(DIROBJ)\extras}.obj{$(DIRBIN)}.exe:
$(LNKEXE) $(LDFLAGS) /OUT:$@ $** \
ole32.lib windowscodecs.lib shlwapi.lib
$(MT) -manifest $@.manifest -outputresource:$@;1
del $@.manifest
clean::
@-erase /s $(DIROBJ)\*.dll 2> NUL
@-erase /s $(DIROBJ)\*.exp 2> NUL

NEWS

@ -1,68 +1,3 @@
- 1/26/2017: version 0.6.0
* lossless performance and compression improvements
* miscellaneous performance improvements (SSE2, NEON, MSA)
* webpmux gained a -duration option allowing for frame timing modification
* new img2webp utility allowing a sequence of images to be converted to
animated webp
* API changes:
- libwebp:
WebPPictureSharpARGBToYUVA
WebPPlaneDistortion
- libwebpmux / gif2webp:
WebPAnimEncoderOptions: kmax <= 0 now disables keyframes, kmax == 1
forces all keyframes. See mux.h and the gif2webp
manpage for details.
- 12/13/2016: version 0.5.2
This is a binary compatible release.
This release covers CVE-2016-8888 and CVE-2016-9085.
* further security related hardening in the tools; fixes to
gif2webp/AnimEncoder (issues #310, #314, #316, #322), cwebp/libwebp (issue
#312)
* full libwebp (encoder & decoder) iOS framework; libwebpdecoder
WebP.framework renamed to WebPDecoder.framework (issue #307)
* CMake support for Android Studio (2.2)
* miscellaneous build related fixes (issue #306, #313)
* miscellaneous documentation improvements (issue #225)
* minor lossy encoder fixes and improvements
- 6/14/2016: version 0.5.1
This is a binary compatible release.
* miscellaneous bug fixes (issues #280, #289)
* reverted alpha plane encoding with color cache for compatibility with
libwebp 0.4.0->0.4.3 (issues #291, #298)
* lossless encoding performance improvements
* memory reduction in both lossless encoding and decoding
* force mux output to be in the extended format (VP8X) when undefined chunks
are present (issue #294)
* gradle, cmake build support
* workaround for compiler bug causing 64-bit decode failures on android
devices using clang-3.8 in the r11c NDK
* various WebPAnimEncoder improvements
- 12/17/2015: version 0.5.0
* miscellaneous bug & build fixes (issues #234, #258, #274, #275, #278)
* encoder & decoder speed-ups on x86/ARM/MIPS for lossy & lossless
- note! YUV->RGB conversion was sped-up, but the results will be slightly
different from previous releases
* various lossless encoder improvements
* gif2webp improvements, -min_size option added
* tools fully support input from stdin and output to stdout (issue #168)
* New WebPAnimEncoder API for creating animations
* New WebPAnimDecoder API for decoding animations
* other API changes:
- libwebp:
WebPPictureSmartARGBToYUVA() (-pre 4 in cwebp)
WebPConfig::exact (-exact in cwebp; -alpha_cleanup is now the default)
WebPConfig::near_lossless (-near_lossless in cwebp)
WebPFree() (free'ing webp allocated memory in other languages)
WebPConfigLosslessPreset()
WebPMemoryWriterClear()
- libwebpdemux: removed experimental fragment related fields and functions
- libwebpmux: WebPMuxSetCanvasSize()
* new libwebpextras library with some uncommon import functions:
WebPImportGray/WebPImportRGB565/WebPImportRGB4444
- 10/15/15: version 0.4.4
This is a binary compatible release.
* rescaling out-of-bounds read fix (issue #254)
@ -161,8 +96,7 @@
- 9/19/11: version 0.1.3
* Advanced decoding APIs.
* On-the-fly cropping and rescaling of images.
* SSE2 instructions for decoding performance optimizations on x86 based
platforms.
* SSE2 instructions for decoding performance optimizations on x86 based platforms.
* Support Multi-threaded decoding.
* 40% improvement in Decoding performance.
* Add support for RGB565, RGBA4444 & ARGB image colorspace.

README

@ -4,7 +4,7 @@
(ASCII-art WebP logo banner; layout lost in extraction. The version string in the banner changes from v0.6.0 to v0.4.4.)
Description:
============
@ -15,12 +15,11 @@ as well as the command line tools 'cwebp' and 'dwebp'.
See http://developers.google.com/speed/webp
The latest source tree is available at
https://chromium.googlesource.com/webm/libwebp
Latest sources are available from http://www.webmproject.org/code/
It is released under the same license as the WebM project.
See http://www.webmproject.org/license/software/ or the
"COPYING" file for details. An additional intellectual
file "COPYING" file for details. An additional intellectual
property rights grant can be found in the file PATENTS.
Building:
@ -54,12 +53,6 @@ Please refer to makefile.unix for additional details and customizations.
Using autoconf tools:
---------------------
Prerequisites:
A compiler (e.g., gcc), make, autoconf, automake, libtool.
On a Debian-like system the following should install everything you need for a
minimal build:
$ sudo apt-get install gcc make autoconf automake libtool
When building from git sources, you will need to run autogen.sh to generate the
configure script.
@ -84,73 +77,6 @@ be installed independently using a minor modification in the corresponding
Makefile.am configure files (see comments there). See './configure --help' for
more options.
Building for MIPS Linux:
------------------------
MIPS Linux toolchain stable available releases can be found at:
https://community.imgtec.com/developers/mips/tools/codescape-mips-sdk/available-releases/
# Add toolchain to PATH
export PATH=$PATH:/path/to/toolchain/bin
# 32-bit build for mips32r5 (p5600)
HOST=mips-mti-linux-gnu
MIPS_CFLAGS="-O3 -mips32r5 -mabi=32 -mtune=p5600 -mmsa -mfp64 \
-msched-weight -mload-store-pairs -fPIE"
MIPS_LDFLAGS="-mips32r5 -mabi=32 -mmsa -mfp64 -pie"
# 64-bit build for mips64r6 (i6400)
HOST=mips-img-linux-gnu
MIPS_CFLAGS="-O3 -mips64r6 -mabi=64 -mtune=i6400 -mmsa -mfp64 \
-msched-weight -mload-store-pairs -fPIE"
MIPS_LDFLAGS="-mips64r6 -mabi=64 -mmsa -mfp64 -pie"
./configure --host=${HOST} --build=`config.guess` \
CC="${HOST}-gcc -EL" \
CFLAGS="$MIPS_CFLAGS" \
LDFLAGS="$MIPS_LDFLAGS"
make
make install
CMake:
------
The support for CMake is minimal: it only helps you compile libwebp, cwebp and
dwebp.
Prerequisites:
A compiler (e.g., gcc with autotools) and CMake.
On a Debian-like system the following should install everything you need for a
minimal build:
$ sudo apt-get install build-essential cmake
When building from git sources, you will need to run cmake to generate the
configure script.
mkdir build && cd build && cmake ../
make
make install
If you also want cwebp or dwebp, you will need to enable them through CMake:
cmake -DWEBP_BUILD_CWEBP=ON -DWEBP_BUILD_DWEBP=ON ../
or through your favorite interface (like ccmake or cmake-qt-gui).
Gradle:
-------
The support for Gradle is minimal: it only helps you compile libwebp, cwebp and
dwebp and webpmux_example.
Prerequisites:
A compiler (e.g., gcc with autotools) and gradle.
On a Debian-like system the following should install everything you need for a
minimal build:
$ sudo apt-get install build-essential gradle
When building from git sources, you will need to run the Gradle wrapper with the
appropriate target, e.g. :
./gradlew buildAllExecutables
SWIG bindings:
--------------
@ -218,30 +144,26 @@ If input size (-s) for an image is not specified, it is
assumed to be a PNG, JPEG, TIFF or WebP file.
Options:
-h / -help ............. short help
-H / -longhelp ......... long help
-q <float> ............. quality factor (0:small..100:big), default=75
-alpha_q <int> ......... transparency-compression quality (0..100),
default=100
-h / -help ............ short help
-H / -longhelp ........ long help
-q <float> ............. quality factor (0:small..100:big)
-alpha_q <int> ......... transparency-compression quality (0..100)
-preset <string> ....... preset setting, one of:
default, photo, picture,
drawing, icon, text
-preset must come first, as it overwrites other parameters
-z <int> ............... activates lossless preset with given
level in [0:fast, ..., 9:slowest]
-m <int> ............... compression method (0=fast, 6=slowest), default=4
-segments <int> ........ number of segments to use (1..4), default=4
-m <int> ............... compression method (0=fast, 6=slowest)
-segments <int> ........ number of segments to use (1..4)
-size <int> ............ target size (in bytes)
-psnr <float> .......... target PSNR (in dB. typically: 42)
-s <int> <int> ......... input size (width x height) for YUV
-sns <int> ............. spatial noise shaping (0:off, 100:max), default=50
-f <int> ............... filter strength (0=off..100), default=60
-sharpness <int> ....... filter sharpness (0:most .. 7:least sharp), default=0
-sns <int> ............. spatial noise shaping (0:off, 100:max)
-f <int> ............... filter strength (0=off..100)
-sharpness <int> ....... filter sharpness (0:most .. 7:least sharp)
-strong ................ use strong filter instead of simple (default)
-nostrong .............. use simple filter instead of strong
-sharp_yuv ............. use sharper (and slower) RGB->YUV conversion
-partition_limit <int> . limit quality to fit the 512k limit on
the first partition (0=no degradation ... 100=full)
-pass <int> ............ analysis pass number (1..10)
@ -254,18 +176,16 @@ Options:
-print_ssim ............ prints averaged SSIM distortion
-print_lsim ............ prints local-similarity distortion
-d <file.pgm> .......... dump the compressed output (PGM file)
-alpha_method <int> .... transparency-compression method (0..1), default=1
-alpha_method <int> .... transparency-compression method (0..1)
-alpha_filter <string> . predictive filtering for alpha plane,
one of: none, fast (default) or best
-exact ................. preserve RGB values in transparent area, default=off
-alpha_cleanup ......... clean RGB values in transparent area
-blend_alpha <hex> ..... blend colors against background color
expressed as RGB values written in
hexadecimal, e.g. 0xc0e0d0 for red=0xc0
green=0xe0 and blue=0xd0
-noalpha ............... discard any transparency information
-lossless .............. encode image losslessly, default=off
-near_lossless <int> ... use near-lossless image
preprocessing (0..100=off), default=100
-lossless .............. encode image losslessly
-hint <string> ......... specify image characteristics hint,
one of: photo, picture or graph
@ -343,21 +263,18 @@ Use following options to convert into alternate image formats:
-yuv ......... save the raw YUV samples in flat layout
Other options are:
-version ..... print version number and exit
-version .... print version number and exit
-nofancy ..... don't use the fancy YUV420 upscaler
-nofilter .... disable in-loop filtering
-nodither .... disable dithering
-dither <d> .. dithering strength (in 0..100)
-alpha_dither use alpha-plane dithering if needed
-mt .......... use multi-threading
-crop <x> <y> <w> <h> ... crop output with the given rectangle
-resize <w> <h> ......... scale the output (*after* any cropping)
-flip ........ flip the output vertically
-scale <w> <h> .......... scale the output (*after* any cropping)
-alpha ....... only save the alpha plane
-incremental . use incremental decoding (useful for tests)
-h ........... this help message
-v ........... verbose (e.g. print encoding/decoding times)
-quiet ....... quiet mode, don't print anything
-h ....... this help message
-v ....... verbose (e.g. print encoding/decoding times)
-noasm ....... disable all assembly optimizations
Visualization tool:
@ -372,20 +289,18 @@ Usage: vwebp in_file [options]
Decodes the WebP image file and visualize it using OpenGL
Options are:
-version ..... print version number and exit
-version .... print version number and exit
-noicc ....... don't use the icc profile if present
-nofancy ..... don't use the fancy YUV420 upscaler
-nofilter .... disable in-loop filtering
-dither <int> dithering strength (0..100), default=50
-noalphadither disable alpha plane dithering
-mt .......... use multi-threading
-info ........ print info
-h ........... this help message
-h ....... this help message
Keyboard shortcuts:
'c' ................ toggle use of color profile
'i' ................ overlay file information
'd' ................ disable blending & disposal (debug)
'q' / 'Q' / ESC .... quit
Building:
@ -414,37 +329,6 @@ $ make -f makefile.unix examples/vwebp
> nmake /f Makefile.vc CFG=release-static \
../obj/x64/release-static/bin/vwebp.exe
Animation creation tool:
========================
The utility 'img2webp' can turn a sequence of input images (PNG, JPEG, ...)
into an animated WebP file. It offers fine control over duration, encoding
modes, etc.
Usage:
img2webp [file-level options] [image files...] [per-frame options...]
File-level options (only used at the start of compression):
-min_size ............ minimize size
-loop <int> .......... loop count (default: 0, = infinite loop)
-kmax <int> .......... maximum number of frame between key-frames
(0=only keyframes)
-kmin <int> .......... minimum number of frame between key-frames
(0=disable key-frames altogether)
-mixed ............... use mixed lossy/lossless automatic mode
-v ................... verbose mode
-h ................... this help
Per-frame options (only used for subsequent images input):
-d <int> ............. frame duration in ms (default: 100)
-lossless ........... use lossless mode (default)
-lossy ... ........... use lossy mode
-q <float> ........... quality
-m <int> ............. method to use
example: img2webp -loop 2 in0.png -lossy in1.jpg
-d 80 in2.tiff -o out.webp
Animated GIF conversion:
========================
Animated GIF files can be converted to WebP files with animation using the
@ -454,16 +338,12 @@ vwebp.
Usage:
gif2webp [options] gif_file -o webp_file
Options:
-h / -help ............. this help
-h / -help ............ this help
-lossy ................. encode image using lossy compression
-mixed ................. for each frame in the image, pick lossy
or lossless compression heuristically
-q <float> ............. quality factor (0:small..100:big)
-m <int> ............... compression method (0=fast, 6=slowest)
-min_size .............. minimize output size (default:off)
lossless compression by default; can be
combined with -q, -m, -lossy or -mixed
options
-kmin <int> ............ min distance between key frames
-kmax <int> ............ max distance between key frames
-f <int> ............... filter strength (0=off..100)
@ -486,29 +366,6 @@ or using autoconf:
$ ./configure --enable-everything
$ make
Comparison of animated images:
==============================
Test utility anim_diff under examples/ can be used to compare two animated
images (each can be GIF or WebP).
Usage: anim_diff <image1> <image2> [options]
Options:
-dump_frames <folder> dump decoded frames in PAM format
-min_psnr <float> ... minimum per-frame PSNR
-raw_comparison ..... if this flag is not used, RGB is
premultiplied before comparison
Building:
---------
With the libgif development files and a C++ compiler installed, anim_diff can
be built using makefile.unix:
$ make -f makefile.unix examples/anim_diff
or using autoconf:
$ ./configure --enable-everything
$ make
Encoding API:
=============
@ -738,7 +595,7 @@ an otherwise too-large picture. Some CPU can be saved too, incidentally.
Bugs:
=====
Please report all bugs to the issue tracker:
Please report all bugs to our issue tracker:
https://bugs.chromium.org/p/webp
Patches welcome! See this page to get started:
http://www.webmproject.org/code/contribute/submitting-patches/


@ -1,7 +1,7 @@
(ASCII-art WebP logo banner; layout lost in extraction. The version string in the banner changes from v0.4.0 to v0.2.2.)
Description:
@ -25,8 +25,6 @@ A list of options is available using the -help command line flag:
> webpmux -help
Usage: webpmux -get GET_OPTIONS INPUT -o OUTPUT
webpmux -set SET_OPTIONS INPUT -o OUTPUT
webpmux -duration DURATION_OPTIONS [-duration ...]
INPUT -o OUTPUT
webpmux -strip STRIP_OPTIONS INPUT -o OUTPUT
webpmux -frame FRAME_OPTIONS [-frame...] [-loop LOOP_COUNT]
[-bgcolor BACKGROUND_COLOR] -o OUTPUT
@ -50,17 +48,6 @@ SET_OPTIONS:
'file.exif' contains the EXIF metadata to be set
'file.xmp' contains the XMP metadata to be set
DURATION_OPTIONS:
Set duration of selected frames:
duration set duration for each frames
duration,frame set duration of a particular frame
duration,start,end set duration of frames in the
interval [start,end])
where: 'duration' is the duration in milliseconds
'start' is the start frame index
'end' is the inclusive end frame index
The special 'end' value '0' means: last frame.
STRIP_OPTIONS:
Strip color profile/metadata:
icc strip ICC profile
@ -146,7 +133,7 @@ WebP files. This API currently supports reading of XMP/EXIF metadata, ICC
profile and animated images. Other features may be added in subsequent
releases.
Code example: Demuxing WebP data to extract all the frames, ICC profile
Code Example: Demuxing WebP data to extract all the frames, ICC profile
and EXIF/XMP metadata.
WebPDemuxer* demux = WebPDemux(&webp_data);
@ -183,35 +170,11 @@ and EXIF/XMP metadata.
For a detailed Demux API reference, please refer to the header file
(src/webp/demux.h).
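The demuxing example above is only partially visible in this view. As a self-contained sketch of the same idea (illustrative only; it assumes 'webp_data' is a WebPData wrapping a complete WebP file, and error handling is trimmed):

#include <stdio.h>
#include "webp/demux.h"

static void ListFrames(const WebPData* webp_data) {
  WebPDemuxer* const demux = WebPDemux(webp_data);
  if (demux == NULL) return;
  printf("canvas: %ux%u\n",
         WebPDemuxGetI(demux, WEBP_FF_CANVAS_WIDTH),
         WebPDemuxGetI(demux, WEBP_FF_CANVAS_HEIGHT));
  WebPIterator iter;
  if (WebPDemuxGetFrame(demux, 1, &iter)) {    // frame numbers are 1-based
    do {
      // iter.fragment holds this frame's bitstream; decode it with the
      // regular decoding API if pixel data is needed.
      printf("frame %d: %d ms\n", iter.frame_num, iter.duration);
    } while (WebPDemuxNextFrame(&iter));
    WebPDemuxReleaseIterator(&iter);
  }
  WebPDemuxDelete(demux);
}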
AnimEncoder API:
================
The AnimEncoder API can be used to create animated WebP images.
Code example:
WebPAnimEncoderOptions enc_options;
WebPAnimEncoderOptionsInit(&enc_options);
// ... (Tune 'enc_options' as needed).
WebPAnimEncoder* enc = WebPAnimEncoderNew(width, height, &enc_options);
while(<there are more frames>) {
WebPConfig config;
WebPConfigInit(&config);
// ... (Tune 'config' as needed).
WebPAnimEncoderAdd(enc, frame, duration, &config);
}
WebPAnimEncoderAssemble(enc, webp_data);
WebPAnimEncoderDelete(enc);
// ... (Write the 'webp_data' to a file, or re-mux it further).
For a detailed AnimEncoder API reference, please refer to the header file
(src/webp/mux.h).
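The decode-side counterpart of the example above is the WebPAnimDecoder API, declared in src/webp/demux.h. A minimal sketch, assuming 'webp_data' holds the animation assembled above (the returned frame buffers are owned by the decoder):

#include "webp/demux.h"

static void PlayAnimation(const WebPData* webp_data) {
  WebPAnimDecoderOptions dec_options;
  WebPAnimDecoderOptionsInit(&dec_options);
  // ... (Tune 'dec_options' as needed; frames are returned as RGBA by default).
  WebPAnimDecoder* const dec = WebPAnimDecoderNew(webp_data, &dec_options);
  if (dec == NULL) return;
  WebPAnimInfo anim_info;
  WebPAnimDecoderGetInfo(dec, &anim_info);   // canvas size, loop & frame count
  while (WebPAnimDecoderHasMoreFrames(dec)) {
    uint8_t* frame_rgba;
    int timestamp_ms;
    if (!WebPAnimDecoderGetNext(dec, &frame_rgba, &timestamp_ms)) break;
    // ... (Display 'frame_rgba', anim_info.canvas_width x anim_info.canvas_height
    //      pixels, at 'timestamp_ms').
  }
  WebPAnimDecoderDelete(dec);
}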
Bugs:
=====
Please report all bugs to the issue tracker:
Please report all bugs to our issue tracker:
https://bugs.chromium.org/p/webp
Patches welcome! See this page to get started:
http://www.webmproject.org/code/contribute/submitting-patches/


@ -1,404 +0,0 @@
// Define dependencies.
buildscript {
repositories {
maven {
url "https://jcenter.bintray.com"
}
}
dependencies {
classpath "com.android.tools.build:gradle:${ANDROID_GRADLE_PLUGIN_VERSION}"
}
}
// Define versions in the project.
project.ext {
buildToolsVersion = "${BUILD_TOOLS_VERSION}"
compileSdkVersion = COMPILE_SDK_VERSION.toInteger()
}
// Core libraries and executables.
apply plugin: "c"
def NEON
model {
buildTypes {
debug
release
}
platforms {
arm {
architecture "arm"
}
arm64 {
architecture "arm64"
}
x86 {
architecture "x86"
}
x64 {
architecture "x86_64"
}
mips32r2
mips32r5
mips64r6
}
toolChains {
gcc(Gcc) {
target("mips32r2") {
cCompiler.args "-mips32r2"
}
target("mips32r5") {
cCompiler.args "-mips32r5"
}
target("mips64r6") {
cCompiler.args "-mips64r6"
}
}
}
binaries {
all {
if (toolChain in Gcc) {
cCompiler.args "-fPIC"
cCompiler.args "-Wall"
cCompiler.define "ANDROID"
cCompiler.define "HAVE_MALLOC_H"
}
// Optimizations.
if (buildType == buildTypes.release) {
if (toolChain in Gcc) {
cCompiler.args "-finline-functions"
cCompiler.args "-ffast-math"
cCompiler.args "-ffunction-sections"
cCompiler.args "-fdata-sections"
}
if (toolChain in Clang) {
cCompiler.args "-frename-registers -s"
}
}
// Check for NEON usage.
if (getTargetPlatform() == "arm" || getTargetPlatform() == "arm64") {
NEON = "c.neon"
} else {
NEON = "c"
}
}
// Link to pthread for shared libraries.
withType(SharedLibraryBinarySpec) {
if (toolChain in Gcc) {
cCompiler.define "HAVE_PTHREAD"
cCompiler.define "WEBP_USE_THREAD"
linker.args "-pthread"
}
}
}
components {
webp(NativeLibrarySpec) {
sources {
c {
source {
srcDir "src/dec"
include "alpha_dec.c"
include "buffer_dec.c"
include "frame_dec.c"
include "idec_dec.c"
include "io_dec.c"
include "quant_dec.c"
include "tree_dec.c"
include "vp8_dec.c"
include "vp8l_dec.c"
include "webp_dec.c"
srcDir "src/dsp"
include "alpha_processing.c"
include "alpha_processing_mips_dsp_r2.c"
include "alpha_processing_neon.$NEON"
include "alpha_processing_sse2.c"
include "alpha_processing_sse41.c"
include "argb.c"
include "argb_mips_dsp_r2.c"
include "argb_sse2.c"
include "cpu.c"
include "dec.c"
include "dec_clip_tables.c"
include "dec_mips32.c"
include "dec_mips_dsp_r2.c"
include "dec_msa.c"
include "dec_neon.$NEON"
include "dec_sse2.c"
include "dec_sse41.c"
include "filters.c"
include "filters_mips_dsp_r2.c"
include "filters_msa.c"
include "filters_neon.$NEON"
include "filters_sse2.c"
include "lossless.c"
include "lossless_mips_dsp_r2.c"
include "lossless_msa.c"
include "lossless_neon.$NEON"
include "lossless_sse2.c"
include "rescaler.c"
include "rescaler_mips32.c"
include "rescaler_mips_dsp_r2.c"
include "rescaler_msa.c"
include "rescaler_neon.$NEON"
include "rescaler_sse2.c"
include "upsampling.c"
include "upsampling_mips_dsp_r2.c"
include "upsampling_msa.c"
include "upsampling_neon.$NEON"
include "upsampling_sse2.c"
include "yuv.c"
include "yuv_mips32.c"
include "yuv_mips_dsp_r2.c"
include "yuv_sse2.c"
srcDir "src/utils"
include "bit_reader_utils.c"
include "color_cache_utils.c"
include "filters_utils.c"
include "huffman_utils.c"
include "quant_levels_dec_utils.c"
include "random_utils.c"
include "rescaler_utils.c"
include "thread_utils.c"
include "utils.c"
srcDir "src/dsp"
include "cost.c"
include "cost_mips32.c"
include "cost_mips_dsp_r2.c"
include "cost_sse2.c"
include "enc.c"
include "enc_avx2.c"
include "enc_mips32.c"
include "enc_mips_dsp_r2.c"
include "enc_msa.c"
include "enc_neon.$NEON"
include "enc_sse2.c"
include "enc_sse41.c"
include "lossless_enc.c"
include "lossless_enc_mips32.c"
include "lossless_enc_mips_dsp_r2.c"
include "lossless_enc_msa.c"
include "lossless_enc_neon.$NEON"
include "lossless_enc_sse2.c"
include "lossless_enc_sse41.c"
srcDir "src/enc"
include "alpha_enc.c"
include "analysis_enc.c"
include "backward_references_enc.c"
include "config_enc.c"
include "cost_enc.c"
include "delta_palettization_enc.c"
include "filter_enc.c"
include "frame_enc.c"
include "histogram_enc.c"
include "iterator_enc.c"
include "near_lossless_enc.c"
include "picture_enc.c"
include "picture_csp_enc.c"
include "picture_psnr_enc.c"
include "picture_rescale_enc.c"
include "picture_tools_enc.c"
include "predictor_enc.c"
include "quant_enc.c"
include "syntax_enc.c"
include "token_enc.c"
include "tree_enc.c"
include "vp8l_enc.c"
include "webp_enc.c"
srcDir "src/utils"
include "bit_writer_utils.c"
include "huffman_encode_utils.c"
include "quant_levels_utils.c"
}
exportedHeaders {
srcDir "src"
}
}
}
}
webpdemux(NativeLibrarySpec) {
sources {
c {
source {
srcDir "src/demux"
include "anim_decode.c"
include "demux.c"
}
}
}
}
webpmux(NativeLibrarySpec) {
sources {
c {
source {
srcDir "src/mux/"
include "anim_encode.c"
include "muxedit.c"
include "muxinternal.c"
include "muxread.c"
}
}
}
}
// Executables from examples.
example_util(NativeLibrarySpec) {
binaries {
all {
lib library: "webp", linkage: "static"
}
}
sources {
c {
source {
srcDir "./examples"
include "example_util.c"
}
}
}
}
imageio_util(NativeLibrarySpec) {
binaries {
all {
lib library: "webp", linkage: "static"
}
}
sources {
c {
source {
srcDir "./imageio"
include "imageio_util.c"
}
}
}
}
imagedec(NativeLibrarySpec) {
binaries {
all {
lib library: "webp", linkage: "static"
}
}
sources {
c {
source {
srcDir "./imageio"
include "image_dec.c"
include "jpegdec.c"
include "metadata.c"
include "pngdec.c"
include "tiffdec.c"
include "webpdec.c"
}
}
}
}
imageenc(NativeLibrarySpec) {
binaries {
all {
lib library: "webp", linkage: "static"
lib library: "imageio_util", linkage: "static"
}
}
sources {
c {
source {
srcDir "./imageio"
include "image_enc.c"
}
}
}
}
cwebp(NativeExecutableSpec) {
binaries {
all {
lib library: "example_util", linkage: "static"
lib library: "imagedec", linkage: "static"
lib library: "imageio_util", linkage: "static"
lib library: "webp", linkage: "static"
}
}
sources {
c {
source {
srcDir "./examples"
include "cwebp.c"
}
}
}
}
dwebp(NativeExecutableSpec) {
binaries {
all {
lib library: "example_util", linkage: "static"
lib library: "imagedec", linkage: "static"
lib library: "imageenc", linkage: "static"
lib library: "imageio_util", linkage: "static"
lib library: "webp"
}
}
sources {
c {
source {
srcDir "./examples"
include "dwebp.c"
}
}
}
}
webpmux_example(NativeExecutableSpec) {
binaries {
all {
lib library: "example_util", linkage: "static"
lib library: "imageio_util", linkage: "static"
lib library: "webpmux", linkage: "static"
lib library: "webp"
}
}
sources {
c {
source {
srcDir "./examples"
include "webpmux.c"
}
}
}
}
img2webp_example(NativeExecutableSpec) {
binaries {
all {
lib library: "example_util", linkage: "static"
lib library: "imagedec", linkage: "static"
lib library: "imageio_util", linkage: "static"
lib library: "webpmux", linkage: "static"
lib library: "webp"
}
}
sources {
c {
source {
srcDir "./examples"
include "img2webp.c"
}
}
}
}
}
tasks {
// Task to test all possible configurations.
buildAllExecutables(Task) {
dependsOn $.binaries.findAll { it.buildable }
}
}
}
// Task to generate the wrapper.
task wrapper(type: Wrapper) {
gradleVersion = '2.13'
}


@ -1,141 +0,0 @@
# Generate the config.h to compile with specific intrinsics / libs.
## Check for compiler options.
include(CheckCSourceCompiles)
check_c_source_compiles("
int main(void) {
(void)__builtin_bswap16(0);
return 0;
}
"
HAVE_BUILTIN_BSWAP16
)
check_c_source_compiles("
int main(void) {
(void)__builtin_bswap32(0);
return 0;
}
"
HAVE_BUILTIN_BSWAP32
)
check_c_source_compiles("
int main(void) {
(void)__builtin_bswap64(0);
return 0;
}
"
HAVE_BUILTIN_BSWAP64
)
## Check for libraries.
find_package(Threads)
if(Threads_FOUND)
if(CMAKE_USE_PTHREADS_INIT)
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -pthread")
endif()
foreach(PTHREAD_TEST HAVE_PTHREAD_PRIO_INHERIT PTHREAD_CREATE_UNDETACHED)
check_c_source_compiles("
#include <pthread.h>
int main (void) {
int attr = ${PTHREAD_TEST};
return attr;
}
" ${PTHREAD_TEST}
)
endforeach()
list(APPEND WEBP_DEP_LIBRARIES ${CMAKE_THREAD_LIBS_INIT})
endif()
set(WEBP_USE_THREAD ${Threads_FOUND})
# TODO: this seems unused, check with autotools.
set(LT_OBJDIR ".libs/")
# Only useful for vwebp, so useless for now.
# find_package(OpenGL)
# set(WEBP_HAVE_GL ${OPENGL_FOUND})
# set(WEBP_DEP_INCLUDE_DIRS ${WEBP_DEP_INCLUDE_DIRS} ${OPENGL_INCLUDE_DIRS})
# set(WEBP_DEP_LIBRARIES ${WEBP_DEP_LIBRARIES} ${OPENGL_LIBRARIES})
# Find the standard C math library.
find_library(MATH_LIBRARY NAMES m)
if(MATH_LIBRARY)
list(APPEND WEBP_DEP_LIBRARIES ${MATH_LIBRARY})
endif()
# Find the standard image libraries.
set(WEBP_DEP_IMG_LIBRARIES)
set(WEBP_DEP_IMG_INCLUDE_DIRS)
foreach(I_LIB PNG JPEG TIFF GIF)
find_package(${I_LIB})
set(WEBP_HAVE_${I_LIB} ${${I_LIB}_FOUND})
if(${I_LIB}_FOUND)
list(APPEND WEBP_DEP_IMG_LIBRARIES ${${I_LIB}_LIBRARIES})
list(APPEND WEBP_DEP_IMG_INCLUDE_DIRS ${${I_LIB}_INCLUDE_DIRS})
endif()
endforeach()
## Check for specific headers.
include(CheckIncludeFiles)
check_include_files("stdlib.h;stdarg.h;string.h;float.h" STDC_HEADERS)
check_include_files(dlfcn.h HAVE_DLFCN_H)
check_include_files(GLUT/glut.h HAVE_GLUT_GLUT_H)
check_include_files(GL/glut.h HAVE_GL_GLUT_H)
check_include_files(inttypes.h HAVE_INTTYPES_H)
check_include_files(memory.h HAVE_MEMORY_H)
check_include_files(OpenGL/glut.h HAVE_OPENGL_GLUT_H)
check_include_files(shlwapi.h HAVE_SHLWAPI_H)
check_include_files(stdint.h HAVE_STDINT_H)
check_include_files(stdlib.h HAVE_STDLIB_H)
check_include_files(strings.h HAVE_STRINGS_H)
check_include_files(string.h HAVE_STRING_H)
check_include_files(sys/stat.h HAVE_SYS_STAT_H)
check_include_files(sys/types.h HAVE_SYS_TYPES_H)
check_include_files(unistd.h HAVE_UNISTD_H)
check_include_files(wincodec.h HAVE_WINCODEC_H)
check_include_files(windows.h HAVE_WINDOWS_H)
# Windows specifics
if(HAVE_WINCODEC_H)
list(APPEND WEBP_DEP_LIBRARIES shlwapi ole32 windowscodecs)
endif()
## Check for SIMD extensions.
include(${CMAKE_CURRENT_LIST_DIR}/cpu.cmake)
## Define extra info.
set(PACKAGE ${PROJECT_NAME})
set(PACKAGE_NAME ${PROJECT_NAME})
# Read from configure.ac.
file(READ ${CMAKE_CURRENT_SOURCE_DIR}/configure.ac CONFIGURE_AC)
string(REGEX MATCHALL "\\[([0-9a-z\\.:/]*)\\]"
CONFIGURE_AC_PACKAGE_INFO ${CONFIGURE_AC}
)
function(strip_bracket VAR)
string(LENGTH ${${VAR}} TMP_LEN)
math(EXPR TMP_LEN ${TMP_LEN}-2)
string(SUBSTRING ${${VAR}} 1 ${TMP_LEN} TMP_SUB)
set(${VAR} ${TMP_SUB} PARENT_SCOPE)
endfunction()
list(GET CONFIGURE_AC_PACKAGE_INFO 1 PACKAGE_VERSION)
strip_bracket(PACKAGE_VERSION)
list(GET CONFIGURE_AC_PACKAGE_INFO 2 PACKAGE_BUGREPORT)
strip_bracket(PACKAGE_BUGREPORT)
list(GET CONFIGURE_AC_PACKAGE_INFO 3 PACKAGE_URL)
strip_bracket(PACKAGE_URL)
# Build more info.
set(PACKAGE_STRING "${PACKAGE_NAME} ${PACKAGE_VERSION}")
set(PACKAGE_TARNAME ${PACKAGE_NAME})
set(VERSION ${PACKAGE_VERSION})
## Generate the config.h header.
configure_file(${CMAKE_CURRENT_LIST_DIR}/config.h.in
${CMAKE_CURRENT_BINARY_DIR}/include/webp/config.h)
add_definitions(-DHAVE_CONFIG_H)
# The webp folder is included as we reference config.h as
# ../webp/config.h or webp/config.h
include_directories(${CMAKE_CURRENT_BINARY_DIR}/include
${CMAKE_CURRENT_BINARY_DIR}/include/webp
)


@ -1,143 +0,0 @@
/* Adapted from the autotools src/webp/config.h.in. */
/* Define if building universal (internal helper macro) */
/* TODO: handle properly in CMake */
#cmakedefine AC_APPLE_UNIVERSAL_BUILD 1
/* Set to 1 if __builtin_bswap16 is available */
#cmakedefine HAVE_BUILTIN_BSWAP16 1
/* Set to 1 if __builtin_bswap32 is available */
#cmakedefine HAVE_BUILTIN_BSWAP32 1
/* Set to 1 if __builtin_bswap64 is available */
#cmakedefine HAVE_BUILTIN_BSWAP64 1
/* Define to 1 if you have the <dlfcn.h> header file. */
#cmakedefine HAVE_DLFCN_H 1
/* Define to 1 if you have the <GLUT/glut.h> header file. */
#cmakedefine HAVE_GLUT_GLUT_H 1
/* Define to 1 if you have the <GL/glut.h> header file. */
#cmakedefine HAVE_GL_GLUT_H 1
/* Define to 1 if you have the <inttypes.h> header file. */
#cmakedefine HAVE_INTTYPES_H 1
/* Define to 1 if you have the <memory.h> header file. */
#cmakedefine HAVE_MEMORY_H 1
/* Define to 1 if you have the <OpenGL/glut.h> header file. */
#cmakedefine HAVE_OPENGL_GLUT_H 1
/* Have PTHREAD_PRIO_INHERIT. */
#cmakedefine HAVE_PTHREAD_PRIO_INHERIT @HAVE_PTHREAD_PRIO_INHERIT@
/* Define to 1 if you have the <shlwapi.h> header file. */
#cmakedefine HAVE_SHLWAPI_H 1
/* Define to 1 if you have the <stdint.h> header file. */
#cmakedefine HAVE_STDINT_H 1
/* Define to 1 if you have the <stdlib.h> header file. */
#cmakedefine HAVE_STDLIB_H 1
/* Define to 1 if you have the <strings.h> header file. */
#cmakedefine HAVE_STRINGS_H 1
/* Define to 1 if you have the <string.h> header file. */
#cmakedefine HAVE_STRING_H 1
/* Define to 1 if you have the <sys/stat.h> header file. */
#cmakedefine HAVE_SYS_STAT_H 1
/* Define to 1 if you have the <sys/types.h> header file. */
#cmakedefine HAVE_SYS_TYPES_H 1
/* Define to 1 if you have the <unistd.h> header file. */
#cmakedefine HAVE_UNISTD_H 1
/* Define to 1 if you have the <wincodec.h> header file. */
#cmakedefine HAVE_WINCODEC_H 1
/* Define to 1 if you have the <windows.h> header file. */
#cmakedefine HAVE_WINDOWS_H 1
/* Define to the sub-directory in which libtool stores uninstalled libraries.
*/
/* TODO: handle properly in CMake */
#cmakedefine LT_OBJDIR "@LT_OBJDIR@"
/* Name of package */
#cmakedefine PACKAGE "@PROJECT_NAME@"
/* Define to the address where bug reports for this package should be sent. */
#cmakedefine PACKAGE_BUGREPORT "@PACKAGE_BUGREPORT@"
/* Define to the full name of this package. */
#cmakedefine PACKAGE_NAME "@PACKAGE_NAME@"
/* Define to the full name and version of this package. */
#cmakedefine PACKAGE_STRING "@PACKAGE_STRING@"
/* Define to the one symbol short name of this package. */
#cmakedefine PACKAGE_TARNAME "@PACKAGE_TARNAME@"
/* Define to the home page for this package. */
#cmakedefine PACKAGE_URL "@PACKAGE_URL@"
/* Define to the version of this package. */
#cmakedefine PACKAGE_VERSION "@PACKAGE_VERSION@"
/* Define to necessary symbol if this constant uses a non-standard name on
your system. */
#cmakedefine PTHREAD_CREATE_JOINABLE 1
/* Define to 1 if you have the ANSI C header files. */
#cmakedefine STDC_HEADERS 1
/* Version number of package */
#cmakedefine VERSION "@VERSION@"
/* Enable experimental code */
#cmakedefine WEBP_EXPERIMENTAL_FEATURES 1
/* Set to 1 if AVX2 is supported */
#cmakedefine WEBP_HAVE_AVX2 1
/* Set to 1 if GIF library is installed */
#cmakedefine WEBP_HAVE_GIF 1
/* Set to 1 if OpenGL is supported */
#cmakedefine WEBP_HAVE_GL 1
/* Set to 1 if JPEG library is installed */
#cmakedefine WEBP_HAVE_JPEG 1
/* Set to 1 if PNG library is installed */
#cmakedefine WEBP_HAVE_PNG 1
/* Set to 1 if SSE2 is supported */
#cmakedefine WEBP_HAVE_SSE2 1
/* Set to 1 if SSE4.1 is supported */
#cmakedefine WEBP_HAVE_SSE41 1
/* Set to 1 if TIFF library is installed */
#cmakedefine WEBP_HAVE_TIFF 1
/* Undefine this to disable thread support. */
#cmakedefine WEBP_USE_THREAD 1
/* Define WORDS_BIGENDIAN to 1 if your processor stores words with the most
significant byte first (like Motorola and SPARC, unlike Intel). */
#if defined AC_APPLE_UNIVERSAL_BUILD
# if defined __BIG_ENDIAN__
# define WORDS_BIGENDIAN 1
# endif
#else
# ifndef WORDS_BIGENDIAN
# undef WORDS_BIGENDIAN
# endif
#endif


@ -1,113 +0,0 @@
## Check for SIMD extensions.
function(webp_check_compiler_flag WEBP_SIMD_FLAG)
unset(WEBP_HAVE_FLAG_${WEBP_SIMD_FLAG} CACHE)
check_c_source_compiles("
#include \"${CMAKE_CURRENT_LIST_DIR}/../src/dsp/dsp.h\"
int main(void) {
#if !defined(WEBP_USE_${WEBP_SIMD_FLAG})
this is not valid code
#endif
return 0;
}
" WEBP_HAVE_FLAG_${WEBP_SIMD_FLAG}
)
if(WEBP_HAVE_FLAG_${WEBP_SIMD_FLAG})
set(WEBP_HAVE_${WEBP_SIMD_FLAG} 1 PARENT_SCOPE)
else()
set(WEBP_HAVE_${WEBP_SIMD_FLAG} 0 PARENT_SCOPE)
endif()
endfunction()
# those are included in the names of WEBP_USE_* in c++ code.
set(WEBP_SIMD_FLAGS "SSE2;SSE41;AVX2;MIPS32;MIPS_DSP_R2;NEON;MSA")
set(WEBP_SIMD_FILE_EXTENSIONS "_sse2.c;_sse41.c;_avx2.c;_mips32.c;_mips_dsp_r2.c;_neon.c;_msa.c")
if(MSVC)
# MSVC does not have a SSE4 flag but AVX2 support implies
# SSE4 support.
set(SIMD_ENABLE_FLAGS "/arch:SSE2;/arch:AVX2;/arch:AVX2;;;;")
set(SIMD_DISABLE_FLAGS)
else()
set(SIMD_ENABLE_FLAGS "-msse2;-msse4.1;-mavx2;-mips32;-mdspr2;-mfpu=neon;-mmsa")
set(SIMD_DISABLE_FLAGS "-mno-sse2;-mno-sse4.1;-mno-avx2;;-mno-dspr2;;-mno-msa")
endif()
set(WEBP_SIMD_FILES_TO_NOT_INCLUDE)
set(WEBP_SIMD_FILES_TO_INCLUDE)
set(WEBP_SIMD_FLAGS_TO_INCLUDE)
if(${ANDROID})
if(${ANDROID_ABI} STREQUAL "armeabi-v7a")
# This is because Android studio uses the configuration
# "-march=armv7-a -mfloat-abi=softfp -mfpu=vfpv3-d16"
# that does not trigger neon optimizations but should
# (as this configuration does not exist anymore).
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mfpu=neon ")
endif()
endif()
list(LENGTH WEBP_SIMD_FLAGS WEBP_SIMD_FLAGS_LENGTH)
math(EXPR WEBP_SIMD_FLAGS_RANGE "${WEBP_SIMD_FLAGS_LENGTH} - 1")
foreach(I_SIMD RANGE ${WEBP_SIMD_FLAGS_RANGE})
list(GET WEBP_SIMD_FLAGS ${I_SIMD} WEBP_SIMD_FLAG)
# First try with no extra flag added as the compiler might have default flags
# (especially on Android).
unset(WEBP_HAVE_${WEBP_SIMD_FLAG} CACHE)
set(CMAKE_REQUIRED_FLAGS)
webp_check_compiler_flag(${WEBP_SIMD_FLAG})
if(NOT WEBP_HAVE_${WEBP_SIMD_FLAG})
list(GET SIMD_ENABLE_FLAGS ${I_SIMD} SIMD_COMPILE_FLAG)
set(CMAKE_REQUIRED_FLAGS ${SIMD_COMPILE_FLAG})
webp_check_compiler_flag(${WEBP_SIMD_FLAG})
else()
set(SIMD_COMPILE_FLAG " ")
endif()
# Check which files we should include or not.
list(GET WEBP_SIMD_FILE_EXTENSIONS ${I_SIMD} WEBP_SIMD_FILE_EXTENSION)
file(GLOB SIMD_FILES "${CMAKE_CURRENT_LIST_DIR}/../"
"src/dsp/*${WEBP_SIMD_FILE_EXTENSION}"
)
if(WEBP_HAVE_${WEBP_SIMD_FLAG})
# Memorize the file and flags.
foreach(FILE ${SIMD_FILES})
list(APPEND WEBP_SIMD_FILES_TO_INCLUDE ${FILE})
list(APPEND WEBP_SIMD_FLAGS_TO_INCLUDE ${SIMD_COMPILE_FLAG})
endforeach()
else()
# Remove the file from the list.
foreach(FILE ${SIMD_FILES})
list(APPEND WEBP_SIMD_FILES_NOT_TO_INCLUDE ${FILE})
endforeach()
# Explicitly disable SIMD.
if(SIMD_DISABLE_FLAGS)
list(GET SIMD_DISABLE_FLAGS ${I_SIMD} SIMD_COMPILE_FLAG)
include(CheckCCompilerFlag)
if(SIMD_COMPILE_FLAG)
unset(HAS_COMPILE_FLAG CACHE)
check_c_compiler_flag(${SIMD_COMPILE_FLAG} HAS_COMPILE_FLAG)
if(HAS_COMPILE_FLAG)
# Do one more check for Clang to circumvent CMake issue 13194.
if(COMMAND check_compiler_flag_common_patterns)
# Only in CMake 3.0 and above.
check_compiler_flag_common_patterns(COMMON_PATTERNS)
else()
set(COMMON_PATTERNS)
endif()
set(CMAKE_REQUIRED_DEFINITIONS ${SIMD_COMPILE_FLAG})
check_c_source_compiles("int main(void) {return 0;}" FLAG2
FAIL_REGEX "warning: argument unused during compilation:"
${COMMON_PATTERNS}
)
if(NOT FLAG2)
unset(HAS_COMPILE_FLAG CACHE)
endif()
endif()
if(HAS_COMPILE_FLAG)
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${SIMD_COMPILE_FLAG}")
endif()
endif()
endif()
endif()
endforeach()


@ -1,4 +1,4 @@
AC_INIT([libwebp], [0.6.0],
AC_INIT([libwebp], [0.4.4],
[https://bugs.chromium.org/p/webp],,
[http://developers.google.com/speed/webp])
AC_CANONICAL_HOST
@ -10,7 +10,6 @@ dnl === it must occur before LT_INIT (AC_PROG_LIBTOOL).
m4_ifdef([AM_PROG_AR], [AM_PROG_AR])
AC_PROG_LIBTOOL
AC_PROG_SED
AM_PROG_CC_C_O
dnl === Enable less verbose output when building.
@ -29,21 +28,8 @@ AC_ARG_ENABLE([everything],
disabled with --disable-target]),
[SET_IF_UNSET([enable_libwebpdecoder], [$enableval])
SET_IF_UNSET([enable_libwebpdemux], [$enableval])
SET_IF_UNSET([enable_libwebpextras], [$enableval])
SET_IF_UNSET([enable_libwebpmux], [$enableval])])
dnl === If --enable-asserts is not defined, define NDEBUG
AC_MSG_CHECKING(whether asserts are enabled)
AC_ARG_ENABLE([asserts],
AS_HELP_STRING([--enable-asserts],
[Enable assert checks]))
if test "x${enable_asserts-no}" = "xno"; then
AM_CPPFLAGS="${AM_CPPFLAGS} -DNDEBUG"
fi
AC_MSG_RESULT(${enable_asserts-no})
AC_SUBST([AM_CPPFLAGS])
AC_ARG_WITH([pkgconfigdir], AS_HELP_STRING([--with-pkgconfigdir=DIR],
[Path to the pkgconfig directory @<:@LIBDIR/pkgconfig@:>@]),
[pkgconfigdir="$withval"], [pkgconfigdir='${libdir}/pkgconfig'])
@ -65,19 +51,15 @@ AC_DEFUN([TEST_AND_ADD_CFLAGS],
[$1="${$1} $2"],
[AC_MSG_RESULT([no])])
CFLAGS="$SAVED_CFLAGS"])
TEST_AND_ADD_CFLAGS([AM_CFLAGS], [-fvisibility=hidden])
TEST_AND_ADD_CFLAGS([AM_CFLAGS], [-Wall])
TEST_AND_ADD_CFLAGS([AM_CFLAGS], [-Wdeclaration-after-statement])
TEST_AND_ADD_CFLAGS([AM_CFLAGS], [-Wextra])
TEST_AND_ADD_CFLAGS([AM_CFLAGS], [-Wfloat-conversion])
TEST_AND_ADD_CFLAGS([AM_CFLAGS], [-Wformat -Wformat-nonliteral])
TEST_AND_ADD_CFLAGS([AM_CFLAGS], [-Wformat -Wformat-security])
TEST_AND_ADD_CFLAGS([AM_CFLAGS], [-Wformat-nonliteral])
TEST_AND_ADD_CFLAGS([AM_CFLAGS], [-Wformat-security])
TEST_AND_ADD_CFLAGS([AM_CFLAGS], [-Wmissing-declarations])
TEST_AND_ADD_CFLAGS([AM_CFLAGS], [-Wmissing-prototypes])
TEST_AND_ADD_CFLAGS([AM_CFLAGS], [-Wold-style-definition])
TEST_AND_ADD_CFLAGS([AM_CFLAGS], [-Wshadow])
TEST_AND_ADD_CFLAGS([AM_CFLAGS], [-Wshorten-64-to-32])
TEST_AND_ADD_CFLAGS([AM_CFLAGS], [-Wunreachable-code])
TEST_AND_ADD_CFLAGS([AM_CFLAGS], [-Wunused-but-set-variable])
TEST_AND_ADD_CFLAGS([AM_CFLAGS], [-Wunused])
TEST_AND_ADD_CFLAGS([AM_CFLAGS], [-Wvla])
@ -94,156 +76,36 @@ AS_IF([test "$GCC" = "yes" ], [
esac
AS_IF([test "$gcc_wht_bug" = "yes"], [
TEST_AND_ADD_CFLAGS([AM_CFLAGS], [-frename-registers])])])
# Use -flax-vector-conversions, if available, when building intrinsics with
# older versions of gcc. The flag appeared in 4.3.x, but if backported, and
# -fno-lax-vector-conversions is set, errors may occur with the intrinsics
# files along with the older system includes, e.g., emmintrin.h.
# Originally observed with cc (GCC) 4.2.1 20070831 patched [FreeBSD] (9.3).
# https://bugs.chromium.org/p/webp/issues/detail?id=274
AS_IF([test "$GCC" = "yes" ], [
case "$host_cpu" in
amd64|i?86|x86_64)
AC_COMPILE_IFELSE(
dnl only check for -flax-vector-conversions with older gcc, skip
dnl clang as it reports itself as 4.2.1, but the flag isn't needed.
[AC_LANG_SOURCE([#if !defined(__clang__) && defined(__GNUC__) && \
((__GNUC__ << 8) | __GNUC_MINOR__) < 0x403
#error old gcc
#endif
int main(void) { return 0; }
])],,
[TEST_AND_ADD_CFLAGS([INTRINSICS_CFLAGS],
[-flax-vector-conversions])])
;;
esac])
AC_SUBST([AM_CFLAGS])
dnl === Check for machine specific flags
AC_ARG_ENABLE([avx2],
AS_HELP_STRING([--disable-avx2],
[Disable detection of AVX2 support
@<:@default=auto@:>@]))
TEST_AND_ADD_CFLAGS([AVX2_FLAGS], [-mavx2])
AS_IF([test -n "$AVX2_FLAGS"], [
SAVED_CFLAGS=$CFLAGS
CFLAGS="$CFLAGS $AVX2_FLAGS"
AC_CHECK_HEADER([immintrin.h],
[AC_DEFINE(WEBP_HAVE_AVX2, [1],
[Set to 1 if AVX2 is supported])],
[AVX2_FLAGS=""],
dnl it's illegal to directly include avx2intrin.h, but it's
dnl included conditionally in immintrin.h, tricky!
[#ifndef __AVX2__
#error avx2 is not enabled
#endif
])
CFLAGS=$SAVED_CFLAGS])
AC_SUBST([AVX2_FLAGS])
AS_IF([test "x$enable_avx2" != "xno" -a "x$enable_sse4_1" != "xno" \
-a "x$enable_sse2" != "xno"], [
AVX2_CFLAGS="$INTRINSICS_CFLAGS $AVX2_FLAGS"
TEST_AND_ADD_CFLAGS([AVX2_FLAGS], [-mavx2])
AS_IF([test -n "$AVX2_FLAGS"], [
SAVED_CFLAGS=$CFLAGS
CFLAGS="$CFLAGS $AVX2_FLAGS"
AC_CHECK_HEADER([immintrin.h],
[AC_DEFINE(WEBP_HAVE_AVX2, [1],
[Set to 1 if AVX2 is supported])],
[AVX2_FLAGS=""],
dnl it's illegal to directly include avx2intrin.h, but it's
dnl included conditionally in immintrin.h, tricky!
[#ifndef __AVX2__
#error avx2 is not enabled
#endif
])
CFLAGS=$SAVED_CFLAGS])
AC_SUBST([AVX2_FLAGS])])
AC_ARG_ENABLE([sse4.1],
AS_HELP_STRING([--disable-sse4.1],
[Disable detection of SSE4.1 support
@<:@default=auto@:>@]))
AS_IF([test "x$enable_sse4_1" != "xno" -a "x$enable_sse2" != "xno"], [
SSE41_FLAGS="$INTRINSICS_CFLAGS $SSE41_FLAGS"
TEST_AND_ADD_CFLAGS([SSE41_FLAGS], [-msse4.1])
AS_IF([test -n "$SSE41_FLAGS"], [
SAVED_CFLAGS=$CFLAGS
CFLAGS="$CFLAGS $SSE41_FLAGS"
AC_CHECK_HEADER([smmintrin.h],
[AC_DEFINE(WEBP_HAVE_SSE41, [1],
[Set to 1 if SSE4.1 is supported])],
[SSE41_FLAGS=""])
CFLAGS=$SAVED_CFLAGS])
AC_SUBST([SSE41_FLAGS])])
AC_ARG_ENABLE([sse2],
AS_HELP_STRING([--disable-sse2],
[Disable detection of SSE2 support
@<:@default=auto@:>@]))
AS_IF([test "x$enable_sse2" != "xno"], [
SSE2_FLAGS="$INTRINSICS_CFLAGS $SSE2_FLAGS"
TEST_AND_ADD_CFLAGS([SSE2_FLAGS], [-msse2])
AS_IF([test -n "$SSE2_FLAGS"], [
SAVED_CFLAGS=$CFLAGS
CFLAGS="$CFLAGS $SSE2_FLAGS"
AC_CHECK_HEADER([emmintrin.h],
[AC_DEFINE(WEBP_HAVE_SSE2, [1],
[Set to 1 if SSE2 is supported])],
[SSE2_FLAGS=""])
CFLAGS=$SAVED_CFLAGS])
AC_SUBST([SSE2_FLAGS])])
AC_ARG_ENABLE([neon],
AS_HELP_STRING([--disable-neon],
[Disable detection of NEON support
@<:@default=auto@:>@]))
AC_ARG_ENABLE([neon_rtcd],
AS_HELP_STRING([--disable-neon-rtcd],
[Disable runtime detection of NEON support via
/proc/cpuinfo on Linux hosts
@<:@default=auto@:>@]))
# For ARM(7) hosts:
# Both NEON flags unset and NEON support detected = build all modules with NEON
# NEON detected with the use of -mfpu=neon = build only NEON modules with NEON
AS_IF([test "x$enable_neon" != "xno"], [
case "$host_cpu" in
arm|armv7*)
# Test for NEON support without flags before falling back to -mfpu=neon
for flag in '' '-mfpu=neon'; do
LOCAL_NEON_FLAGS="$INTRINSICS_CFLAGS $NEON_FLAGS"
TEST_AND_ADD_CFLAGS([LOCAL_NEON_FLAGS], [$flag])
SAVED_CFLAGS=$CFLAGS
CFLAGS="$CFLAGS $LOCAL_NEON_FLAGS"
dnl Note AC_LANG_PROGRAM([]) uses an old-style main definition.
AC_COMPILE_IFELSE([AC_LANG_SOURCE([
#include <arm_neon.h>
int main(void) {
int8x8_t v = vdup_n_s8(0);
(void)v;
return 0;
}])],
[NEON_FLAGS="$(echo $LOCAL_NEON_FLAGS | $SED 's/^ *//')"
AS_IF([test -n "$NEON_FLAGS"], [
AS_IF([test "${host_os%%-*}" = "linux" -o \
"x$enable_neon_rtcd" = "xno"], [
CFLAGS=$SAVED_CFLAGS
AC_DEFINE(WEBP_HAVE_NEON, [1], [Set to 1 if NEON is supported])
break
],[
AC_MSG_WARN(m4_normalize([NEON runtime cpu-detection is
unavailable for ${host_os%%-*}. Force
with CFLAGS=-mfpu=neon or
--disable-neon-rtcd.]))
enable_neon_rtcd=no
NEON_FLAGS=""
])
],[
CFLAGS=$SAVED_CFLAGS
AC_DEFINE(WEBP_HAVE_NEON, [1], [Set to 1 if NEON is supported])
break
])])
CFLAGS=$SAVED_CFLAGS
done
AS_IF([test -n "$NEON_FLAGS"], [
# If NEON is available and rtcd is disabled apply NEON_FLAGS globally.
AS_IF([test "x$enable_neon_rtcd" = "xno"], [
AM_CFLAGS="$AM_CFLAGS $NEON_FLAGS"
NEON_FLAGS=""],
[AC_DEFINE(WEBP_HAVE_NEON_RTCD, [1],
[Set to 1 if runtime detection of NEON is enabled])])])
;;
esac
AC_SUBST([NEON_FLAGS])])
TEST_AND_ADD_CFLAGS([SSE2_FLAGS], [-msse2])
AS_IF([test -n "$SSE2_FLAGS"], [
SAVED_CFLAGS=$CFLAGS
CFLAGS="$CFLAGS $SSE2_FLAGS"
AC_CHECK_HEADER([emmintrin.h],
[AC_DEFINE(WEBP_HAVE_SSE2, [1],
[Set to 1 if SSE2 is supported])],
[SSE2_FLAGS=""])
CFLAGS=$SAVED_CFLAGS])
AC_SUBST([SSE2_FLAGS])
dnl === CLEAR_LIBVARS([var_pfx])
dnl === Clears <var_pfx>_{INCLUDES,LIBS}.
@ -288,7 +150,7 @@ dnl === AC_DEFINE'ing <define> if successful.
AC_DEFUN([CHECK_FOR_BUILTIN],
[AC_LANG_PUSH([C])
AC_MSG_CHECKING([for $1])
AC_LINK_IFELSE([AC_LANG_PROGRAM([], [(void)$1($2)])],
AC_LINK_IFELSE([AC_LANG_PROGRAM([], [$1($2)])],
[AC_MSG_RESULT([yes])
AC_DEFINE([$3], [1],
[Set to 1 if $1 is available])],
@ -543,24 +405,13 @@ AS_IF([test "x$enable_gif" != "xno"], [
)
LIBCHECK_EPILOGUE([GIF])
if test "$gif_support" = "yes" -a \
"$enable_libwebpdemux" = "yes"; then
build_animdiff=yes
fi
if test "$gif_support" = "yes" -a \
"$enable_libwebpmux" = "yes"; then
build_gif2webp=yes
fi
])
AM_CONDITIONAL([BUILD_ANIMDIFF], [test "${build_animdiff}" = "yes"])
AM_CONDITIONAL([BUILD_GIF2WEBP], [test "${build_gif2webp}" = "yes"])
if test "$enable_libwebpmux" = "yes"; then
build_img2webp=yes
fi
AM_CONDITIONAL([BUILD_IMG2WEBP], [test "${build_img2webp}" = "yes"])
dnl === check for WIC support ===
AC_ARG_ENABLE([wic],
@ -613,6 +464,19 @@ if test "$enable_wic" = "yes"; then
fi
esac
dnl === If --enable-aligned is defined, define WEBP_FORCE_ALIGNED
AC_MSG_CHECKING(if --enable-aligned option is specified)
AC_ARG_ENABLE([aligned],
AS_HELP_STRING([--enable-aligned],
[Force aligned memory operations in non-dsp code
(may be slower)]))
if test "$enable_aligned" = "yes"; then
AC_DEFINE(WEBP_FORCE_ALIGNED, [1],
[Define to 1 to force aligned memory operations])
fi
AC_MSG_RESULT(${enable_aligned-no})
dnl === If --enable-swap-16bit-csp is defined, add -DWEBP_SWAP_16BIT_CSP
USE_SWAP_16BIT_CSP=""
@ -663,21 +527,13 @@ AC_ARG_ENABLE([libwebpdecoder],
AC_MSG_RESULT(${enable_libwebpdecoder-no})
AM_CONDITIONAL([BUILD_LIBWEBPDECODER], [test "$enable_libwebpdecoder" = "yes"])
dnl === Check whether libwebpextras should be built
AC_MSG_CHECKING(whether libwebpextras is to be built)
AC_ARG_ENABLE([libwebpextras],
AS_HELP_STRING([--enable-libwebpextras],
[Build libwebpextras @<:@default=no@:>@]))
AC_MSG_RESULT(${enable_libwebpextras-no})
AM_CONDITIONAL([WANT_EXTRAS], [test "$enable_libwebpextras" = "yes"])
dnl =========================
AC_CONFIG_MACRO_DIR([m4])
AC_CONFIG_HEADERS([src/webp/config.h])
AC_CONFIG_FILES([Makefile src/Makefile man/Makefile \
examples/Makefile extras/Makefile imageio/Makefile \
src/dec/Makefile src/enc/Makefile src/dsp/Makefile \
examples/Makefile src/dec/Makefile \
src/enc/Makefile src/dsp/Makefile \
src/demux/Makefile src/mux/Makefile \
src/utils/Makefile \
src/libwebp.pc src/libwebpdecoder.pc \
@ -697,7 +553,6 @@ libwebp: yes
libwebpdecoder: ${enable_libwebpdecoder-no}
libwebpdemux: ${enable_libwebpdemux-no}
libwebpmux: ${enable_libwebpmux-no}
libwebpextras: ${enable_libwebpextras-no}
Tools:
cwebp : yes
@ -713,9 +568,7 @@ dwebp : yes
PNG : ${png_support-no}
WIC : ${wic_support-no}
GIF support : ${gif_support-no}
anim_diff : ${build_animdiff-no}
gif2webp : ${build_gif2webp-no}
img2webp : ${build_img2webp-no}
webpmux : ${enable_libwebpmux-no}
vwebp : ${build_vwebp-no}
])


@ -429,11 +429,6 @@ int8 ColorTransformDelta(int8 t, int8 c) {
}
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
A conversion from the 8-bit unsigned representation (uint8) to the 8-bit
signed one (int8) is required before calling ColorTransformDelta().
It should be performed using 8-bit two's complement (that is: uint8 range
\[128-255\] is mapped to the \[-128, -1\] range of its converted int8 value).
The multiplication is to be done using more precision (with at least
16-bit dynamics). The sign extension property of the shift operation
does not matter here: only the lowest 8 bits are used from the result,
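To make the two steps above concrete, a small illustrative sketch (the helper names are mine, not part of the specification): reinterpret the uint8 values as two's-complement int8, multiply with at least 16-bit precision, and keep only the low 8 bits of the shifted product.

~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#include <stdint.h>

static int8_t ToSigned8(uint8_t v) {
  return (int8_t)(v < 128 ? v : v - 256);   // 128..255 maps to -128..-1
}

static uint8_t TransformDeltaLow8(uint8_t transform, uint8_t channel) {
  const int16_t product = (int16_t)ToSigned8(transform) * ToSigned8(channel);
  return (uint8_t)((product >> 5) & 0xff);  // only the lowest 8 bits are used
}
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~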


@ -24,10 +24,15 @@ include $(CLEAR_VARS)
# minor modification to their Android.mk files.
LOCAL_SRC_FILES := \
cwebp.c \
jpegdec.c \
metadata.c \
pngdec.c \
tiffdec.c \
webpdec.c \
LOCAL_CFLAGS := $(WEBP_CFLAGS)
LOCAL_C_INCLUDES := $(LOCAL_PATH)/../src
LOCAL_STATIC_LIBRARIES := example_util imageio_util imagedec webp
LOCAL_STATIC_LIBRARIES := example_util webp
LOCAL_MODULE := cwebp
@ -43,7 +48,7 @@ LOCAL_SRC_FILES := \
LOCAL_CFLAGS := $(WEBP_CFLAGS)
LOCAL_C_INCLUDES := $(LOCAL_PATH)/../src
LOCAL_STATIC_LIBRARIES := example_util imagedec imageenc webp
LOCAL_STATIC_LIBRARIES := example_util webp
LOCAL_MODULE := dwebp
@ -59,24 +64,8 @@ LOCAL_SRC_FILES := \
LOCAL_CFLAGS := $(WEBP_CFLAGS)
LOCAL_C_INCLUDES := $(LOCAL_PATH)/../src
LOCAL_STATIC_LIBRARIES := example_util imageio_util webpmux webp
LOCAL_STATIC_LIBRARIES := example_util webpmux webp
LOCAL_MODULE := webpmux_example
include $(BUILD_EXECUTABLE)
################################################################################
# img2webp
include $(CLEAR_VARS)
LOCAL_SRC_FILES := \
img2webp.c \
LOCAL_CFLAGS := $(WEBP_CFLAGS)
LOCAL_C_INCLUDES := $(LOCAL_PATH)/../src
LOCAL_STATIC_LIBRARIES := example_util imageio_util imagedec webpmux webp
LOCAL_MODULE := img2webp_example
include $(BUILD_EXECUTABLE)


@ -1,75 +1,54 @@
AM_CPPFLAGS += -I$(top_builddir)/src -I$(top_srcdir)/src
AM_CPPFLAGS = -I$(top_builddir)/src -I$(top_srcdir)/src
bin_PROGRAMS = dwebp cwebp
if BUILD_ANIMDIFF
noinst_PROGRAMS = anim_diff
endif
if BUILD_GIF2WEBP
bin_PROGRAMS += gif2webp
endif
if BUILD_IMG2WEBP
bin_PROGRAMS += img2webp
if BUILD_VWEBP
bin_PROGRAMS += vwebp
endif
if WANT_MUX
bin_PROGRAMS += webpmux
endif
if BUILD_VWEBP
bin_PROGRAMS += vwebp
if BUILD_GIF2WEBP
bin_PROGRAMS += gif2webp
endif
noinst_LTLIBRARIES = libexample_util.la
noinst_LTLIBRARIES = libexampleutil.la
libexample_util_la_SOURCES = example_util.c example_util.h
libexample_util_la_LIBADD = ../src/libwebp.la
anim_diff_SOURCES = anim_diff.c anim_util.c anim_util.h
anim_diff_CPPFLAGS = $(AM_CPPFLAGS) $(USE_EXPERIMENTAL_CODE) $(GIF_INCLUDES)
anim_diff_LDADD = ../src/demux/libwebpdemux.la
anim_diff_LDADD += libexample_util.la ../imageio/libimageio_util.la
anim_diff_LDADD += $(GIF_LIBS) -lm
cwebp_SOURCES = cwebp.c stopwatch.h
cwebp_CPPFLAGS = $(AM_CPPFLAGS) $(USE_EXPERIMENTAL_CODE)
cwebp_LDADD = libexample_util.la ../imageio/libimageio_util.la
cwebp_LDADD += ../imageio/libimagedec.la ../src/libwebp.la
cwebp_LDADD += $(JPEG_LIBS) $(PNG_LIBS) $(TIFF_LIBS)
libexampleutil_la_SOURCES = example_util.c example_util.h stopwatch.h
dwebp_SOURCES = dwebp.c stopwatch.h
dwebp_CPPFLAGS = $(AM_CPPFLAGS) $(USE_EXPERIMENTAL_CODE)
dwebp_CPPFLAGS += $(JPEG_INCLUDES) $(PNG_INCLUDES)
dwebp_LDADD = libexample_util.la
dwebp_LDADD += ../imageio/libimagedec.la
dwebp_LDADD += ../imageio/libimageenc.la
dwebp_LDADD += ../imageio/libimageio_util.la
dwebp_LDADD += ../src/libwebp.la
dwebp_LDADD +=$(PNG_LIBS) $(JPEG_LIBS)
dwebp_LDADD = libexampleutil.la $(PNG_LIBS) $(JPEG_LIBS)
gif2webp_SOURCES = gif2webp.c gifdec.c gifdec.h
cwebp_SOURCES = cwebp.c metadata.c metadata.h stopwatch.h
cwebp_SOURCES += jpegdec.c jpegdec.h
cwebp_SOURCES += pngdec.c pngdec.h
cwebp_SOURCES += tiffdec.c tiffdec.h
cwebp_SOURCES += webpdec.c webpdec.h
cwebp_SOURCES += wicdec.c wicdec.h
cwebp_CPPFLAGS = $(AM_CPPFLAGS) $(USE_EXPERIMENTAL_CODE)
cwebp_CPPFLAGS += $(JPEG_INCLUDES) $(PNG_INCLUDES) $(TIFF_INCLUDES)
cwebp_LDADD = libexampleutil.la ../src/libwebp.la
cwebp_LDADD += $(JPEG_LIBS) $(PNG_LIBS) $(TIFF_LIBS)
gif2webp_SOURCES = gif2webp.c gif2webp_util.c
gif2webp_CPPFLAGS = $(AM_CPPFLAGS) $(USE_EXPERIMENTAL_CODE) $(GIF_INCLUDES)
gif2webp_LDADD = libexample_util.la ../imageio/libimageio_util.la
gif2webp_LDADD += ../src/mux/libwebpmux.la ../src/libwebp.la $(GIF_LIBS)
vwebp_SOURCES = vwebp.c
vwebp_CPPFLAGS = $(AM_CPPFLAGS) $(USE_EXPERIMENTAL_CODE) $(GL_INCLUDES)
vwebp_LDADD = libexample_util.la ../imageio/libimageio_util.la
vwebp_LDADD += ../src/demux/libwebpdemux.la $(GL_LIBS)
gif2webp_LDADD = libexampleutil.la ../src/mux/libwebpmux.la ../src/libwebp.la
gif2webp_LDADD += $(GIF_LIBS)
webpmux_SOURCES = webpmux.c
webpmux_CPPFLAGS = $(AM_CPPFLAGS) $(USE_EXPERIMENTAL_CODE)
webpmux_LDADD = libexample_util.la ../imageio/libimageio_util.la
webpmux_LDADD += ../src/mux/libwebpmux.la ../src/libwebp.la
webpmux_LDADD = libexampleutil.la ../src/mux/libwebpmux.la ../src/libwebp.la
img2webp_SOURCES = img2webp.c
img2webp_CPPFLAGS = $(AM_CPPFLAGS) $(USE_EXPERIMENTAL_CODE)
img2webp_LDADD = libexample_util.la ../imageio/libimageio_util.la
img2webp_LDADD += ../imageio/libimagedec.la
img2webp_LDADD += ../src/mux/libwebpmux.la ../src/libwebp.la
img2webp_LDADD += $(PNG_LIBS) $(JPEG_LIBS) $(TIFF_LIBS)
vwebp_SOURCES = vwebp.c
vwebp_CPPFLAGS = $(AM_CPPFLAGS) $(USE_EXPERIMENTAL_CODE) $(GL_INCLUDES)
vwebp_LDADD = libexampleutil.la ../src/demux/libwebpdemux.la $(GL_LIBS)
if BUILD_LIBWEBPDECODER
anim_diff_LDADD += ../src/libwebpdecoder.la
dwebp_LDADD += ../src/libwebpdecoder.la
vwebp_LDADD += ../src/libwebpdecoder.la
else
anim_diff_LDADD += ../src/libwebp.la
dwebp_LDADD += ../src/libwebp.la
vwebp_LDADD += ../src/libwebp.la
endif


@ -1,303 +0,0 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// Checks whether a given pair of animated GIF/WebP images is identical:
// That is: their reconstructed canvases match pixel-by-pixel and their other
// animation properties (loop count etc) also match.
//
// example: anim_diff foo.gif bar.webp
#include <assert.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h> // for 'strtod'.
#include <string.h> // for 'strcmp'.
#include "./anim_util.h"
#if defined(_MSC_VER) && _MSC_VER < 1900
#define snprintf _snprintf
#endif
// Returns true if 'a + b' will overflow.
static int AdditionWillOverflow(int a, int b) {
return (b > 0) && (a > INT_MAX - b);
}
static int FramesAreEqual(const uint8_t* const rgba1,
const uint8_t* const rgba2, int width, int height) {
const int stride = width * 4; // Always true for 'DecodedFrame.rgba'.
return !memcmp(rgba1, rgba2, stride * height);
}
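// Pixels are compared in alpha-premultiplied form; 'max_allowed_diff' is
// scaled by 255 to match the premultiplied (0..255*255) channel range.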
static WEBP_INLINE int PixelsAreSimilar(uint32_t src, uint32_t dst,
int max_allowed_diff) {
const int src_a = (src >> 24) & 0xff;
const int src_r = (src >> 16) & 0xff;
const int src_g = (src >> 8) & 0xff;
const int src_b = (src >> 0) & 0xff;
const int dst_a = (dst >> 24) & 0xff;
const int dst_r = (dst >> 16) & 0xff;
const int dst_g = (dst >> 8) & 0xff;
const int dst_b = (dst >> 0) & 0xff;
return (abs(src_r * src_a - dst_r * dst_a) <= (max_allowed_diff * 255)) &&
(abs(src_g * src_a - dst_g * dst_a) <= (max_allowed_diff * 255)) &&
(abs(src_b * src_a - dst_b * dst_a) <= (max_allowed_diff * 255)) &&
(abs(src_a - dst_a) <= max_allowed_diff);
}
static int FramesAreSimilar(const uint8_t* const rgba1,
const uint8_t* const rgba2,
int width, int height, int max_allowed_diff) {
int i, j;
assert(max_allowed_diff > 0);
for (j = 0; j < height; ++j) {
for (i = 0; i < width; ++i) {
const int stride = width * 4;
const size_t offset = j * stride + i;
if (!PixelsAreSimilar(rgba1[offset], rgba2[offset], max_allowed_diff)) {
return 0;
}
}
}
return 1;
}
// Minimize the number of frames by merging successive frames that differ by
// at most 'max_diff' per channel between corresponding pixels.
static void MinimizeAnimationFrames(AnimatedImage* const img, int max_diff) {
uint32_t i;
for (i = 1; i < img->num_frames; ++i) {
DecodedFrame* const frame1 = &img->frames[i - 1];
DecodedFrame* const frame2 = &img->frames[i];
const uint8_t* const rgba1 = frame1->rgba;
const uint8_t* const rgba2 = frame2->rgba;
int should_merge_frames = 0;
// If merging frames will result in integer overflow for 'duration',
// skip merging.
if (AdditionWillOverflow(frame1->duration, frame2->duration)) continue;
if (max_diff > 0) {
should_merge_frames = FramesAreSimilar(rgba1, rgba2, img->canvas_width,
img->canvas_height, max_diff);
} else {
should_merge_frames =
FramesAreEqual(rgba1, rgba2, img->canvas_width, img->canvas_height);
}
if (should_merge_frames) { // Merge 'i+1'th frame into 'i'th frame.
frame1->duration += frame2->duration;
if (i + 1 < img->num_frames) {
memmove(&img->frames[i], &img->frames[i + 1],
(img->num_frames - i - 1) * sizeof(*img->frames));
}
--img->num_frames;
--i;
}
}
}
static int CompareValues(uint32_t a, uint32_t b, const char* output_str) {
if (a != b) {
fprintf(stderr, "%s: %d vs %d\n", output_str, a, b);
return 0;
}
return 1;
}
static int CompareBackgroundColor(uint32_t bg1, uint32_t bg2, int premultiply) {
if (premultiply) {
const int alpha1 = (bg1 >> 24) & 0xff;
const int alpha2 = (bg2 >> 24) & 0xff;
if (alpha1 == 0 && alpha2 == 0) return 1;
}
if (bg1 != bg2) {
fprintf(stderr, "Background color mismatch: 0x%08x vs 0x%08x\n",
bg1, bg2);
return 0;
}
return 1;
}
// Note: As long as frame durations and reconstructed frames are identical, it
// is OK for other aspects, like offsets and dispose/blend methods, to vary.
static int CompareAnimatedImagePair(const AnimatedImage* const img1,
const AnimatedImage* const img2,
int premultiply,
double min_psnr) {
int ok = 1;
const int is_multi_frame_image = (img1->num_frames > 1);
uint32_t i;
ok = CompareValues(img1->canvas_width, img2->canvas_width,
"Canvas width mismatch") && ok;
ok = CompareValues(img1->canvas_height, img2->canvas_height,
"Canvas height mismatch") && ok;
ok = CompareValues(img1->num_frames, img2->num_frames,
"Frame count mismatch") && ok;
if (!ok) return 0; // These are fatal failures, can't proceed.
if (is_multi_frame_image) { // Checks relevant for multi-frame images only.
ok = CompareValues(img1->loop_count, img2->loop_count,
"Loop count mismatch") && ok;
ok = CompareBackgroundColor(img1->bgcolor, img2->bgcolor,
premultiply) && ok;
}
for (i = 0; i < img1->num_frames; ++i) {
// Pixel-by-pixel comparison.
const uint8_t* const rgba1 = img1->frames[i].rgba;
const uint8_t* const rgba2 = img2->frames[i].rgba;
int max_diff;
double psnr;
if (is_multi_frame_image) { // Check relevant for multi-frame images only.
const char format[] = "Frame #%d, duration mismatch";
char tmp[sizeof(format) + 8];
ok = ok && (snprintf(tmp, sizeof(tmp), format, i) >= 0);
ok = ok && CompareValues(img1->frames[i].duration,
img2->frames[i].duration, tmp);
}
GetDiffAndPSNR(rgba1, rgba2, img1->canvas_width, img1->canvas_height,
premultiply, &max_diff, &psnr);
if (min_psnr > 0.) {
if (psnr < min_psnr) {
fprintf(stderr, "Frame #%d, psnr = %.2lf (min_psnr = %f)\n", i,
psnr, min_psnr);
ok = 0;
}
} else {
if (max_diff != 0) {
fprintf(stderr, "Frame #%d, max pixel diff: %d\n", i, max_diff);
ok = 0;
}
}
}
return ok;
}
static void Help(void) {
printf("Usage: anim_diff <image1> <image2> [options]\n");
printf("\nOptions:\n");
printf(" -dump_frames <folder> dump decoded frames in PAM format\n");
printf(" -min_psnr <float> ... minimum per-frame PSNR\n");
printf(" -raw_comparison ..... if this flag is not used, RGB is\n");
printf(" premultiplied before comparison\n");
#ifdef WEBP_EXPERIMENTAL_FEATURES
printf(" -max_diff <int> ..... maximum allowed difference per channel "
" between corresponding pixels in subsequent"
" frames\n");
#endif
}
int main(int argc, const char* argv[]) {
int return_code = -1;
int dump_frames = 0;
const char* dump_folder = NULL;
double min_psnr = 0.;
int got_input1 = 0;
int got_input2 = 0;
int premultiply = 1;
int max_diff = 0;
int i, c;
const char* files[2] = { NULL, NULL };
AnimatedImage images[2];
if (argc < 3) {
Help();
return -1;
}
for (c = 1; c < argc; ++c) {
int parse_error = 0;
if (!strcmp(argv[c], "-dump_frames")) {
if (c < argc - 1) {
dump_frames = 1;
dump_folder = argv[++c];
} else {
parse_error = 1;
}
} else if (!strcmp(argv[c], "-min_psnr")) {
if (c < argc - 1) {
const char* const v = argv[++c];
char* end = NULL;
const double d = strtod(v, &end);
if (end == v) {
parse_error = 1;
fprintf(stderr, "Error! '%s' is not a floating point number.\n", v);
}
min_psnr = d;
} else {
parse_error = 1;
}
} else if (!strcmp(argv[c], "-raw_comparison")) {
premultiply = 0;
#ifdef WEBP_EXPERIMENTAL_FEATURES
} else if (!strcmp(argv[c], "-max_diff")) {
if (c < argc - 1) {
const char* const v = argv[++c];
char* end = NULL;
const int n = (int)strtol(v, &end, 10);
if (end == v) {
parse_error = 1;
fprintf(stderr, "Error! '%s' is not an integer.\n", v);
}
max_diff = n;
} else {
parse_error = 1;
}
#endif
} else {
if (!got_input1) {
files[0] = argv[c];
got_input1 = 1;
} else if (!got_input2) {
files[1] = argv[c];
got_input2 = 1;
} else {
parse_error = 1;
}
}
if (parse_error) {
Help();
return -1;
}
}
if (!got_input2) {
Help();
return -1;
}
if (dump_frames) {
printf("Dumping decoded frames in: %s\n", dump_folder);
}
memset(images, 0, sizeof(images));
for (i = 0; i < 2; ++i) {
printf("Decoding file: %s\n", files[i]);
if (!ReadAnimatedImage(files[i], &images[i], dump_frames, dump_folder)) {
fprintf(stderr, "Error decoding file: %s\n Aborting.\n", files[i]);
return_code = -2;
goto End;
} else {
MinimizeAnimationFrames(&images[i], max_diff);
}
}
if (!CompareAnimatedImagePair(&images[0], &images[1],
premultiply, min_psnr)) {
fprintf(stderr, "\nFiles %s and %s differ.\n", files[0], files[1]);
return_code = -3;
} else {
printf("\nFiles %s and %s are identical.\n", files[0], files[1]);
return_code = 0;
}
End:
ClearAnimatedImage(&images[0]);
ClearAnimatedImage(&images[1]);
return return_code;
}


@ -1,773 +0,0 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// Utilities for animated images
#include "./anim_util.h"
#include <assert.h>
#include <math.h>
#include <stdio.h>
#include <string.h>
#ifdef WEBP_HAVE_GIF
#include <gif_lib.h>
#endif
#include "webp/format_constants.h"
#include "webp/decode.h"
#include "webp/demux.h"
#include "../imageio/imageio_util.h"
#if defined(_MSC_VER) && _MSC_VER < 1900
#define snprintf _snprintf
#endif
static const int kNumChannels = 4;
// -----------------------------------------------------------------------------
// Common utilities.
// Returns true if the frame covers the full canvas.
static int IsFullFrame(int width, int height,
int canvas_width, int canvas_height) {
return (width == canvas_width && height == canvas_height);
}
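// True iff 'size' fits in a size_t (guards against 32-bit truncation).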
static int CheckSizeForOverflow(uint64_t size) {
return (size == (size_t)size);
}
static int AllocateFrames(AnimatedImage* const image, uint32_t num_frames) {
uint32_t i;
uint8_t* mem = NULL;
DecodedFrame* frames = NULL;
const uint64_t rgba_size =
(uint64_t)image->canvas_width * kNumChannels * image->canvas_height;
const uint64_t total_size = (uint64_t)num_frames * rgba_size * sizeof(*mem);
const uint64_t total_frame_size = (uint64_t)num_frames * sizeof(*frames);
if (!CheckSizeForOverflow(total_size) ||
!CheckSizeForOverflow(total_frame_size)) {
return 0;
}
mem = (uint8_t*)malloc((size_t)total_size);
frames = (DecodedFrame*)malloc((size_t)total_frame_size);
if (mem == NULL || frames == NULL) {
free(mem);
free(frames);
return 0;
}
free(image->raw_mem);
image->num_frames = num_frames;
image->frames = frames;
for (i = 0; i < num_frames; ++i) {
frames[i].rgba = mem + i * rgba_size;
frames[i].duration = 0;
frames[i].is_key_frame = 0;
}
image->raw_mem = mem;
return 1;
}
void ClearAnimatedImage(AnimatedImage* const image) {
if (image != NULL) {
free(image->raw_mem);
free(image->frames);
image->num_frames = 0;
image->frames = NULL;
image->raw_mem = NULL;
}
}
// Clear the canvas to transparent.
static void ZeroFillCanvas(uint8_t* rgba,
uint32_t canvas_width, uint32_t canvas_height) {
memset(rgba, 0, canvas_width * kNumChannels * canvas_height);
}
// Clear given frame rectangle to transparent.
static void ZeroFillFrameRect(uint8_t* rgba, int rgba_stride, int x_offset,
int y_offset, int width, int height) {
int j;
assert(width * kNumChannels <= rgba_stride);
rgba += y_offset * rgba_stride + x_offset * kNumChannels;
for (j = 0; j < height; ++j) {
memset(rgba, 0, width * kNumChannels);
rgba += rgba_stride;
}
}
// Copy width * height pixels from 'src' to 'dst'.
static void CopyCanvas(const uint8_t* src, uint8_t* dst,
uint32_t width, uint32_t height) {
assert(src != NULL && dst != NULL);
memcpy(dst, src, width * kNumChannels * height);
}
// Copy pixels in the given rectangle from 'src' to 'dst' honoring the 'stride'.
static void CopyFrameRectangle(const uint8_t* src, uint8_t* dst, int stride,
int x_offset, int y_offset,
int width, int height) {
int j;
const int width_in_bytes = width * kNumChannels;
const size_t offset = y_offset * stride + x_offset * kNumChannels;
assert(width_in_bytes <= stride);
src += offset;
dst += offset;
for (j = 0; j < height; ++j) {
memcpy(dst, src, width_in_bytes);
src += stride;
dst += stride;
}
}
// Canonicalize all transparent pixels to transparent black to aid comparison.
static void CleanupTransparentPixels(uint32_t* rgba,
uint32_t width, uint32_t height) {
const uint32_t* const rgba_end = rgba + width * height;
while (rgba < rgba_end) {
const uint8_t alpha = (*rgba >> 24) & 0xff;
if (alpha == 0) {
*rgba = 0;
}
++rgba;
}
}
// Dump frame to a PAM file. Returns true on success.
static int DumpFrame(const char filename[], const char dump_folder[],
uint32_t frame_num, const uint8_t rgba[],
int canvas_width, int canvas_height) {
int ok = 0;
size_t max_len;
int y;
const char* base_name = NULL;
char* file_name = NULL;
FILE* f = NULL;
const char* row;
base_name = strrchr(filename, '/');
base_name = (base_name == NULL) ? filename : base_name + 1;
max_len = strlen(dump_folder) + 1 + strlen(base_name)
+ strlen("_frame_") + strlen(".pam") + 8;
file_name = (char*)malloc(max_len * sizeof(*file_name));
if (file_name == NULL) goto End;
if (snprintf(file_name, max_len, "%s/%s_frame_%d.pam",
dump_folder, base_name, frame_num) < 0) {
fprintf(stderr, "Error while generating file name\n");
goto End;
}
f = fopen(file_name, "wb");
if (f == NULL) {
fprintf(stderr, "Error opening file for writing: %s\n", file_name);
ok = 0;
goto End;
}
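// Write the Netpbm PAM ('P7') header: 4 channels (RGBA), 8 bits per sample.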
if (fprintf(f, "P7\nWIDTH %d\nHEIGHT %d\n"
"DEPTH 4\nMAXVAL 255\nTUPLTYPE RGB_ALPHA\nENDHDR\n",
canvas_width, canvas_height) < 0) {
fprintf(stderr, "Write error for file %s\n", file_name);
goto End;
}
row = (const char*)rgba;
for (y = 0; y < canvas_height; ++y) {
if (fwrite(row, canvas_width * kNumChannels, 1, f) != 1) {
fprintf(stderr, "Error writing to file: %s\n", file_name);
goto End;
}
row += canvas_width * kNumChannels;
}
ok = 1;
End:
if (f != NULL) fclose(f);
free(file_name);
return ok;
}
// -----------------------------------------------------------------------------
// WebP Decoding.
// Returns true if this is a valid WebP bitstream.
static int IsWebP(const WebPData* const webp_data) {
return (WebPGetInfo(webp_data->bytes, webp_data->size, NULL, NULL) != 0);
}
// Read animated WebP bitstream 'file_str' into 'AnimatedImage' struct.
static int ReadAnimatedWebP(const char filename[],
const WebPData* const webp_data,
AnimatedImage* const image, int dump_frames,
const char dump_folder[]) {
int ok = 0;
int dump_ok = 1;
uint32_t frame_index = 0;
int prev_frame_timestamp = 0;
WebPAnimDecoder* dec;
WebPAnimInfo anim_info;
memset(image, 0, sizeof(*image));
dec = WebPAnimDecoderNew(webp_data, NULL);
if (dec == NULL) {
fprintf(stderr, "Error parsing image: %s\n", filename);
goto End;
}
if (!WebPAnimDecoderGetInfo(dec, &anim_info)) {
fprintf(stderr, "Error getting global info about the animation\n");
goto End;
}
// Animation properties.
image->canvas_width = anim_info.canvas_width;
image->canvas_height = anim_info.canvas_height;
image->loop_count = anim_info.loop_count;
image->bgcolor = anim_info.bgcolor;
// Allocate frames.
if (!AllocateFrames(image, anim_info.frame_count)) return 0;
// Decode frames.
while (WebPAnimDecoderHasMoreFrames(dec)) {
DecodedFrame* curr_frame;
uint8_t* curr_rgba;
uint8_t* frame_rgba;
int timestamp;
if (!WebPAnimDecoderGetNext(dec, &frame_rgba, &timestamp)) {
fprintf(stderr, "Error decoding frame #%u\n", frame_index);
goto End;
}
assert(frame_index < anim_info.frame_count);
curr_frame = &image->frames[frame_index];
curr_rgba = curr_frame->rgba;
curr_frame->duration = timestamp - prev_frame_timestamp;
curr_frame->is_key_frame = 0; // Unused.
memcpy(curr_rgba, frame_rgba,
image->canvas_width * kNumChannels * image->canvas_height);
// Needed only because we may want to compare with GIF later.
CleanupTransparentPixels((uint32_t*)curr_rgba,
image->canvas_width, image->canvas_height);
if (dump_frames && dump_ok) {
dump_ok = DumpFrame(filename, dump_folder, frame_index, curr_rgba,
image->canvas_width, image->canvas_height);
if (!dump_ok) { // Print error once, but continue decode loop.
fprintf(stderr, "Error dumping frames to %s\n", dump_folder);
}
}
++frame_index;
prev_frame_timestamp = timestamp;
}
ok = dump_ok;
End:
WebPAnimDecoderDelete(dec);
return ok;
}
// -----------------------------------------------------------------------------
// GIF Decoding.
#ifdef WEBP_HAVE_GIF
// Returns true if this is a valid GIF bitstream.
static int IsGIF(const WebPData* const data) {
return data->size > GIF_STAMP_LEN &&
(!memcmp(GIF_STAMP, data->bytes, GIF_STAMP_LEN) ||
!memcmp(GIF87_STAMP, data->bytes, GIF_STAMP_LEN) ||
!memcmp(GIF89_STAMP, data->bytes, GIF_STAMP_LEN));
}
// GIFLIB_MAJOR is only defined in libgif >= 4.2.0.
#if defined(GIFLIB_MAJOR) && defined(GIFLIB_MINOR)
# define LOCAL_GIF_VERSION ((GIFLIB_MAJOR << 8) | GIFLIB_MINOR)
# define LOCAL_GIF_PREREQ(maj, min) \
(LOCAL_GIF_VERSION >= (((maj) << 8) | (min)))
#else
# define LOCAL_GIF_VERSION 0
# define LOCAL_GIF_PREREQ(maj, min) 0
#endif
#if !LOCAL_GIF_PREREQ(5, 0)
// Added in v5.0
typedef struct {
int DisposalMode;
#define DISPOSAL_UNSPECIFIED 0 // No disposal specified
#define DISPOSE_DO_NOT 1 // Leave image in place
#define DISPOSE_BACKGROUND 2 // Set area to background color
#define DISPOSE_PREVIOUS 3 // Restore to previous content
int UserInputFlag; // User confirmation required before disposal
int DelayTime; // Pre-display delay in 0.01sec units
int TransparentColor; // Palette index for transparency, -1 if none
#define NO_TRANSPARENT_COLOR -1
} GraphicsControlBlock;
static int DGifExtensionToGCB(const size_t GifExtensionLength,
const GifByteType* GifExtension,
GraphicsControlBlock* gcb) {
if (GifExtensionLength != 4) {
return GIF_ERROR;
}
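// GIF89a Graphic Control Extension packed byte: bits 2..4 = disposal method,
// bit 1 = user-input flag, bit 0 = transparent-color flag.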
gcb->DisposalMode = (GifExtension[0] >> 2) & 0x07;
gcb->UserInputFlag = (GifExtension[0] & 0x02) != 0;
gcb->DelayTime = GifExtension[1] | (GifExtension[2] << 8);
if (GifExtension[0] & 0x01) {
gcb->TransparentColor = (int)GifExtension[3];
} else {
gcb->TransparentColor = NO_TRANSPARENT_COLOR;
}
return GIF_OK;
}
static int DGifSavedExtensionToGCB(GifFileType* GifFile, int ImageIndex,
GraphicsControlBlock* gcb) {
int i;
if (ImageIndex < 0 || ImageIndex > GifFile->ImageCount - 1) {
return GIF_ERROR;
}
gcb->DisposalMode = DISPOSAL_UNSPECIFIED;
gcb->UserInputFlag = 0;
gcb->DelayTime = 0;
gcb->TransparentColor = NO_TRANSPARENT_COLOR;
for (i = 0; i < GifFile->SavedImages[ImageIndex].ExtensionBlockCount; i++) {
ExtensionBlock* ep = &GifFile->SavedImages[ImageIndex].ExtensionBlocks[i];
if (ep->Function == GRAPHICS_EXT_FUNC_CODE) {
return DGifExtensionToGCB(
ep->ByteCount, (const GifByteType*)ep->Bytes, gcb);
}
}
return GIF_ERROR;
}
#define CONTINUE_EXT_FUNC_CODE 0x00
// Signature was changed in v5.0
#define DGifOpenFileName(a, b) DGifOpenFileName(a)
#endif // !LOCAL_GIF_PREREQ(5, 0)
// Signature changed in v5.1
#if !LOCAL_GIF_PREREQ(5, 1)
#define DGifCloseFile(a, b) DGifCloseFile(a)
#endif
static void GIFDisplayError(const GifFileType* const gif, int gif_error) {
// libgif 4.2.0 has retired PrintGifError() and added GifErrorString().
#if LOCAL_GIF_PREREQ(4, 2)
#if LOCAL_GIF_PREREQ(5, 0)
const char* error_str =
GifErrorString((gif == NULL) ? gif_error : gif->Error);
#else
const char* error_str = GifErrorString();
(void)gif;
#endif
if (error_str == NULL) error_str = "Unknown error";
fprintf(stderr, "GIFLib Error %d: %s\n", gif_error, error_str);
#else
(void)gif;
fprintf(stderr, "GIFLib Error %d: ", gif_error);
PrintGifError();
fprintf(stderr, "\n");
#endif
}
static int IsKeyFrameGIF(const GifImageDesc* prev_desc, int prev_dispose,
const DecodedFrame* const prev_frame,
int canvas_width, int canvas_height) {
if (prev_frame == NULL) return 1;
if (prev_dispose == DISPOSE_BACKGROUND) {
if (IsFullFrame(prev_desc->Width, prev_desc->Height,
canvas_width, canvas_height)) {
return 1;
}
if (prev_frame->is_key_frame) return 1;
}
return 0;
}
static int GetTransparentIndexGIF(GifFileType* gif) {
GraphicsControlBlock first_gcb;
memset(&first_gcb, 0, sizeof(first_gcb));
DGifSavedExtensionToGCB(gif, 0, &first_gcb);
return first_gcb.TransparentColor;
}
static uint32_t GetBackgroundColorGIF(GifFileType* gif) {
const int transparent_index = GetTransparentIndexGIF(gif);
const ColorMapObject* const color_map = gif->SColorMap;
if (transparent_index != NO_TRANSPARENT_COLOR &&
gif->SBackGroundColor == transparent_index) {
return 0x00000000; // Special case: transparent black.
} else if (color_map == NULL || color_map->Colors == NULL
|| gif->SBackGroundColor >= color_map->ColorCount) {
return 0xffffffff; // Invalid: assume white.
} else {
const GifColorType color = color_map->Colors[gif->SBackGroundColor];
return (0xff << 24) |
(color.Red << 16) |
(color.Green << 8) |
(color.Blue << 0);
}
}
// Find appropriate app extension and get loop count from the next extension.
static uint32_t GetLoopCountGIF(const GifFileType* const gif) {
int i;
for (i = 0; i < gif->ImageCount; ++i) {
const SavedImage* const image = &gif->SavedImages[i];
int j;
for (j = 0; (j + 1) < image->ExtensionBlockCount; ++j) {
const ExtensionBlock* const eb1 = image->ExtensionBlocks + j;
const ExtensionBlock* const eb2 = image->ExtensionBlocks + j + 1;
const char* const signature = (const char*)eb1->Bytes;
const int signature_is_ok =
(eb1->Function == APPLICATION_EXT_FUNC_CODE) &&
(eb1->ByteCount == 11) &&
(!memcmp(signature, "NETSCAPE2.0", 11) ||
!memcmp(signature, "ANIMEXTS1.0", 11));
if (signature_is_ok &&
eb2->Function == CONTINUE_EXT_FUNC_CODE && eb2->ByteCount >= 3 &&
eb2->Bytes[0] == 1) {
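// The loop count is stored little-endian in bytes 1..2 of the sub-block.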
return ((uint32_t)(eb2->Bytes[2]) << 8) +
((uint32_t)(eb2->Bytes[1]) << 0);
}
}
}
return 0; // Default.
}
// Get duration of 'n'th frame in milliseconds.
static int GetFrameDurationGIF(GifFileType* gif, int n) {
GraphicsControlBlock gcb;
memset(&gcb, 0, sizeof(gcb));
DGifSavedExtensionToGCB(gif, n, &gcb);
return gcb.DelayTime * 10;
}
// Returns true if frame 'target' completely covers 'covered'.
static int CoversFrameGIF(const GifImageDesc* const target,
const GifImageDesc* const covered) {
return target->Left <= covered->Left &&
covered->Left + covered->Width <= target->Left + target->Width &&
target->Top <= covered->Top &&
covered->Top + covered->Height <= target->Top + target->Height;
}
static void RemapPixelsGIF(const uint8_t* const src,
const ColorMapObject* const cmap,
int transparent_color, int len, uint8_t* dst) {
int i;
for (i = 0; i < len; ++i) {
if (src[i] != transparent_color) {
// If a pixel in the current frame is transparent, leave it untouched so
// that the corresponding pixel from an earlier frame shows through.
const GifColorType c = cmap->Colors[src[i]];
dst[4 * i + 0] = c.Red;
dst[4 * i + 1] = c.Green;
dst[4 * i + 2] = c.Blue;
dst[4 * i + 3] = 0xff;
}
}
}
static int ReadFrameGIF(const SavedImage* const gif_image,
const ColorMapObject* cmap, int transparent_color,
int out_stride, uint8_t* const dst) {
const GifImageDesc* image_desc = &gif_image->ImageDesc;
const uint8_t* in;
uint8_t* out;
int j;
if (image_desc->ColorMap) cmap = image_desc->ColorMap;
if (cmap == NULL || cmap->ColorCount != (1 << cmap->BitsPerPixel)) {
fprintf(stderr, "Potentially corrupt color map.\n");
return 0;
}
in = (const uint8_t*)gif_image->RasterBits;
out = dst + image_desc->Top * out_stride + image_desc->Left * kNumChannels;
for (j = 0; j < image_desc->Height; ++j) {
RemapPixelsGIF(in, cmap, transparent_color, image_desc->Width, out);
in += image_desc->Width;
out += out_stride;
}
return 1;
}
// Read animated GIF bitstream from 'filename' into 'AnimatedImage' struct.
static int ReadAnimatedGIF(const char filename[], AnimatedImage* const image,
int dump_frames, const char dump_folder[]) {
uint32_t frame_count;
uint32_t canvas_width, canvas_height;
uint32_t i;
int gif_error;
GifFileType* gif;
gif = DGifOpenFileName(filename, NULL);
if (gif == NULL) {
fprintf(stderr, "Could not read file: %s.\n", filename);
return 0;
}
gif_error = DGifSlurp(gif);
if (gif_error != GIF_OK) {
fprintf(stderr, "Could not parse image: %s.\n", filename);
GIFDisplayError(gif, gif_error);
DGifCloseFile(gif, NULL);
return 0;
}
// Animation properties.
image->canvas_width = (uint32_t)gif->SWidth;
image->canvas_height = (uint32_t)gif->SHeight;
if (image->canvas_width > MAX_CANVAS_SIZE ||
image->canvas_height > MAX_CANVAS_SIZE) {
fprintf(stderr, "Invalid canvas dimension: %d x %d\n",
image->canvas_width, image->canvas_height);
DGifCloseFile(gif, NULL);
return 0;
}
image->loop_count = GetLoopCountGIF(gif);
image->bgcolor = GetBackgroundColorGIF(gif);
frame_count = (uint32_t)gif->ImageCount;
if (frame_count == 0) {
DGifCloseFile(gif, NULL);
return 0;
}
if (image->canvas_width == 0 || image->canvas_height == 0) {
image->canvas_width = gif->SavedImages[0].ImageDesc.Width;
image->canvas_height = gif->SavedImages[0].ImageDesc.Height;
gif->SavedImages[0].ImageDesc.Left = 0;
gif->SavedImages[0].ImageDesc.Top = 0;
if (image->canvas_width == 0 || image->canvas_height == 0) {
fprintf(stderr, "Invalid canvas size in GIF.\n");
DGifCloseFile(gif, NULL);
return 0;
}
}
// Allocate frames.
AllocateFrames(image, frame_count);
canvas_width = image->canvas_width;
canvas_height = image->canvas_height;
// Decode and reconstruct frames.
for (i = 0; i < frame_count; ++i) {
const int canvas_width_in_bytes = canvas_width * kNumChannels;
const SavedImage* const curr_gif_image = &gif->SavedImages[i];
GraphicsControlBlock curr_gcb;
DecodedFrame* curr_frame;
uint8_t* curr_rgba;
memset(&curr_gcb, 0, sizeof(curr_gcb));
DGifSavedExtensionToGCB(gif, i, &curr_gcb);
curr_frame = &image->frames[i];
curr_rgba = curr_frame->rgba;
curr_frame->duration = GetFrameDurationGIF(gif, i);
if (i == 0) { // Initialize as transparent.
curr_frame->is_key_frame = 1;
ZeroFillCanvas(curr_rgba, canvas_width, canvas_height);
} else {
DecodedFrame* const prev_frame = &image->frames[i - 1];
const GifImageDesc* const prev_desc = &gif->SavedImages[i - 1].ImageDesc;
GraphicsControlBlock prev_gcb;
memset(&prev_gcb, 0, sizeof(prev_gcb));
DGifSavedExtensionToGCB(gif, i - 1, &prev_gcb);
curr_frame->is_key_frame =
IsKeyFrameGIF(prev_desc, prev_gcb.DisposalMode, prev_frame,
canvas_width, canvas_height);
if (curr_frame->is_key_frame) { // Initialize as transparent.
ZeroFillCanvas(curr_rgba, canvas_width, canvas_height);
} else {
int prev_frame_disposed, curr_frame_opaque;
int prev_frame_completely_covered;
// Initialize with previous canvas.
uint8_t* const prev_rgba = image->frames[i - 1].rgba;
CopyCanvas(prev_rgba, curr_rgba, canvas_width, canvas_height);
// Dispose previous frame rectangle.
prev_frame_disposed =
(prev_gcb.DisposalMode == DISPOSE_BACKGROUND ||
prev_gcb.DisposalMode == DISPOSE_PREVIOUS);
curr_frame_opaque =
(curr_gcb.TransparentColor == NO_TRANSPARENT_COLOR);
prev_frame_completely_covered =
curr_frame_opaque &&
CoversFrameGIF(&curr_gif_image->ImageDesc, prev_desc);
if (prev_frame_disposed && !prev_frame_completely_covered) {
switch (prev_gcb.DisposalMode) {
case DISPOSE_BACKGROUND: {
ZeroFillFrameRect(curr_rgba, canvas_width_in_bytes,
prev_desc->Left, prev_desc->Top,
prev_desc->Width, prev_desc->Height);
break;
}
case DISPOSE_PREVIOUS: {
int src_frame_num = i - 2;
while (src_frame_num >= 0) {
GraphicsControlBlock src_frame_gcb;
memset(&src_frame_gcb, 0, sizeof(src_frame_gcb));
DGifSavedExtensionToGCB(gif, src_frame_num, &src_frame_gcb);
if (src_frame_gcb.DisposalMode != DISPOSE_PREVIOUS) break;
--src_frame_num;
}
if (src_frame_num >= 0) {
// Restore pixels inside previous frame rectangle to
// corresponding pixels in source canvas.
uint8_t* const src_frame_rgba =
image->frames[src_frame_num].rgba;
CopyFrameRectangle(src_frame_rgba, curr_rgba,
canvas_width_in_bytes,
prev_desc->Left, prev_desc->Top,
prev_desc->Width, prev_desc->Height);
} else {
// Source canvas doesn't exist, so clear the previous frame
// rectangle to the background.
ZeroFillFrameRect(curr_rgba, canvas_width_in_bytes,
prev_desc->Left, prev_desc->Top,
prev_desc->Width, prev_desc->Height);
}
break;
}
default:
break; // Nothing to do.
}
}
}
}
// Decode current frame.
if (!ReadFrameGIF(curr_gif_image, gif->SColorMap, curr_gcb.TransparentColor,
canvas_width_in_bytes, curr_rgba)) {
DGifCloseFile(gif, NULL);
return 0;
}
if (dump_frames) {
if (!DumpFrame(filename, dump_folder, i, curr_rgba,
canvas_width, canvas_height)) {
DGifCloseFile(gif, NULL);
return 0;
}
}
}
DGifCloseFile(gif, NULL);
return 1;
}
#else
static int IsGIF(const WebPData* const data) {
(void)data;
return 0;
}
static int ReadAnimatedGIF(const char filename[], AnimatedImage* const image,
int dump_frames, const char dump_folder[]) {
(void)filename;
(void)image;
(void)dump_frames;
(void)dump_folder;
fprintf(stderr, "GIF support not compiled. Please install the libgif-dev "
"package before building.\n");
return 0;
}
#endif // WEBP_HAVE_GIF
// -----------------------------------------------------------------------------
int ReadAnimatedImage(const char filename[], AnimatedImage* const image,
int dump_frames, const char dump_folder[]) {
int ok = 0;
WebPData webp_data;
WebPDataInit(&webp_data);
memset(image, 0, sizeof(*image));
if (!ImgIoUtilReadFile(filename, &webp_data.bytes, &webp_data.size)) {
fprintf(stderr, "Error reading file: %s\n", filename);
return 0;
}
if (IsWebP(&webp_data)) {
ok = ReadAnimatedWebP(filename, &webp_data, image, dump_frames,
dump_folder);
} else if (IsGIF(&webp_data)) {
ok = ReadAnimatedGIF(filename, image, dump_frames, dump_folder);
} else {
fprintf(stderr,
"Unknown file type: %s. Supported file types are WebP and GIF\n",
filename);
ok = 0;
}
if (!ok) ClearAnimatedImage(image);
WebPDataClear(&webp_data);
return ok;
}
static void Accumulate(double v1, double v2, double* const max_diff,
double* const sse) {
const double diff = fabs(v1 - v2);
if (diff > *max_diff) *max_diff = diff;
*sse += diff * diff;
}
void GetDiffAndPSNR(const uint8_t rgba1[], const uint8_t rgba2[],
uint32_t width, uint32_t height, int premultiply,
int* const max_diff, double* const psnr) {
const uint32_t stride = width * kNumChannels;
const int kAlphaChannel = kNumChannels - 1;
double f_max_diff = 0.;
double sse = 0.;
uint32_t x, y;
for (y = 0; y < height; ++y) {
for (x = 0; x < stride; x += kNumChannels) {
int k;
const size_t offset = (size_t)y * stride + x;
const int alpha1 = rgba1[offset + kAlphaChannel];
const int alpha2 = rgba2[offset + kAlphaChannel];
Accumulate(alpha1, alpha2, &f_max_diff, &sse);
if (!premultiply) {
for (k = 0; k < kAlphaChannel; ++k) {
Accumulate(rgba1[offset + k], rgba2[offset + k], &f_max_diff, &sse);
}
} else {
// premultiply R/G/B channels with alpha value
for (k = 0; k < kAlphaChannel; ++k) {
Accumulate(rgba1[offset + k] * alpha1 / 255.,
rgba2[offset + k] * alpha2 / 255.,
&f_max_diff, &sse);
}
}
}
}
*max_diff = (int)f_max_diff;
if (*max_diff == 0) {
*psnr = 99.; // PSNR when images are identical.
} else {
sse /= stride * height;
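// 4.3429448 ~= 10 / ln(10): this is 10 * log10(255^2 / MSE), i.e. PSNR in dB.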
*psnr = 4.3429448 * log(255. * 255. / sse);
}
}


@ -1,63 +0,0 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// Utilities for animated images
#ifndef WEBP_EXAMPLES_ANIM_UTIL_H_
#define WEBP_EXAMPLES_ANIM_UTIL_H_
#ifdef HAVE_CONFIG_H
#include "webp/config.h"
#endif
#include "webp/types.h"
#ifdef __cplusplus
extern "C" {
#endif
typedef struct {
uint8_t* rgba; // Decoded and reconstructed full frame.
int duration; // Frame duration in milliseconds.
int is_key_frame; // True if this frame is a key-frame.
} DecodedFrame;
typedef struct {
uint32_t canvas_width;
uint32_t canvas_height;
uint32_t bgcolor;
uint32_t loop_count;
DecodedFrame* frames;
uint32_t num_frames;
void* raw_mem;
} AnimatedImage;
// Deallocate everything in 'image' (but not the object itself).
void ClearAnimatedImage(AnimatedImage* const image);
// Read animated image file into 'AnimatedImage' struct.
// If 'dump_frames' is true, dump frames to 'dump_folder'.
// Previous content of 'image' is obliterated.
// Upon successful return, content of 'image' must be deleted by
// calling 'ClearAnimatedImage'.
int ReadAnimatedImage(const char filename[], AnimatedImage* const image,
int dump_frames, const char dump_folder[]);
// Given two RGBA buffers, calculate max pixel difference and PSNR.
// If 'premultiply' is true, R/G/B values will be pre-multiplied by the
// transparency before comparison.
void GetDiffAndPSNR(const uint8_t rgba1[], const uint8_t rgba2[],
uint32_t width, uint32_t height, int premultiply,
int* const max_diff, double* const psnr);
#ifdef __cplusplus
} // extern "C"
#endif
#endif // WEBP_EXAMPLES_ANIM_UTIL_H_


@ -20,12 +20,18 @@
#include "webp/config.h"
#endif
#include "../examples/example_util.h"
#include "../imageio/image_dec.h"
#include "../imageio/imageio_util.h"
#include "./stopwatch.h"
#include "webp/encode.h"
#include "./example_util.h"
#include "./metadata.h"
#include "./stopwatch.h"
#include "./jpegdec.h"
#include "./pngdec.h"
#include "./tiffdec.h"
#include "./webpdec.h"
#include "./wicdec.h"
#ifndef WEBP_DLL
#ifdef __cplusplus
extern "C" {
@ -42,81 +48,127 @@ extern void* VP8GetCPUInfo; // opaque forward declaration.
static int verbose = 0;
static int ReadYUV(const uint8_t* const data, size_t data_size,
WebPPicture* const pic) {
static int ReadYUV(FILE* in_file, WebPPicture* const pic) {
const int use_argb = pic->use_argb;
const int uv_width = (pic->width + 1) / 2;
const int uv_height = (pic->height + 1) / 2;
const int y_plane_size = pic->width * pic->height;
const int uv_plane_size = uv_width * uv_height;
const size_t expected_data_size = y_plane_size + 2 * uv_plane_size;
if (data_size != expected_data_size) {
fprintf(stderr,
"input data doesn't have the expected size (%d instead of %d)\n",
(int)data_size, (int)expected_data_size);
return 0;
}
int y;
int ok = 0;
pic->use_argb = 0;
if (!WebPPictureAlloc(pic)) return 0;
ImgIoUtilCopyPlane(data, pic->width, pic->y, pic->y_stride,
pic->width, pic->height);
ImgIoUtilCopyPlane(data + y_plane_size, uv_width,
pic->u, pic->uv_stride, uv_width, uv_height);
ImgIoUtilCopyPlane(data + y_plane_size + uv_plane_size, uv_width,
pic->v, pic->uv_stride, uv_width, uv_height);
return use_argb ? WebPPictureYUVAToARGB(pic) : 1;
if (!WebPPictureAlloc(pic)) return ok;
for (y = 0; y < pic->height; ++y) {
if (fread(pic->y + y * pic->y_stride, pic->width, 1, in_file) != 1) {
goto End;
}
}
for (y = 0; y < uv_height; ++y) {
if (fread(pic->u + y * pic->uv_stride, uv_width, 1, in_file) != 1)
goto End;
}
for (y = 0; y < uv_height; ++y) {
if (fread(pic->v + y * pic->uv_stride, uv_width, 1, in_file) != 1)
goto End;
}
ok = 1;
if (use_argb) ok = WebPPictureYUVAToARGB(pic);
End:
return ok;
}
#ifdef HAVE_WINCODEC_H
static int ReadPicture(const char* const filename, WebPPicture* const pic,
int keep_alpha, Metadata* const metadata) {
int ok = 0;
const uint8_t* data = NULL;
size_t data_size = 0;
int ok;
if (pic->width != 0 && pic->height != 0) {
ok = ImgIoUtilReadFile(filename, &data, &data_size);
ok = ok && ReadYUV(data, data_size, pic);
// If image size is specified, infer it as YUV format.
FILE* in_file = fopen(filename, "rb");
if (in_file == NULL) {
fprintf(stderr, "Error! Cannot open input file '%s'\n", filename);
return 0;
}
ok = ReadYUV(in_file, pic);
fclose(in_file);
} else {
// If no size specified, try to decode it using WIC.
ok = ReadPictureWithWIC(filename, pic, keep_alpha, metadata);
if (!ok) {
ok = ImgIoUtilReadFile(filename, &data, &data_size);
ok = ok && ReadWebP(data, data_size, pic, keep_alpha, metadata);
ok = ReadWebP(filename, pic, keep_alpha, metadata);
}
}
if (!ok) {
fprintf(stderr, "Error! Could not process file %s\n", filename);
}
free((void*)data);
return ok;
}
#else // !HAVE_WINCODEC_H
typedef enum {
PNG_ = 0,
JPEG_,
TIFF_, // 'TIFF' clashes with libtiff
WEBP_,
UNSUPPORTED
} InputFileFormat;
static InputFileFormat GetImageType(FILE* in_file) {
InputFileFormat format = UNSUPPORTED;
uint32_t magic1, magic2;
uint8_t buf[12];
if ((fread(&buf[0], 12, 1, in_file) != 1) ||
(fseek(in_file, 0, SEEK_SET) != 0)) {
return format;
}
magic1 = ((uint32_t)buf[0] << 24) | (buf[1] << 16) | (buf[2] << 8) | buf[3];
magic2 = ((uint32_t)buf[8] << 24) | (buf[9] << 16) | (buf[10] << 8) | buf[11];
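// Signatures: PNG = 0x89 'P' 'N' 'G'; JPEG starts with 0xFFD8FF;
// TIFF = 'II*\0' or 'MM\0*'; WebP = 'RIFF' with 'WEBP' at offset 8.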
if (magic1 == 0x89504E47U) {
format = PNG_;
} else if (magic1 >= 0xFFD8FF00U && magic1 <= 0xFFD8FFFFU) {
format = JPEG_;
} else if (magic1 == 0x49492A00 || magic1 == 0x4D4D002A) {
format = TIFF_;
} else if (magic1 == 0x52494646 && magic2 == 0x57454250) {
format = WEBP_;
}
return format;
}
static int ReadPicture(const char* const filename, WebPPicture* const pic,
int keep_alpha, Metadata* const metadata) {
const uint8_t* data = NULL;
size_t data_size = 0;
int ok = 0;
ok = ImgIoUtilReadFile(filename, &data, &data_size);
if (!ok) goto End;
FILE* in_file = fopen(filename, "rb");
if (in_file == NULL) {
fprintf(stderr, "Error! Cannot open input file '%s'\n", filename);
return ok;
}
if (pic->width == 0 || pic->height == 0) {
WebPImageReader reader = WebPGuessImageReader(data, data_size);
ok = reader(data, data_size, pic, keep_alpha, metadata);
// If no size is specified, sniff the format and decode as PNG/JPEG/TIFF/WebP.
const InputFileFormat format = GetImageType(in_file);
if (format == PNG_) {
ok = ReadPNG(in_file, pic, keep_alpha, metadata);
} else if (format == JPEG_) {
ok = ReadJPEG(in_file, pic, metadata);
} else if (format == TIFF_) {
ok = ReadTIFF(filename, pic, keep_alpha, metadata);
} else if (format == WEBP_) {
ok = ReadWebP(filename, pic, keep_alpha, metadata);
}
} else {
// If image size is specified, infer it as YUV format.
ok = ReadYUV(data, data_size, pic);
ok = ReadYUV(in_file, pic);
}
End:
if (!ok) {
fprintf(stderr, "Error! Could not process file %s\n", filename);
}
free((void*)data);
fclose(in_file);
return ok;
}
@ -160,8 +212,6 @@ static void PrintFullLosslessInfo(const WebPAuxStats* const stats,
const char* const description) {
fprintf(stderr, "Lossless-%s compressed size: %d bytes\n",
description, stats->lossless_size);
fprintf(stderr, " * Header size: %d bytes, image data size: %d\n",
stats->lossless_hdr_size, stats->lossless_data_size);
if (stats->lossless_features) {
fprintf(stderr, " * Lossless features used:");
if (stats->lossless_features & 1) fprintf(stderr, " PREDICTION");
@ -297,10 +347,6 @@ static int DumpPicture(const WebPPicture* const picture, const char* PGM_name) {
const int uv_width = (picture->width + 1) / 2;
const int uv_height = (picture->height + 1) / 2;
const int stride = (picture->width + 1) & ~1;
const uint8_t* src_y = picture->y;
const uint8_t* src_u = picture->u;
const uint8_t* src_v = picture->v;
const uint8_t* src_a = picture->a;
const int alpha_height =
WebPPictureHasTransparency(picture) ? picture->height : 0;
const int height = picture->height + uv_height + alpha_height;
@ -308,20 +354,20 @@ static int DumpPicture(const WebPPicture* const picture, const char* PGM_name) {
if (f == NULL) return 0;
fprintf(f, "P5\n%d %d\n255\n", stride, height);
for (y = 0; y < picture->height; ++y) {
if (fwrite(src_y, picture->width, 1, f) != 1) return 0;
if (fwrite(picture->y + y * picture->y_stride, picture->width, 1, f) != 1)
return 0;
if (picture->width & 1) fputc(0, f); // pad
src_y += picture->y_stride;
}
for (y = 0; y < uv_height; ++y) {
if (fwrite(src_u, uv_width, 1, f) != 1) return 0;
if (fwrite(src_v, uv_width, 1, f) != 1) return 0;
src_u += picture->uv_stride;
src_v += picture->uv_stride;
if (fwrite(picture->u + y * picture->uv_stride, uv_width, 1, f) != 1)
return 0;
if (fwrite(picture->v + y * picture->uv_stride, uv_width, 1, f) != 1)
return 0;
}
for (y = 0; y < alpha_height; ++y) {
if (fwrite(src_a, picture->width, 1, f) != 1) return 0;
if (fwrite(picture->a + y * picture->a_stride, picture->width, 1, f) != 1)
return 0;
if (picture->width & 1) fputc(0, f); // pad
src_a += picture->a_stride;
}
fclose(f);
return 1;
@ -519,38 +565,33 @@ static void HelpLong(void) {
printf("Windows builds can take as input any of the files handled by WIC.\n");
#endif
printf("\nOptions:\n");
printf(" -h / -help ............. short help\n");
printf(" -H / -longhelp ......... long help\n");
printf(" -q <float> ............. quality factor (0:small..100:big), "
"default=75\n");
printf(" -alpha_q <int> ......... transparency-compression quality (0..100),"
"\n default=100\n");
printf(" -h / -help ............ short help\n");
printf(" -H / -longhelp ........ long help\n");
printf(" -q <float> ............. quality factor (0:small..100:big)\n");
printf(" -alpha_q <int> ......... transparency-compression quality "
"(0..100)\n");
printf(" -preset <string> ....... preset setting, one of:\n");
printf(" default, photo, picture,\n");
printf(" drawing, icon, text\n");
printf(" -preset must come first, as it overwrites other parameters\n");
#if WEBP_ENCODER_ABI_VERSION > 0x0202
printf(" -z <int> ............... activates lossless preset with given\n"
" level in [0:fast, ..., 9:slowest]\n");
#endif
printf("\n");
printf(" -m <int> ............... compression method (0=fast, 6=slowest), "
"default=4\n");
printf(" -segments <int> ........ number of segments to use (1..4), "
"default=4\n");
printf(" -m <int> ............... compression method (0=fast, 6=slowest)\n");
printf(" -segments <int> ........ number of segments to use (1..4)\n");
printf(" -size <int> ............ target size (in bytes)\n");
printf(" -psnr <float> .......... target PSNR (in dB. typically: 42)\n");
printf("\n");
printf(" -s <int> <int> ......... input size (width x height) for YUV\n");
printf(" -sns <int> ............. spatial noise shaping (0:off, 100:max), "
"default=50\n");
printf(" -f <int> ............... filter strength (0=off..100), "
"default=60\n");
printf(" -sns <int> ............. spatial noise shaping (0:off, 100:max)\n");
printf(" -f <int> ............... filter strength (0=off..100)\n");
printf(" -sharpness <int> ....... "
"filter sharpness (0:most .. 7:least sharp), default=0\n");
"filter sharpness (0:most .. 7:least sharp)\n");
printf(" -strong ................ use strong filter instead "
"of simple (default)\n");
printf(" -nostrong .............. use simple filter instead of strong\n");
printf(" -sharp_yuv ............. use sharper (and slower) RGB->YUV "
"conversion\n");
printf(" -partition_limit <int> . limit quality to fit the 512k limit on\n");
printf(" "
"the first partition (0=no degradation ... 100=full)\n");
@ -564,24 +605,16 @@ static void HelpLong(void) {
printf(" -print_ssim ............ prints averaged SSIM distortion\n");
printf(" -print_lsim ............ prints local-similarity distortion\n");
printf(" -d <file.pgm> .......... dump the compressed output (PGM file)\n");
printf(" -alpha_method <int> .... transparency-compression method (0..1), "
"default=1\n");
printf(" -alpha_method <int> .... transparency-compression method (0..1)\n");
printf(" -alpha_filter <string> . predictive filtering for alpha plane,\n");
printf(" one of: none, fast (default) or best\n");
printf(" -exact ................. preserve RGB values in transparent area, "
"default=off\n");
printf(" -alpha_cleanup ......... clean RGB values in transparent area\n");
printf(" -blend_alpha <hex> ..... blend colors against background color\n"
" expressed as RGB values written in\n"
" hexadecimal, e.g. 0xc0e0d0 for red=0xc0\n"
" green=0xe0 and blue=0xd0\n");
printf(" -noalpha ............... discard any transparency information\n");
printf(" -lossless .............. encode image losslessly, default=off\n");
printf(" -near_lossless <int> ... use near-lossless image\n"
" preprocessing (0..100=off), "
"default=100\n");
#ifdef WEBP_EXPERIMENTAL_FEATURES /* not documented yet */
printf(" -delta_palette ......... use delta palettization\n");
#endif // WEBP_EXPERIMENTAL_FEATURES
printf(" -lossless .............. encode image losslessly\n");
printf(" -hint <string> ......... specify image characteristics hint,\n");
printf(" one of: photo, picture or graph\n");
@ -646,8 +679,10 @@ int main(int argc, const char *argv[]) {
uint32_t background_color = 0xffffffu;
int crop = 0, crop_x = 0, crop_y = 0, crop_w = 0, crop_h = 0;
int resize_w = 0, resize_h = 0;
#if WEBP_ENCODER_ABI_VERSION > 0x0202
int lossless_preset = 6;
int use_lossless_preset = -1; // -1=unset, 0=don't use, 1=use it
#endif
int show_progress = 0;
int keep_metadata = 0;
int metadata_written = 0;
@ -701,31 +736,25 @@ int main(int argc, const char *argv[]) {
} else if (!strcmp(argv[c], "-s") && c < argc - 2) {
picture.width = ExUtilGetInt(argv[++c], 0, &parse_error);
picture.height = ExUtilGetInt(argv[++c], 0, &parse_error);
if (picture.width > WEBP_MAX_DIMENSION || picture.width < 0 ||
picture.height > WEBP_MAX_DIMENSION || picture.height < 0) {
fprintf(stderr,
"Specified dimension (%d x %d) is out of range.\n",
picture.width, picture.height);
goto Error;
}
} else if (!strcmp(argv[c], "-m") && c < argc - 1) {
config.method = ExUtilGetInt(argv[++c], 0, &parse_error);
#if WEBP_ENCODER_ABI_VERSION > 0x0202
use_lossless_preset = 0; // disable -z option
#endif
} else if (!strcmp(argv[c], "-q") && c < argc - 1) {
config.quality = ExUtilGetFloat(argv[++c], &parse_error);
#if WEBP_ENCODER_ABI_VERSION > 0x0202
use_lossless_preset = 0; // disable -z option
} else if (!strcmp(argv[c], "-z") && c < argc - 1) {
lossless_preset = ExUtilGetInt(argv[++c], 0, &parse_error);
if (use_lossless_preset != 0) use_lossless_preset = 1;
#endif
} else if (!strcmp(argv[c], "-alpha_q") && c < argc - 1) {
config.alpha_quality = ExUtilGetInt(argv[++c], 0, &parse_error);
} else if (!strcmp(argv[c], "-alpha_method") && c < argc - 1) {
config.alpha_compression = ExUtilGetInt(argv[++c], 0, &parse_error);
} else if (!strcmp(argv[c], "-alpha_cleanup")) {
// This flag is obsolete; it does the opposite of -exact.
config.exact = 0;
} else if (!strcmp(argv[c], "-exact")) {
config.exact = 1;
keep_alpha = keep_alpha ? 2 : 0;
} else if (!strcmp(argv[c], "-blend_alpha") && c < argc - 1) {
blend_alpha = 1;
// background color is given in hex with an optional '0x' prefix
@ -747,14 +776,6 @@ int main(int argc, const char *argv[]) {
keep_alpha = 0;
} else if (!strcmp(argv[c], "-lossless")) {
config.lossless = 1;
} else if (!strcmp(argv[c], "-near_lossless") && c < argc - 1) {
config.near_lossless = ExUtilGetInt(argv[++c], 0, &parse_error);
config.lossless = 1; // use near-lossless only with lossless
#ifdef WEBP_EXPERIMENTAL_FEATURES
} else if (!strcmp(argv[c], "-delta_palette")) {
config.use_delta_palette = 1;
config.lossless = 1; // delta-palette is for lossless only
#endif // WEBP_EXPERIMENTAL_FEATURES
} else if (!strcmp(argv[c], "-hint") && c < argc - 1) {
++c;
if (!strcmp(argv[c], "photo")) {
@ -789,8 +810,6 @@ int main(int argc, const char *argv[]) {
config.filter_type = 0;
} else if (!strcmp(argv[c], "-sharpness") && c < argc - 1) {
config.filter_sharpness = ExUtilGetInt(argv[++c], 0, &parse_error);
} else if (!strcmp(argv[c], "-sharp_yuv")) {
config.use_sharp_yuv = 1;
} else if (!strcmp(argv[c], "-pass") && c < argc - 1) {
config.pass = ExUtilGetInt(argv[++c], 0, &parse_error);
} else if (!strcmp(argv[c], "-pre") && c < argc - 1) {
@ -817,7 +836,7 @@ int main(int argc, const char *argv[]) {
} else if (!strcmp(argv[c], "-version")) {
const int version = WebPGetEncoderVersion();
printf("%d.%d.%d\n",
(version >> 16) & 0xff, (version >> 8) & 0xff, version & 0xff);
(version >> 16) & 0xff, (version >> 8) & 0xff, version & 0xff);
return 0;
} else if (!strcmp(argv[c], "-progress")) {
show_progress = 1;
@ -916,12 +935,14 @@ int main(int argc, const char *argv[]) {
goto Error;
}
#if WEBP_ENCODER_ABI_VERSION > 0x0202
if (use_lossless_preset == 1) {
if (!WebPConfigLosslessPreset(&config, lossless_preset)) {
fprintf(stderr, "Invalid lossless preset (-z %d)\n", lossless_preset);
goto Error;
}
}
#endif
// Check for unsupported command line options for lossless mode and log
// warning for such options.
@ -935,23 +956,13 @@ int main(int argc, const char *argv[]) {
" encoding. Ignoring this option!\n");
}
}
// If a target size or PSNR was given, but somehow the -pass option was
// omitted, force a reasonable value.
if (config.target_size > 0 || config.target_PSNR > 0) {
if (config.pass == 1) config.pass = 6;
}
if (!WebPValidateConfig(&config)) {
fprintf(stderr, "Error! Invalid configuration.\n");
goto Error;
}
// Read the input. We need to decide if we prefer ARGB or YUVA
// samples, depending on the expected compression mode (this saves
// some conversion steps).
picture.use_argb = (config.lossless || config.use_sharp_yuv ||
config.preprocessing > 0 ||
crop || (resize_w | resize_h) > 0);
// Read the input
if (verbose) {
StopwatchReset(&stop_watch);
}
@ -966,6 +977,10 @@ int main(int argc, const char *argv[]) {
WebPBlendAlpha(&picture, background_color);
}
if (keep_alpha == 2) {
WebPCleanupTransparentArea(&picture);
}
if (verbose) {
const double read_time = StopwatchReadAndReset(&stop_watch);
fprintf(stderr, "Time to read input: %.3fs\n", read_time);
@ -974,7 +989,7 @@ int main(int argc, const char *argv[]) {
// Open the output
if (out_file != NULL) {
const int use_stdout = !strcmp(out_file, "-");
out = use_stdout ? ImgIoUtilSetBinaryMode(stdout) : fopen(out_file, "wb");
out = use_stdout ? ExUtilSetBinaryMode(stdout) : fopen(out_file, "wb");
if (out == NULL) {
fprintf(stderr, "Error! Cannot open output file '%s'\n", out_file);
goto Error;
@ -1002,7 +1017,7 @@ int main(int argc, const char *argv[]) {
picture.user_data = (void*)in_file;
}
// Crop & resize.
// Compress
if (verbose) {
StopwatchReset(&stop_watch);
}
@ -1019,22 +1034,12 @@ int main(int argc, const char *argv[]) {
goto Error;
}
}
if (verbose && (crop != 0 || (resize_w | resize_h) > 0)) {
const double preproc_time = StopwatchReadAndReset(&stop_watch);
fprintf(stderr, "Time to crop/resize picture: %.3fs\n", preproc_time);
}
if (picture.extra_info_type > 0) {
AllocExtraInfo(&picture);
}
if (print_distortion >= 0) { // Save original picture for later comparison
WebPPictureCopy(&picture, &original_picture);
}
// Compress.
if (verbose) {
StopwatchReset(&stop_watch);
}
if (!WebPEncode(&config, &picture)) {
fprintf(stderr, "Error! Cannot encode picture as WebP\n");
fprintf(stderr, "Error code: %d (%s)\n",
@ -1098,14 +1103,25 @@ int main(int argc, const char *argv[]) {
if (print_distortion >= 0) { // print distortion
static const char* distortion_names[] = { "PSNR", "SSIM", "LSIM" };
float values[5];
// Comparison is performed in YUVA colorspace.
if (original_picture.use_argb &&
!WebPPictureARGBToYUVA(&original_picture, WEBP_YUV420A)) {
fprintf(stderr, "Error while converting original picture to YUVA.\n");
goto Error;
}
if (picture.use_argb &&
!WebPPictureARGBToYUVA(&picture, WEBP_YUV420A)) {
fprintf(stderr, "Error while converting compressed picture to YUVA.\n");
goto Error;
}
if (!WebPPictureDistortion(&picture, &original_picture,
print_distortion, values)) {
fprintf(stderr, "Error while computing the distortion.\n");
goto Error;
}
if (!short_output) {
fprintf(stderr, "%s: ", distortion_names[print_distortion]);
fprintf(stderr, "B:%.2f G:%.2f R:%.2f A:%.2f Total:%.2f\n",
fprintf(stderr, "%s: Y:%.2f U:%.2f V:%.2f A:%.2f Total:%.2f\n",
distortion_names[print_distortion],
values[0], values[1], values[2], values[3], values[4]);
} else {
fprintf(stderr, "%7d %.4f\n", picture.stats->coded_size, values[4]);
@ -1118,7 +1134,11 @@ int main(int argc, const char *argv[]) {
return_value = 0;
Error:
#if WEBP_ENCODER_ABI_VERSION > 0x0203
WebPMemoryWriterClear(&memory_writer);
#else
free(memory_writer.mem);
#endif
free(picture.extra_info);
MetadataFree(&metadata);
WebPPictureFree(&picture);


@ -20,13 +20,30 @@
#include "webp/config.h"
#endif
#include "../examples/example_util.h"
#include "../imageio/image_enc.h"
#include "../imageio/webpdec.h"
#ifdef WEBP_HAVE_PNG
#include <png.h>
#include <setjmp.h> // note: this must be included *after* png.h
#endif
#ifdef HAVE_WINCODEC_H
#ifdef __MINGW32__
#define INITGUID // Without this GUIDs are declared extern and fail to link
#endif
#define CINTERFACE
#define COBJMACROS
#define _WIN32_IE 0x500 // Workaround bug in shlwapi.h when compiling C++
// code with COBJMACROS.
#include <ole2.h> // CreateStreamOnHGlobal()
#include <shlwapi.h>
#include <windows.h>
#include <wincodec.h>
#endif
#include "webp/decode.h"
#include "./example_util.h"
#include "./stopwatch.h"
static int verbose = 0;
static int quiet = 0;
#ifndef WEBP_DLL
#ifdef __cplusplus
extern "C" {
@ -39,25 +56,463 @@ extern void* VP8GetCPUInfo; // opaque forward declaration.
#endif
#endif // WEBP_DLL
//------------------------------------------------------------------------------
// Output types
typedef enum {
PNG = 0,
PAM,
PPM,
PGM,
BMP,
TIFF,
YUV,
ALPHA_PLANE_ONLY // this is for experimenting only
} OutputFileFormat;
#ifdef HAVE_WINCODEC_H
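// IFS() chains COM/WIC calls: each 'fn' runs only while all previous calls
// succeeded; the first failing HRESULT is reported and preserved in 'hr'.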
#define IFS(fn) \
do { \
if (SUCCEEDED(hr)) { \
hr = (fn); \
if (FAILED(hr)) fprintf(stderr, #fn " failed %08lx\n", hr); \
} \
} while (0)
#ifdef __cplusplus
#define MAKE_REFGUID(x) (x)
#else
#define MAKE_REFGUID(x) &(x)
#endif
static HRESULT CreateOutputStream(const char* out_file_name,
int write_to_mem, IStream** stream) {
HRESULT hr = S_OK;
if (write_to_mem) {
// Output to a memory buffer. This is freed when 'stream' is released.
IFS(CreateStreamOnHGlobal(NULL, TRUE, stream));
} else {
IFS(SHCreateStreamOnFileA(out_file_name, STGM_WRITE | STGM_CREATE, stream));
}
if (FAILED(hr)) {
fprintf(stderr, "Error opening output file %s (%08lx)\n",
out_file_name, hr);
}
return hr;
}
static HRESULT WriteUsingWIC(const char* out_file_name, int use_stdout,
REFGUID container_guid,
uint8_t* rgb, int stride,
uint32_t width, uint32_t height, int has_alpha) {
HRESULT hr = S_OK;
IWICImagingFactory* factory = NULL;
IWICBitmapFrameEncode* frame = NULL;
IWICBitmapEncoder* encoder = NULL;
IStream* stream = NULL;
WICPixelFormatGUID pixel_format = has_alpha ? GUID_WICPixelFormat32bppBGRA
: GUID_WICPixelFormat24bppBGR;
IFS(CoInitialize(NULL));
IFS(CoCreateInstance(MAKE_REFGUID(CLSID_WICImagingFactory), NULL,
CLSCTX_INPROC_SERVER,
MAKE_REFGUID(IID_IWICImagingFactory),
(LPVOID*)&factory));
if (hr == REGDB_E_CLASSNOTREG) {
fprintf(stderr,
"Couldn't access Windows Imaging Component (are you running "
"Windows XP SP3 or newer?). PNG support not available. "
"Use -ppm or -pgm for available PPM and PGM formats.\n");
}
IFS(CreateOutputStream(out_file_name, use_stdout, &stream));
IFS(IWICImagingFactory_CreateEncoder(factory, container_guid, NULL,
&encoder));
IFS(IWICBitmapEncoder_Initialize(encoder, stream,
WICBitmapEncoderNoCache));
IFS(IWICBitmapEncoder_CreateNewFrame(encoder, &frame, NULL));
IFS(IWICBitmapFrameEncode_Initialize(frame, NULL));
IFS(IWICBitmapFrameEncode_SetSize(frame, width, height));
IFS(IWICBitmapFrameEncode_SetPixelFormat(frame, &pixel_format));
IFS(IWICBitmapFrameEncode_WritePixels(frame, height, stride,
height * stride, rgb));
IFS(IWICBitmapFrameEncode_Commit(frame));
IFS(IWICBitmapEncoder_Commit(encoder));
if (SUCCEEDED(hr) && use_stdout) {
HGLOBAL image;
IFS(GetHGlobalFromStream(stream, &image));
if (SUCCEEDED(hr)) {
HANDLE std_output = GetStdHandle(STD_OUTPUT_HANDLE);
DWORD mode;
const BOOL update_mode = GetConsoleMode(std_output, &mode);
const void* const image_mem = GlobalLock(image);
DWORD bytes_written = 0;
// Clear output processing if necessary, then output the image.
if (update_mode) SetConsoleMode(std_output, 0);
if (!WriteFile(std_output, image_mem, (DWORD)GlobalSize(image),
&bytes_written, NULL) ||
bytes_written != GlobalSize(image)) {
hr = E_FAIL;
}
if (update_mode) SetConsoleMode(std_output, mode);
GlobalUnlock(image);
}
}
if (frame != NULL) IUnknown_Release(frame);
if (encoder != NULL) IUnknown_Release(encoder);
if (factory != NULL) IUnknown_Release(factory);
if (stream != NULL) IUnknown_Release(stream);
return hr;
}
static int WritePNG(const char* out_file_name, int use_stdout,
const WebPDecBuffer* const buffer) {
const uint32_t width = buffer->width;
const uint32_t height = buffer->height;
uint8_t* const rgb = buffer->u.RGBA.rgba;
const int stride = buffer->u.RGBA.stride;
const int has_alpha = (buffer->colorspace == MODE_BGRA);
return SUCCEEDED(WriteUsingWIC(out_file_name, use_stdout,
MAKE_REFGUID(GUID_ContainerFormatPng),
rgb, stride, width, height, has_alpha));
}
#elif defined(WEBP_HAVE_PNG) // !HAVE_WINCODEC_H
static void PNGAPI PNGErrorFunction(png_structp png, png_const_charp dummy) {
(void)dummy; // remove variable-unused warning
longjmp(png_jmpbuf(png), 1);
}
static int WritePNG(FILE* out_file, const WebPDecBuffer* const buffer) {
const uint32_t width = buffer->width;
const uint32_t height = buffer->height;
uint8_t* const rgb = buffer->u.RGBA.rgba;
const int stride = buffer->u.RGBA.stride;
const int has_alpha = (buffer->colorspace == MODE_RGBA);
volatile png_structp png;
volatile png_infop info;
png_uint_32 y;
png = png_create_write_struct(PNG_LIBPNG_VER_STRING,
NULL, PNGErrorFunction, NULL);
if (png == NULL) {
return 0;
}
info = png_create_info_struct(png);
if (info == NULL) {
png_destroy_write_struct((png_structpp)&png, NULL);
return 0;
}
if (setjmp(png_jmpbuf(png))) {
png_destroy_write_struct((png_structpp)&png, (png_infopp)&info);
return 0;
}
png_init_io(png, out_file);
png_set_IHDR(png, info, width, height, 8,
has_alpha ? PNG_COLOR_TYPE_RGBA : PNG_COLOR_TYPE_RGB,
PNG_INTERLACE_NONE, PNG_COMPRESSION_TYPE_DEFAULT,
PNG_FILTER_TYPE_DEFAULT);
png_write_info(png, info);
for (y = 0; y < height; ++y) {
png_bytep row = rgb + y * stride;
png_write_rows(png, &row, 1);
}
png_write_end(png, info);
png_destroy_write_struct((png_structpp)&png, (png_infopp)&info);
return 1;
}
#else // !HAVE_WINCODEC_H && !WEBP_HAVE_PNG
static int WritePNG(FILE* out_file, const WebPDecBuffer* const buffer) {
(void)out_file;
(void)buffer;
fprintf(stderr, "PNG support not compiled. Please install the libpng "
"development package before building.\n");
fprintf(stderr, "You can run with -ppm flag to decode in PPM format.\n");
return 0;
}
#endif
static int WritePPM(FILE* fout, const WebPDecBuffer* const buffer, int alpha) {
const uint32_t width = buffer->width;
const uint32_t height = buffer->height;
const uint8_t* const rgb = buffer->u.RGBA.rgba;
const int stride = buffer->u.RGBA.stride;
const size_t bytes_per_px = alpha ? 4 : 3;
uint32_t y;
if (alpha) {
fprintf(fout, "P7\nWIDTH %u\nHEIGHT %u\nDEPTH 4\nMAXVAL 255\n"
"TUPLTYPE RGB_ALPHA\nENDHDR\n", width, height);
} else {
fprintf(fout, "P6\n%u %u\n255\n", width, height);
}
for (y = 0; y < height; ++y) {
if (fwrite(rgb + y * stride, width, bytes_per_px, fout) != bytes_per_px) {
return 0;
}
}
return 1;
}
static void PutLE16(uint8_t* const dst, uint32_t value) {
dst[0] = (value >> 0) & 0xff;
dst[1] = (value >> 8) & 0xff;
}
static void PutLE32(uint8_t* const dst, uint32_t value) {
PutLE16(dst + 0, (value >> 0) & 0xffff);
PutLE16(dst + 2, (value >> 16) & 0xffff);
}
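// Illustration (not from the patch): PutLE32(dst, 0x11223344) stores the bytes
// {0x44, 0x33, 0x22, 0x11}, least-significant byte first, which is the
// little-endian layout the BMP and TIFF headers below expect.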
#define BMP_HEADER_SIZE 54
static int WriteBMP(FILE* fout, const WebPDecBuffer* const buffer) {
const int has_alpha = (buffer->colorspace != MODE_BGR);
const uint32_t width = buffer->width;
const uint32_t height = buffer->height;
const uint8_t* const rgba = buffer->u.RGBA.rgba;
const int stride = buffer->u.RGBA.stride;
const uint32_t bytes_per_px = has_alpha ? 4 : 3;
uint32_t y;
const uint32_t line_size = bytes_per_px * width;
const uint32_t bmp_stride = (line_size + 3) & ~3; // pad to 4
const uint32_t total_size = bmp_stride * height + BMP_HEADER_SIZE;
uint8_t bmp_header[BMP_HEADER_SIZE] = { 0 };
// bitmap file header
PutLE16(bmp_header + 0, 0x4d42); // signature 'BM'
PutLE32(bmp_header + 2, total_size); // size including header
PutLE32(bmp_header + 6, 0); // reserved
PutLE32(bmp_header + 10, BMP_HEADER_SIZE); // offset to pixel array
// bitmap info header
PutLE32(bmp_header + 14, 40); // DIB header size
PutLE32(bmp_header + 18, width); // dimensions
PutLE32(bmp_header + 22, -(int)height); // vertical flip!
PutLE16(bmp_header + 26, 1); // number of planes
PutLE16(bmp_header + 28, bytes_per_px * 8); // bits per pixel
PutLE32(bmp_header + 30, 0); // no compression (BI_RGB)
PutLE32(bmp_header + 34, 0); // image size (dummy)
PutLE32(bmp_header + 38, 2400); // x pixels/meter
PutLE32(bmp_header + 42, 2400); // y pixels/meter
PutLE32(bmp_header + 46, 0); // number of palette colors
PutLE32(bmp_header + 50, 0); // important color count
// TODO(skal): color profile
// write header
if (fwrite(bmp_header, sizeof(bmp_header), 1, fout) != 1) {
return 0;
}
// write pixel array
for (y = 0; y < height; ++y) {
if (fwrite(rgba + y * stride, line_size, 1, fout) != 1) {
return 0;
}
// write padding zeroes
if (bmp_stride != line_size) {
const uint8_t zeroes[3] = { 0 };
if (fwrite(zeroes, bmp_stride - line_size, 1, fout) != 1) {
return 0;
}
}
}
return 1;
}
#undef BMP_HEADER_SIZE
#define NUM_IFD_ENTRIES 15
#define EXTRA_DATA_SIZE 16
// 10b for signature/header + n * 12b entries + 4b for IFD terminator:
#define EXTRA_DATA_OFFSET (10 + 12 * NUM_IFD_ENTRIES + 4)
#define TIFF_HEADER_SIZE (EXTRA_DATA_OFFSET + EXTRA_DATA_SIZE)
static int WriteTIFF(FILE* fout, const WebPDecBuffer* const buffer) {
const int has_alpha = (buffer->colorspace != MODE_RGB);
const uint32_t width = buffer->width;
const uint32_t height = buffer->height;
const uint8_t* const rgba = buffer->u.RGBA.rgba;
const int stride = buffer->u.RGBA.stride;
const uint8_t bytes_per_px = has_alpha ? 4 : 3;
// For non-alpha case, we omit tag 0x152 (ExtraSamples).
const uint8_t num_ifd_entries = has_alpha ? NUM_IFD_ENTRIES
: NUM_IFD_ENTRIES - 1;
uint8_t tiff_header[TIFF_HEADER_SIZE] = {
0x49, 0x49, 0x2a, 0x00, // little endian signature
8, 0, 0, 0, // offset to the unique IFD that follows
// IFD (offset = 8). Entries must be written in increasing tag order.
num_ifd_entries, 0, // Number of entries in the IFD (12 bytes each).
0x00, 0x01, 3, 0, 1, 0, 0, 0, 0, 0, 0, 0, // 10: Width (TBD)
0x01, 0x01, 3, 0, 1, 0, 0, 0, 0, 0, 0, 0, // 22: Height (TBD)
0x02, 0x01, 3, 0, bytes_per_px, 0, 0, 0, // 34: BitsPerSample: 8888
EXTRA_DATA_OFFSET + 0, 0, 0, 0,
0x03, 0x01, 3, 0, 1, 0, 0, 0, 1, 0, 0, 0, // 46: Compression: none
0x06, 0x01, 3, 0, 1, 0, 0, 0, 2, 0, 0, 0, // 58: Photometric: RGB
0x11, 0x01, 4, 0, 1, 0, 0, 0, // 70: Strips offset:
TIFF_HEADER_SIZE, 0, 0, 0, // data follows header
0x12, 0x01, 3, 0, 1, 0, 0, 0, 1, 0, 0, 0, // 82: Orientation: topleft
0x15, 0x01, 3, 0, 1, 0, 0, 0, // 94: SamplesPerPixels
bytes_per_px, 0, 0, 0,
0x16, 0x01, 3, 0, 1, 0, 0, 0, 0, 0, 0, 0, // 106: Rows per strip (TBD)
0x17, 0x01, 4, 0, 1, 0, 0, 0, 0, 0, 0, 0, // 118: StripByteCount (TBD)
0x1a, 0x01, 5, 0, 1, 0, 0, 0, // 130: X-resolution
EXTRA_DATA_OFFSET + 8, 0, 0, 0,
0x1b, 0x01, 5, 0, 1, 0, 0, 0, // 142: Y-resolution
EXTRA_DATA_OFFSET + 8, 0, 0, 0,
0x1c, 0x01, 3, 0, 1, 0, 0, 0, 1, 0, 0, 0, // 154: PlanarConfiguration
0x28, 0x01, 3, 0, 1, 0, 0, 0, 2, 0, 0, 0, // 166: ResolutionUnit (inch)
0x52, 0x01, 3, 0, 1, 0, 0, 0, 1, 0, 0, 0, // 178: ExtraSamples: rgbA
0, 0, 0, 0, // 190: IFD terminator
// EXTRA_DATA_OFFSET:
8, 0, 8, 0, 8, 0, 8, 0, // BitsPerSample
72, 0, 0, 0, 1, 0, 0, 0 // 72 pixels/inch, for X/Y-resolution
};
uint32_t y;
// Fill placeholders in IFD:
PutLE32(tiff_header + 10 + 8, width);
PutLE32(tiff_header + 22 + 8, height);
PutLE32(tiff_header + 106 + 8, height);
PutLE32(tiff_header + 118 + 8, width * bytes_per_px * height);
if (!has_alpha) PutLE32(tiff_header + 178, 0); // IFD terminator
// write header
if (fwrite(tiff_header, sizeof(tiff_header), 1, fout) != 1) {
return 0;
}
// write pixel values
for (y = 0; y < height; ++y) {
if (fwrite(rgba + y * stride, bytes_per_px, width, fout) != width) {
return 0;
}
}
return 1;
}
#undef TIFF_HEADER_SIZE
#undef EXTRA_DATA_OFFSET
#undef EXTRA_DATA_SIZE
#undef NUM_IFD_ENTRIES
static int WriteAlphaPlane(FILE* fout, const WebPDecBuffer* const buffer) {
const uint32_t width = buffer->width;
const uint32_t height = buffer->height;
const uint8_t* const a = buffer->u.YUVA.a;
const int a_stride = buffer->u.YUVA.a_stride;
uint32_t y;
assert(a != NULL);
fprintf(fout, "P5\n%u %u\n255\n", width, height);
for (y = 0; y < height; ++y) {
if (fwrite(a + y * a_stride, width, 1, fout) != 1) {
return 0;
}
}
return 1;
}
// format=PGM: save a grayscale PGM file using the IMC4 layout
// (http://www.fourcc.org/yuv.php#IMC4). This is a very convenient format for
// viewing the samples, esp. for odd dimensions.
// format=YUV: just save the Y/U/V/A planes sequentially without header.
static int WritePGMOrYUV(FILE* fout, const WebPDecBuffer* const buffer,
OutputFileFormat format) {
const int width = buffer->width;
const int height = buffer->height;
const WebPYUVABuffer* const yuv = &buffer->u.YUVA;
int ok = 1;
int y;
const int pad = (format == YUV) ? 0 : 1;
const int uv_width = (width + 1) / 2;
const int uv_height = (height + 1) / 2;
const int out_stride = (width + pad) & ~pad;
const int a_height = yuv->a ? height : 0;
if (format == PGM) {
fprintf(fout, "P5\n%d %d\n255\n",
out_stride, height + uv_height + a_height);
}
for (y = 0; ok && y < height; ++y) {
ok &= (fwrite(yuv->y + y * yuv->y_stride, width, 1, fout) == 1);
if (format == PGM) {
if (width & 1) fputc(0, fout); // padding byte
}
}
if (format == PGM) { // IMC4 layout
for (y = 0; ok && y < uv_height; ++y) {
ok &= (fwrite(yuv->u + y * yuv->u_stride, uv_width, 1, fout) == 1);
ok &= (fwrite(yuv->v + y * yuv->v_stride, uv_width, 1, fout) == 1);
}
} else {
for (y = 0; ok && y < uv_height; ++y) {
ok &= (fwrite(yuv->u + y * yuv->u_stride, uv_width, 1, fout) == 1);
}
for (y = 0; ok && y < uv_height; ++y) {
ok &= (fwrite(yuv->v + y * yuv->v_stride, uv_width, 1, fout) == 1);
}
}
for (y = 0; ok && y < a_height; ++y) {
ok &= (fwrite(yuv->a + y * yuv->a_stride, width, 1, fout) == 1);
if (format == PGM) {
if (width & 1) fputc(0, fout); // padding byte
}
}
return ok;
}
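// Worked example (illustrative, not from the patch): for a 101x67 picture with
// an alpha plane, WritePGMOrYUV() in PGM mode emits a 102-pixel-wide grayscale
// image of 67 (Y) + 34 (U and V side by side) + 67 (A) = 168 rows, matching the
// IMC4 layout described above.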
static int SaveOutput(const WebPDecBuffer* const buffer,
WebPOutputFileFormat format, const char* const out_file) {
const int use_stdout = (out_file != NULL) && !strcmp(out_file, "-");
OutputFileFormat format, const char* const out_file) {
FILE* fout = NULL;
int needs_open_file = 1;
const int use_stdout = !strcmp(out_file, "-");
int ok = 1;
Stopwatch stop_watch;
if (verbose) {
StopwatchReset(&stop_watch);
}
ok = WebPSaveImage(buffer, format, out_file);
#ifdef HAVE_WINCODEC_H
needs_open_file = (format != PNG);
#endif
if (needs_open_file) {
fout = use_stdout ? ExUtilSetBinaryMode(stdout) : fopen(out_file, "wb");
if (fout == NULL) {
fprintf(stderr, "Error opening output file %s\n", out_file);
return 0;
}
}
if (format == PNG) {
#ifdef HAVE_WINCODEC_H
ok &= WritePNG(out_file, use_stdout, buffer);
#else
ok &= WritePNG(fout, buffer);
#endif
} else if (format == PAM) {
ok &= WritePPM(fout, buffer, 1);
} else if (format == PPM) {
ok &= WritePPM(fout, buffer, 0);
} else if (format == BMP) {
ok &= WriteBMP(fout, buffer);
} else if (format == TIFF) {
ok &= WriteTIFF(fout, buffer);
} else if (format == PGM || format == YUV) {
ok &= WritePGMOrYUV(fout, buffer, format);
} else if (format == ALPHA_PLANE_ONLY) {
ok &= WriteAlphaPlane(fout, buffer);
}
if (fout != NULL && fout != stdout) {
fclose(fout);
}
if (ok) {
if (!quiet) {
if (use_stdout) {
fprintf(stderr, "Saved to stdout\n");
} else {
fprintf(stderr, "Saved file %s\n", out_file);
}
if (use_stdout) {
fprintf(stderr, "Saved to stdout\n");
} else {
fprintf(stderr, "Saved file %s\n", out_file);
}
if (verbose) {
const double write_time = StopwatchReadAndReset(&stop_watch);
@ -86,21 +541,24 @@ static void Help(void) {
" -yuv ......... save the raw YUV samples in flat layout\n"
"\n"
" Other options are:\n"
" -version ..... print version number and exit\n"
" -version .... print version number and exit\n"
" -nofancy ..... don't use the fancy YUV420 upscaler\n"
" -nofilter .... disable in-loop filtering\n"
" -nodither .... disable dithering\n"
" -dither <d> .. dithering strength (in 0..100)\n"
#if WEBP_DECODER_ABI_VERSION > 0x0204
" -alpha_dither use alpha-plane dithering if needed\n"
#endif
" -mt .......... use multi-threading\n"
" -crop <x> <y> <w> <h> ... crop output with the given rectangle\n"
" -resize <w> <h> ......... scale the output (*after* any cropping)\n"
" -scale <w> <h> .......... scale the output (*after* any cropping)\n"
#if WEBP_DECODER_ABI_VERSION > 0x0203
" -flip ........ flip the output vertically\n"
#endif
" -alpha ....... only save the alpha plane\n"
" -incremental . use incremental decoding (useful for tests)\n"
" -h ........... this help message\n"
" -v ........... verbose (e.g. print encoding/decoding times)\n"
" -quiet ....... quiet mode, don't print anything\n"
" -h ....... this help message\n"
" -v ....... verbose (e.g. print encoding/decoding times)\n"
#ifndef WEBP_DLL
" -noasm ....... disable all assembly optimizations\n"
#endif
@ -111,70 +569,6 @@ static const char* const kFormatType[] = {
"unspecified", "lossy", "lossless"
};
static uint8_t* AllocateExternalBuffer(WebPDecoderConfig* config,
WebPOutputFileFormat format,
int use_external_memory) {
uint8_t* external_buffer = NULL;
WebPDecBuffer* const output_buffer = &config->output;
int w = config->input.width;
int h = config->input.height;
if (config->options.use_scaling) {
w = config->options.scaled_width;
h = config->options.scaled_height;
} else if (config->options.use_cropping) {
w = config->options.crop_width;
h = config->options.crop_height;
}
if (format >= RGB && format <= rgbA_4444) {
const int bpp = (format == RGB || format == BGR) ? 3
: (format == RGBA_4444 || format == rgbA_4444 ||
format == RGB_565) ? 2
: 4;
uint32_t stride = bpp * w + 7; // <- just for exercising
external_buffer = (uint8_t*)malloc(stride * h);
if (external_buffer == NULL) return NULL;
output_buffer->u.RGBA.stride = stride;
output_buffer->u.RGBA.size = stride * h;
output_buffer->u.RGBA.rgba = external_buffer;
} else { // YUV and YUVA
const int has_alpha = WebPIsAlphaMode(output_buffer->colorspace);
uint8_t* tmp;
uint32_t stride = w + 3;
uint32_t uv_stride = (w + 1) / 2 + 13;
uint32_t total_size = stride * h * (has_alpha ? 2 : 1)
+ 2 * uv_stride * (h + 1) / 2;
assert(format >= YUV && format <= YUVA);
external_buffer = (uint8_t*)malloc(total_size);
if (external_buffer == NULL) return NULL;
tmp = external_buffer;
output_buffer->u.YUVA.y = tmp;
output_buffer->u.YUVA.y_stride = stride;
output_buffer->u.YUVA.y_size = stride * h;
tmp += output_buffer->u.YUVA.y_size;
if (has_alpha) {
output_buffer->u.YUVA.a = tmp;
output_buffer->u.YUVA.a_stride = stride;
output_buffer->u.YUVA.a_size = stride * h;
tmp += output_buffer->u.YUVA.a_size;
} else {
output_buffer->u.YUVA.a = NULL;
output_buffer->u.YUVA.a_stride = 0;
}
output_buffer->u.YUVA.u = tmp;
output_buffer->u.YUVA.u_stride = uv_stride;
output_buffer->u.YUVA.u_size = uv_stride * (h + 1) / 2;
tmp += output_buffer->u.YUVA.u_size;
output_buffer->u.YUVA.v = tmp;
output_buffer->u.YUVA.v_stride = uv_stride;
output_buffer->u.YUVA.v_size = uv_stride * (h + 1) / 2;
tmp += output_buffer->u.YUVA.v_size;
assert(tmp <= external_buffer + total_size);
}
output_buffer->is_external_memory = use_external_memory;
return external_buffer;
}
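// Usage sketch (illustrative, mirroring main() below): once the bitstream
// features have been read into config.input, a caller does roughly
//   external_buffer = AllocateExternalBuffer(&config, format, use_external_memory);
//   if (external_buffer == NULL) goto Exit;
//   status = DecodeWebP(data, data_size, &config);
//   ...
//   WebPFreeDecBuffer(output_buffer);   // does not free the external memory
//   free(external_buffer);
// i.e. because is_external_memory is set, the caller owns the buffer for the
// whole decode and releases it itself.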
int main(int argc, const char *argv[]) {
int ok = 0;
const char *in_file = NULL;
@ -183,11 +577,7 @@ int main(int argc, const char *argv[]) {
WebPDecoderConfig config;
WebPDecBuffer* const output_buffer = &config.output;
WebPBitstreamFeatures* const bitstream = &config.input;
WebPOutputFileFormat format = PNG;
uint8_t* external_buffer = NULL;
int use_external_memory = 0;
const uint8_t* data = NULL;
OutputFileFormat format = PNG;
int incremental = 0;
int c;
@ -217,8 +607,6 @@ int main(int argc, const char *argv[]) {
format = BMP;
} else if (!strcmp(argv[c], "-tiff")) {
format = TIFF;
} else if (!strcmp(argv[c], "-quiet")) {
quiet = 1;
} else if (!strcmp(argv[c], "-version")) {
const int version = WebPGetDecoderVersion();
printf("%d.%d.%d\n",
@ -227,36 +615,13 @@ int main(int argc, const char *argv[]) {
} else if (!strcmp(argv[c], "-pgm")) {
format = PGM;
} else if (!strcmp(argv[c], "-yuv")) {
format = RAW_YUV;
} else if (!strcmp(argv[c], "-pixel_format") && c < argc - 1) {
const char* const fmt = argv[++c];
if (!strcmp(fmt, "RGB")) format = RGB;
else if (!strcmp(fmt, "RGBA")) format = RGBA;
else if (!strcmp(fmt, "BGR")) format = BGR;
else if (!strcmp(fmt, "BGRA")) format = BGRA;
else if (!strcmp(fmt, "ARGB")) format = ARGB;
else if (!strcmp(fmt, "RGBA_4444")) format = RGBA_4444;
else if (!strcmp(fmt, "RGB_565")) format = RGB_565;
else if (!strcmp(fmt, "rgbA")) format = rgbA;
else if (!strcmp(fmt, "bgrA")) format = bgrA;
else if (!strcmp(fmt, "Argb")) format = Argb;
else if (!strcmp(fmt, "rgbA_4444")) format = rgbA_4444;
else if (!strcmp(fmt, "YUV")) format = YUV;
else if (!strcmp(fmt, "YUVA")) format = YUVA;
else {
fprintf(stderr, "Can't parse pixel_format %s\n", fmt);
parse_error = 1;
}
} else if (!strcmp(argv[c], "-external_memory") && c < argc - 1) {
use_external_memory = ExUtilGetInt(argv[++c], 0, &parse_error);
parse_error |= (use_external_memory > 2 || use_external_memory < 0);
if (parse_error) {
fprintf(stderr, "Can't parse 'external_memory' value %s\n", argv[c]);
}
format = YUV;
} else if (!strcmp(argv[c], "-mt")) {
config.options.use_threads = 1;
#if WEBP_DECODER_ABI_VERSION > 0x0204
} else if (!strcmp(argv[c], "-alpha_dither")) {
config.options.alpha_dithering_strength = 100;
#endif
} else if (!strcmp(argv[c], "-nodither")) {
config.options.dithering_strength = 0;
} else if (!strcmp(argv[c], "-dither") && c < argc - 1) {
@ -268,13 +633,14 @@ int main(int argc, const char *argv[]) {
config.options.crop_top = ExUtilGetInt(argv[++c], 0, &parse_error);
config.options.crop_width = ExUtilGetInt(argv[++c], 0, &parse_error);
config.options.crop_height = ExUtilGetInt(argv[++c], 0, &parse_error);
} else if ((!strcmp(argv[c], "-scale") || !strcmp(argv[c], "-resize")) &&
c < argc - 2) { // '-scale' is left for compatibility
} else if (!strcmp(argv[c], "-scale") && c < argc - 2) {
config.options.use_scaling = 1;
config.options.scaled_width = ExUtilGetInt(argv[++c], 0, &parse_error);
config.options.scaled_height = ExUtilGetInt(argv[++c], 0, &parse_error);
#if WEBP_DECODER_ABI_VERSION > 0x0203
} else if (!strcmp(argv[c], "-flip")) {
config.options.flip = 1;
#endif
} else if (!strcmp(argv[c], "-v")) {
verbose = 1;
#ifndef WEBP_DLL
@ -306,12 +672,11 @@ int main(int argc, const char *argv[]) {
return -1;
}
if (quiet) verbose = 0;
{
VP8StatusCode status = VP8_STATUS_OK;
size_t data_size = 0;
if (!LoadWebP(in_file, &data, &data_size, bitstream)) {
const uint8_t* data = NULL;
if (!ExUtilLoadWebP(in_file, &data, &data_size, bitstream)) {
return -1;
}
@ -337,81 +702,49 @@ int main(int argc, const char *argv[]) {
bitstream->has_alpha ? MODE_rgbA : MODE_RGB;
break;
case PGM:
case RAW_YUV:
case YUV:
output_buffer->colorspace = bitstream->has_alpha ? MODE_YUVA : MODE_YUV;
break;
case ALPHA_PLANE_ONLY:
output_buffer->colorspace = MODE_YUVA;
break;
// forced modes:
case RGB: output_buffer->colorspace = MODE_RGB; break;
case RGBA: output_buffer->colorspace = MODE_RGBA; break;
case BGR: output_buffer->colorspace = MODE_BGR; break;
case BGRA: output_buffer->colorspace = MODE_BGRA; break;
case ARGB: output_buffer->colorspace = MODE_ARGB; break;
case RGBA_4444: output_buffer->colorspace = MODE_RGBA_4444; break;
case RGB_565: output_buffer->colorspace = MODE_RGB_565; break;
case rgbA: output_buffer->colorspace = MODE_rgbA; break;
case bgrA: output_buffer->colorspace = MODE_bgrA; break;
case Argb: output_buffer->colorspace = MODE_Argb; break;
case rgbA_4444: output_buffer->colorspace = MODE_rgbA_4444; break;
case YUV: output_buffer->colorspace = MODE_YUV; break;
case YUVA: output_buffer->colorspace = MODE_YUVA; break;
default: goto Exit;
default:
free((void*)data);
return -1;
}
if (use_external_memory > 0 && format >= RGB) {
external_buffer = AllocateExternalBuffer(&config, format,
use_external_memory);
if (external_buffer == NULL) goto Exit;
}
{
Stopwatch stop_watch;
if (verbose) StopwatchReset(&stop_watch);
if (incremental) {
status = DecodeWebPIncremental(data, data_size, &config);
} else {
status = DecodeWebP(data, data_size, &config);
}
if (verbose) {
const double decode_time = StopwatchReadAndReset(&stop_watch);
fprintf(stderr, "Time to decode picture: %.3fs\n", decode_time);
}
if (incremental) {
status = ExUtilDecodeWebPIncremental(data, data_size, verbose, &config);
} else {
status = ExUtilDecodeWebP(data, data_size, verbose, &config);
}
free((void*)data);
ok = (status == VP8_STATUS_OK);
if (!ok) {
PrintWebPError(in_file, status);
ExUtilPrintWebPError(in_file, status);
goto Exit;
}
}
if (out_file != NULL) {
if (!quiet) {
fprintf(stderr, "Decoded %s. Dimensions: %d x %d %s. Format: %s. "
"Now saving...\n",
in_file, output_buffer->width, output_buffer->height,
bitstream->has_alpha ? " (with alpha)" : "",
kFormatType[bitstream->format]);
}
fprintf(stderr, "Decoded %s. Dimensions: %d x %d %s. Format: %s. "
"Now saving...\n",
in_file, output_buffer->width, output_buffer->height,
bitstream->has_alpha ? " (with alpha)" : "",
kFormatType[bitstream->format]);
ok = SaveOutput(output_buffer, format, out_file);
} else {
if (!quiet) {
fprintf(stderr, "File %s can be decoded "
"(dimensions: %d x %d %s. Format: %s).\n",
in_file, output_buffer->width, output_buffer->height,
bitstream->has_alpha ? " (with alpha)" : "",
kFormatType[bitstream->format]);
fprintf(stderr, "Nothing written; "
"use -o flag to save the result as e.g. PNG.\n");
}
fprintf(stderr, "File %s can be decoded "
"(dimensions: %d x %d %s. Format: %s).\n",
in_file, output_buffer->width, output_buffer->height,
bitstream->has_alpha ? " (with alpha)" : "",
kFormatType[bitstream->format]);
fprintf(stderr, "Nothing written; "
"use -o flag to save the result as e.g. PNG.\n");
}
Exit:
WebPFreeDecBuffer(output_buffer);
free((void*)external_buffer);
free((void*)data);
return ok ? 0 : -1;
}


@ -12,10 +12,17 @@
#include "./example_util.h"
#if defined(_WIN32)
#include <fcntl.h> // for _O_BINARY
#include <io.h> // for _setmode()
#endif
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "webp/decode.h"
#include "./stopwatch.h"
//------------------------------------------------------------------------------
// String parsing
@ -34,18 +41,6 @@ int ExUtilGetInt(const char* const v, int base, int* const error) {
return (int)ExUtilGetUInt(v, base, error);
}
int ExUtilGetInts(const char* v, int base, int max_output, int output[]) {
int n, error = 0;
for (n = 0; v != NULL && n < max_output; ++n) {
const int value = ExUtilGetInt(v, base, &error);
if (error) return -1;
output[n] = value;
v = strchr(v, ',');
if (v != NULL) ++v; // skip over the trailing ','
}
return n;
}
float ExUtilGetFloat(const char* const v, int* const error) {
char* end = NULL;
const float f = (v != NULL) ? (float)strtod(v, &end) : 0.f;
@ -56,3 +51,208 @@ float ExUtilGetFloat(const char* const v, int* const error) {
}
return f;
}
// -----------------------------------------------------------------------------
// File I/O
FILE* ExUtilSetBinaryMode(FILE* file) {
#if defined(_WIN32)
if (_setmode(_fileno(file), _O_BINARY) == -1) {
fprintf(stderr, "Failed to reopen file in O_BINARY mode.\n");
return NULL;
}
#endif
return file;
}
int ExUtilReadFromStdin(const uint8_t** data, size_t* data_size) {
static const size_t kBlockSize = 16384; // default initial size
size_t max_size = 0;
size_t size = 0;
uint8_t* input = NULL;
if (data == NULL || data_size == NULL) return 0;
*data = NULL;
*data_size = 0;
if (!ExUtilSetBinaryMode(stdin)) return 0;
while (!feof(stdin)) {
// We double the buffer size each time and read as much as possible.
const size_t extra_size = (max_size == 0) ? kBlockSize : max_size;
void* const new_data = realloc(input, max_size + extra_size);
if (new_data == NULL) goto Error;
input = (uint8_t*)new_data;
max_size += extra_size;
size += fread(input + size, 1, extra_size, stdin);
if (size < max_size) break;
}
if (ferror(stdin)) goto Error;
*data = input;
*data_size = size;
return 1;
Error:
free(input);
fprintf(stderr, "Could not read from stdin\n");
return 0;
}
int ExUtilReadFile(const char* const file_name,
const uint8_t** data, size_t* data_size) {
int ok;
void* file_data;
size_t file_size;
FILE* in;
const int from_stdin = (file_name == NULL) || !strcmp(file_name, "-");
if (from_stdin) return ExUtilReadFromStdin(data, data_size);
if (data == NULL || data_size == NULL) return 0;
*data = NULL;
*data_size = 0;
in = fopen(file_name, "rb");
if (in == NULL) {
fprintf(stderr, "cannot open input file '%s'\n", file_name);
return 0;
}
fseek(in, 0, SEEK_END);
file_size = ftell(in);
fseek(in, 0, SEEK_SET);
file_data = malloc(file_size);
if (file_data == NULL) return 0;
ok = (fread(file_data, file_size, 1, in) == 1);
fclose(in);
if (!ok) {
fprintf(stderr, "Could not read %d bytes of data from file %s\n",
(int)file_size, file_name);
free(file_data);
return 0;
}
*data = (uint8_t*)file_data;
*data_size = file_size;
return 1;
}
int ExUtilWriteFile(const char* const file_name,
const uint8_t* data, size_t data_size) {
int ok;
FILE* out;
const int to_stdout = (file_name == NULL) || !strcmp(file_name, "-");
if (data == NULL) {
return 0;
}
out = to_stdout ? stdout : fopen(file_name, "wb");
if (out == NULL) {
fprintf(stderr, "Error! Cannot open output file '%s'\n", file_name);
return 0;
}
ok = (fwrite(data, data_size, 1, out) == 1);
if (out != stdout) fclose(out);
return ok;
}
//------------------------------------------------------------------------------
// WebP decoding
static const char* const kStatusMessages[VP8_STATUS_NOT_ENOUGH_DATA + 1] = {
"OK", "OUT_OF_MEMORY", "INVALID_PARAM", "BITSTREAM_ERROR",
"UNSUPPORTED_FEATURE", "SUSPENDED", "USER_ABORT", "NOT_ENOUGH_DATA"
};
static void PrintAnimationWarning(const WebPDecoderConfig* const config) {
if (config->input.has_animation) {
fprintf(stderr,
"Error! Decoding of an animated WebP file is not supported.\n"
" Use webpmux to extract the individual frames or\n"
" vwebp to view this image.\n");
}
}
void ExUtilPrintWebPError(const char* const in_file, int status) {
fprintf(stderr, "Decoding of %s failed.\n", in_file);
fprintf(stderr, "Status: %d", status);
if (status >= VP8_STATUS_OK && status <= VP8_STATUS_NOT_ENOUGH_DATA) {
fprintf(stderr, "(%s)", kStatusMessages[status]);
}
fprintf(stderr, "\n");
}
int ExUtilLoadWebP(const char* const in_file,
const uint8_t** data, size_t* data_size,
WebPBitstreamFeatures* bitstream) {
VP8StatusCode status;
WebPBitstreamFeatures local_features;
if (!ExUtilReadFile(in_file, data, data_size)) return 0;
if (bitstream == NULL) {
bitstream = &local_features;
}
status = WebPGetFeatures(*data, *data_size, bitstream);
if (status != VP8_STATUS_OK) {
free((void*)*data);
*data = NULL;
*data_size = 0;
ExUtilPrintWebPError(in_file, status);
return 0;
}
return 1;
}
//------------------------------------------------------------------------------
VP8StatusCode ExUtilDecodeWebP(const uint8_t* const data, size_t data_size,
int verbose, WebPDecoderConfig* const config) {
Stopwatch stop_watch;
VP8StatusCode status = VP8_STATUS_OK;
if (config == NULL) return VP8_STATUS_INVALID_PARAM;
PrintAnimationWarning(config);
StopwatchReset(&stop_watch);
// Decoding call.
status = WebPDecode(data, data_size, config);
if (verbose) {
const double decode_time = StopwatchReadAndReset(&stop_watch);
fprintf(stderr, "Time to decode picture: %.3fs\n", decode_time);
}
return status;
}
VP8StatusCode ExUtilDecodeWebPIncremental(
const uint8_t* const data, size_t data_size,
int verbose, WebPDecoderConfig* const config) {
Stopwatch stop_watch;
VP8StatusCode status = VP8_STATUS_OK;
if (config == NULL) return VP8_STATUS_INVALID_PARAM;
PrintAnimationWarning(config);
StopwatchReset(&stop_watch);
// Decoding call.
{
WebPIDecoder* const idec = WebPIDecode(data, data_size, config);
if (idec == NULL) {
fprintf(stderr, "Failed during WebPINewDecoder().\n");
return VP8_STATUS_OUT_OF_MEMORY;
} else {
status = WebPIUpdate(idec, data, data_size);
WebPIDelete(idec);
}
}
if (verbose) {
const double decode_time = StopwatchReadAndReset(&stop_watch);
fprintf(stderr, "Time to decode picture: %.3fs\n", decode_time);
}
return status;
}
// -----------------------------------------------------------------------------
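Taken together, the helpers above give the standard load-then-decode flow. A minimal sketch, where the helper name and the choice of RGBA output are illustrative assumptions rather than part of the patch:

#include <stdlib.h>
#include "webp/decode.h"
#include "./example_util.h"

// Illustrative helper (not from the patch): decode 'path' to RGBA using the
// utilities above. Returns 1 on success, 0 otherwise.
static int DecodeToRGBA(const char* const path) {
  WebPDecoderConfig config;
  const uint8_t* data = NULL;
  size_t data_size = 0;
  VP8StatusCode status;
  if (!WebPInitDecoderConfig(&config)) return 0;
  if (!ExUtilLoadWebP(path, &data, &data_size, &config.input)) return 0;
  config.output.colorspace = MODE_RGBA;
  status = ExUtilDecodeWebP(data, data_size, 0 /* verbose */, &config);
  free((void*)data);
  // On success the pixels are in config.output.u.RGBA; released here for brevity.
  WebPFreeDecBuffer(&config.output);
  return (status == VP8_STATUS_OK);
}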


@ -13,7 +13,8 @@
#ifndef WEBP_EXAMPLES_EXAMPLE_UTIL_H_
#define WEBP_EXAMPLES_EXAMPLE_UTIL_H_
#include "webp/types.h"
#include <stdio.h>
#include "webp/decode.h"
#ifdef __cplusplus
extern "C" {
@ -29,11 +30,57 @@ uint32_t ExUtilGetUInt(const char* const v, int base, int* const error);
int ExUtilGetInt(const char* const v, int base, int* const error);
float ExUtilGetFloat(const char* const v, int* const error);
// This variant of ExUtilGetInt() will parse multiple integers from a
// comma-separated list. Up to 'max_output' integers are parsed.
// The result is placed in the output[] array, and the number of integers
// actually parsed is returned, or -1 if an error occurred.
int ExUtilGetInts(const char* v, int base, int max_output, int output[]);
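// Example (illustrative): given int vals[4],
//   ExUtilGetInts("10,20,300,200", 0, 4, vals)
// fills vals with {10, 20, 300, 200} and returns 4.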
//------------------------------------------------------------------------------
// File I/O
// Reopen file in binary (O_BINARY) mode.
// Returns 'file' on success, NULL otherwise.
FILE* ExUtilSetBinaryMode(FILE* file);
// Allocates storage for entire file 'file_name' and returns contents and size
// in 'data' and 'data_size'. Returns 1 on success, 0 otherwise. '*data' should
// be deleted using free().
// If 'file_name' is NULL or equal to "-", input is read from stdin by calling
// the function ExUtilReadFromStdin().
int ExUtilReadFile(const char* const file_name,
const uint8_t** data, size_t* data_size);
// Same as ExUtilReadFile(), but reads until EOF from stdin instead.
int ExUtilReadFromStdin(const uint8_t** data, size_t* data_size);
// Write a data segment into a file named 'file_name'. Returns true if ok.
// If 'file_name' is NULL or equal to "-", output is written to stdout.
int ExUtilWriteFile(const char* const file_name,
const uint8_t* data, size_t data_size);
//------------------------------------------------------------------------------
// WebP decoding
// Prints an informative error message regarding decode failure of 'in_file'.
// 'status' is treated as a VP8StatusCode and if valid will be printed as a
// text string.
void ExUtilPrintWebPError(const char* const in_file, int status);
// Reads a WebP from 'in_file', returning the contents and size in 'data' and
// 'data_size'. If not NULL, 'bitstream' is populated using WebPGetFeatures().
// Returns true on success.
int ExUtilLoadWebP(const char* const in_file,
const uint8_t** data, size_t* data_size,
WebPBitstreamFeatures* bitstream);
// Decodes the WebP contained in 'data'.
// 'config' is a structure previously initialized by WebPInitDecoderConfig().
// 'config->output' should have the desired colorspace selected. 'verbose' will
// cause decode timing to be reported.
// Returns the decoder status. On success 'config->output' will contain the
// decoded picture.
VP8StatusCode ExUtilDecodeWebP(const uint8_t* const data, size_t data_size,
int verbose, WebPDecoderConfig* const config);
// Same as ExUtilDecodeWebP(), but using the incremental decoder.
VP8StatusCode ExUtilDecodeWebPIncremental(
const uint8_t* const data, size_t data_size,
int verbose, WebPDecoderConfig* const config);
#ifdef __cplusplus
} // extern "C"


@ -14,7 +14,6 @@
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifdef HAVE_CONFIG_H
@ -26,13 +25,183 @@
#include <gif_lib.h>
#include "webp/encode.h"
#include "webp/mux.h"
#include "../examples/example_util.h"
#include "../imageio/imageio_util.h"
#include "./gifdec.h"
#include "./example_util.h"
#include "./gif2webp_util.h"
// GIFLIB_MAJOR is only defined in libgif >= 4.2.0.
#if defined(GIFLIB_MAJOR) && defined(GIFLIB_MINOR)
# define LOCAL_GIF_VERSION ((GIFLIB_MAJOR << 8) | GIFLIB_MINOR)
# define LOCAL_GIF_PREREQ(maj, min) \
(LOCAL_GIF_VERSION >= (((maj) << 8) | (min)))
#else
# define LOCAL_GIF_VERSION 0
# define LOCAL_GIF_PREREQ(maj, min) 0
#endif
#define GIF_TRANSPARENT_MASK 0x01
#define GIF_DISPOSE_MASK 0x07
#define GIF_DISPOSE_SHIFT 2
#define WHITE_COLOR 0xffffffff
#define MAX_CACHE_SIZE 30
//------------------------------------------------------------------------------
static int transparent_index = GIF_INDEX_INVALID; // Opaque by default.
static int transparent_index = -1; // Opaque frame by default.
static void SanitizeKeyFrameIntervals(size_t* const kmin_ptr,
size_t* const kmax_ptr) {
size_t kmin = *kmin_ptr;
size_t kmax = *kmax_ptr;
int print_warning = 1;
if (kmin == 0) { // Disable keyframe insertion.
kmax = ~0;
kmin = kmax - 1;
print_warning = 0;
}
if (kmax == 0) {
kmax = ~0;
print_warning = 0;
}
if (kmin >= kmax) {
kmin = kmax - 1;
if (print_warning) {
fprintf(stderr,
"WARNING: Setting kmin = %d, so that kmin < kmax.\n", (int)kmin);
}
} else if (kmin < (kmax / 2 + 1)) {
// This ensures that cache.keyframe + kmin >= kmax is always true. So, we
// can flush all the frames in the count_since_key_frame == kmax case.
kmin = (kmax / 2 + 1);
if (print_warning) {
fprintf(stderr,
"WARNING: Setting kmin = %d, so that kmin >= kmax / 2 + 1.\n",
(int)kmin);
}
}
// Limit the max number of frames that are allocated.
if (kmax - kmin > MAX_CACHE_SIZE) {
kmin = kmax - MAX_CACHE_SIZE;
if (print_warning) {
fprintf(stderr,
"WARNING: Setting kmin = %d, so that kmax - kmin <= 30.\n",
(int)kmin);
}
}
*kmin_ptr = kmin;
*kmax_ptr = kmax;
}
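// Worked example (illustrative): with the lossy defaults kmin=3, kmax=5 the
// values pass through unchanged, but a user-supplied kmin=1, kmax=20 is raised
// to kmin = 20/2 + 1 = 11, so a key frame is always available by the time
// count_since_key_frame reaches kmax.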
static void Remap(const uint8_t* const src, const GifFileType* const gif,
uint32_t* dst, int len) {
int i;
const GifColorType* colors;
const ColorMapObject* const cmap =
gif->Image.ColorMap ? gif->Image.ColorMap : gif->SColorMap;
if (cmap == NULL) return;
colors = cmap->Colors;
for (i = 0; i < len; ++i) {
const GifColorType c = colors[src[i]];
dst[i] = (src[i] == transparent_index) ? WEBP_UTIL_TRANSPARENT_COLOR
: c.Blue | (c.Green << 8) | (c.Red << 16) | (0xff << 24);
}
}
// Read the GIF image frame.
static int ReadFrame(GifFileType* const gif, WebPFrameRect* const gif_rect,
WebPPicture* const webp_frame) {
WebPPicture sub_image;
const GifImageDesc* const image_desc = &gif->Image;
uint32_t* dst = NULL;
uint8_t* tmp = NULL;
int ok = 0;
WebPFrameRect rect = {
image_desc->Left, image_desc->Top, image_desc->Width, image_desc->Height
};
*gif_rect = rect;
// Use a view for the sub-picture:
if (!WebPPictureView(webp_frame, rect.x_offset, rect.y_offset,
rect.width, rect.height, &sub_image)) {
fprintf(stderr, "Sub-image %dx%d at position %d,%d is invalid!\n",
rect.width, rect.height, rect.x_offset, rect.y_offset);
return 0;
}
dst = sub_image.argb;
tmp = (uint8_t*)malloc(rect.width * sizeof(*tmp));
if (tmp == NULL) goto End;
if (image_desc->Interlace) { // Interlaced image.
// We need 4 passes, with the following offsets and jumps.
const int interlace_offsets[] = { 0, 4, 2, 1 };
const int interlace_jumps[] = { 8, 8, 4, 2 };
int pass;
for (pass = 0; pass < 4; ++pass) {
int y;
for (y = interlace_offsets[pass]; y < rect.height;
y += interlace_jumps[pass]) {
if (DGifGetLine(gif, tmp, rect.width) == GIF_ERROR) goto End;
Remap(tmp, gif, dst + y * sub_image.argb_stride, rect.width);
}
}
} else { // Non-interlaced image.
int y;
for (y = 0; y < rect.height; ++y) {
if (DGifGetLine(gif, tmp, rect.width) == GIF_ERROR) goto End;
Remap(tmp, gif, dst + y * sub_image.argb_stride, rect.width);
}
}
ok = 1;
End:
if (!ok) webp_frame->error_code = sub_image.error_code;
WebPPictureFree(&sub_image);
free(tmp);
return ok;
}
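// Illustration of the interlaced path above (not from the patch): for a 16-row
// frame the four passes touch rows {0,8}, {4,12}, {2,6,10,14} and
// {1,3,5,...,15}, i.e. offsets {0,4,2,1} with jumps {8,8,4,2}, which together
// cover every row exactly once.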
static void GetBackgroundColor(const ColorMapObject* const color_map,
int bgcolor_idx, uint32_t* const bgcolor) {
if (transparent_index != -1 && bgcolor_idx == transparent_index) {
*bgcolor = WEBP_UTIL_TRANSPARENT_COLOR; // Special case.
} else if (color_map == NULL || color_map->Colors == NULL
|| bgcolor_idx >= color_map->ColorCount) {
*bgcolor = WHITE_COLOR;
fprintf(stderr,
"GIF decode warning: invalid background color index. Assuming "
"white background.\n");
} else {
const GifColorType color = color_map->Colors[bgcolor_idx];
*bgcolor = (0xff << 24)
| (color.Red << 16)
| (color.Green << 8)
| (color.Blue << 0);
}
}
static void DisplayGifError(const GifFileType* const gif, int gif_error) {
// libgif 4.2.0 has retired PrintGifError() and added GifErrorString().
#if LOCAL_GIF_PREREQ(4,2)
#if LOCAL_GIF_PREREQ(5,0)
// Static string actually, hence the const char* cast.
const char* error_str = (const char*)GifErrorString(
(gif == NULL) ? gif_error : gif->Error);
#else
const char* error_str = (const char*)GifErrorString();
(void)gif;
#endif
if (error_str == NULL) error_str = "Unknown error";
fprintf(stderr, "GIFLib Error %d: %s\n", gif_error, error_str);
#else
(void)gif;
fprintf(stderr, "GIFLib Error %d: ", gif_error);
PrintGifError();
fprintf(stderr, "\n");
#endif
}
static const char* const kErrorMessages[-WEBP_MUX_NOT_ENOUGH_DATA + 1] = {
"WEBP_MUX_NOT_FOUND", "WEBP_MUX_INVALID_ARGUMENT", "WEBP_MUX_BAD_DATA",
@ -56,16 +225,12 @@ static void Help(void) {
printf("Usage:\n");
printf(" gif2webp [options] gif_file -o webp_file\n");
printf("Options:\n");
printf(" -h / -help ............. this help\n");
printf(" -h / -help ............ this help\n");
printf(" -lossy ................. encode image using lossy compression\n");
printf(" -mixed ................. for each frame in the image, pick lossy\n"
" or lossless compression heuristically\n");
printf(" -q <float> ............. quality factor (0:small..100:big)\n");
printf(" -m <int> ............... compression method (0=fast, 6=slowest)\n");
printf(" -min_size .............. minimize output size (default:off)\n"
" lossless compression by default; can be\n"
" combined with -q, -m, -lossy or -mixed\n"
" options\n");
printf(" -kmin <int> ............ min distance between key frames\n");
printf(" -kmax <int> ............ max distance between key frames\n");
printf(" -f <int> ............... filter strength (0=off..100)\n");
@ -92,48 +257,35 @@ int main(int argc, const char *argv[]) {
const char *in_file = NULL, *out_file = NULL;
FILE* out = NULL;
GifFileType* gif = NULL;
int frame_duration = 0;
int frame_timestamp = 0;
GIFDisposeMethod orig_dispose = GIF_DISPOSE_NONE;
WebPPicture frame; // Frame rectangle only (not disposed).
WebPPicture curr_canvas; // Not disposed.
WebPPicture prev_canvas; // Disposed.
WebPAnimEncoder* enc = NULL;
WebPAnimEncoderOptions enc_options;
WebPConfig config;
WebPPicture frame;
int duration = 0;
FrameDisposeMethod orig_dispose = FRAME_DISPOSE_NONE;
WebPMuxAnimParams anim = { WHITE_COLOR, 0 };
WebPFrameCache* cache = NULL;
int is_first_frame = 1; // Whether we are processing the first frame.
int done;
int c;
int quiet = 0;
WebPData webp_data;
int keep_metadata = METADATA_XMP; // ICC not output by default.
WebPData icc_data;
int stored_icc = 0; // Whether we have already stored an ICC profile.
WebPData xmp_data;
int stored_xmp = 0; // Whether we have already stored an XMP profile.
int loop_count = 0;
int stored_loop_count = 0; // Whether we have found an explicit loop count.
WebPMux* mux = NULL;
WebPData webp_data = { NULL, 0 };
int keep_metadata = METADATA_XMP; // ICC not output by default.
int stored_icc = 0; // Whether we have already stored an ICC profile.
int stored_xmp = 0;
int default_kmin = 1; // Whether to use default kmin value.
int default_kmax = 1;
size_t kmin = 0;
size_t kmax = 0;
int allow_mixed = 0; // If true, each frame can be lossy or lossless.
if (!WebPConfigInit(&config) || !WebPAnimEncoderOptionsInit(&enc_options) ||
!WebPPictureInit(&frame) || !WebPPictureInit(&curr_canvas) ||
!WebPPictureInit(&prev_canvas)) {
if (!WebPConfigInit(&config) || !WebPPictureInit(&frame)) {
fprintf(stderr, "Error! Version mismatch!\n");
return -1;
}
config.lossless = 1; // Use lossless compression by default.
WebPDataInit(&webp_data);
WebPDataInit(&icc_data);
WebPDataInit(&xmp_data);
if (argc == 1) {
Help();
return 0;
@ -149,19 +301,17 @@ int main(int argc, const char *argv[]) {
} else if (!strcmp(argv[c], "-lossy")) {
config.lossless = 0;
} else if (!strcmp(argv[c], "-mixed")) {
enc_options.allow_mixed = 1;
allow_mixed = 1;
config.lossless = 0;
} else if (!strcmp(argv[c], "-q") && c < argc - 1) {
config.quality = ExUtilGetFloat(argv[++c], &parse_error);
} else if (!strcmp(argv[c], "-m") && c < argc - 1) {
config.method = ExUtilGetInt(argv[++c], 0, &parse_error);
} else if (!strcmp(argv[c], "-min_size")) {
enc_options.minimize_size = 1;
} else if (!strcmp(argv[c], "-kmax") && c < argc - 1) {
enc_options.kmax = ExUtilGetInt(argv[++c], 0, &parse_error);
kmax = ExUtilGetUInt(argv[++c], 0, &parse_error);
default_kmax = 0;
} else if (!strcmp(argv[c], "-kmin") && c < argc - 1) {
enc_options.kmin = ExUtilGetInt(argv[++c], 0, &parse_error);
kmin = ExUtilGetUInt(argv[++c], 0, &parse_error);
default_kmin = 0;
} else if (!strcmp(argv[c], "-f") && c < argc - 1) {
config.filter_strength = ExUtilGetInt(argv[++c], 0, &parse_error);
@ -216,10 +366,8 @@ int main(int argc, const char *argv[]) {
return 0;
} else if (!strcmp(argv[c], "-quiet")) {
quiet = 1;
enc_options.verbose = 0;
} else if (!strcmp(argv[c], "-v")) {
verbose = 1;
enc_options.verbose = 1;
} else if (!strcmp(argv[c], "--")) {
if (c < argc - 1) in_file = argv[++c];
break;
@ -239,11 +387,12 @@ int main(int argc, const char *argv[]) {
// Appropriate default kmin, kmax values for lossy and lossless.
if (default_kmin) {
enc_options.kmin = config.lossless ? 9 : 3;
kmin = config.lossless ? 9 : 3;
}
if (default_kmax) {
enc_options.kmax = config.lossless ? 17 : 5;
kmax = config.lossless ? 17 : 5;
}
SanitizeKeyFrameIntervals(&kmin, &kmax);
if (!WebPValidateConfig(&config)) {
fprintf(stderr, "Error! Invalid configuration.\n");
@ -264,6 +413,12 @@ int main(int argc, const char *argv[]) {
#endif
if (gif == NULL) goto End;
mux = WebPMuxNew();
if (mux == NULL) {
fprintf(stderr, "ERROR: could not create a mux object.\n");
goto End;
}
// Loop over GIF images
done = 0;
do {
@ -272,17 +427,17 @@ int main(int argc, const char *argv[]) {
switch (type) {
case IMAGE_DESC_RECORD_TYPE: {
GIFFrameRect gif_rect;
WebPFrameRect gif_rect;
GifImageDesc* const image_desc = &gif->Image;
if (!DGifGetImageDesc(gif)) goto End;
// Fix some broken GIF global headers that report
// 0 x 0 screen dimension.
if (is_first_frame) {
if (verbose) {
printf("Canvas screen: %d x %d\n", gif->SWidth, gif->SHeight);
}
// Fix some broken GIF global headers that report
// 0 x 0 screen dimension.
if (gif->SWidth == 0 || gif->SHeight == 0) {
image_desc->Left = 0;
image_desc->Top = 0;
@ -296,62 +451,61 @@ int main(int argc, const char *argv[]) {
gif->SWidth, gif->SHeight);
}
}
#if WEBP_MUX_ABI_VERSION > 0x0101
// Set definitive canvas size.
err = WebPMuxSetCanvasSize(mux, gif->SWidth, gif->SHeight);
if (err != WEBP_MUX_OK) {
fprintf(stderr, "Invalid canvas size %d x %d\n",
gif->SWidth, gif->SHeight);
goto End;
}
#endif
// Allocate current buffer.
frame.width = gif->SWidth;
frame.height = gif->SHeight;
frame.use_argb = 1;
if (!WebPPictureAlloc(&frame)) goto End;
GIFClearPic(&frame, NULL);
WebPPictureCopy(&frame, &curr_canvas);
WebPPictureCopy(&frame, &prev_canvas);
WebPUtilClearPic(&frame, NULL);
// Initialize cache.
cache = WebPFrameCacheNew(frame.width, frame.height,
kmin, kmax, allow_mixed);
if (cache == NULL) goto End;
// Background color.
GIFGetBackgroundColor(gif->SColorMap, gif->SBackGroundColor,
transparent_index,
&enc_options.anim_params.bgcolor);
// Initialize encoder.
enc = WebPAnimEncoderNew(curr_canvas.width, curr_canvas.height,
&enc_options);
if (enc == NULL) {
fprintf(stderr,
"Error! Could not create encoder object. Possibly due to "
"a memory error.\n");
goto End;
}
is_first_frame = 0;
GetBackgroundColor(gif->SColorMap, gif->SBackGroundColor,
&anim.bgcolor);
}
// Some even more broken GIF can have sub-rect with zero width/height.
if (image_desc->Width == 0 || image_desc->Height == 0) {
image_desc->Width = gif->SWidth;
image_desc->Height = gif->SHeight;
}
if (!GIFReadFrame(gif, transparent_index, &gif_rect, &frame)) {
if (!ReadFrame(gif, &gif_rect, &frame)) {
goto End;
}
// Blend frame rectangle with previous canvas to compose full canvas.
// Note that 'curr_canvas' is same as 'prev_canvas' at this point.
GIFBlendFrames(&frame, &gif_rect, &curr_canvas);
if (!WebPAnimEncoderAdd(enc, &curr_canvas, frame_timestamp, &config)) {
fprintf(stderr, "%s\n", WebPAnimEncoderGetError(enc));
if (!WebPFrameCacheAddFrame(cache, &config, &gif_rect, orig_dispose,
duration, &frame)) {
fprintf(stderr, "Error! Cannot encode frame as WebP\n");
fprintf(stderr, "Error code: %d\n", frame.error_code);
}
// Update canvases.
GIFDisposeFrame(orig_dispose, &gif_rect, &prev_canvas, &curr_canvas);
GIFCopyPixels(&curr_canvas, &prev_canvas);
// Update timestamp (for next frame).
frame_timestamp += frame_duration;
err = WebPFrameCacheFlush(cache, verbose, mux);
if (err != WEBP_MUX_OK) {
fprintf(stderr, "ERROR (%s): Could not add animation frame.\n",
ErrorString(err));
goto End;
}
is_first_frame = 0;
// In GIF, graphic control extensions are optional for a frame, so we
// may not get one before reading the next frame. To handle this case,
// we reset frame properties to reasonable defaults for the next frame.
orig_dispose = GIF_DISPOSE_NONE;
frame_duration = 0;
transparent_index = GIF_INDEX_INVALID;
orig_dispose = FRAME_DISPOSE_NONE;
duration = 0;
transparent_index = -1; // Opaque frame by default.
break;
}
case EXTENSION_RECORD_TYPE: {
@ -360,17 +514,30 @@ int main(int argc, const char *argv[]) {
if (DGifGetExtension(gif, &extension, &data) == GIF_ERROR) {
goto End;
}
if (data == NULL) continue;
switch (extension) {
case COMMENT_EXT_FUNC_CODE: {
break; // Do nothing for now.
}
case GRAPHICS_EXT_FUNC_CODE: {
if (!GIFReadGraphicsExtension(data, &frame_duration, &orig_dispose,
&transparent_index)) {
goto End;
const int flags = data[1];
const int dispose = (flags >> GIF_DISPOSE_SHIFT) & GIF_DISPOSE_MASK;
const int delay = data[2] | (data[3] << 8); // In 10 ms units.
if (data[0] != 4) goto End;
duration = delay * 10; // Duration is in 1 ms units for WebP.
switch (dispose) {
case 3:
orig_dispose = FRAME_DISPOSE_RESTORE_PREVIOUS;
break;
case 2:
orig_dispose = FRAME_DISPOSE_BACKGROUND;
break;
case 1:
case 0:
default:
orig_dispose = FRAME_DISPOSE_NONE;
break;
}
transparent_index = (flags & GIF_TRANSPARENT_MASK) ? data[4] : -1;
break;
}
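// Worked example (illustrative): a graphic control block {4, 0x09, 50, 0, 7}
// gives flags=0x09, so dispose=(0x09>>2)&0x07=2 (FRAME_DISPOSE_BACKGROUND),
// the transparency bit is set so transparent_index=7, and the delay of 50
// (in 10 ms units) becomes a 500 ms frame duration.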
case PLAINTEXT_EXT_FUNC_CODE: {
@ -380,13 +547,14 @@ int main(int argc, const char *argv[]) {
if (data[0] != 11) break; // Chunk is too short
if (!memcmp(data + 1, "NETSCAPE2.0", 11) ||
!memcmp(data + 1, "ANIMEXTS1.0", 11)) {
if (!GIFReadLoopCount(gif, &data, &loop_count)) {
goto End;
}
// Recognize and parse Netscape2.0 NAB extension for loop count.
if (DGifGetExtensionNext(gif, &data) == GIF_ERROR) goto End;
if (data == NULL) goto End; // Loop count sub-block missing.
if (data[0] < 3 || data[1] != 1) break; // wrong size/marker
anim.loop_count = data[2] | (data[3] << 8);
if (verbose) {
fprintf(stderr, "Loop count: %d\n", loop_count);
fprintf(stderr, "Loop count: %d\n", anim.loop_count);
}
stored_loop_count = (loop_count != 0);
} else { // An extension containing metadata.
// We only store the first encountered chunk of each type, and
// only if requested by the user.
@ -397,8 +565,56 @@ int main(int argc, const char *argv[]) {
!stored_icc &&
!memcmp(data + 1, "ICCRGBG1012", 11);
if (is_xmp || is_icc) {
if (!GIFReadMetadata(gif, &data,
is_xmp ? &xmp_data : &icc_data)) {
const char* const fourccs[2] = { "XMP " , "ICCP" };
const char* const features[2] = { "XMP" , "ICC" };
WebPData metadata = { NULL, 0 };
// Construct metadata from sub-blocks.
// Usual case (including ICC profile): In each sub-block, the
// first byte specifies its size in bytes (0 to 255) and the
// rest of the bytes contain the data.
// Special case for XMP data: In each sub-block, the first byte
// is also part of the XMP payload. XMP in GIF also carries 257 bytes of
// trailing padding data; see the XMP specification for details.
while (1) {
WebPData prev_metadata = metadata;
WebPData subblock;
if (DGifGetExtensionNext(gif, &data) == GIF_ERROR) {
WebPDataClear(&metadata);
goto End;
}
if (data == NULL) break; // Finished.
subblock.size = is_xmp ? data[0] + 1 : data[0];
assert(subblock.size > 0);
subblock.bytes = is_xmp ? data : data + 1;
metadata.bytes =
(uint8_t*)realloc((void*)metadata.bytes,
prev_metadata.size + subblock.size);
if (metadata.bytes == NULL) {
WebPDataClear(&prev_metadata);
goto End;
}
metadata.size += subblock.size;
memcpy((void*)(metadata.bytes + prev_metadata.size),
subblock.bytes, subblock.size);
}
if (is_xmp) {
// XMP padding data is 0x01, 0xff, 0xfe ... 0x01, 0x00.
const size_t xmp_padding_size = 257;
if (metadata.size > xmp_padding_size) {
metadata.size -= xmp_padding_size;
}
}
// Add metadata chunk.
err = WebPMuxSetChunk(mux, fourccs[is_icc], &metadata, 1);
if (verbose) {
fprintf(stderr, "%s size: %d\n",
features[is_icc], (int)metadata.size);
}
WebPDataClear(&metadata);
if (err != WEBP_MUX_OK) {
fprintf(stderr, "ERROR (%s): Could not set %s chunk.\n",
ErrorString(err), features[is_icc]);
goto End;
}
if (is_icc) {
@ -432,88 +648,38 @@ int main(int argc, const char *argv[]) {
}
} while (!done);
// Last NULL frame.
if (!WebPAnimEncoderAdd(enc, NULL, frame_timestamp, NULL)) {
fprintf(stderr, "Error flushing WebP muxer.\n");
fprintf(stderr, "%s\n", WebPAnimEncoderGetError(enc));
}
if (!WebPAnimEncoderAssemble(enc, &webp_data)) {
fprintf(stderr, "%s\n", WebPAnimEncoderGetError(enc));
// Flush any pending frames.
err = WebPFrameCacheFlushAll(cache, verbose, mux);
if (err != WEBP_MUX_OK) {
fprintf(stderr, "ERROR (%s): Could not add animation frame.\n",
ErrorString(err));
goto End;
}
if (stored_loop_count || stored_icc || stored_xmp) {
// Re-mux to add loop count and/or metadata as needed.
mux = WebPMuxCreate(&webp_data, 1);
if (mux == NULL) {
fprintf(stderr, "ERROR: Could not re-mux to add loop count/metadata.\n");
goto End;
}
WebPDataClear(&webp_data);
if (stored_loop_count) { // Update loop count.
WebPMuxAnimParams new_params;
err = WebPMuxGetAnimationParams(mux, &new_params);
if (err != WEBP_MUX_OK) {
fprintf(stderr, "ERROR (%s): Could not fetch loop count.\n",
ErrorString(err));
goto End;
}
new_params.loop_count = loop_count;
err = WebPMuxSetAnimationParams(mux, &new_params);
if (err != WEBP_MUX_OK) {
fprintf(stderr, "ERROR (%s): Could not update loop count.\n",
ErrorString(err));
goto End;
}
}
if (stored_icc) { // Add ICCP chunk.
err = WebPMuxSetChunk(mux, "ICCP", &icc_data, 1);
if (verbose) {
fprintf(stderr, "ICC size: %d\n", (int)icc_data.size);
}
if (err != WEBP_MUX_OK) {
fprintf(stderr, "ERROR (%s): Could not set ICC chunk.\n",
ErrorString(err));
goto End;
}
}
if (stored_xmp) { // Add XMP chunk.
err = WebPMuxSetChunk(mux, "XMP ", &xmp_data, 1);
if (verbose) {
fprintf(stderr, "XMP size: %d\n", (int)xmp_data.size);
}
if (err != WEBP_MUX_OK) {
fprintf(stderr, "ERROR (%s): Could not set XMP chunk.\n",
ErrorString(err));
goto End;
}
}
err = WebPMuxAssemble(mux, &webp_data);
if (err != WEBP_MUX_OK) {
fprintf(stderr, "ERROR (%s): Could not assemble when re-muxing to add "
"loop count/metadata.\n", ErrorString(err));
goto End;
}
// Finish muxing
err = WebPMuxSetAnimationParams(mux, &anim);
if (err != WEBP_MUX_OK) {
fprintf(stderr, "ERROR (%s): Could not set animation parameters.\n",
ErrorString(err));
goto End;
}
err = WebPMuxAssemble(mux, &webp_data);
if (err != WEBP_MUX_OK) {
fprintf(stderr, "ERROR (%s) assembling the WebP file.\n", ErrorString(err));
goto End;
}
if (out_file != NULL) {
if (!ImgIoUtilWriteFile(out_file, webp_data.bytes, webp_data.size)) {
if (!ExUtilWriteFile(out_file, webp_data.bytes, webp_data.size)) {
fprintf(stderr, "Error writing output file: %s\n", out_file);
goto End;
}
if (!quiet) {
fprintf(stderr, "Saved output file (%d bytes): %s\n",
(int)webp_data.size, out_file);
fprintf(stderr, "Saved output file: %s\n", out_file);
}
} else {
if (!quiet) {
fprintf(stderr, "Nothing written; use -o flag to save the result "
"(%d bytes).\n", (int)webp_data.size);
fprintf(stderr, "Nothing written; use -o flag to save the result.\n");
}
}
@ -522,18 +688,14 @@ int main(int argc, const char *argv[]) {
gif_error = GIF_OK;
End:
WebPDataClear(&icc_data);
WebPDataClear(&xmp_data);
WebPMuxDelete(mux);
WebPDataClear(&webp_data);
WebPMuxDelete(mux);
WebPPictureFree(&frame);
WebPPictureFree(&curr_canvas);
WebPPictureFree(&prev_canvas);
WebPAnimEncoderDelete(enc);
WebPFrameCacheDelete(cache);
if (out != NULL && out_file != NULL) fclose(out);
if (gif_error != GIF_OK) {
GIFDisplayError(gif, gif_error);
DisplayGifError(gif, gif_error);
}
if (gif != NULL) {
#if LOCAL_GIF_PREREQ(5,1)

examples/gif2webp_util.c (new file, 1042 lines)

File diff suppressed because it is too large.

examples/gif2webp_util.h (new file, 88 lines)

@ -0,0 +1,88 @@
// Copyright 2013 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// Helper structs and methods for gif2webp tool.
//
// Author: Urvang (urvang@google.com)
#ifndef WEBP_EXAMPLES_GIF2WEBP_UTIL_H_
#define WEBP_EXAMPLES_GIF2WEBP_UTIL_H_
#include <stdlib.h>
#include "webp/mux.h"
#ifdef __cplusplus
extern "C" {
#endif
//------------------------------------------------------------------------------
// Helper utilities.
#define WEBP_UTIL_TRANSPARENT_COLOR 0x00ffffff
struct WebPPicture;
// Includes all disposal methods, even the ones not supported by WebP bitstream.
typedef enum FrameDisposeMethod {
FRAME_DISPOSE_NONE,
FRAME_DISPOSE_BACKGROUND,
FRAME_DISPOSE_RESTORE_PREVIOUS
} FrameDisposeMethod;
typedef struct {
int x_offset, y_offset, width, height;
} WebPFrameRect;
// Clear pixels in 'picture' within given 'rect' to transparent color.
void WebPUtilClearPic(struct WebPPicture* const picture,
const WebPFrameRect* const rect);
//------------------------------------------------------------------------------
// Frame cache.
typedef struct WebPFrameCache WebPFrameCache;
// Given the minimum distance between key frames 'kmin' and maximum distance
// between key frames 'kmax', returns an appropriately allocated cache object.
// If 'allow_mixed' is true, the subsequent calls to WebPFrameCacheAddFrame()
// will heuristically pick lossy or lossless compression for each frame.
// Use WebPFrameCacheDelete() to deallocate the 'cache'.
WebPFrameCache* WebPFrameCacheNew(int width, int height,
size_t kmin, size_t kmax, int allow_mixed);
// Release all the frame data from 'cache' and free 'cache'.
void WebPFrameCacheDelete(WebPFrameCache* const cache);
// Given an image described by 'frame', 'rect', 'dispose_method' and 'duration',
// optimize it for WebP, encode it and add it to 'cache'. 'rect' can be NULL.
// This takes care of frame disposal too, according to 'dispose_method'.
// Returns false in case of error (and sets frame->error_code accordingly).
int WebPFrameCacheAddFrame(WebPFrameCache* const cache,
const WebPConfig* const config,
const WebPFrameRect* const rect,
FrameDisposeMethod dispose_method, int duration,
WebPPicture* const frame);
// Flush the *ready* frames from cache and add them to 'mux'. If 'verbose' is
// true, prints the information about these frames.
WebPMuxError WebPFrameCacheFlush(WebPFrameCache* const cache, int verbose,
WebPMux* const mux);
// Similar to 'WebPFrameCacheFlushFrames()', but flushes *all* the frames.
WebPMuxError WebPFrameCacheFlushAll(WebPFrameCache* const cache, int verbose,
WebPMux* const mux);
//------------------------------------------------------------------------------
#ifdef __cplusplus
} // extern "C"
#endif
#endif // WEBP_EXAMPLES_GIF2WEBP_UTIL_H_
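The declarations above describe the whole cache API; the sketch below is a hypothetical driver loop, not part of this change, showing how the calls are meant to be sequenced. GetNextGifFrame() is a made-up stand-in for the giflib record parsing done in gif2webp.c, and the kmin/kmax values are arbitrary.

#include "./gif2webp_util.h"
#include "webp/encode.h"

// Hypothetical sketch: 'mux', 'config' and the canvas-sized 'canvas' picture
// are assumed to be initialized by the caller, as in gif2webp.c.
static int EncodeAnimation(WebPMux* const mux, const WebPConfig* const config,
                           WebPPicture* const canvas) {
  WebPFrameRect rect;
  FrameDisposeMethod dispose;
  int duration;
  int ok = 0;
  WebPFrameCache* const cache =
      WebPFrameCacheNew(canvas->width, canvas->height,
                        3 /* kmin */, 5 /* kmax */, 1 /* allow_mixed */);
  if (cache == NULL) return 0;
  while (GetNextGifFrame(canvas, &rect, &dispose, &duration)) {  // hypothetical
    // Optimize and encode this frame; disposal is handled inside the cache.
    if (!WebPFrameCacheAddFrame(cache, config, &rect, dispose, duration,
                                canvas)) {
      goto End;  // canvas->error_code holds the reason.
    }
    // Frames whose key-frame decision is already settled can be muxed now.
    if (WebPFrameCacheFlush(cache, 0 /* verbose */, mux) != WEBP_MUX_OK) {
      goto End;
    }
  }
  // Push whatever is still pending before the container is assembled.
  ok = (WebPFrameCacheFlushAll(cache, 0 /* verbose */, mux) == WEBP_MUX_OK);
 End:
  WebPFrameCacheDelete(cache);
  return ok;
}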


@ -1,410 +0,0 @@
// Copyright 2012 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// GIF decode.
#include "./gifdec.h"
#include <stdio.h>
#ifdef WEBP_HAVE_GIF
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include "webp/encode.h"
#include "webp/mux_types.h"
#define GIF_TRANSPARENT_COLOR 0x00000000u
#define GIF_WHITE_COLOR 0xffffffffu
#define GIF_TRANSPARENT_MASK 0x01
#define GIF_DISPOSE_MASK 0x07
#define GIF_DISPOSE_SHIFT 2
// from utils/utils.h
extern void WebPCopyPlane(const uint8_t* src, int src_stride,
uint8_t* dst, int dst_stride,
int width, int height);
extern void WebPCopyPixels(const WebPPicture* const src,
WebPPicture* const dst);
void GIFGetBackgroundColor(const ColorMapObject* const color_map,
int bgcolor_index, int transparent_index,
uint32_t* const bgcolor) {
if (transparent_index != GIF_INDEX_INVALID &&
bgcolor_index == transparent_index) {
*bgcolor = GIF_TRANSPARENT_COLOR; // Special case.
} else if (color_map == NULL || color_map->Colors == NULL
|| bgcolor_index >= color_map->ColorCount) {
*bgcolor = GIF_WHITE_COLOR;
fprintf(stderr,
"GIF decode warning: invalid background color index. Assuming "
"white background.\n");
} else {
const GifColorType color = color_map->Colors[bgcolor_index];
*bgcolor = (0xffu << 24)
| (color.Red << 16)
| (color.Green << 8)
| (color.Blue << 0);
}
}
int GIFReadGraphicsExtension(const GifByteType* const buf, int* const duration,
GIFDisposeMethod* const dispose,
int* const transparent_index) {
const int flags = buf[1];
const int dispose_raw = (flags >> GIF_DISPOSE_SHIFT) & GIF_DISPOSE_MASK;
const int duration_raw = buf[2] | (buf[3] << 8); // In 10 ms units.
if (buf[0] != 4) return 0;
*duration = duration_raw * 10; // Duration is in 1 ms units.
switch (dispose_raw) {
case 3:
*dispose = GIF_DISPOSE_RESTORE_PREVIOUS;
break;
case 2:
*dispose = GIF_DISPOSE_BACKGROUND;
break;
case 1:
case 0:
default:
*dispose = GIF_DISPOSE_NONE;
break;
}
*transparent_index =
(flags & GIF_TRANSPARENT_MASK) ? buf[4] : GIF_INDEX_INVALID;
return 1;
}
static int Remap(const GifFileType* const gif, const uint8_t* const src,
int len, int transparent_index, uint32_t* dst) {
int i;
const GifColorType* colors;
const ColorMapObject* const cmap =
gif->Image.ColorMap ? gif->Image.ColorMap : gif->SColorMap;
if (cmap == NULL) return 1;
if (cmap->Colors == NULL || cmap->ColorCount <= 0) return 0;
colors = cmap->Colors;
for (i = 0; i < len; ++i) {
if (src[i] == transparent_index) {
dst[i] = GIF_TRANSPARENT_COLOR;
} else if (src[i] < cmap->ColorCount) {
const GifColorType c = colors[src[i]];
dst[i] = c.Blue | (c.Green << 8) | (c.Red << 16) | (0xffu << 24);
} else {
return 0;
}
}
return 1;
}
int GIFReadFrame(GifFileType* const gif, int transparent_index,
GIFFrameRect* const gif_rect, WebPPicture* const picture) {
WebPPicture sub_image;
const GifImageDesc* const image_desc = &gif->Image;
uint32_t* dst = NULL;
uint8_t* tmp = NULL;
const GIFFrameRect rect = {
image_desc->Left, image_desc->Top, image_desc->Width, image_desc->Height
};
const uint64_t memory_needed = 4 * rect.width * (uint64_t)rect.height;
int ok = 0;
*gif_rect = rect;
if (memory_needed != (size_t)memory_needed || memory_needed > (4ULL << 32)) {
fprintf(stderr, "Image is too large (%d x %d).", rect.width, rect.height);
return 0;
}
// Use a view for the sub-picture:
if (!WebPPictureView(picture, rect.x_offset, rect.y_offset,
rect.width, rect.height, &sub_image)) {
fprintf(stderr, "Sub-image %dx%d at position %d,%d is invalid!\n",
rect.width, rect.height, rect.x_offset, rect.y_offset);
return 0;
}
dst = sub_image.argb;
tmp = (uint8_t*)malloc(rect.width * sizeof(*tmp));
if (tmp == NULL) goto End;
if (image_desc->Interlace) { // Interlaced image.
// We need 4 passes, with the following offsets and jumps.
const int interlace_offsets[] = { 0, 4, 2, 1 };
const int interlace_jumps[] = { 8, 8, 4, 2 };
int pass;
for (pass = 0; pass < 4; ++pass) {
const size_t stride = (size_t)sub_image.argb_stride;
int y = interlace_offsets[pass];
uint32_t* row = dst + y * stride;
const size_t jump = interlace_jumps[pass] * stride;
for (; y < rect.height; y += interlace_jumps[pass], row += jump) {
if (DGifGetLine(gif, tmp, rect.width) == GIF_ERROR) goto End;
if (!Remap(gif, tmp, rect.width, transparent_index, row)) goto End;
}
}
} else { // Non-interlaced image.
int y;
uint32_t* ptr = dst;
for (y = 0; y < rect.height; ++y, ptr += sub_image.argb_stride) {
if (DGifGetLine(gif, tmp, rect.width) == GIF_ERROR) goto End;
if (!Remap(gif, tmp, rect.width, transparent_index, ptr)) goto End;
}
}
ok = 1;
End:
if (!ok) picture->error_code = sub_image.error_code;
WebPPictureFree(&sub_image);
free(tmp);
return ok;
}
int GIFReadLoopCount(GifFileType* const gif, GifByteType** const buf,
int* const loop_count) {
assert(!memcmp(*buf + 1, "NETSCAPE2.0", 11) ||
!memcmp(*buf + 1, "ANIMEXTS1.0", 11));
if (DGifGetExtensionNext(gif, buf) == GIF_ERROR) {
return 0;
}
if (*buf == NULL) {
return 0; // Loop count sub-block missing.
}
if ((*buf)[0] < 3 || (*buf)[1] != 1) {
return 0; // wrong size/marker
}
*loop_count = (*buf)[2] | ((*buf)[3] << 8);
return 1;
}
int GIFReadMetadata(GifFileType* const gif, GifByteType** const buf,
WebPData* const metadata) {
const int is_xmp = !memcmp(*buf + 1, "XMP DataXMP", 11);
const int is_icc = !memcmp(*buf + 1, "ICCRGBG1012", 11);
assert(is_xmp || is_icc);
(void)is_icc; // silence unused warning.
// Construct metadata from sub-blocks.
// Usual case (including ICC profile): In each sub-block, the
// first byte specifies its size in bytes (0 to 255) and the
// rest of the bytes contain the data.
// Special case for XMP data: In each sub-block, the first byte
// is also part of the XMP payload. XMP in GIF also carries 257 bytes
// of trailing padding data. See the XMP specification for details.
while (1) {
WebPData subblock;
const uint8_t* tmp;
if (DGifGetExtensionNext(gif, buf) == GIF_ERROR) {
return 0;
}
if (*buf == NULL) break; // Finished.
subblock.size = is_xmp ? (*buf)[0] + 1 : (*buf)[0];
assert(subblock.size > 0);
subblock.bytes = is_xmp ? *buf : *buf + 1;
// Note: We store returned value in 'tmp' first, to avoid
// leaking old memory in metadata->bytes on error.
tmp = (uint8_t*)realloc((void*)metadata->bytes,
metadata->size + subblock.size);
if (tmp == NULL) {
return 0;
}
memcpy((void*)(tmp + metadata->size),
subblock.bytes, subblock.size);
metadata->bytes = tmp;
metadata->size += subblock.size;
}
if (is_xmp) {
// XMP padding data is 0x01, 0xff, 0xfe ... 0x01, 0x00.
const size_t xmp_padding_size = 257;
if (metadata->size > xmp_padding_size) {
metadata->size -= xmp_padding_size;
}
}
return 1;
}
static void ClearRectangle(WebPPicture* const picture,
int left, int top, int width, int height) {
int i, j;
const size_t stride = picture->argb_stride;
uint32_t* dst = picture->argb + top * stride + left;
for (j = 0; j < height; ++j, dst += stride) {
for (i = 0; i < width; ++i) dst[i] = GIF_TRANSPARENT_COLOR;
}
}
void GIFClearPic(WebPPicture* const pic, const GIFFrameRect* const rect) {
if (rect != NULL) {
ClearRectangle(pic, rect->x_offset, rect->y_offset,
rect->width, rect->height);
} else {
ClearRectangle(pic, 0, 0, pic->width, pic->height);
}
}
void GIFCopyPixels(const WebPPicture* const src, WebPPicture* const dst) {
WebPCopyPixels(src, dst);
}
void GIFDisposeFrame(GIFDisposeMethod dispose, const GIFFrameRect* const rect,
const WebPPicture* const prev_canvas,
WebPPicture* const curr_canvas) {
assert(rect != NULL);
if (dispose == GIF_DISPOSE_BACKGROUND) {
GIFClearPic(curr_canvas, rect);
} else if (dispose == GIF_DISPOSE_RESTORE_PREVIOUS) {
const size_t src_stride = prev_canvas->argb_stride;
const uint32_t* const src = prev_canvas->argb + rect->x_offset
+ rect->y_offset * src_stride;
const size_t dst_stride = curr_canvas->argb_stride;
uint32_t* const dst = curr_canvas->argb + rect->x_offset
+ rect->y_offset * dst_stride;
assert(prev_canvas != NULL);
WebPCopyPlane((uint8_t*)src, (int)(4 * src_stride),
(uint8_t*)dst, (int)(4 * dst_stride),
4 * rect->width, rect->height);
}
}
void GIFBlendFrames(const WebPPicture* const src,
const GIFFrameRect* const rect, WebPPicture* const dst) {
int i, j;
const size_t src_stride = src->argb_stride;
const size_t dst_stride = dst->argb_stride;
assert(src->width == dst->width && src->height == dst->height);
for (j = rect->y_offset; j < rect->y_offset + rect->height; ++j) {
for (i = rect->x_offset; i < rect->x_offset + rect->width; ++i) {
const uint32_t src_pixel = src->argb[j * src_stride + i];
const int src_alpha = src_pixel >> 24;
if (src_alpha != 0) {
dst->argb[j * dst_stride + i] = src_pixel;
}
}
}
}
void GIFDisplayError(const GifFileType* const gif, int gif_error) {
// libgif 4.2.0 has retired PrintGifError() and added GifErrorString().
#if LOCAL_GIF_PREREQ(4,2)
#if LOCAL_GIF_PREREQ(5,0)
// Static string actually, hence the const char* cast.
const char* error_str = (const char*)GifErrorString(
(gif == NULL) ? gif_error : gif->Error);
#else
const char* error_str = (const char*)GifErrorString();
(void)gif;
#endif
if (error_str == NULL) error_str = "Unknown error";
fprintf(stderr, "GIFLib Error %d: %s\n", gif_error, error_str);
#else
(void)gif;
fprintf(stderr, "GIFLib Error %d: ", gif_error);
PrintGifError();
fprintf(stderr, "\n");
#endif
}
#else // !WEBP_HAVE_GIF
static void ErrorGIFNotAvailable() {
fprintf(stderr, "GIF support not compiled. Please install the libgif-dev "
"package before building.\n");
}
void GIFGetBackgroundColor(const struct ColorMapObject* const color_map,
int bgcolor_index, int transparent_index,
uint32_t* const bgcolor) {
(void)color_map;
(void)bgcolor_index;
(void)transparent_index;
(void)bgcolor;
ErrorGIFNotAvailable();
}
int GIFReadGraphicsExtension(const GifByteType* const data, int* const duration,
GIFDisposeMethod* const dispose,
int* const transparent_index) {
(void)data;
(void)duration;
(void)dispose;
(void)transparent_index;
ErrorGIFNotAvailable();
return 0;
}
int GIFReadFrame(struct GifFileType* const gif, int transparent_index,
GIFFrameRect* const gif_rect,
struct WebPPicture* const picture) {
(void)gif;
(void)transparent_index;
(void)gif_rect;
(void)picture;
ErrorGIFNotAvailable();
return 0;
}
int GIFReadLoopCount(struct GifFileType* const gif, GifByteType** const buf,
int* const loop_count) {
(void)gif;
(void)buf;
(void)loop_count;
ErrorGIFNotAvailable();
return 0;
}
int GIFReadMetadata(struct GifFileType* const gif, GifByteType** const buf,
struct WebPData* const metadata) {
(void)gif;
(void)buf;
(void)metadata;
ErrorGIFNotAvailable();
return 0;
}
void GIFDisposeFrame(GIFDisposeMethod dispose, const GIFFrameRect* const rect,
const struct WebPPicture* const prev_canvas,
struct WebPPicture* const curr_canvas) {
(void)dispose;
(void)rect;
(void)prev_canvas;
(void)curr_canvas;
ErrorGIFNotAvailable();
}
void GIFBlendFrames(const struct WebPPicture* const src,
const GIFFrameRect* const rect,
struct WebPPicture* const dst) {
(void)src;
(void)rect;
(void)dst;
ErrorGIFNotAvailable();
}
void GIFDisplayError(const struct GifFileType* const gif, int gif_error) {
(void)gif;
(void)gif_error;
ErrorGIFNotAvailable();
}
void GIFClearPic(struct WebPPicture* const pic,
const GIFFrameRect* const rect) {
(void)pic;
(void)rect;
ErrorGIFNotAvailable();
}
void GIFCopyPixels(const struct WebPPicture* const src,
struct WebPPicture* const dst) {
(void)src;
(void)dst;
ErrorGIFNotAvailable();
}
#endif // WEBP_HAVE_GIF
// -----------------------------------------------------------------------------
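The loop-count and metadata readers above are driven from giflib's extension loop. The sketch below is hypothetical and not part of this change; it mirrors the dispatch done in gif2webp.c once DGifGetRecordType() has reported an extension record (the graphics-control extension is handed to GIFReadGraphicsExtension() from the same place). It assumes WEBP_HAVE_GIF and zero-initialized WebPData holders.

#include <string.h>
#include <gif_lib.h>
#include "./gifdec.h"
#include "webp/mux_types.h"

static int HandleGifExtension(GifFileType* const gif, int* const loop_count,
                              WebPData* const icc_data,
                              WebPData* const xmp_data) {
  int ext_code;
  GifByteType* ext = NULL;
  if (DGifGetExtension(gif, &ext_code, &ext) == GIF_ERROR) return 0;
  if (ext != NULL && ext_code == APPLICATION_EXT_FUNC_CODE && ext[0] == 11) {
    if (!memcmp(ext + 1, "NETSCAPE2.0", 11) ||
        !memcmp(ext + 1, "ANIMEXTS1.0", 11)) {
      if (!GIFReadLoopCount(gif, &ext, loop_count)) return 0;
    } else if (!memcmp(ext + 1, "XMP DataXMP", 11)) {
      if (!GIFReadMetadata(gif, &ext, xmp_data)) return 0;
    } else if (!memcmp(ext + 1, "ICCRGBG1012", 11)) {
      if (!GIFReadMetadata(gif, &ext, icc_data)) return 0;
    }
  }
  // Drain any remaining sub-blocks of this extension record.
  while (ext != NULL) {
    if (DGifGetExtensionNext(gif, &ext) == GIF_ERROR) return 0;
  }
  return 1;
}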


@ -1,116 +0,0 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// GIF decode.
#ifndef WEBP_EXAMPLES_GIFDEC_H_
#define WEBP_EXAMPLES_GIFDEC_H_
#include <stdio.h>
#include "webp/types.h"
#ifdef HAVE_CONFIG_H
#include "webp/config.h"
#endif
#ifdef WEBP_HAVE_GIF
#include <gif_lib.h>
#endif
#ifdef __cplusplus
extern "C" {
#endif
// GIFLIB_MAJOR is only defined in libgif >= 4.2.0.
#if defined(GIFLIB_MAJOR) && defined(GIFLIB_MINOR)
# define LOCAL_GIF_VERSION ((GIFLIB_MAJOR << 8) | GIFLIB_MINOR)
# define LOCAL_GIF_PREREQ(maj, min) \
(LOCAL_GIF_VERSION >= (((maj) << 8) | (min)))
#else
# define LOCAL_GIF_VERSION 0
# define LOCAL_GIF_PREREQ(maj, min) 0
#endif
#define GIF_INDEX_INVALID (-1)
typedef enum GIFDisposeMethod {
GIF_DISPOSE_NONE,
GIF_DISPOSE_BACKGROUND,
GIF_DISPOSE_RESTORE_PREVIOUS
} GIFDisposeMethod;
typedef struct {
int x_offset, y_offset, width, height;
} GIFFrameRect;
struct WebPData;
struct WebPPicture;
#ifndef WEBP_HAVE_GIF
struct ColorMapObject;
struct GifFileType;
typedef unsigned char GifByteType;
#endif
// Given the index of background color and transparent color, returns the
// corresponding background color (in BGRA format) in 'bgcolor'.
void GIFGetBackgroundColor(const struct ColorMapObject* const color_map,
int bgcolor_index, int transparent_index,
uint32_t* const bgcolor);
// Parses the given graphics extension data to get frame duration (in 1ms
// units), dispose method and transparent color index.
// Returns true on success.
int GIFReadGraphicsExtension(const GifByteType* const buf, int* const duration,
GIFDisposeMethod* const dispose,
int* const transparent_index);
// Reads the next GIF frame from 'gif' into 'picture'. Also, returns the GIF
// frame dimensions and offsets in 'rect'.
// Returns true on success.
int GIFReadFrame(struct GifFileType* const gif, int transparent_index,
GIFFrameRect* const gif_rect,
struct WebPPicture* const picture);
// Parses loop count from the given Netscape extension data.
int GIFReadLoopCount(struct GifFileType* const gif, GifByteType** const buf,
int* const loop_count);
// Parses the given ICC or XMP extension data and stores it into 'metadata'.
// Returns true on success.
int GIFReadMetadata(struct GifFileType* const gif, GifByteType** const buf,
struct WebPData* const metadata);
// Dispose the pixels within 'rect' of 'curr_canvas' based on 'dispose' method
// and 'prev_canvas'.
void GIFDisposeFrame(GIFDisposeMethod dispose, const GIFFrameRect* const rect,
const struct WebPPicture* const prev_canvas,
struct WebPPicture* const curr_canvas);
// Given 'src' picture and its frame rectangle 'rect', blend it into 'dst'.
void GIFBlendFrames(const struct WebPPicture* const src,
const GIFFrameRect* const rect,
struct WebPPicture* const dst);
// Prints an error string based on 'gif_error'.
void GIFDisplayError(const struct GifFileType* const gif, int gif_error);
// In the given 'pic', clear the pixels in 'rect' to transparent color.
void GIFClearPic(struct WebPPicture* const pic, const GIFFrameRect* const rect);
// Copy pixels from 'src' to 'dst' honoring strides. 'src' and 'dst' are assumed
// to be already allocated.
void GIFCopyPixels(const struct WebPPicture* const src,
struct WebPPicture* const dst);
#ifdef __cplusplus
} // extern "C"
#endif
#endif // WEBP_EXAMPLES_GIFDEC_H_
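Taken together, these helpers implement the standard GIF canvas model. The sketch below is hypothetical (not part of this change) and roughly follows the per-frame sequence used by gif2webp.c; encoding of 'curr_canvas' is elided and all three pictures are assumed to be canvas-sized, allocated ARGB pictures.

#include <gif_lib.h>
#include "./gifdec.h"
#include "webp/encode.h"

// 'frame' receives the decoded sub-frame, 'curr_canvas' is what would be
// encoded, 'prev_canvas' keeps a snapshot so RESTORE_PREVIOUS can be honored.
static int ProcessOneGifFrame(GifFileType* const gif, int transparent_index,
                              GIFDisposeMethod dispose,
                              WebPPicture* const frame,
                              WebPPicture* const curr_canvas,
                              WebPPicture* const prev_canvas) {
  GIFFrameRect rect;
  if (!GIFReadFrame(gif, transparent_index, &rect, frame)) return 0;
  GIFBlendFrames(frame, &rect, curr_canvas);   // compose onto the canvas
  // ... hand 'curr_canvas' to the encoder here ...
  GIFDisposeFrame(dispose, &rect, prev_canvas, curr_canvas);
  GIFCopyPixels(curr_canvas, prev_canvas);     // snapshot for the next frame
  return 1;
}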


@ -1,300 +0,0 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// generate an animated WebP out of a sequence of images
// (PNG, JPEG, ...)
//
// Example usage:
// img2webp -o out.webp -q 40 -mixed -duration 40 input??.png
//
// Author: skal@google.com (Pascal Massimino)
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifdef HAVE_CONFIG_H
#include "webp/config.h"
#endif
#include "../examples/example_util.h"
#include "../imageio/image_dec.h"
#include "../imageio/imageio_util.h"
#include "./stopwatch.h"
#include "webp/encode.h"
#include "webp/mux.h"
//------------------------------------------------------------------------------
static void Help(void) {
printf("Usage:\n\n");
printf(" img2webp [file-level options] [image files...] "
"[per-frame options...]\n");
printf("\n");
printf("File-level options (only used at the start of compression):\n");
printf(" -min_size ............ minimize size\n");
printf(" -loop <int> .......... loop count (default: 0, = infinite loop)\n");
printf(" -kmax <int> .......... maximum number of frame between key-frames\n"
" (0=only keyframes)\n");
printf(" -kmin <int> .......... minimum number of frame between key-frames\n"
" (0=disable key-frames altogether)\n");
printf(" -mixed ............... use mixed lossy/lossless automatic mode\n");
printf(" -v ................... verbose mode\n");
printf(" -h ................... this help\n");
printf("\n");
printf("Per-frame options (only used for subsequent images input):\n");
printf(" -d <int> ............. frame duration in ms (default: 100)\n");
printf(" -lossless ........... use lossless mode (default)\n");
printf(" -lossy ... ........... use lossy mode\n");
printf(" -q <float> ........... quality\n");
printf(" -m <int> ............. method to use\n");
printf("\n");
printf("example: img2webp -loop 2 in0.png -lossy in1.jpg\n"
" -d 80 in2.tiff -o out.webp\n");
}
//------------------------------------------------------------------------------
static int ReadImage(const char filename[], WebPPicture* const pic) {
const uint8_t* data = NULL;
size_t data_size = 0;
WebPImageReader reader;
int ok;
#ifdef HAVE_WINCODEC_H
// Try to decode the file using WIC falling back to the other readers for
// e.g., WebP.
ok = ReadPictureWithWIC(filename, pic, 1, NULL);
if (ok) return 1;
#endif
if (!ImgIoUtilReadFile(filename, &data, &data_size)) return 0;
reader = WebPGuessImageReader(data, data_size);
ok = reader(data, data_size, pic, 1, NULL);
free((void*)data);
return ok;
}
static int SetLoopCount(int loop_count, WebPData* const webp_data) {
int ok = 1;
WebPMuxError err;
uint32_t features;
WebPMuxAnimParams new_params;
WebPMux* const mux = WebPMuxCreate(webp_data, 1);
if (mux == NULL) return 0;
err = WebPMuxGetFeatures(mux, &features);
ok = (err == WEBP_MUX_OK);
if (!ok || !(features & ANIMATION_FLAG)) goto End;
err = WebPMuxGetAnimationParams(mux, &new_params);
ok = (err == WEBP_MUX_OK);
if (ok) {
new_params.loop_count = loop_count;
err = WebPMuxSetAnimationParams(mux, &new_params);
ok = (err == WEBP_MUX_OK);
}
if (ok) {
WebPDataClear(webp_data);
err = WebPMuxAssemble(mux, webp_data);
ok = (err == WEBP_MUX_OK);
}
End:
WebPMuxDelete(mux);
if (!ok) {
fprintf(stderr, "Error during loop-count setting\n");
}
return ok;
}
//------------------------------------------------------------------------------
int main(int argc, char* argv[]) {
const char* output = NULL;
WebPAnimEncoder* enc = NULL;
int verbose = 0;
int pic_num = 0;
int duration = 100;
int timestamp_ms = 0;
int ok = 1;
int loop_count = 0;
int width = 0, height = 0;
WebPAnimEncoderOptions anim_config;
WebPConfig config;
WebPPicture pic;
WebPData webp_data;
int c;
int have_input = 0;
WebPDataInit(&webp_data);
if (!WebPAnimEncoderOptionsInit(&anim_config) ||
!WebPConfigInit(&config) ||
!WebPPictureInit(&pic)) {
fprintf(stderr, "Library version mismatch!\n");
return 1;
}
// 1st pass of option parsing
for (c = 1; ok && c < argc; ++c) {
if (argv[c][0] == '-') {
int parse_error = 0;
if (!strcmp(argv[c], "-o") && c + 1 < argc) {
argv[c] = NULL;
output = argv[++c];
} else if (!strcmp(argv[c], "-kmin") && c + 1 < argc) {
argv[c] = NULL;
anim_config.kmin = ExUtilGetInt(argv[++c], 0, &parse_error);
} else if (!strcmp(argv[c], "-kmax") && c + 1 < argc) {
argv[c] = NULL;
anim_config.kmax = ExUtilGetInt(argv[++c], 0, &parse_error);
} else if (!strcmp(argv[c], "-loop") && c + 1 < argc) {
argv[c] = NULL;
loop_count = ExUtilGetInt(argv[++c], 0, &parse_error);
if (loop_count < 0) {
fprintf(stderr, "Invalid non-positive loop-count (%d)\n", loop_count);
parse_error = 1;
}
} else if (!strcmp(argv[c], "-min_size")) {
anim_config.minimize_size = 1;
} else if (!strcmp(argv[c], "-mixed")) {
anim_config.allow_mixed = 1;
config.lossless = 0;
} else if (!strcmp(argv[c], "-v")) {
verbose = 1;
} else if (!strcmp(argv[c], "-h") || !strcmp(argv[c], "-help")) {
Help();
return 0;
} else {
continue;
}
ok = !parse_error;
if (!ok) goto End;
argv[c] = NULL; // mark option as 'parsed' during 1st pass
} else {
have_input |= 1;
}
}
if (!have_input) {
fprintf(stderr, "No input file(s) for generating animation!\n");
return 0;
}
// image-reading pass
pic_num = 0;
config.lossless = 1;
for (c = 1; ok && c < argc; ++c) {
if (argv[c] == NULL) continue;
if (argv[c][0] == '-') { // parse local options
int parse_error = 0;
if (!strcmp(argv[c], "-lossy")) {
if (!anim_config.allow_mixed) config.lossless = 0;
} else if (!strcmp(argv[c], "-lossless")) {
if (!anim_config.allow_mixed) config.lossless = 1;
} else if (!strcmp(argv[c], "-q") && c + 1 < argc) {
config.quality = ExUtilGetFloat(argv[++c], &parse_error);
} else if (!strcmp(argv[c], "-m") && c + 1 < argc) {
config.method = ExUtilGetInt(argv[++c], 0, &parse_error);
} else if (!strcmp(argv[c], "-d") && c + 1 < argc) {
duration = ExUtilGetInt(argv[++c], 0, &parse_error);
if (duration <= 0) {
fprintf(stderr, "Invalid negative duration (%d)\n", duration);
parse_error = 1;
}
} else {
parse_error = 1; // shouldn't be here.
fprintf(stderr, "Unknown option [%s]\n", argv[c]);
}
ok = !parse_error;
if (!ok) goto End;
continue;
}
if (ok) {
ok = WebPValidateConfig(&config);
if (!ok) {
fprintf(stderr, "Invalid configuration.\n");
goto End;
}
}
// read next input image
pic.use_argb = 1;
ok = ReadImage(argv[c], &pic);
if (!ok) goto End;
if (enc == NULL) {
width = pic.width;
height = pic.height;
enc = WebPAnimEncoderNew(width, height, &anim_config);
ok = (enc != NULL);
if (!ok) {
fprintf(stderr, "Could not create WebPAnimEncoder object.\n");
}
}
if (ok) {
ok = (width == pic.width && height == pic.height);
if (!ok) {
fprintf(stderr, "Frame #%d dimension mismatched! "
"Got %d x %d. Was expecting %d x %d.\n",
pic_num, pic.width, pic.height, width, height);
}
}
if (ok) {
ok = WebPAnimEncoderAdd(enc, &pic, timestamp_ms, &config);
if (!ok) {
fprintf(stderr, "Error while adding frame #%d\n", pic_num);
}
}
WebPPictureFree(&pic);
if (!ok) goto End;
if (verbose) {
fprintf(stderr, "Added frame #%3d at time %4d (file: %s)\n",
pic_num, timestamp_ms, argv[c]);
}
timestamp_ms += duration;
++pic_num;
}
// add a last fake frame to signal the last duration
ok = ok && WebPAnimEncoderAdd(enc, NULL, timestamp_ms, NULL);
ok = ok && WebPAnimEncoderAssemble(enc, &webp_data);
if (!ok) {
fprintf(stderr, "Error during final animation assembly.\n");
}
End:
// free resources
WebPAnimEncoderDelete(enc);
if (ok && loop_count > 0) { // Re-mux to add loop count.
ok = SetLoopCount(loop_count, &webp_data);
}
if (ok) {
if (output != NULL) {
ok = ImgIoUtilWriteFile(output, webp_data.bytes, webp_data.size);
if (ok) fprintf(stderr, "output file: %s ", output);
} else {
fprintf(stderr, "[no output file specified] ");
}
}
if (ok) {
fprintf(stderr, "[%d frames, %u bytes].\n",
pic_num, (unsigned int)webp_data.size);
}
WebPDataClear(&webp_data);
return ok ? 0 : 1;
}


@ -19,13 +19,11 @@
#ifdef WEBP_HAVE_JPEG
#include <jpeglib.h>
#include <jerror.h>
#include <setjmp.h>
#include <stdlib.h>
#include <string.h>
#include "webp/encode.h"
#include "./imageio_util.h"
#include "./metadata.h"
// -----------------------------------------------------------------------------
@ -210,68 +208,13 @@ static void my_error_exit(j_common_ptr dinfo) {
longjmp(myerr->setjmp_buffer, 1);
}
typedef struct {
struct jpeg_source_mgr pub;
const uint8_t* data;
size_t data_size;
} JPEGReadContext;
static void ContextInit(j_decompress_ptr cinfo) {
JPEGReadContext* const ctx = (JPEGReadContext*)cinfo->src;
ctx->pub.next_input_byte = ctx->data;
ctx->pub.bytes_in_buffer = ctx->data_size;
}
static boolean ContextFill(j_decompress_ptr cinfo) {
// we shouldn't get here.
ERREXIT(cinfo, JERR_FILE_READ);
return FALSE;
}
static void ContextSkip(j_decompress_ptr cinfo, long jump_size) {
JPEGReadContext* const ctx = (JPEGReadContext*)cinfo->src;
size_t jump = (size_t)jump_size;
if (jump > ctx->pub.bytes_in_buffer) { // Don't overflow the buffer.
jump = ctx->pub.bytes_in_buffer;
}
ctx->pub.bytes_in_buffer -= jump;
ctx->pub.next_input_byte += jump;
}
static void ContextTerm(j_decompress_ptr cinfo) {
(void)cinfo;
}
static void ContextSetup(volatile struct jpeg_decompress_struct* const cinfo,
JPEGReadContext* const ctx) {
cinfo->src = (struct jpeg_source_mgr*)ctx;
ctx->pub.init_source = ContextInit;
ctx->pub.fill_input_buffer = ContextFill;
ctx->pub.skip_input_data = ContextSkip;
ctx->pub.resync_to_restart = jpeg_resync_to_restart;
ctx->pub.term_source = ContextTerm;
ctx->pub.bytes_in_buffer = 0;
ctx->pub.next_input_byte = NULL;
}
int ReadJPEG(const uint8_t* const data, size_t data_size,
WebPPicture* const pic, int keep_alpha,
Metadata* const metadata) {
volatile int ok = 0;
int width, height;
int64_t stride;
int ReadJPEG(FILE* in_file, WebPPicture* const pic, Metadata* const metadata) {
int ok = 0;
int stride, width, height;
volatile struct jpeg_decompress_struct dinfo;
struct my_error_mgr jerr;
uint8_t* volatile rgb = NULL;
JSAMPROW buffer[1];
JPEGReadContext ctx;
if (data == NULL || data_size == 0 || pic == NULL) return 0;
(void)keep_alpha;
memset(&ctx, 0, sizeof(ctx));
ctx.data = data;
ctx.data_size = data_size;
memset((j_decompress_ptr)&dinfo, 0, sizeof(dinfo)); // for setjmp sanity
dinfo.err = jpeg_std_error(&jerr.pub);
@ -285,7 +228,7 @@ int ReadJPEG(const uint8_t* const data, size_t data_size,
}
jpeg_create_decompress((j_decompress_ptr)&dinfo);
ContextSetup(&dinfo, &ctx);
jpeg_stdio_src((j_decompress_ptr)&dinfo, in_file);
if (metadata != NULL) SaveMetadataMarkers((j_decompress_ptr)&dinfo);
jpeg_read_header((j_decompress_ptr)&dinfo, TRUE);
@ -300,14 +243,9 @@ int ReadJPEG(const uint8_t* const data, size_t data_size,
width = dinfo.output_width;
height = dinfo.output_height;
stride = (int64_t)dinfo.output_width * dinfo.output_components * sizeof(*rgb);
stride = dinfo.output_width * dinfo.output_components * sizeof(*rgb);
if (stride != (int)stride ||
!ImgIoUtilCheckSizeArgumentsOverflow(stride, height)) {
goto End;
}
rgb = (uint8_t*)malloc((size_t)stride * height);
rgb = (uint8_t*)malloc(stride * height);
if (rgb == NULL) {
goto End;
}
@ -334,7 +272,8 @@ int ReadJPEG(const uint8_t* const data, size_t data_size,
// WebP conversion.
pic->width = width;
pic->height = height;
ok = WebPPictureImportRGB(pic, rgb, (int)stride);
pic->use_argb = 1; // store raw RGB samples
ok = WebPPictureImportRGB(pic, rgb, stride);
if (!ok) goto Error;
End:
@ -342,13 +281,10 @@ int ReadJPEG(const uint8_t* const data, size_t data_size,
return ok;
}
#else // !WEBP_HAVE_JPEG
int ReadJPEG(const uint8_t* const data, size_t data_size,
struct WebPPicture* const pic, int keep_alpha,
int ReadJPEG(FILE* in_file, struct WebPPicture* const pic,
struct Metadata* const metadata) {
(void)data;
(void)data_size;
(void)in_file;
(void)pic;
(void)keep_alpha;
(void)metadata;
fprintf(stderr, "JPEG support not compiled. Please install the libjpeg "
"development package before building.\n");


@ -9,9 +9,10 @@
//
// JPEG decode.
#ifndef WEBP_IMAGEIO_JPEGDEC_H_
#define WEBP_IMAGEIO_JPEGDEC_H_
#ifndef WEBP_EXAMPLES_JPEGDEC_H_
#define WEBP_EXAMPLES_JPEGDEC_H_
#include <stdio.h>
#include "webp/types.h"
#ifdef __cplusplus
@ -21,17 +22,14 @@ extern "C" {
struct Metadata;
struct WebPPicture;
// Reads a JPEG from 'data', returning the decoded output in 'pic'.
// The output is RGB or YUV depending on pic->use_argb value.
// Reads a JPEG from 'in_file', returning the decoded output in 'pic'.
// The output is RGB.
// Returns true on success.
// 'keep_alpha' has no effect, but is kept for coherence with other signatures
// for image readers.
int ReadJPEG(const uint8_t* const data, size_t data_size,
struct WebPPicture* const pic, int keep_alpha,
int ReadJPEG(FILE* in_file, struct WebPPicture* const pic,
struct Metadata* const metadata);
#ifdef __cplusplus
} // extern "C"
#endif
#endif // WEBP_IMAGEIO_JPEGDEC_H_
#endif // WEBP_EXAMPLES_JPEGDEC_H_
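Both signatures of ReadJPEG() appear in the diff above. The sketch below is hypothetical and not part of this change; it assumes the in-memory variant together with ImgIoUtilReadFile(), and the include paths are guesses.

#include <stdlib.h>
#include "./jpegdec.h"
#include "./imageio_util.h"   // assumed location of ImgIoUtilReadFile()
#include "webp/encode.h"

static int LoadJpegIntoPicture(const char* const path, WebPPicture* const pic) {
  const uint8_t* data = NULL;
  size_t data_size = 0;
  int ok = 0;
  if (!WebPPictureInit(pic)) return 0;             // library version check
  if (!ImgIoUtilReadFile(path, &data, &data_size)) return 0;
  pic->use_argb = 1;                               // keep samples as ARGB
  // JPEG has no alpha, so keep_alpha is a no-op; metadata may be NULL.
  ok = ReadJPEG(data, data_size, pic, 1, NULL);
  free((void*)data);   // ReadJPEG() does not take ownership of the buffer.
  return ok;
}

The ReadPNG() and ReadTIFF() entry points shown further below can be driven the same way in their in-memory forms.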


@ -10,8 +10,8 @@
// Metadata types and functions.
//
#ifndef WEBP_IMAGEIO_METADATA_H_
#define WEBP_IMAGEIO_METADATA_H_
#ifndef WEBP_EXAMPLES_METADATA_H_
#define WEBP_EXAMPLES_METADATA_H_
#include "webp/types.h"
@ -44,4 +44,4 @@ int MetadataCopy(const char* metadata, size_t metadata_len,
} // extern "C"
#endif
#endif // WEBP_IMAGEIO_METADATA_H_
#endif // WEBP_EXAMPLES_METADATA_H_


@ -24,7 +24,6 @@
#include <string.h>
#include "webp/encode.h"
#include "./imageio_util.h"
#include "./metadata.h"
static void PNGAPI error_function(png_structp png, png_const_charp error) {
@ -132,8 +131,8 @@ static int ExtractMetadataFromPNG(png_structp png,
for (p = 0; p < 2; ++p) {
png_infop const info = (p == 0) ? head_info : end_info;
png_textp text = NULL;
const png_uint_32 num = png_get_text(png, info, &text, NULL);
png_uint_32 i;
const int num = png_get_text(png, info, &text, NULL);
int i;
// Look for EXIF / XMP metadata.
for (i = 0; i < num; ++i, ++text) {
int j;
@ -189,44 +188,24 @@ static int ExtractMetadataFromPNG(png_structp png,
return 1;
}
typedef struct {
const uint8_t* data;
size_t data_size;
png_size_t offset;
} PNGReadContext;
static void ReadFunc(png_structp png_ptr, png_bytep data, png_size_t length) {
PNGReadContext* const ctx = (PNGReadContext*)png_get_io_ptr(png_ptr);
if (ctx->data_size - ctx->offset < length) {
png_error(png_ptr, "ReadFunc: invalid read length (overflow)!");
}
memcpy(data, ctx->data + ctx->offset, length);
ctx->offset += length;
}
int ReadPNG(const uint8_t* const data, size_t data_size,
struct WebPPicture* const pic,
int keep_alpha, struct Metadata* const metadata) {
volatile png_structp png = NULL;
int ReadPNG(FILE* in_file, WebPPicture* const pic, int keep_alpha,
Metadata* const metadata) {
volatile png_structp png;
volatile png_infop info = NULL;
volatile png_infop end_info = NULL;
PNGReadContext context = { NULL, 0, 0 };
int color_type, bit_depth, interlaced;
int has_alpha;
int num_passes;
int p;
volatile int ok = 0;
int ok = 0;
png_uint_32 width, height, y;
int64_t stride;
int stride;
uint8_t* volatile rgb = NULL;
if (data == NULL || data_size == 0 || pic == NULL) return 0;
context.data = data;
context.data_size = data_size;
png = png_create_read_struct(PNG_LIBPNG_VER_STRING, 0, 0, 0);
if (png == NULL) goto End;
if (png == NULL) {
goto End;
}
png_set_error_fn(png, 0, error_function, NULL);
if (setjmp(png_jmpbuf(png))) {
@ -240,7 +219,7 @@ int ReadPNG(const uint8_t* const data, size_t data_size,
end_info = png_create_info_struct(png);
if (end_info == NULL) goto Error;
png_set_read_fn(png, &context, ReadFunc);
png_init_io(png, in_file);
png_read_info(png, info);
if (!png_get_IHDR(png, info,
&width, &height, &bit_depth, &color_type, &interlaced,
@ -272,20 +251,13 @@ int ReadPNG(const uint8_t* const data, size_t data_size,
num_passes = png_set_interlace_handling(png);
png_read_update_info(png, info);
stride = (int64_t)(has_alpha ? 4 : 3) * width * sizeof(*rgb);
if (stride != (int)stride ||
!ImgIoUtilCheckSizeArgumentsOverflow(stride, height)) {
goto Error;
}
rgb = (uint8_t*)malloc((size_t)stride * height);
stride = (has_alpha ? 4 : 3) * width * sizeof(*rgb);
rgb = (uint8_t*)malloc(stride * height);
if (rgb == NULL) goto Error;
for (p = 0; p < num_passes; ++p) {
png_bytep row = rgb;
for (y = 0; y < height; ++y) {
png_bytep row = (png_bytep)(rgb + y * stride);
png_read_rows(png, &row, NULL, 1);
row += stride;
}
}
png_read_end(png, end_info);
@ -296,10 +268,11 @@ int ReadPNG(const uint8_t* const data, size_t data_size,
goto Error;
}
pic->width = (int)width;
pic->height = (int)height;
ok = has_alpha ? WebPPictureImportRGBA(pic, rgb, (int)stride)
: WebPPictureImportRGB(pic, rgb, (int)stride);
pic->width = width;
pic->height = height;
pic->use_argb = 1;
ok = has_alpha ? WebPPictureImportRGBA(pic, rgb, stride)
: WebPPictureImportRGB(pic, rgb, stride);
if (!ok) {
goto Error;
@ -314,11 +287,9 @@ int ReadPNG(const uint8_t* const data, size_t data_size,
return ok;
}
#else // !WEBP_HAVE_PNG
int ReadPNG(const uint8_t* const data, size_t data_size,
struct WebPPicture* const pic,
int keep_alpha, struct Metadata* const metadata) {
(void)data;
(void)data_size;
int ReadPNG(FILE* in_file, struct WebPPicture* const pic, int keep_alpha,
struct Metadata* const metadata) {
(void)in_file;
(void)pic;
(void)keep_alpha;
(void)metadata;


@ -9,10 +9,10 @@
//
// PNG decode.
#ifndef WEBP_IMAGEIO_PNGDEC_H_
#define WEBP_IMAGEIO_PNGDEC_H_
#ifndef WEBP_EXAMPLES_PNGDEC_H_
#define WEBP_EXAMPLES_PNGDEC_H_
#include "webp/types.h"
#include <stdio.h>
#ifdef __cplusplus
extern "C" {
@ -21,17 +21,15 @@ extern "C" {
struct Metadata;
struct WebPPicture;
// Reads a PNG from 'data', returning the decoded output in 'pic'.
// Output is RGBA or YUVA, depending on pic->use_argb value.
// Reads a PNG from 'in_file', returning the decoded output in 'pic'.
// If 'keep_alpha' is true and the PNG has an alpha channel, the output is RGBA
// or YUVA. Otherwise, alpha channel is dropped and output is RGB or YUV.
// otherwise it will be RGB.
// Returns true on success.
int ReadPNG(const uint8_t* const data, size_t data_size,
struct WebPPicture* const pic,
int keep_alpha, struct Metadata* const metadata);
int ReadPNG(FILE* in_file, struct WebPPicture* const pic, int keep_alpha,
struct Metadata* const metadata);
#ifdef __cplusplus
} // extern "C"
#endif
#endif // WEBP_IMAGEIO_PNGDEC_H_
#endif // WEBP_EXAMPLES_PNGDEC_H_


@ -16,13 +16,11 @@
#endif
#include <stdio.h>
#include <string.h>
#ifdef WEBP_HAVE_TIFF
#include <tiffio.h>
#include "webp/encode.h"
#include "./imageio_util.h"
#include "./metadata.h"
static const struct {
@ -65,75 +63,17 @@ static int ExtractMetadataFromTIFF(TIFF* const tif, Metadata* const metadata) {
return 1;
}
// Ad-hoc structure to supply read-from-memory functionalities.
typedef struct {
const uint8_t* data;
toff_t size;
toff_t pos;
} MyData;
static int MyClose(thandle_t opaque) {
(void)opaque;
return 0;
}
static toff_t MySize(thandle_t opaque) {
const MyData* const my_data = (MyData*)opaque;
return my_data->size;
}
static toff_t MySeek(thandle_t opaque, toff_t offset, int whence) {
MyData* const my_data = (MyData*)opaque;
offset += (whence == SEEK_CUR) ? my_data->pos
: (whence == SEEK_SET) ? 0
: my_data->size;
if (offset > my_data->size) return (toff_t)-1;
my_data->pos = offset;
return offset;
}
static int MyMapFile(thandle_t opaque, void** base, toff_t* size) {
(void)opaque;
(void)base;
(void)size;
return 0;
}
static void MyUnmapFile(thandle_t opaque, void* base, toff_t size) {
(void)opaque;
(void)base;
(void)size;
}
static tsize_t MyRead(thandle_t opaque, void* dst, tsize_t size) {
MyData* const my_data = (MyData*)opaque;
if (my_data->pos + size > my_data->size) {
size = my_data->size - my_data->pos;
}
if (size > 0) {
memcpy(dst, my_data->data + my_data->pos, size);
my_data->pos += size;
}
return size;
}
int ReadTIFF(const uint8_t* const data, size_t data_size,
int ReadTIFF(const char* const filename,
WebPPicture* const pic, int keep_alpha,
Metadata* const metadata) {
MyData my_data = { data, (toff_t)data_size, 0 };
TIFF* tif;
TIFF* const tif = TIFFOpen(filename, "r");
uint32 width, height;
uint32* raster;
int64_t alloc_size;
int ok = 0;
tdir_t dircount;
if (data == NULL || data_size == 0 || pic == NULL) return 0;
tif = TIFFClientOpen("Memory", "r", &my_data,
MyRead, MyRead, MySeek, MyClose,
MySize, MyMapFile, MyUnmapFile);
if (tif == NULL) {
fprintf(stderr, "Error! Cannot parse TIFF file\n");
fprintf(stderr, "Error! Cannot open TIFF file '%s'\n", filename);
return 0;
}
@ -147,18 +87,9 @@ int ReadTIFF(const uint8_t* const data, size_t data_size,
if (!(TIFFGetField(tif, TIFFTAG_IMAGEWIDTH, &width) &&
TIFFGetField(tif, TIFFTAG_IMAGELENGTH, &height))) {
fprintf(stderr, "Error! Cannot retrieve TIFF image dimensions.\n");
goto End;
return 0;
}
if (!ImgIoUtilCheckSizeArgumentsOverflow((uint64_t)width * height,
sizeof(*raster))) {
goto End;
}
// _TIFFmalloc uses a signed type for size.
alloc_size = (int64_t)((uint64_t)width * height * sizeof(*raster));
if (alloc_size < 0 || alloc_size != (tsize_t)alloc_size) goto End;
raster = (uint32*)_TIFFmalloc((tsize_t)alloc_size);
raster = (uint32*)_TIFFmalloc(width * height * sizeof(*raster));
if (raster != NULL) {
if (TIFFReadRGBAImageOriented(tif, width, height, raster,
ORIENTATION_TOPLEFT, 1)) {
@ -169,6 +100,7 @@ int ReadTIFF(const uint8_t* const data, size_t data_size,
#ifdef WORDS_BIGENDIAN
TIFFSwabArrayOfLong(raster, width * height);
#endif
pic->use_argb = 1;
ok = keep_alpha
? WebPPictureImportRGBA(pic, (const uint8_t*)raster, stride)
: WebPPictureImportRGBX(pic, (const uint8_t*)raster, stride);
@ -188,16 +120,15 @@ int ReadTIFF(const uint8_t* const data, size_t data_size,
}
}
}
End:
TIFFClose(tif);
return ok;
}
#else // !WEBP_HAVE_TIFF
int ReadTIFF(const uint8_t* const data, size_t data_size,
int ReadTIFF(const char* const filename,
struct WebPPicture* const pic, int keep_alpha,
struct Metadata* const metadata) {
(void)data;
(void)data_size;
(void)filename;
(void)pic;
(void)keep_alpha;
(void)metadata;


@ -9,10 +9,8 @@
//
// TIFF decode.
#ifndef WEBP_IMAGEIO_TIFFDEC_H_
#define WEBP_IMAGEIO_TIFFDEC_H_
#include "webp/types.h"
#ifndef WEBP_EXAMPLES_TIFFDEC_H_
#define WEBP_EXAMPLES_TIFFDEC_H_
#ifdef __cplusplus
extern "C" {
@ -21,12 +19,11 @@ extern "C" {
struct Metadata;
struct WebPPicture;
// Reads a TIFF from 'data', returning the decoded output in 'pic'.
// Output is RGBA or YUVA, depending on pic->use_argb value.
// Reads a TIFF from 'filename', returning the decoded output in 'pic'.
// If 'keep_alpha' is true and the TIFF has an alpha channel, the output is RGBA
// or YUVA. Otherwise, alpha channel is dropped and output is RGB or YUV.
// otherwise it will be RGB.
// Returns true on success.
int ReadTIFF(const uint8_t* const data, size_t data_size,
int ReadTIFF(const char* const filename,
struct WebPPicture* const pic, int keep_alpha,
struct Metadata* const metadata);
@ -34,4 +31,4 @@ int ReadTIFF(const uint8_t* const data, size_t data_size,
} // extern "C"
#endif
#endif // WEBP_IMAGEIO_TIFFDEC_H_
#endif // WEBP_EXAMPLES_TIFFDEC_H_


@ -14,10 +14,6 @@
#include "webp/config.h"
#endif
#if defined(__unix__) || defined(__CYGWIN__)
#define _POSIX_C_SOURCE 200112L // for setenv
#endif
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
@ -40,10 +36,9 @@
#include "webp/decode.h"
#include "webp/demux.h"
#include "../examples/example_util.h"
#include "../imageio/imageio_util.h"
#include "./example_util.h"
#if defined(_MSC_VER) && _MSC_VER < 1900
#ifdef _MSC_VER
#define snprintf _snprintf
#endif
@ -54,7 +49,6 @@ static struct {
int done;
int decoding_error;
int print_info;
int only_deltas;
int use_color_profile;
int canvas_width, canvas_height;
@ -69,7 +63,6 @@ static struct {
WebPIterator curr_frame;
WebPIterator prev_frame;
WebPChunkIterator iccp;
int viewport_width, viewport_height;
} kParams;
static void ClearPreviousPic(void) {
@ -249,27 +242,18 @@ static void HandleKey(unsigned char key, int pos_x, int pos_y) {
}
} else if (key == 'i') {
kParams.print_info = 1 - kParams.print_info;
// TODO(skal): handle refresh of animation's last-frame too. It's quite
// more involved though (need to save the previous frame).
if (!kParams.has_animation) ClearPreviousFrame();
glutPostRedisplay();
} else if (key == 'd') {
kParams.only_deltas = 1 - kParams.only_deltas;
glutPostRedisplay();
}
}
static void HandleReshape(int width, int height) {
// TODO(skal): should we preserve aspect ratio?
// Also: handle larger-than-screen pictures correctly.
// TODO(skal): proper handling of resize, esp. for large pictures.
// + key control of the zoom.
glViewport(0, 0, width, height);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
kParams.viewport_width = width;
kParams.viewport_height = height;
if (!kParams.has_animation) ClearPreviousFrame();
}
static void PrintString(const char* const text) {
@ -311,18 +295,17 @@ static void HandleDisplay(void) {
GLfloat xoff, yoff;
if (pic == NULL) return;
glPushMatrix();
glPixelZoom((GLfloat)(+1. / kParams.canvas_width * kParams.viewport_width),
(GLfloat)(-1. / kParams.canvas_height * kParams.viewport_height));
glPixelZoom(1, -1);
xoff = (GLfloat)(2. * curr->x_offset / kParams.canvas_width);
yoff = (GLfloat)(2. * curr->y_offset / kParams.canvas_height);
glRasterPos2f(-1.f + xoff, 1.f - yoff);
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
glPixelStorei(GL_UNPACK_ROW_LENGTH, pic->u.RGBA.stride / 4);
if (kParams.only_deltas) {
DrawCheckerBoard();
} else if (prev->dispose_method == WEBP_MUX_DISPOSE_BACKGROUND ||
if (prev->dispose_method == WEBP_MUX_DISPOSE_BACKGROUND ||
curr->blend_method == WEBP_MUX_NO_BLEND) {
// TODO(later): these offsets and those above should factor in window size.
// they will be incorrect if the window is resized.
// glScissor() takes window coordinates (0,0 at bottom left).
int window_x, window_y;
int frame_w, frame_h;
@ -342,10 +325,6 @@ static void HandleDisplay(void) {
}
glEnable(GL_SCISSOR_TEST);
// Only update the requested area, not the whole canvas.
window_x = window_x * kParams.viewport_width / kParams.canvas_width;
window_y = window_y * kParams.viewport_height / kParams.canvas_height;
frame_w = frame_w * kParams.viewport_width / kParams.canvas_width;
frame_h = frame_h * kParams.viewport_height / kParams.canvas_height;
glScissor(window_x, window_y, frame_w, frame_h);
glClear(GL_COLOR_BUFFER_BIT); // use clear color
@ -388,7 +367,6 @@ static void StartDisplay(void) {
glutInitWindowSize(width, height);
glutCreateWindow("WebP viewer");
glutDisplayFunc(HandleDisplay);
glutReshapeFunc(HandleReshape);
glutIdleFunc(NULL);
glutKeyboardFunc(HandleKey);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
@ -397,6 +375,7 @@ static void StartDisplay(void) {
GetColorf(kParams.bg_color, 8),
GetColorf(kParams.bg_color, 16),
GetColorf(kParams.bg_color, 24));
HandleReshape(width, height);
glClear(GL_COLOR_BUFFER_BIT);
DrawCheckerBoard();
}
@ -408,20 +387,21 @@ static void Help(void) {
printf("Usage: vwebp in_file [options]\n\n"
"Decodes the WebP image file and visualize it using OpenGL\n"
"Options are:\n"
" -version ..... print version number and exit\n"
" -version .... print version number and exit\n"
" -noicc ....... don't use the icc profile if present\n"
" -nofancy ..... don't use the fancy YUV420 upscaler\n"
" -nofilter .... disable in-loop filtering\n"
" -dither <int> dithering strength (0..100), default=50\n"
#if WEBP_DECODER_ABI_VERSION > 0x0204
" -noalphadither disable alpha plane dithering\n"
#endif
" -mt .......... use multi-threading\n"
" -info ........ print info\n"
" -h ........... this help message\n"
" -h ....... this help message\n"
"\n"
"Keyboard shortcuts:\n"
" 'c' ................ toggle use of color profile\n"
" 'i' ................ overlay file information\n"
" 'd' ................ disable blending & disposal (debug)\n"
" 'q' / 'Q' / ESC .... quit\n"
);
}
@ -436,7 +416,9 @@ int main(int argc, char *argv[]) {
return -1;
}
config->options.dithering_strength = 50;
#if WEBP_DECODER_ABI_VERSION > 0x0204
config->options.alpha_dithering_strength = 100;
#endif
kParams.use_color_profile = 1;
for (c = 1; c < argc; ++c) {
@ -450,8 +432,10 @@ int main(int argc, char *argv[]) {
config->options.no_fancy_upsampling = 1;
} else if (!strcmp(argv[c], "-nofilter")) {
config->options.bypass_filtering = 1;
#if WEBP_DECODER_ABI_VERSION > 0x0204
} else if (!strcmp(argv[c], "-noalphadither")) {
config->options.alpha_dithering_strength = 0;
#endif
} else if (!strcmp(argv[c], "-dither") && c + 1 < argc) {
config->options.dithering_strength =
ExUtilGetInt(argv[++c], 0, &parse_error);
@ -490,8 +474,8 @@ int main(int argc, char *argv[]) {
return 0;
}
if (!ImgIoUtilReadFile(kParams.file_name,
&kParams.data.bytes, &kParams.data.size)) {
if (!ExUtilReadFile(kParams.file_name,
&kParams.data.bytes, &kParams.data.size)) {
goto Error;
}
@ -506,6 +490,10 @@ int main(int argc, char *argv[]) {
goto Error;
}
if (WebPDemuxGetI(kParams.dmux, WEBP_FF_FORMAT_FLAGS) & FRAGMENTS_FLAG) {
fprintf(stderr, "Image fragments are not supported for now!\n");
goto Error;
}
kParams.canvas_width = WebPDemuxGetI(kParams.dmux, WEBP_FF_CANVAS_WIDTH);
kParams.canvas_height = WebPDemuxGetI(kParams.dmux, WEBP_FF_CANVAS_HEIGHT);
if (kParams.print_info) {
@ -544,12 +532,6 @@ int main(int argc, char *argv[]) {
WebPDemuxGetFrame(kParams.dmux, 0, curr);
if (kParams.loop_count) ++kParams.loop_count;
#if defined(__unix__) || defined(__CYGWIN__)
// Work around GLUT compositor bug.
// https://bugs.launchpad.net/ubuntu/+source/freeglut/+bug/369891
setenv("XLIB_SKIP_ARGB_VISUALS", "1", 1);
#endif
// Start display (and timer)
glutInit(&argc, argv);
#ifdef FREEGLUT

examples/webpdec.c (67 lines)

@ -0,0 +1,67 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// WebP decode.
#include "./webpdec.h"
#include <stdio.h>
#include <stdlib.h>
#include "webp/decode.h"
#include "webp/encode.h"
#include "./example_util.h"
#include "./metadata.h"
int ReadWebP(const char* const in_file, WebPPicture* const pic,
int keep_alpha, Metadata* const metadata) {
int ok = 0;
size_t data_size = 0;
const uint8_t* data = NULL;
VP8StatusCode status = VP8_STATUS_OK;
WebPDecoderConfig config;
WebPDecBuffer* const output_buffer = &config.output;
WebPBitstreamFeatures* const bitstream = &config.input;
// TODO(jzern): add Exif/XMP/ICC extraction.
if (metadata != NULL) {
fprintf(stderr, "Warning: metadata extraction from WebP is unsupported.\n");
}
if (!WebPInitDecoderConfig(&config)) {
fprintf(stderr, "Library version mismatch!\n");
return 0;
}
if (ExUtilLoadWebP(in_file, &data, &data_size, bitstream)) {
const int has_alpha = keep_alpha && bitstream->has_alpha;
output_buffer->colorspace = has_alpha ? MODE_RGBA : MODE_RGB;
status = ExUtilDecodeWebP(data, data_size, 0, &config);
if (status == VP8_STATUS_OK) {
const uint8_t* const rgba = output_buffer->u.RGBA.rgba;
const int stride = output_buffer->u.RGBA.stride;
pic->width = output_buffer->width;
pic->height = output_buffer->height;
pic->use_argb = 1;
ok = has_alpha ? WebPPictureImportRGBA(pic, rgba, stride)
: WebPPictureImportRGB(pic, rgba, stride);
}
}
if (status != VP8_STATUS_OK) {
ExUtilPrintWebPError(in_file, status);
}
free((void*)data);
WebPFreeDecBuffer(output_buffer);
return ok;
}
// -----------------------------------------------------------------------------

examples/webpdec.h (33 lines)

@ -0,0 +1,33 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// WebP decode.
#ifndef WEBP_EXAMPLES_WEBPDEC_H_
#define WEBP_EXAMPLES_WEBPDEC_H_
#ifdef __cplusplus
extern "C" {
#endif
struct Metadata;
struct WebPPicture;
// Reads a WebP from 'in_file', returning the decoded output in 'pic'.
// If 'keep_alpha' is true and the WebP has an alpha channel, the output is
// RGBA otherwise it will be RGB.
// Returns true on success.
int ReadWebP(const char* const in_file, struct WebPPicture* const pic,
int keep_alpha, struct Metadata* const metadata);
#ifdef __cplusplus
} // extern "C"
#endif
#endif // WEBP_EXAMPLES_WEBPDEC_H_
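A hypothetical caller of the helper declared above (not part of this change); it only relies on WebPPictureInit()/WebPPictureFree() from webp/encode.h. ReadWebP() warns that metadata extraction is unsupported, so NULL is passed for it.

#include <stdio.h>
#include "./webpdec.h"
#include "webp/encode.h"

// Minimal sketch: decode 'path' into an RGB(A) WebPPicture, print its size,
// then release it.
static int PrintWebPDimensions(const char* const path) {
  WebPPicture pic;
  if (!WebPPictureInit(&pic)) return 0;            // library version check
  if (!ReadWebP(path, &pic, 1 /* keep_alpha */, NULL)) return 0;
  printf("%s: %d x %d\n", path, pic.width, pic.height);
  WebPPictureFree(&pic);
  return 1;
}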


@ -28,6 +28,7 @@
webpmux -set xmp image_metadata.xmp in.webp -o out_xmp_container.webp
Extract relevant data from WebP container file:
webpmux -get frgm n in.webp -o out_fragment.webp
webpmux -get frame n in.webp -o out_frame.webp
webpmux -get icc in.webp -o image_profile.icc
webpmux -get exif in.webp -o image_metadata.exif
@ -38,11 +39,6 @@
webpmux -strip exif in.webp -o out.webp
webpmux -strip xmp in.webp -o out.webp
Change duration of frame intervals:
webpmux -duration 150 in.webp -o out.webp
webpmux -duration 33,2 in.webp -o out.webp
webpmux -duration 200,10,0 -duration 150,6,50 in.webp -o out.webp
Misc:
webpmux -info in.webp
webpmux [ -h | -help ]
@ -59,8 +55,7 @@
#include <string.h>
#include "webp/decode.h"
#include "webp/mux.h"
#include "../examples/example_util.h"
#include "../imageio/imageio_util.h"
#include "./example_util.h"
//------------------------------------------------------------------------------
// Config object to parse command-line arguments.
@ -71,8 +66,7 @@ typedef enum {
ACTION_SET,
ACTION_STRIP,
ACTION_INFO,
ACTION_HELP,
ACTION_DURATION
ACTION_HELP
} ActionType;
typedef enum {
@ -94,17 +88,17 @@ typedef enum {
FEATURE_XMP,
FEATURE_ICCP,
FEATURE_ANMF,
FEATURE_DURATION,
FEATURE_FRGM,
LAST_FEATURE
} FeatureType;
static const char* const kFourccList[LAST_FEATURE] = {
NULL, "EXIF", "XMP ", "ICCP", "ANMF"
NULL, "EXIF", "XMP ", "ICCP", "ANMF", "FRGM"
};
static const char* const kDescriptions[LAST_FEATURE] = {
NULL, "EXIF metadata", "XMP metadata", "ICC profile",
"Animation frame"
"Animation frame", "Image fragment"
};
typedef struct {
@ -188,32 +182,39 @@ static WebPMuxError DisplayInfo(const WebPMux* mux) {
printf("Canvas size: %d x %d\n", width, height);
err = WebPMuxGetFeatures(mux, &flag);
#ifndef WEBP_EXPERIMENTAL_FEATURES
if (flag & FRAGMENTS_FLAG) err = WEBP_MUX_INVALID_ARGUMENT;
#endif
RETURN_IF_ERROR("Failed to retrieve features\n");
if (flag == 0) {
printf("No features present.\n");
fprintf(stderr, "No features present.\n");
return err;
}
// Print the features present.
printf("Features present:");
if (flag & ANIMATION_FLAG) printf(" animation");
if (flag & FRAGMENTS_FLAG) printf(" image fragments");
if (flag & ICCP_FLAG) printf(" ICC profile");
if (flag & EXIF_FLAG) printf(" EXIF metadata");
if (flag & XMP_FLAG) printf(" XMP metadata");
if (flag & ALPHA_FLAG) printf(" transparency");
printf("\n");
if (flag & ANIMATION_FLAG) {
const WebPChunkId id = WEBP_CHUNK_ANMF;
const char* const type_str = "frame";
if ((flag & ANIMATION_FLAG) || (flag & FRAGMENTS_FLAG)) {
const int is_anim = !!(flag & ANIMATION_FLAG);
const WebPChunkId id = is_anim ? WEBP_CHUNK_ANMF : WEBP_CHUNK_FRGM;
const char* const type_str = is_anim ? "frame" : "fragment";
int nFrames;
WebPMuxAnimParams params;
err = WebPMuxGetAnimationParams(mux, &params);
assert(err == WEBP_MUX_OK);
printf("Background color : 0x%.8X Loop Count : %d\n",
params.bgcolor, params.loop_count);
if (is_anim) {
WebPMuxAnimParams params;
err = WebPMuxGetAnimationParams(mux, &params);
assert(err == WEBP_MUX_OK);
printf("Background color : 0x%.8X Loop Count : %d\n",
params.bgcolor, params.loop_count);
}
err = WebPMuxNumChunks(mux, id, &nFrames);
assert(err == WEBP_MUX_OK);
@ -222,8 +223,8 @@ static WebPMuxError DisplayInfo(const WebPMux* mux) {
if (nFrames > 0) {
int i;
printf("No.: width height alpha x_offset y_offset ");
printf("duration dispose blend ");
printf("image_size compression\n");
if (is_anim) printf("duration dispose blend ");
printf("image_size\n");
for (i = 1; i <= nFrames; i++) {
WebPMuxFrameInfo frame;
err = WebPMuxGetFrame(mux, i, &frame);
@ -236,7 +237,7 @@ static WebPMuxError DisplayInfo(const WebPMux* mux) {
printf("%3d: %5d %5d %5s %8d %8d ", i, features.width,
features.height, features.has_alpha ? "yes" : "no",
frame.x_offset, frame.y_offset);
{
if (is_anim) {
const char* const dispose =
(frame.dispose_method == WEBP_MUX_DISPOSE_NONE) ? "none"
: "background";
@ -244,10 +245,7 @@ static WebPMuxError DisplayInfo(const WebPMux* mux) {
(frame.blend_method == WEBP_MUX_BLEND) ? "yes" : "no";
printf("%8d %10s %5s ", frame.duration, dispose, blend);
}
printf("%10d %11s\n", (int)frame.bitstream.size,
(features.format == 1) ? "lossy" :
(features.format == 2) ? "lossless" :
"undefined");
printf("%10d\n", (int)frame.bitstream.size);
}
WebPDataClear(&frame.bitstream);
RETURN_IF_ERROR3("Failed to retrieve %s#%d\n", type_str, i);
@ -276,7 +274,7 @@ static WebPMuxError DisplayInfo(const WebPMux* mux) {
printf("Size of the XMP metadata: %d\n", (int)xmp.size);
}
if ((flag & ALPHA_FLAG) && !(flag & ANIMATION_FLAG)) {
if ((flag & ALPHA_FLAG) && !(flag & (ANIMATION_FLAG | FRAGMENTS_FLAG))) {
WebPMuxFrameInfo image;
err = WebPMuxGetFrame(mux, 1, &image);
if (err == WEBP_MUX_OK) {
@ -292,9 +290,10 @@ static WebPMuxError DisplayInfo(const WebPMux* mux) {
static void PrintHelp(void) {
printf("Usage: webpmux -get GET_OPTIONS INPUT -o OUTPUT\n");
printf(" webpmux -set SET_OPTIONS INPUT -o OUTPUT\n");
printf(" webpmux -duration DURATION_OPTIONS [-duration ...]\n");
printf(" INPUT -o OUTPUT\n");
printf(" webpmux -strip STRIP_OPTIONS INPUT -o OUTPUT\n");
#ifdef WEBP_EXPERIMENTAL_FEATURES
printf(" webpmux -frgm FRAGMENT_OPTIONS [-frgm...] -o OUTPUT\n");
#endif
printf(" webpmux -frame FRAME_OPTIONS [-frame...] [-loop LOOP_COUNT]"
"\n");
printf(" [-bgcolor BACKGROUND_COLOR] -o OUTPUT\n");
@ -308,6 +307,9 @@ static void PrintHelp(void) {
printf(" icc get ICC profile\n");
printf(" exif get EXIF metadata\n");
printf(" xmp get XMP metadata\n");
#ifdef WEBP_EXPERIMENTAL_FEATURES
printf(" frgm n get nth fragment\n");
#endif
printf(" frame n get nth frame\n");
printf("\n");
@ -320,18 +322,6 @@ static void PrintHelp(void) {
printf(" 'file.exif' contains the EXIF metadata to be set\n");
printf(" 'file.xmp' contains the XMP metadata to be set\n");
printf("\n");
printf("DURATION_OPTIONS:\n");
printf(" Set duration of selected frames:\n");
printf(" duration set duration for each frames\n");
printf(" duration,frame set duration of a particular frame\n");
printf(" duration,start,end set duration of frames in the\n");
printf(" interval [start,end])\n");
printf(" where: 'duration' is the duration in milliseconds\n");
printf(" 'start' is the start frame index\n");
printf(" 'end' is the inclusive end frame index\n");
printf(" The special 'end' value '0' means: last frame.\n");
printf("\n");
printf("STRIP_OPTIONS:\n");
printf(" Strip color profile/metadata:\n");
@ -339,6 +329,16 @@ static void PrintHelp(void) {
printf(" exif strip EXIF metadata\n");
printf(" xmp strip XMP metadata\n");
#ifdef WEBP_EXPERIMENTAL_FEATURES
printf("\n");
printf("FRAGMENT_OPTIONS(i):\n");
printf(" Create fragmented image:\n");
printf(" file_i +xi+yi\n");
printf(" where: 'file_i' is the i'th fragment (WebP format),\n");
printf(" 'xi','yi' specify the image offset for this fragment"
"\n");
#endif
printf("\n");
printf("FRAME_OPTIONS(i):\n");
printf(" Create animation:\n");
@ -383,7 +383,7 @@ static int ReadFileToWebPData(const char* const filename,
WebPData* const webp_data) {
const uint8_t* data;
size_t size;
if (!ImgIoUtilReadFile(filename, &data, &size)) return 0;
if (!ExUtilReadFile(filename, &data, &size)) return 0;
webp_data->bytes = data;
webp_data->size = size;
return 1;
@ -403,7 +403,7 @@ static int CreateMux(const char* const filename, WebPMux** mux) {
static int WriteData(const char* filename, const WebPData* const webpdata) {
int ok = 0;
FILE* fout = strcmp(filename, "-") ? fopen(filename, "wb")
: ImgIoUtilSetBinaryMode(stdout);
: ExUtilSetBinaryMode(stdout);
if (fout == NULL) {
fprintf(stderr, "Error opening output WebP file %s!\n", filename);
return 0;
@ -432,45 +432,6 @@ static int WriteWebP(WebPMux* const mux, const char* filename) {
return ok;
}
static WebPMux* DuplicateMuxHeader(const WebPMux* const mux) {
WebPMux* new_mux = WebPMuxNew();
WebPMuxAnimParams p;
WebPMuxError err;
int i;
int ok = 1;
if (new_mux == NULL) return NULL;
err = WebPMuxGetAnimationParams(mux, &p);
if (err == WEBP_MUX_OK) {
err = WebPMuxSetAnimationParams(new_mux, &p);
if (err != WEBP_MUX_OK) {
ERROR_GOTO2("Error (%s) handling animation params.\n",
ErrorString(err), End);
}
} else {
/* it might not be an animation. Just keep moving. */
}
for (i = 1; i <= 3; ++i) {
WebPData metadata;
err = WebPMuxGetChunk(mux, kFourccList[i], &metadata);
if (err == WEBP_MUX_OK && metadata.size > 0) {
err = WebPMuxSetChunk(new_mux, kFourccList[i], &metadata, 1);
if (err != WEBP_MUX_OK) {
ERROR_GOTO1("Error transferring metadata in DuplicateMux().", End);
}
}
}
End:
if (!ok) {
WebPMuxDelete(new_mux);
new_mux = NULL;
}
return new_mux;
}
static int ParseFrameArgs(const char* args, WebPMuxFrameInfo* const info) {
int dispose_method, dummy;
char plus_minus, blend_method;
@ -506,6 +467,13 @@ static int ParseFrameArgs(const char* args, WebPMuxFrameInfo* const info) {
return 1;
}
static int ParseFragmentArgs(const char* args, WebPMuxFrameInfo* const info) {
const int ok =
(sscanf(args, "+%d+%d", &info->x_offset, &info->y_offset) == 2);
if (ok) WarnAboutOddOffset(info);
return ok;
}
static int ParseBgcolorArgs(const char* args, uint32_t* const bgcolor) {
uint32_t a, r, g, b;
if (sscanf(args, "%u,%u,%u,%u", &a, &r, &g, &b) != 4) return 0;
@ -534,9 +502,9 @@ static void DeleteConfig(WebPMuxConfig* config) {
static int ValidateCommandLine(int argc, const char* argv[],
int* num_feature_args) {
int num_frame_args;
int num_frgm_args;
int num_loop_args;
int num_bgcolor_args;
int num_durations_args;
int ok = 1;
assert(num_feature_args != NULL);
@ -561,9 +529,9 @@ static int ValidateCommandLine(int argc, const char* argv[],
// Compound checks.
num_frame_args = CountOccurrences(argv, argc, "-frame");
num_frgm_args = CountOccurrences(argv, argc, "-frgm");
num_loop_args = CountOccurrences(argv, argc, "-loop");
num_bgcolor_args = CountOccurrences(argv, argc, "-bgcolor");
num_durations_args = CountOccurrences(argv, argc, "-duration");
if (num_loop_args > 1) {
ERROR_GOTO1("ERROR: Multiple loop counts specified.\n", ErrValidate);
@ -576,20 +544,22 @@ static int ValidateCommandLine(int argc, const char* argv[],
ERROR_GOTO1("ERROR: Loop count and background color are relevant only in "
"case of animation.\n", ErrValidate);
}
if (num_durations_args > 0 && num_frame_args != 0) {
ERROR_GOTO1("ERROR: Can not combine -duration and -frame commands.\n",
ErrValidate);
if (num_frame_args > 0 && num_frgm_args > 0) {
ERROR_GOTO1("ERROR: Only one of frames & fragments can be specified at a "
"time.\n", ErrValidate);
}
assert(ok == 1);
if (num_durations_args > 0) {
*num_feature_args = num_durations_args;
} else if (num_frame_args == 0) {
if (num_frame_args == 0 && num_frgm_args == 0) {
// Single argument ('set' action for ICCP/EXIF/XMP, OR a 'get' action).
*num_feature_args = 1;
} else {
// Multiple arguments ('set' action for animation)
*num_feature_args = num_frame_args + num_loop_args + num_bgcolor_args;
// Multiple arguments ('set' action for animation or fragmented image).
if (num_frame_args > 0) {
*num_feature_args = num_frame_args + num_loop_args + num_bgcolor_args;
} else {
*num_feature_args = num_frgm_args;
}
}
ErrValidate:
@ -631,21 +601,6 @@ static int ParseCommandLine(int argc, const char* argv[],
ERROR_GOTO1("ERROR: Multiple actions specified.\n", ErrParse);
}
++i;
} else if (!strcmp(argv[i], "-duration")) {
CHECK_NUM_ARGS_LESS(2, ErrParse);
if (ACTION_IS_NIL || config->action_type_ == ACTION_DURATION) {
config->action_type_ = ACTION_DURATION;
} else {
ERROR_GOTO1("ERROR: Multiple actions specified.\n", ErrParse);
}
if (FEATURETYPE_IS_NIL || feature->type_ == FEATURE_DURATION) {
feature->type_ = FEATURE_DURATION;
} else {
ERROR_GOTO1("ERROR: Multiple features specified.\n", ErrParse);
}
arg->params_ = argv[i + 1];
++feature_arg_index;
i += 2;
} else if (!strcmp(argv[i], "-get")) {
if (ACTION_IS_NIL) {
config->action_type_ = ACTION_GET;
@ -695,6 +650,24 @@ static int ParseCommandLine(int argc, const char* argv[],
arg->params_ = argv[i + 1];
++feature_arg_index;
i += 2;
#ifdef WEBP_EXPERIMENTAL_FEATURES
} else if (!strcmp(argv[i], "-frgm")) {
CHECK_NUM_ARGS_LESS(3, ErrParse);
if (ACTION_IS_NIL || config->action_type_ == ACTION_SET) {
config->action_type_ = ACTION_SET;
} else {
ERROR_GOTO1("ERROR: Multiple actions specified.\n", ErrParse);
}
if (FEATURETYPE_IS_NIL || feature->type_ == FEATURE_FRGM) {
feature->type_ = FEATURE_FRGM;
} else {
ERROR_GOTO1("ERROR: Multiple features specified.\n", ErrParse);
}
arg->filename_ = argv[i + 1];
arg->params_ = argv[i + 2];
++feature_arg_index;
i += 3;
#endif
} else if (!strcmp(argv[i], "-o")) {
CHECK_NUM_ARGS_LESS(2, ErrParse);
config->output_ = argv[i + 1];
@ -754,10 +727,16 @@ static int ParseCommandLine(int argc, const char* argv[],
} else {
++i;
}
#ifdef WEBP_EXPERIMENTAL_FEATURES
} else if ((!strcmp(argv[i], "frame") ||
!strcmp(argv[i], "frgm")) &&
#else
} else if (!strcmp(argv[i], "frame") &&
(config->action_type_ == ACTION_GET)) {
#endif
(config->action_type_ == ACTION_GET)) {
CHECK_NUM_ARGS_LESS(2, ErrParse);
feature->type_ = FEATURE_ANMF;
feature->type_ = (!strcmp(argv[i], "frame")) ? FEATURE_ANMF :
FEATURE_FRGM;
arg->params_ = argv[i + 1];
++feature_arg_index;
i += 2;
@ -795,7 +774,8 @@ static int ValidateConfig(WebPMuxConfig* config) {
if (config->input_ == NULL) {
if (config->action_type_ != ACTION_SET) {
ERROR_GOTO1("ERROR: No input file specified.\n", ErrValidate2);
} else if (feature->type_ != FEATURE_ANMF) {
} else if (feature->type_ != FEATURE_ANMF &&
feature->type_ != FEATURE_FRGM) {
ERROR_GOTO1("ERROR: No input file specified.\n", ErrValidate2);
}
}
@ -847,13 +827,14 @@ static int InitializeConfig(int argc, const char* argv[],
//------------------------------------------------------------------------------
// Processing.
static int GetFrame(const WebPMux* mux, const WebPMuxConfig* config) {
static int GetFrameFragment(const WebPMux* mux,
const WebPMuxConfig* config, int is_frame) {
WebPMuxError err = WEBP_MUX_OK;
WebPMux* mux_single = NULL;
int num = 0;
long num = 0;
int ok = 1;
int parse_error = 0;
const WebPChunkId id = WEBP_CHUNK_ANMF;
const WebPChunkId id = is_frame ? WEBP_CHUNK_ANMF : WEBP_CHUNK_FRGM;
WebPMuxFrameInfo info;
WebPDataInit(&info.bitstream);
@ -866,7 +847,7 @@ static int GetFrame(const WebPMux* mux, const WebPMuxConfig* config) {
err = WebPMuxGetFrame(mux, num, &info);
if (err == WEBP_MUX_OK && info.id != id) err = WEBP_MUX_NOT_FOUND;
if (err != WEBP_MUX_OK) {
ERROR_GOTO3("ERROR (%s): Could not get frame %d.\n",
ERROR_GOTO3("ERROR (%s): Could not get frame %ld.\n",
ErrorString(err), num, ErrGet);
}
@ -904,7 +885,9 @@ static int Process(const WebPMuxConfig* config) {
if (!ok) goto Err2;
switch (feature->type_) {
case FEATURE_ANMF:
ok = GetFrame(mux, config);
case FEATURE_FRGM:
ok = GetFrameFragment(mux, config,
(feature->type_ == FEATURE_ANMF) ? 1 : 0);
break;
case FEATURE_ICCP:
@ -996,6 +979,35 @@ static int Process(const WebPMuxConfig* config) {
break;
}
case FEATURE_FRGM: {
int i;
mux = WebPMuxNew();
if (mux == NULL) {
ERROR_GOTO2("ERROR (%s): Could not allocate a mux object.\n",
ErrorString(WEBP_MUX_MEMORY_ERROR), Err2);
}
for (i = 0; i < feature->arg_count_; ++i) {
WebPMuxFrameInfo frgm;
frgm.id = WEBP_CHUNK_FRGM;
ok = ReadFileToWebPData(feature->args_[i].filename_,
&frgm.bitstream);
if (!ok) goto Err2;
ok = ParseFragmentArgs(feature->args_[i].params_, &frgm);
if (!ok) {
WebPDataClear(&frgm.bitstream);
ERROR_GOTO1("ERROR: Could not parse fragment properties.\n",
Err2);
}
err = WebPMuxPushFrame(mux, &frgm, 1);
WebPDataClear(&frgm.bitstream);
if (err != WEBP_MUX_OK) {
ERROR_GOTO3("ERROR (%s): Could not add a fragment at index %d.\n",
ErrorString(err), i, Err2);
}
}
break;
}
case FEATURE_ICCP:
case FEATURE_EXIF:
case FEATURE_XMP: {
@ -1019,89 +1031,6 @@ static int Process(const WebPMuxConfig* config) {
ok = WriteWebP(mux, config->output_);
break;
}
case ACTION_DURATION: {
int num_frames;
ok = CreateMux(config->input_, &mux);
if (!ok) goto Err2;
err = WebPMuxNumChunks(mux, WEBP_CHUNK_ANMF, &num_frames);
ok = (err == WEBP_MUX_OK);
if (!ok) {
ERROR_GOTO1("ERROR: can not parse the number of frames.\n", Err2);
}
if (num_frames == 0) {
fprintf(stderr, "Doesn't look like the source is animated. "
"Skipping duration setting.\n");
ok = WriteWebP(mux, config->output_);
if (!ok) goto Err2;
} else {
int i;
int* durations = NULL;
WebPMux* new_mux = DuplicateMuxHeader(mux);
if (new_mux == NULL) goto Err2;
durations = (int*)malloc((size_t)num_frames * sizeof(*durations));
if (durations == NULL) goto Err2;
for (i = 0; i < num_frames; ++i) durations[i] = -1;
// Parse intervals to process.
for (i = 0; i < feature->arg_count_; ++i) {
int k;
int args[3];
int duration, start, end;
const int nb_args = ExUtilGetInts(feature->args_[i].params_,
10, 3, args);
ok = (nb_args >= 1);
if (!ok) goto Err3;
duration = args[0];
if (duration < 0) {
ERROR_GOTO1("ERROR: duration must be strictly positive.\n", Err3);
}
if (nb_args == 1) { // only duration is present -> use full interval
start = 1;
end = num_frames;
} else {
start = args[1];
if (start <= 0) {
start = 1;
} else if (start > num_frames) {
start = num_frames;
}
end = (nb_args >= 3) ? args[2] : start;
if (end == 0 || end > num_frames) end = num_frames;
}
for (k = start; k <= end; ++k) {
assert(k >= 1 && k <= num_frames);
durations[k - 1] = duration;
}
}
// Apply non-negative durations to their destination frames.
for (i = 1; i <= num_frames; ++i) {
WebPMuxFrameInfo frame;
err = WebPMuxGetFrame(mux, i, &frame);
if (err != WEBP_MUX_OK || frame.id != WEBP_CHUNK_ANMF) {
ERROR_GOTO2("ERROR: can not retrieve frame #%d.\n", i, Err3);
}
if (durations[i - 1] >= 0) frame.duration = durations[i - 1];
err = WebPMuxPushFrame(new_mux, &frame, 1);
if (err != WEBP_MUX_OK) {
ERROR_GOTO2("ERROR: error push frame data #%d\n", i, Err3);
}
WebPDataClear(&frame.bitstream);
}
WebPMuxDelete(mux);
ok = WriteWebP(new_mux, config->output_);
mux = new_mux; // transfer for the WebPMuxDelete() call
new_mux = NULL;
Err3:
free(durations);
WebPMuxDelete(new_mux);
if (!ok) goto Err2;
}
break;
}
case ACTION_STRIP: {
ok = CreateMux(config->input_, &mux);
if (!ok) goto Err2;


@ -17,7 +17,6 @@
#include <assert.h>
#include <stdio.h>
#include <string.h>
#ifdef HAVE_WINCODEC_H
#ifdef __MINGW32__
@ -27,13 +26,11 @@
#define COBJMACROS
#define _WIN32_IE 0x500 // Workaround bug in shlwapi.h when compiling C++
// code with COBJMACROS.
#include <ole2.h> // CreateStreamOnHGlobal()
#include <shlwapi.h>
#include <windows.h>
#include <wincodec.h>
#include "webp/encode.h"
#include "./imageio_util.h"
#include "./metadata.h"
#define IFS(fn) \
@ -85,32 +82,7 @@ WEBP_DEFINE_GUID(GUID_WICPixelFormat64bppRGBA_,
static HRESULT OpenInputStream(const char* filename, IStream** stream) {
HRESULT hr = S_OK;
if (!strcmp(filename, "-")) {
const uint8_t* data = NULL;
size_t data_size = 0;
const int ok = ImgIoUtilReadFile(filename, &data, &data_size);
if (ok) {
HGLOBAL image = GlobalAlloc(GMEM_MOVEABLE, data_size);
if (image != NULL) {
void* const image_mem = GlobalLock(image);
if (image_mem != NULL) {
memcpy(image_mem, data, data_size);
GlobalUnlock(image);
IFS(CreateStreamOnHGlobal(image, TRUE, stream));
} else {
hr = E_FAIL;
}
} else {
hr = E_OUTOFMEMORY;
}
free((void*)data);
} else {
hr = E_FAIL;
}
} else {
IFS(SHCreateStreamOnFileA(filename, STGM_READ, stream));
}
IFS(SHCreateStreamOnFileA(filename, STGM_READ, stream));
if (FAILED(hr)) {
fprintf(stderr, "Error opening input file %s (%08lx)\n", filename, hr);
}
@ -274,9 +246,7 @@ int ReadPictureWithWIC(const char* const filename,
NULL
};
int has_alpha = 0;
int64_t stride;
if (filename == NULL || pic == NULL) return 0;
int stride;
IFS(CoInitialize(NULL));
IFS(CoCreateInstance(MAKE_REFGUID(CLSID_WICImagingFactory), NULL,
@ -336,27 +306,22 @@ int ReadPictureWithWIC(const char* const filename,
// Decode.
IFS(IWICFormatConverter_GetSize(converter, &width, &height));
stride = (int64_t)importer->bytes_per_pixel * width * sizeof(*rgb);
if (stride != (int)stride ||
!ImgIoUtilCheckSizeArgumentsOverflow(stride, height)) {
hr = E_FAIL;
}
stride = importer->bytes_per_pixel * width * sizeof(*rgb);
if (SUCCEEDED(hr)) {
rgb = (BYTE*)malloc((size_t)stride * height);
rgb = (BYTE*)malloc(stride * height);
if (rgb == NULL)
hr = E_OUTOFMEMORY;
}
IFS(IWICFormatConverter_CopyPixels(converter, NULL,
(UINT)stride, (UINT)stride * height, rgb));
stride, stride * height, rgb));
// WebP conversion.
if (SUCCEEDED(hr)) {
int ok;
pic->width = width;
pic->height = height;
pic->use_argb = 1; // For WIC, we always force to argb
ok = importer->import(pic, rgb, (int)stride);
pic->use_argb = 1;
ok = importer->import(pic, rgb, stride);
if (!ok) hr = E_FAIL;
}
if (SUCCEEDED(hr)) {


@ -9,8 +9,8 @@
//
// Windows Imaging Component (WIC) decode.
#ifndef WEBP_IMAGEIO_WICDEC_H_
#define WEBP_IMAGEIO_WICDEC_H_
#ifndef WEBP_EXAMPLES_WICDEC_H_
#define WEBP_EXAMPLES_WICDEC_H_
#ifdef __cplusplus
extern "C" {
@ -21,7 +21,7 @@ struct WebPPicture;
// Reads an image from 'filename', returning the decoded output in 'pic'.
// If 'keep_alpha' is true and the image has an alpha channel, the output is
// RGBA otherwise it will be RGB. pic->use_argb is always forced to true.
// RGBA otherwise it will be RGB.
// Returns true on success.
int ReadPictureWithWIC(const char* const filename,
struct WebPPicture* const pic, int keep_alpha,
@ -31,4 +31,4 @@ int ReadPictureWithWIC(const char* const filename,
} // extern "C"
#endif
#endif // WEBP_IMAGEIO_WICDEC_H_
#endif // WEBP_EXAMPLES_WICDEC_H_


@ -1,26 +0,0 @@
AM_CPPFLAGS += -I$(top_builddir)/src -I$(top_srcdir)/src
noinst_LTLIBRARIES = libwebpextras.la
noinst_HEADERS =
noinst_HEADERS += ../src/webp/types.h
libwebpextras_la_SOURCES =
libwebpextras_la_SOURCES += extras.c extras.h quality_estimate.c
libwebpextras_la_CPPFLAGS = $(AM_CPPFLAGS)
libwebpextras_la_LDFLAGS = -lm
libwebpextras_la_LIBADD = ../src/libwebp.la
noinst_PROGRAMS = get_disto webp_quality
get_disto_SOURCES = get_disto.c
get_disto_CPPFLAGS = $(AM_CPPFLAGS)
get_disto_LDADD = ../imageio/libimageio_util.la ../imageio/libimagedec.la
get_disto_LDADD += ../src/libwebp.la
get_disto_LDADD += $(PNG_LIBS) $(JPEG_LIBS) $(TIFF_LIBS)
webp_quality_SOURCES = webp_quality.c
webp_quality_CPPFLAGS = $(AM_CPPFLAGS) $(USE_EXPERIMENTAL_CODE)
webp_quality_LDADD = ../imageio/libimageio_util.la
webp_quality_LDADD += ./libwebpextras.la
webp_quality_LDADD += ../src/libwebp.la


@ -1,142 +0,0 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// Additional WebP utilities.
//
#include "./extras.h"
#include "webp/format_constants.h"
#include <assert.h>
#include <string.h>
#define XTRA_MAJ_VERSION 0
#define XTRA_MIN_VERSION 1
#define XTRA_REV_VERSION 0
//------------------------------------------------------------------------------
int WebPGetExtrasVersion(void) {
return (XTRA_MAJ_VERSION << 16) | (XTRA_MIN_VERSION << 8) | XTRA_REV_VERSION;
}
//------------------------------------------------------------------------------
int WebPImportGray(const uint8_t* gray_data, WebPPicture* pic) {
int y, width, uv_width;
if (pic == NULL || gray_data == NULL) return 0;
pic->colorspace = WEBP_YUV420;
if (!WebPPictureAlloc(pic)) return 0;
width = pic->width;
uv_width = (width + 1) >> 1;
for (y = 0; y < pic->height; ++y) {
memcpy(pic->y + y * pic->y_stride, gray_data, width);
gray_data += width; // <- we could use some 'data_stride' here if needed
if ((y & 1) == 0) {
memset(pic->u + (y >> 1) * pic->uv_stride, 128, uv_width);
memset(pic->v + (y >> 1) * pic->uv_stride, 128, uv_width);
}
}
return 1;
}
int WebPImportRGB565(const uint8_t* rgb565, WebPPicture* pic) {
int x, y;
if (pic == NULL || rgb565 == NULL) return 0;
pic->colorspace = WEBP_YUV420;
pic->use_argb = 1;
if (!WebPPictureAlloc(pic)) return 0;
for (y = 0; y < pic->height; ++y) {
const int width = pic->width;
uint32_t* dst = pic->argb + y * pic->argb_stride;
for (x = 0; x < width; ++x) {
#ifdef WEBP_SWAP_16BIT_CSP
const uint32_t rg = rgb565[2 * x + 1];
const uint32_t gb = rgb565[2 * x + 0];
#else
const uint32_t rg = rgb565[2 * x + 0];
const uint32_t gb = rgb565[2 * x + 1];
#endif
uint32_t r = rg & 0xf8;
uint32_t g = ((rg << 5) | (gb >> 3)) & 0xfc;
uint32_t b = (gb << 5);
// dithering
r = r | (r >> 5);
g = g | (g >> 6);
b = b | (b >> 5);
dst[x] = (r << 16) | (g << 8) | b;
}
rgb565 += 2 * width;
}
return 1;
}
int WebPImportRGB4444(const uint8_t* rgb4444, WebPPicture* pic) {
int x, y;
if (pic == NULL || rgb4444 == NULL) return 0;
pic->colorspace = WEBP_YUV420;
pic->use_argb = 1;
if (!WebPPictureAlloc(pic)) return 0;
for (y = 0; y < pic->height; ++y) {
const int width = pic->width;
uint32_t* dst = pic->argb + y * pic->argb_stride;
for (x = 0; x < width; ++x) {
#ifdef WEBP_SWAP_16BIT_CSP
const uint32_t rg = rgb4444[2 * x + 1];
const uint32_t ba = rgb4444[2 * x + 0];
#else
const uint32_t rg = rgb4444[2 * x + 0];
const uint32_t ba = rgb4444[2 * x + 1];
#endif
uint32_t r = rg & 0xf0;
uint32_t g = (rg << 4);
uint32_t b = (ba & 0xf0);
uint32_t a = (ba << 4);
// dithering
r = r | (r >> 4);
g = g | (g >> 4);
b = b | (b >> 4);
a = a | (a >> 4);
dst[x] = (a << 24) | (r << 16) | (g << 8) | b;
}
rgb4444 += 2 * width;
}
return 1;
}
int WebPImportColorMappedARGB(const uint8_t* indexed, int indexed_stride,
const uint32_t palette[], int palette_size,
WebPPicture* pic) {
int x, y;
uint32_t* dst;
// 256 as the input buffer is uint8_t.
assert(MAX_PALETTE_SIZE <= 256);
if (pic == NULL || indexed == NULL || indexed_stride < pic->width ||
palette == NULL || palette_size > MAX_PALETTE_SIZE || palette_size <= 0) {
return 0;
}
pic->use_argb = 1;
if (!WebPPictureAlloc(pic)) return 0;
dst = pic->argb;
for (y = 0; y < pic->height; ++y) {
for (x = 0; x < pic->width; ++x) {
// Make sure we are within the palette.
if (indexed[x] >= palette_size) {
WebPPictureFree(pic);
return 0;
}
dst[x] = palette[indexed[x]];
}
indexed += indexed_stride;
dst += pic->argb_stride;
}
return 1;
}
//------------------------------------------------------------------------------


@ -1,70 +0,0 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
#ifndef WEBP_EXTRAS_EXTRAS_H_
#define WEBP_EXTRAS_EXTRAS_H_
#include "webp/types.h"
#ifdef __cplusplus
extern "C" {
#endif
#include "webp/encode.h"
#define WEBP_EXTRAS_ABI_VERSION 0x0001 // MAJOR(8b) + MINOR(8b)
//------------------------------------------------------------------------------
// Returns the version number of the extras library, packed in hexadecimal using
// 8bits for each of major/minor/revision. E.g: v2.5.7 is 0x020507.
WEBP_EXTERN(int) WebPGetExtrasVersion(void);
//------------------------------------------------------------------------------
// Ad-hoc colorspace importers.
// Import luma samples (a grayscale image) into 'picture'. The 'picture'
// width and height must be set prior to calling this function.
WEBP_EXTERN(int) WebPImportGray(const uint8_t* gray, WebPPicture* picture);
// Import RGB samples in RGB565 packed format into 'picture'. The 'picture'
// width and height must be set prior to calling this function.
WEBP_EXTERN(int) WebPImportRGB565(const uint8_t* rgb565, WebPPicture* pic);
// Import RGB samples in RGB4444 packed format into 'picture'. The 'picture'
// width and height must be set prior to calling this function.
WEBP_EXTERN(int) WebPImportRGB4444(const uint8_t* rgb4444, WebPPicture* pic);
// Import a color-mapped image. The number of colors must not exceed
// MAX_PALETTE_SIZE. 'pic' must have been initialized. Its content, if any,
// will be discarded. Returns 'false' in case of error, or if indexed[] contains
// invalid indices.
WEBP_EXTERN(int)
WebPImportColorMappedARGB(const uint8_t* indexed, int indexed_stride,
const uint32_t palette[], int palette_size,
WebPPicture* pic);
//------------------------------------------------------------------------------
// Parse a bitstream, search for VP8 (lossy) header and report a
// rough estimation of the quality factor used for compressing the bitstream.
// If the bitstream is in lossless format, the special value '101' is returned.
// Otherwise (lossy bitstream), the returned value is in the range [0..100].
// Any error (invalid bitstream, animated WebP, incomplete header, etc.)
// will return a value of -1.
WEBP_EXTERN(int) VP8EstimateQuality(const uint8_t* const data, size_t size);
//------------------------------------------------------------------------------
#ifdef __cplusplus
} // extern "C"
#endif
#endif /* WEBP_EXTRAS_EXTRAS_H_ */
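A minimal usage sketch for the importers declared above (illustrative only; the grayscale buffer, its dimensions and the wrapper name are assumptions, and error handling is trimmed):

#include "./extras.h"
#include "webp/encode.h"

// Wrap a caller-provided grayscale buffer into a WebPPicture.
static int ImportGrayExample(const uint8_t* gray, int width, int height,
                             WebPPicture* const pic) {
  if (!WebPPictureInit(pic)) return 0;
  pic->width = width;     // dimensions must be set before importing
  pic->height = height;
  // Fills the Y plane from 'gray' and sets U/V to a neutral 128.
  return WebPImportGray(gray, pic);
}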


@ -1,343 +0,0 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// Simple tool to load two webp/png/jpg/tiff files and compute PSNR/SSIM.
// This is mostly a wrapper around WebPPictureDistortion().
//
/*
gcc -o get_disto get_disto.c -O3 -I../ -L../examples -L../imageio \
-lexample_util -limageio_util -limagedec -lwebp -L/opt/local/lib \
-lpng -lz -ljpeg -ltiff -lm -lpthread
*/
//
// Author: Skal (pascal.massimino@gmail.com)
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "webp/encode.h"
#include "../imageio/image_dec.h"
#include "../imageio/imageio_util.h"
static size_t ReadPicture(const char* const filename, WebPPicture* const pic,
int keep_alpha) {
const uint8_t* data = NULL;
size_t data_size = 0;
WebPImageReader reader = NULL;
int ok = ImgIoUtilReadFile(filename, &data, &data_size);
if (!ok) goto End;
pic->use_argb = 1; // force ARGB
#ifdef HAVE_WINCODEC_H
// Try to decode the file using WIC falling back to the other readers for
// e.g., WebP.
ok = ReadPictureWithWIC(filename, pic, keep_alpha, NULL);
if (ok) goto End;
#endif
reader = WebPGuessImageReader(data, data_size);
ok = reader(data, data_size, pic, keep_alpha, NULL);
End:
if (!ok) {
fprintf(stderr, "Error! Could not process file %s\n", filename);
}
free((void*)data);
return ok ? data_size : 0;
}
static void RescalePlane(uint8_t* plane, int width, int height,
int x_stride, int y_stride, int max) {
const uint32_t factor = (max > 0) ? (255u << 16) / max : 0;
int x, y;
for (y = 0; y < height; ++y) {
uint8_t* const ptr = plane + y * y_stride;
for (x = 0; x < width * x_stride; x += x_stride) {
const uint32_t diff = (ptr[x] * factor + (1 << 15)) >> 16;
ptr[x] = diff;
}
}
}
// Return the max absolute difference.
static int DiffScaleChannel(uint8_t* src1, int stride1,
const uint8_t* src2, int stride2,
int x_stride, int w, int h, int do_scaling) {
int x, y;
int max = 0;
for (y = 0; y < h; ++y) {
uint8_t* const ptr1 = src1 + y * stride1;
const uint8_t* const ptr2 = src2 + y * stride2;
for (x = 0; x < w * x_stride; x += x_stride) {
const int diff = abs(ptr1[x] - ptr2[x]);
if (diff > max) max = diff;
ptr1[x] = diff;
}
}
if (do_scaling) RescalePlane(src1, w, h, x_stride, stride1, max);
return max;
}
//------------------------------------------------------------------------------
// SSIM calculation. We re-implement these functions here, out of dsp/, to avoid
// breaking the library's hidden visibility. This code duplication avoids the
// bigger annoyance of having to open up internal details of libdsp...
#define SSIM_KERNEL 3 // total size of the kernel: 2 * SSIM_KERNEL + 1
// struct for accumulating statistical moments
typedef struct {
uint32_t w; // sum(w_i) : sum of weights
uint32_t xm, ym; // sum(w_i * x_i), sum(w_i * y_i)
uint32_t xxm, xym, yym; // sum(w_i * x_i * x_i), etc.
} DistoStats;
// hat-shaped filter. Sum of coefficients is equal to 16.
static const uint32_t kWeight[2 * SSIM_KERNEL + 1] = { 1, 2, 3, 4, 3, 2, 1 };
static WEBP_INLINE double SSIMCalculation(const DistoStats* const stats) {
const uint32_t N = stats->w;
const uint32_t w2 = N * N;
const uint32_t C1 = 20 * w2;
const uint32_t C2 = 60 * w2;
const uint32_t C3 = 8 * 8 * w2; // 'dark' limit ~= 6
const uint64_t xmxm = (uint64_t)stats->xm * stats->xm;
const uint64_t ymym = (uint64_t)stats->ym * stats->ym;
if (xmxm + ymym >= C3) {
const int64_t xmym = (int64_t)stats->xm * stats->ym;
const int64_t sxy = (int64_t)stats->xym * N - xmym; // can be negative
const uint64_t sxx = (uint64_t)stats->xxm * N - xmxm;
const uint64_t syy = (uint64_t)stats->yym * N - ymym;
// we descale by 8 to prevent overflow during the fnum/fden multiply.
const uint64_t num_S = (2 * (uint64_t)(sxy < 0 ? 0 : sxy) + C2) >> 8;
const uint64_t den_S = (sxx + syy + C2) >> 8;
const uint64_t fnum = (2 * xmym + C1) * num_S;
const uint64_t fden = (xmxm + ymym + C1) * den_S;
const double r = (double)fnum / fden;
assert(r >= 0. && r <= 1.0);
return r;
}
return 1.; // area is too dark to contribute meaningfully
}
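// In closed form, the ratio computed above is
//   SSIM = ((2 * xm * ym + C1) * (2 * max(sxy, 0) + C2)) /
//          ((xm * xm + ym * ym + C1) * (sxx + syy + C2)),
// where sxy = w * xym - xm * ym, sxx = w * xxm - xm * xm and
// syy = w * yym - ym * ym come from the weighted moments accumulated by
// SSIMGetClipped() below; the '>> 8' descaling only guards against overflow.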
static double SSIMGetClipped(const uint8_t* src1, int stride1,
const uint8_t* src2, int stride2,
int xo, int yo, int W, int H) {
DistoStats stats = { 0, 0, 0, 0, 0, 0 };
const int ymin = (yo - SSIM_KERNEL < 0) ? 0 : yo - SSIM_KERNEL;
const int ymax = (yo + SSIM_KERNEL > H - 1) ? H - 1 : yo + SSIM_KERNEL;
const int xmin = (xo - SSIM_KERNEL < 0) ? 0 : xo - SSIM_KERNEL;
const int xmax = (xo + SSIM_KERNEL > W - 1) ? W - 1 : xo + SSIM_KERNEL;
int x, y;
src1 += ymin * stride1;
src2 += ymin * stride2;
for (y = ymin; y <= ymax; ++y, src1 += stride1, src2 += stride2) {
for (x = xmin; x <= xmax; ++x) {
const uint32_t w = kWeight[SSIM_KERNEL + x - xo]
* kWeight[SSIM_KERNEL + y - yo];
const uint32_t s1 = src1[x];
const uint32_t s2 = src2[x];
stats.w += w;
stats.xm += w * s1;
stats.ym += w * s2;
stats.xxm += w * s1 * s1;
stats.xym += w * s1 * s2;
stats.yym += w * s2 * s2;
}
}
return SSIMCalculation(&stats);
}
// Compute SSIM-score map. Return -1 in case of error, max diff otherwise.
static int SSIMScaleChannel(uint8_t* src1, int stride1,
const uint8_t* src2, int stride2,
int x_stride, int w, int h, int do_scaling) {
int x, y;
int max = 0;
uint8_t* const plane1 = (uint8_t*)malloc(2 * w * h * sizeof(*plane1));
uint8_t* const plane2 = plane1 + w * h;
if (plane1 == NULL) return -1;
// extract plane
for (y = 0; y < h; ++y) {
for (x = 0; x < w; ++x) {
plane1[x + y * w] = src1[x * x_stride + y * stride1];
plane2[x + y * w] = src2[x * x_stride + y * stride2];
}
}
for (y = 0; y < h; ++y) {
for (x = 0; x < w; ++x) {
const double ssim = SSIMGetClipped(plane1, w, plane2, w, x, y, w, h);
int diff = (int)(255 * (1. - ssim));
if (diff < 0) {
diff = 0;
} else if (diff > max) {
max = diff;
}
src1[x * x_stride + y * stride1] = (diff > 255) ? 255u : (uint8_t)diff;
}
}
free(plane1);
if (do_scaling) RescalePlane(src1, w, h, x_stride, stride1, max);
return max;
}
// Convert an argb picture to luminance.
static void ConvertToGray(WebPPicture* const pic) {
int x, y;
assert(pic != NULL);
assert(pic->use_argb);
for (y = 0; y < pic->height; ++y) {
uint32_t* const row = &pic->argb[y * pic->argb_stride];
for (x = 0; x < pic->width; ++x) {
const uint32_t argb = row[x];
const uint32_t r = (argb >> 16) & 0xff;
const uint32_t g = (argb >> 8) & 0xff;
const uint32_t b = (argb >> 0) & 0xff;
// We use BT.709 for converting to luminance.
const uint32_t Y = (uint32_t)(0.2126 * r + 0.7152 * g + 0.0722 * b + .5);
row[x] = (argb & 0xff000000u) | (Y * 0x010101u);
}
}
}
static void Help(void) {
fprintf(stderr,
"Usage: get_disto [-ssim][-psnr][-alpha] compressed.webp orig.webp\n"
" -ssim ..... print SSIM distortion\n"
" -psnr ..... print PSNR distortion (default)\n"
" -alpha .... preserve alpha plane\n"
" -h ........ this message\n"
" -o <file> . save the diff map as a WebP lossless file\n"
" -scale .... scale the difference map to fit [0..255] range\n"
" -gray ..... use grayscale for difference map (-scale)\n"
" Also handles PNG, JPG and TIFF files, in addition to WebP.\n");
}
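// Illustrative invocation (hypothetical file names):
//   get_disto -ssim -scale -o diff.webp compressed.webp original.webp
// prints the compressed file size followed by the distortion values, and
// writes the scaled difference map as a lossless WebP.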
int main(int argc, const char *argv[]) {
WebPPicture pic1, pic2;
size_t size1 = 0, size2 = 0;
int ret = 1;
float disto[5];
int type = 0;
int c;
int help = 0;
int keep_alpha = 0;
int scale = 0;
int use_gray = 0;
const char* name1 = NULL;
const char* name2 = NULL;
const char* output = NULL;
if (!WebPPictureInit(&pic1) || !WebPPictureInit(&pic2)) {
fprintf(stderr, "Can't init pictures\n");
return 1;
}
for (c = 1; c < argc; ++c) {
if (!strcmp(argv[c], "-ssim")) {
type = 1;
} else if (!strcmp(argv[c], "-psnr")) {
type = 0;
} else if (!strcmp(argv[c], "-alpha")) {
keep_alpha = 1;
} else if (!strcmp(argv[c], "-scale")) {
scale = 1;
} else if (!strcmp(argv[c], "-gray")) {
use_gray = 1;
} else if (!strcmp(argv[c], "-h")) {
help = 1;
ret = 0;
} else if (!strcmp(argv[c], "-o")) {
if (++c == argc) {
fprintf(stderr, "missing file name after %s option.\n", argv[c - 1]);
goto End;
}
output = argv[c];
} else if (name1 == NULL) {
name1 = argv[c];
} else {
name2 = argv[c];
}
}
if (help || name1 == NULL || name2 == NULL) {
if (!help) {
fprintf(stderr, "Error: missing arguments.\n");
}
Help();
goto End;
}
size1 = ReadPicture(name1, &pic1, 1);
size2 = ReadPicture(name2, &pic2, 1);
if (size1 == 0 || size2 == 0) goto End;
if (!keep_alpha) {
WebPBlendAlpha(&pic1, 0x00000000);
WebPBlendAlpha(&pic2, 0x00000000);
}
if (!WebPPictureDistortion(&pic1, &pic2, type, disto)) {
fprintf(stderr, "Error while computing the distortion.\n");
goto End;
}
printf("%u %.2f %.2f %.2f %.2f %.2f\n",
(unsigned int)size1,
disto[4], disto[0], disto[1], disto[2], disto[3]);
if (output != NULL) {
uint8_t* data = NULL;
size_t data_size = 0;
if (pic1.use_argb != pic2.use_argb) {
fprintf(stderr, "Pictures are not in the same argb format. "
"Can't save the difference map.\n");
goto End;
}
if (pic1.use_argb) {
int n;
fprintf(stderr, "max differences per channel: ");
for (n = 0; n < 3; ++n) { // skip the alpha channel
const int range = (type == 1) ?
SSIMScaleChannel((uint8_t*)pic1.argb + n, pic1.argb_stride * 4,
(const uint8_t*)pic2.argb + n, pic2.argb_stride * 4,
4, pic1.width, pic1.height, scale) :
DiffScaleChannel((uint8_t*)pic1.argb + n, pic1.argb_stride * 4,
(const uint8_t*)pic2.argb + n, pic2.argb_stride * 4,
4, pic1.width, pic1.height, scale);
if (range < 0) fprintf(stderr, "\nError computing diff map\n");
fprintf(stderr, "[%d]", range);
}
fprintf(stderr, "\n");
if (use_gray) ConvertToGray(&pic1);
} else {
fprintf(stderr, "Can only compute the difference map in ARGB format.\n");
goto End;
}
data_size = WebPEncodeLosslessBGRA((const uint8_t*)pic1.argb,
pic1.width, pic1.height,
pic1.argb_stride * 4,
&data);
if (data_size == 0) {
fprintf(stderr, "Error during lossless encoding.\n");
goto End;
}
ret = ImgIoUtilWriteFile(output, data, data_size) ? 0 : 1;
WebPFree(data);
if (ret) goto End;
}
ret = 0;
End:
WebPPictureFree(&pic1);
WebPPictureFree(&pic2);
return ret;
}


@ -1,129 +0,0 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// VP8EstimateQuality(): rough encoding quality estimate
//
// Author: Skal (pascal.massimino@gmail.com)
#include "./extras.h"
#include "webp/decode.h"
#include <math.h>
//------------------------------------------------------------------------------
#define INVALID_BIT_POS (1ull << 63)
// In most cases, we don't need to use a full arithmetic decoder, since
// all the header's bits are written using a uniform probability of 128.
// We can just parse the header as plain bits (works in 99.999% of cases).
static WEBP_INLINE uint32_t GetBit(const uint8_t* const data, size_t nb,
uint64_t max_size, uint64_t* const bit_pos) {
uint32_t val = 0;
if (*bit_pos + nb <= 8 * max_size) {
while (nb-- > 0) {
const uint64_t p = (*bit_pos)++;
const int bit = !!(data[p >> 3] & (128 >> ((p & 7))));
val = (val << 1) | bit;
}
} else {
*bit_pos = INVALID_BIT_POS;
}
return val;
}
#define GET_BIT(n) GetBit(data, (n), size, &bit_pos)
#define CONDITIONAL_SKIP(n) (GET_BIT(1) ? GET_BIT((n)) : 0)
int VP8EstimateQuality(const uint8_t* const data, size_t size) {
size_t pos = 0;
uint64_t bit_pos;
uint64_t sig = 0x00;
int ok = 0;
int Q = -1;
WebPBitstreamFeatures features;
if (data == NULL) return -1;
if (WebPGetFeatures(data, size, &features) != VP8_STATUS_OK) {
return -1; // invalid file
}
if (features.format == 2) return 101; // lossless
if (features.format == 0 || features.has_animation) return -1; // mixed
while (pos < size) {
sig = (sig >> 8) | ((uint64_t)data[pos++] << 40);
if ((sig >> 24) == 0x2a019dull) {
ok = 1;
break;
}
}
if (!ok) return -1;
if (pos + 4 > size) return -1;
// Skip main Header
// width = (data[pos + 0] | (data[pos + 1] << 8)) & 0x3fff;
// height = (data[pos + 2] | (data[pos + 3] << 8)) & 0x3fff;
pos += 4;
bit_pos = pos * 8;
GET_BIT(2); // color_space + clamp type
// Segment header
if (GET_BIT(1)) { // use_segment_
int s;
const int update_map = GET_BIT(1);
if (GET_BIT(1)) { // update data
const int absolute_delta = GET_BIT(1);
int q[4] = { 0, 0, 0, 0 };
for (s = 0; s < 4; ++s) {
if (GET_BIT(1)) {
q[s] = GET_BIT(7);
if (GET_BIT(1)) q[s] = -q[s]; // sign
}
}
if (absolute_delta) Q = q[0]; // just use the first segment's quantizer
for (s = 0; s < 4; ++s) CONDITIONAL_SKIP(7); // filter strength
}
if (update_map) {
for (s = 0; s < 3; ++s) CONDITIONAL_SKIP(8);
}
}
// Filter header
GET_BIT(1 + 6 + 3); // simple + level + sharpness
if (GET_BIT(1)) { // use_lf_delta
if (GET_BIT(1)) { // update lf_delta?
int n;
for (n = 0; n < 4 + 4; ++n) CONDITIONAL_SKIP(6);
}
}
// num partitions
GET_BIT(2);
// ParseQuant
{
const int base_q = GET_BIT(7);
/* dqy1_dc = */ CONDITIONAL_SKIP(5);
/* dqy2_dc = */ CONDITIONAL_SKIP(5);
/* dqy2_ac = */ CONDITIONAL_SKIP(5);
/* dquv_dc = */ CONDITIONAL_SKIP(5);
/* dquv_ac = */ CONDITIONAL_SKIP(5);
if (Q < 0) Q = base_q;
}
if (bit_pos == INVALID_BIT_POS) return -1;
// base mapping
Q = (127 - Q) * 100 / 127;
// correction for power-law behavior in low range
if (Q < 80) {
Q = (int)(pow(Q / 80., 1. / 0.38) * 80);
}
return Q;
}
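// Worked example of the mapping above (illustrative): with base_q = 30 and no
// absolute segment quantizer, Q = (127 - 30) * 100 / 127 = 76; since 76 < 80,
// the power-law correction gives Q = (int)(pow(76 / 80., 1. / 0.38) * 80),
// i.e. roughly 69.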


@ -1,50 +0,0 @@
// Simple tool to roughly evaluate the encoding quality of a WebP bitstream.
//
// The result is a *rough* estimate of the quality. You should only consider
// the bucket it falls in (q > 80? > 50? > 20?) and not take it at face value.
/*
gcc -o webp_quality webp_quality.c -O3 -I../ -L. -L../imageio \
-limageio_util -lwebpextras -lwebp -lm -lpthread
*/
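// Example run (hypothetical file name): "webp_quality -quiet photo.webp"
// prints only the estimated quality factor; without -quiet, the file name and
// a short label are printed as well.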
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "./extras.h"
#include "../imageio/imageio_util.h"
int main(int argc, const char *argv[]) {
int c;
int quiet = 0;
int ok = 1;
for (c = 1; ok && c < argc; ++c) {
if (!strcmp(argv[c], "-quiet")) {
quiet = 1;
} else if (!strcmp(argv[c], "-help") || !strcmp(argv[c], "-h")) {
printf("webp_quality [-h][-quiet] webp_files...\n");
return 0;
} else {
const char* const filename = argv[c];
const uint8_t* data = NULL;
size_t data_size = 0;
int q;
ok = ImgIoUtilReadFile(filename, &data, &data_size);
if (!ok) break;
q = VP8EstimateQuality(data, data_size);
if (!quiet) printf("[%s] ", filename);
if (q < 0) {
fprintf(stderr, "Not a WebP file, or not a lossy WebP file.\n");
ok = 0;
} else {
if (!quiet) {
printf("Estimated quality factor: %d\n", q);
} else {
printf("%d\n", q); // just print the number
}
}
free((void*)data);
}
}
return ok ? 0 : 1;
}


@ -1,14 +0,0 @@
# Project-wide Gradle settings.
# IDE (e.g. Android Studio) users:
# Gradle settings configured through the IDE *will override*
# any settings specified in this file.
# For more details on how to configure your build environment visit
# http://www.gradle.org/docs/current/userguide/build_environment.html
# Versions for gradle
BUILD_TOOLS_VERSION=23.0.3
COMPILE_SDK_VERSION=23
ANDROID_GRADLE_PLUGIN_VERSION=1.5.0
GRADLE_DOWNLOAD_TASK_VERSION=2.1.0

Binary file not shown.


@ -1,6 +0,0 @@
#Thu May 12 17:06:25 CEST 2016
distributionBase=GRADLE_USER_HOME
distributionPath=wrapper/dists
zipStoreBase=GRADLE_USER_HOME
zipStorePath=wrapper/dists
distributionUrl=https\://services.gradle.org/distributions/gradle-2.13-bin.zip

gradlew vendored

@ -1,164 +0,0 @@
#!/usr/bin/env bash
##############################################################################
##
## Gradle start up script for UN*X
##
##############################################################################
# Attempt to set APP_HOME
# Resolve links: $0 may be a link
PRG="$0"
# Need this for relative symlinks.
while [ -h "$PRG" ] ; do
ls=`ls -ld "$PRG"`
link=`expr "$ls" : '.*-> \(.*\)$'`
if expr "$link" : '/.*' > /dev/null; then
PRG="$link"
else
PRG=`dirname "$PRG"`"/$link"
fi
done
SAVED="`pwd`"
cd "`dirname \"$PRG\"`/" >/dev/null
APP_HOME="`pwd -P`"
cd "$SAVED" >/dev/null
APP_NAME="Gradle"
APP_BASE_NAME=`basename "$0"`
# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
DEFAULT_JVM_OPTS=""
# Use the maximum available, or set MAX_FD != -1 to use that value.
MAX_FD="maximum"
warn ( ) {
echo "$*"
}
die ( ) {
echo
echo "$*"
echo
exit 1
}
# OS specific support (must be 'true' or 'false').
cygwin=false
msys=false
darwin=false
nonstop=false
case "`uname`" in
CYGWIN* )
cygwin=true
;;
Darwin* )
darwin=true
;;
MINGW* )
msys=true
;;
NONSTOP* )
nonstop=true
;;
esac
CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar
# Determine the Java command to use to start the JVM.
if [ -n "$JAVA_HOME" ] ; then
if [ -x "$JAVA_HOME/jre/sh/java" ] ; then
# IBM's JDK on AIX uses strange locations for the executables
JAVACMD="$JAVA_HOME/jre/sh/java"
else
JAVACMD="$JAVA_HOME/bin/java"
fi
if [ ! -x "$JAVACMD" ] ; then
die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME
Please set the JAVA_HOME variable in your environment to match the
location of your Java installation."
fi
else
JAVACMD="java"
which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
Please set the JAVA_HOME variable in your environment to match the
location of your Java installation."
fi
# Increase the maximum file descriptors if we can.
if [ "$cygwin" = "false" -a "$darwin" = "false" -a "$nonstop" = "false" ] ; then
MAX_FD_LIMIT=`ulimit -H -n`
if [ $? -eq 0 ] ; then
if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then
MAX_FD="$MAX_FD_LIMIT"
fi
ulimit -n $MAX_FD
if [ $? -ne 0 ] ; then
warn "Could not set maximum file descriptor limit: $MAX_FD"
fi
else
warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT"
fi
fi
# For Darwin, add options to specify how the application appears in the dock
if $darwin; then
GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\""
fi
# For Cygwin, switch paths to Windows format before running java
if $cygwin ; then
APP_HOME=`cygpath --path --mixed "$APP_HOME"`
CLASSPATH=`cygpath --path --mixed "$CLASSPATH"`
JAVACMD=`cygpath --unix "$JAVACMD"`
# We build the pattern for arguments to be converted via cygpath
ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null`
SEP=""
for dir in $ROOTDIRSRAW ; do
ROOTDIRS="$ROOTDIRS$SEP$dir"
SEP="|"
done
OURCYGPATTERN="(^($ROOTDIRS))"
# Add a user-defined pattern to the cygpath arguments
if [ "$GRADLE_CYGPATTERN" != "" ] ; then
OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)"
fi
# Now convert the arguments - kludge to limit ourselves to /bin/sh
i=0
for arg in "$@" ; do
CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -`
CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option
if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition
eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"`
else
eval `echo args$i`="\"$arg\""
fi
i=$((i+1))
done
case $i in
(0) set -- ;;
(1) set -- "$args0" ;;
(2) set -- "$args0" "$args1" ;;
(3) set -- "$args0" "$args1" "$args2" ;;
(4) set -- "$args0" "$args1" "$args2" "$args3" ;;
(5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;;
(6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;;
(7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;;
(8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;;
(9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;;
esac
fi
# Split up the JVM_OPTS And GRADLE_OPTS values into an array, following the shell quoting and substitution rules
function splitJvmOpts() {
JVM_OPTS=("$@")
}
eval splitJvmOpts $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS
JVM_OPTS[${#JVM_OPTS[*]}]="-Dorg.gradle.appname=$APP_BASE_NAME"
exec "$JAVACMD" "${JVM_OPTS[@]}" -classpath "$CLASSPATH" org.gradle.wrapper.GradleWrapperMain "$@"

gradlew.bat vendored

@ -1,90 +0,0 @@
@if "%DEBUG%" == "" @echo off
@rem ##########################################################################
@rem
@rem Gradle startup script for Windows
@rem
@rem ##########################################################################
@rem Set local scope for the variables with windows NT shell
if "%OS%"=="Windows_NT" setlocal
set DIRNAME=%~dp0
if "%DIRNAME%" == "" set DIRNAME=.
set APP_BASE_NAME=%~n0
set APP_HOME=%DIRNAME%
@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
set DEFAULT_JVM_OPTS=
@rem Find java.exe
if defined JAVA_HOME goto findJavaFromJavaHome
set JAVA_EXE=java.exe
%JAVA_EXE% -version >NUL 2>&1
if "%ERRORLEVEL%" == "0" goto init
echo.
echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
echo.
echo Please set the JAVA_HOME variable in your environment to match the
echo location of your Java installation.
goto fail
:findJavaFromJavaHome
set JAVA_HOME=%JAVA_HOME:"=%
set JAVA_EXE=%JAVA_HOME%/bin/java.exe
if exist "%JAVA_EXE%" goto init
echo.
echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME%
echo.
echo Please set the JAVA_HOME variable in your environment to match the
echo location of your Java installation.
goto fail
:init
@rem Get command-line arguments, handling Windows variants
if not "%OS%" == "Windows_NT" goto win9xME_args
if "%@eval[2+2]" == "4" goto 4NT_args
:win9xME_args
@rem Slurp the command line arguments.
set CMD_LINE_ARGS=
set _SKIP=2
:win9xME_args_slurp
if "x%~1" == "x" goto execute
set CMD_LINE_ARGS=%*
goto execute
:4NT_args
@rem Get arguments from the 4NT Shell from JP Software
set CMD_LINE_ARGS=%$
:execute
@rem Setup the command line
set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar
@rem Execute Gradle
"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS%
:end
@rem End local scope for the variables with windows NT shell
if "%ERRORLEVEL%"=="0" goto mainEnd
:fail
rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of
rem the _cmd.exe /c_ return code!
if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1
exit /b 1
:mainEnd
if "%OS%"=="Windows_NT" endlocal
:omega


@ -1,53 +0,0 @@
LOCAL_PATH := $(call my-dir)
################################################################################
# libimageio_util
include $(CLEAR_VARS)
LOCAL_SRC_FILES := \
imageio_util.c \
LOCAL_CFLAGS := $(WEBP_CFLAGS)
LOCAL_C_INCLUDES := $(LOCAL_PATH)/../src
LOCAL_MODULE := imageio_util
include $(BUILD_STATIC_LIBRARY)
################################################################################
# libimagedec
include $(CLEAR_VARS)
LOCAL_SRC_FILES := \
image_dec.c \
jpegdec.c \
metadata.c \
pngdec.c \
tiffdec.c \
webpdec.c \
LOCAL_CFLAGS := $(WEBP_CFLAGS)
LOCAL_C_INCLUDES := $(LOCAL_PATH)/../src
LOCAL_STATIC_LIBRARIES := imageio_util
LOCAL_MODULE := imagedec
include $(BUILD_STATIC_LIBRARY)
################################################################################
# libimageenc
include $(CLEAR_VARS)
LOCAL_SRC_FILES := \
image_enc.c \
LOCAL_CFLAGS := $(WEBP_CFLAGS)
LOCAL_C_INCLUDES := $(LOCAL_PATH)/../src
LOCAL_STATIC_LIBRARIES := imageio_util
LOCAL_MODULE := imageenc
include $(BUILD_STATIC_LIBRARY)


@ -1,22 +0,0 @@
AM_CPPFLAGS += -I$(top_builddir)/src -I$(top_srcdir)/src
noinst_LTLIBRARIES = libimageio_util.la libimagedec.la libimageenc.la
noinst_HEADERS =
noinst_HEADERS += ../src/webp/decode.h
noinst_HEADERS += ../src/webp/types.h
libimageio_util_la_SOURCES = imageio_util.c imageio_util.h
libimagedec_la_SOURCES = image_dec.c image_dec.h
libimagedec_la_SOURCES += jpegdec.c jpegdec.h
libimagedec_la_SOURCES += metadata.c metadata.h
libimagedec_la_SOURCES += pngdec.c pngdec.h
libimagedec_la_SOURCES += tiffdec.c tiffdec.h
libimagedec_la_SOURCES += webpdec.c webpdec.h
libimagedec_la_SOURCES += wicdec.c wicdec.h
libimagedec_la_CPPFLAGS = $(JPEG_INCLUDES) $(PNG_INCLUDES) $(TIFF_INCLUDES)
libimagedec_la_CPPFLAGS += $(AM_CPPFLAGS) $(USE_EXPERIMENTAL_CODE)
libimageenc_la_SOURCES = image_enc.c image_enc.h
libimageenc_la_CPPFLAGS = $(JPEG_INCLUDES) $(PNG_INCLUDES) $(TIFF_INCLUDES)
libimageenc_la_CPPFLAGS += $(AM_CPPFLAGS) $(USE_EXPERIMENTAL_CODE)


@ -1,61 +0,0 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// Generic image-type guessing.
#include "./image_dec.h"
static WEBP_INLINE uint32_t GetBE32(const uint8_t buf[]) {
return ((uint32_t)buf[0] << 24) | (buf[1] << 16) | (buf[2] << 8) | buf[3];
}
WebPInputFileFormat WebPGuessImageType(const uint8_t* const data,
size_t data_size) {
WebPInputFileFormat format = WEBP_UNSUPPORTED_FORMAT;
if (data != NULL && data_size >= 12) {
const uint32_t magic1 = GetBE32(data + 0);
const uint32_t magic2 = GetBE32(data + 8);
if (magic1 == 0x89504E47U) {
format = WEBP_PNG_FORMAT;
} else if (magic1 >= 0xFFD8FF00U && magic1 <= 0xFFD8FFFFU) {
format = WEBP_JPEG_FORMAT;
} else if (magic1 == 0x49492A00 || magic1 == 0x4D4D002A) {
format = WEBP_TIFF_FORMAT;
} else if (magic1 == 0x52494646 && magic2 == 0x57454250) {
format = WEBP_WEBP_FORMAT;
}
}
return format;
}
static int FailReader(const uint8_t* const data, size_t data_size,
struct WebPPicture* const pic,
int keep_alpha, struct Metadata* const metadata) {
(void)data;
(void)data_size;
(void)pic;
(void)keep_alpha;
(void)metadata;
return 0;
}
WebPImageReader WebPGetImageReader(WebPInputFileFormat format) {
switch (format) {
case WEBP_PNG_FORMAT: return ReadPNG;
case WEBP_JPEG_FORMAT: return ReadJPEG;
case WEBP_TIFF_FORMAT: return ReadTIFF;
case WEBP_WEBP_FORMAT: return ReadWebP;
default: return FailReader;
}
}
WebPImageReader WebPGuessImageReader(const uint8_t* const data,
size_t data_size) {
return WebPGetImageReader(WebPGuessImageType(data, data_size));
}


@ -1,65 +0,0 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// All-in-one library to decode PNG/JPEG/WebP/TIFF/WIC input images.
//
// Author: Skal (pascal.massimino@gmail.com)
#ifndef WEBP_IMAGEIO_IMAGE_DEC_H_
#define WEBP_IMAGEIO_IMAGE_DEC_H_
#include "webp/types.h"
#ifdef HAVE_CONFIG_H
#include "webp/config.h"
#endif
#include "./metadata.h"
#include "./jpegdec.h"
#include "./pngdec.h"
#include "./tiffdec.h"
#include "./webpdec.h"
#include "./wicdec.h"
#ifdef __cplusplus
extern "C" {
#endif
typedef enum {
WEBP_PNG_FORMAT = 0,
WEBP_JPEG_FORMAT,
WEBP_TIFF_FORMAT,
WEBP_WEBP_FORMAT,
WEBP_UNSUPPORTED_FORMAT
} WebPInputFileFormat;
// Try to infer the image format. 'data_size' should be at least 12 bytes.
// Returns WEBP_UNSUPPORTED_FORMAT if the format can't be guessed safely.
WebPInputFileFormat WebPGuessImageType(const uint8_t* const data,
size_t data_size);
// Signature for common image-reading functions (ReadPNG, ReadJPEG, ...)
typedef int (*WebPImageReader)(const uint8_t* const data, size_t data_size,
struct WebPPicture* const pic,
int keep_alpha, struct Metadata* const metadata);
// Return the reader associated to a given file format.
WebPImageReader WebPGetImageReader(WebPInputFileFormat format);
// This function is similar to WebPGuessImageType(), but returns a
// suitable reader function. The returned reader is never NULL, but
// unknown formats will return an always-failing valid reader.
WebPImageReader WebPGuessImageReader(const uint8_t* const data,
size_t data_size);
#ifdef __cplusplus
} // extern "C"
#endif
#endif // WEBP_IMAGEIO_IMAGE_DEC_H_


@ -1,591 +0,0 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// Save image
#include "./image_enc.h"
#include <assert.h>
#include <string.h>
#ifdef WEBP_HAVE_PNG
#include <png.h>
#include <setjmp.h> // note: this must be included *after* png.h
#endif
#ifdef HAVE_WINCODEC_H
#ifdef __MINGW32__
#define INITGUID // Without this GUIDs are declared extern and fail to link
#endif
#define CINTERFACE
#define COBJMACROS
#define _WIN32_IE 0x500 // Workaround bug in shlwapi.h when compiling C++
// code with COBJMACROS.
#include <ole2.h> // CreateStreamOnHGlobal()
#include <shlwapi.h>
#include <windows.h>
#include <wincodec.h>
#endif
#include "./imageio_util.h"
//------------------------------------------------------------------------------
// PNG
#ifdef HAVE_WINCODEC_H
#define IFS(fn) \
do { \
if (SUCCEEDED(hr)) { \
hr = (fn); \
if (FAILED(hr)) fprintf(stderr, #fn " failed %08lx\n", hr); \
} \
} while (0)
#ifdef __cplusplus
#define MAKE_REFGUID(x) (x)
#else
#define MAKE_REFGUID(x) &(x)
#endif
static HRESULT CreateOutputStream(const char* out_file_name,
int write_to_mem, IStream** stream) {
HRESULT hr = S_OK;
if (write_to_mem) {
// Output to a memory buffer. This is freed when 'stream' is released.
IFS(CreateStreamOnHGlobal(NULL, TRUE, stream));
} else {
IFS(SHCreateStreamOnFileA(out_file_name, STGM_WRITE | STGM_CREATE, stream));
}
if (FAILED(hr)) {
fprintf(stderr, "Error opening output file %s (%08lx)\n",
out_file_name, hr);
}
return hr;
}
static HRESULT WriteUsingWIC(const char* out_file_name, int use_stdout,
REFGUID container_guid,
uint8_t* rgb, int stride,
uint32_t width, uint32_t height, int has_alpha) {
HRESULT hr = S_OK;
IWICImagingFactory* factory = NULL;
IWICBitmapFrameEncode* frame = NULL;
IWICBitmapEncoder* encoder = NULL;
IStream* stream = NULL;
WICPixelFormatGUID pixel_format = has_alpha ? GUID_WICPixelFormat32bppBGRA
: GUID_WICPixelFormat24bppBGR;
if (out_file_name == NULL || rgb == NULL) return E_INVALIDARG;
IFS(CoInitialize(NULL));
IFS(CoCreateInstance(MAKE_REFGUID(CLSID_WICImagingFactory), NULL,
CLSCTX_INPROC_SERVER,
MAKE_REFGUID(IID_IWICImagingFactory),
(LPVOID*)&factory));
if (hr == REGDB_E_CLASSNOTREG) {
fprintf(stderr,
"Couldn't access Windows Imaging Component (are you running "
"Windows XP SP3 or newer?). PNG support not available. "
"Use -ppm or -pgm for available PPM and PGM formats.\n");
}
IFS(CreateOutputStream(out_file_name, use_stdout, &stream));
IFS(IWICImagingFactory_CreateEncoder(factory, container_guid, NULL,
&encoder));
IFS(IWICBitmapEncoder_Initialize(encoder, stream,
WICBitmapEncoderNoCache));
IFS(IWICBitmapEncoder_CreateNewFrame(encoder, &frame, NULL));
IFS(IWICBitmapFrameEncode_Initialize(frame, NULL));
IFS(IWICBitmapFrameEncode_SetSize(frame, width, height));
IFS(IWICBitmapFrameEncode_SetPixelFormat(frame, &pixel_format));
IFS(IWICBitmapFrameEncode_WritePixels(frame, height, stride,
height * stride, rgb));
IFS(IWICBitmapFrameEncode_Commit(frame));
IFS(IWICBitmapEncoder_Commit(encoder));
if (SUCCEEDED(hr) && use_stdout) {
HGLOBAL image;
IFS(GetHGlobalFromStream(stream, &image));
if (SUCCEEDED(hr)) {
HANDLE std_output = GetStdHandle(STD_OUTPUT_HANDLE);
DWORD mode;
const BOOL update_mode = GetConsoleMode(std_output, &mode);
const void* const image_mem = GlobalLock(image);
DWORD bytes_written = 0;
// Clear output processing if necessary, then output the image.
if (update_mode) SetConsoleMode(std_output, 0);
if (!WriteFile(std_output, image_mem, (DWORD)GlobalSize(image),
&bytes_written, NULL) ||
bytes_written != GlobalSize(image)) {
hr = E_FAIL;
}
if (update_mode) SetConsoleMode(std_output, mode);
GlobalUnlock(image);
}
}
if (frame != NULL) IUnknown_Release(frame);
if (encoder != NULL) IUnknown_Release(encoder);
if (factory != NULL) IUnknown_Release(factory);
if (stream != NULL) IUnknown_Release(stream);
return hr;
}
int WebPWritePNG(const char* out_file_name, int use_stdout,
const WebPDecBuffer* const buffer) {
const uint32_t width = buffer->width;
const uint32_t height = buffer->height;
uint8_t* const rgb = buffer->u.RGBA.rgba;
const int stride = buffer->u.RGBA.stride;
const int has_alpha = WebPIsAlphaMode(buffer->colorspace);
return SUCCEEDED(WriteUsingWIC(out_file_name, use_stdout,
MAKE_REFGUID(GUID_ContainerFormatPng),
rgb, stride, width, height, has_alpha));
}
#elif defined(WEBP_HAVE_PNG) // !HAVE_WINCODEC_H
static void PNGAPI PNGErrorFunction(png_structp png, png_const_charp dummy) {
(void)dummy; // remove variable-unused warning
longjmp(png_jmpbuf(png), 1);
}
int WebPWritePNG(FILE* out_file, const WebPDecBuffer* const buffer) {
const uint32_t width = buffer->width;
const uint32_t height = buffer->height;
png_bytep row = buffer->u.RGBA.rgba;
const int stride = buffer->u.RGBA.stride;
const int has_alpha = WebPIsAlphaMode(buffer->colorspace);
volatile png_structp png;
volatile png_infop info;
png_uint_32 y;
if (out_file == NULL || buffer == NULL) return 0;
png = png_create_write_struct(PNG_LIBPNG_VER_STRING,
NULL, PNGErrorFunction, NULL);
if (png == NULL) {
return 0;
}
info = png_create_info_struct(png);
if (info == NULL) {
png_destroy_write_struct((png_structpp)&png, NULL);
return 0;
}
if (setjmp(png_jmpbuf(png))) {
png_destroy_write_struct((png_structpp)&png, (png_infopp)&info);
return 0;
}
png_init_io(png, out_file);
png_set_IHDR(png, info, width, height, 8,
has_alpha ? PNG_COLOR_TYPE_RGBA : PNG_COLOR_TYPE_RGB,
PNG_INTERLACE_NONE, PNG_COMPRESSION_TYPE_DEFAULT,
PNG_FILTER_TYPE_DEFAULT);
png_write_info(png, info);
for (y = 0; y < height; ++y) {
png_write_rows(png, &row, 1);
row += stride;
}
png_write_end(png, info);
png_destroy_write_struct((png_structpp)&png, (png_infopp)&info);
return 1;
}
#else // !HAVE_WINCODEC_H && !WEBP_HAVE_PNG
int WebPWritePNG(FILE* fout, const WebPDecBuffer* const buffer) {
if (fout == NULL || buffer == NULL) return 0;
fprintf(stderr, "PNG support not compiled. Please install the libpng "
"development package before building.\n");
fprintf(stderr, "You can run with -ppm flag to decode in PPM format.\n");
return 0;
}
#endif
//------------------------------------------------------------------------------
// PPM / PAM
static int WritePPMPAM(FILE* fout, const WebPDecBuffer* const buffer,
int alpha) {
if (fout == NULL || buffer == NULL) {
return 0;
} else {
const uint32_t width = buffer->width;
const uint32_t height = buffer->height;
const uint8_t* row = buffer->u.RGBA.rgba;
const int stride = buffer->u.RGBA.stride;
const size_t bytes_per_px = alpha ? 4 : 3;
uint32_t y;
if (row == NULL) return 0;
if (alpha) {
fprintf(fout, "P7\nWIDTH %u\nHEIGHT %u\nDEPTH 4\nMAXVAL 255\n"
"TUPLTYPE RGB_ALPHA\nENDHDR\n", width, height);
} else {
fprintf(fout, "P6\n%u %u\n255\n", width, height);
}
for (y = 0; y < height; ++y) {
if (fwrite(row, width, bytes_per_px, fout) != bytes_per_px) {
return 0;
}
row += stride;
}
}
return 1;
}
int WebPWritePPM(FILE* fout, const WebPDecBuffer* const buffer) {
return WritePPMPAM(fout, buffer, 0);
}
int WebPWritePAM(FILE* fout, const WebPDecBuffer* const buffer) {
return WritePPMPAM(fout, buffer, 1);
}
//------------------------------------------------------------------------------
// Raw PGM
// Save 16b mode (RGBA4444, RGB565, ...) for debugging purposes.
int WebPWrite16bAsPGM(FILE* fout, const WebPDecBuffer* const buffer) {
const uint32_t width = buffer->width;
const uint32_t height = buffer->height;
const uint8_t* rgba = buffer->u.RGBA.rgba;
const int stride = buffer->u.RGBA.stride;
const uint32_t bytes_per_px = 2;
uint32_t y;
if (fout == NULL || buffer == NULL || rgba == NULL) return 0;
fprintf(fout, "P5\n%u %u\n255\n", width * bytes_per_px, height);
for (y = 0; y < height; ++y) {
if (fwrite(rgba, width, bytes_per_px, fout) != bytes_per_px) {
return 0;
}
rgba += stride;
}
return 1;
}
//------------------------------------------------------------------------------
// BMP
static void PutLE16(uint8_t* const dst, uint32_t value) {
dst[0] = (value >> 0) & 0xff;
dst[1] = (value >> 8) & 0xff;
}
static void PutLE32(uint8_t* const dst, uint32_t value) {
PutLE16(dst + 0, (value >> 0) & 0xffff);
PutLE16(dst + 2, (value >> 16) & 0xffff);
}
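PutLE16()/PutLE32() emit the least-significant byte first regardless of host endianness, which is what the BMP and TIFF headers below expect. A minimal standalone sanity check (illustrative only, not part of the library):
#include <assert.h>
#include <stdint.h>
static void PutLE16(uint8_t* const dst, uint32_t value) {
  dst[0] = (value >> 0) & 0xff;
  dst[1] = (value >> 8) & 0xff;
}
static void PutLE32(uint8_t* const dst, uint32_t value) {
  PutLE16(dst + 0, (value >> 0) & 0xffff);
  PutLE16(dst + 2, (value >> 16) & 0xffff);
}
int main(void) {
  uint8_t bytes[4];
  PutLE32(bytes, 0x12345678u);
  // Least-significant byte comes first, independent of the host CPU.
  assert(bytes[0] == 0x78 && bytes[1] == 0x56 &&
         bytes[2] == 0x34 && bytes[3] == 0x12);
  return 0;
}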
#define BMP_HEADER_SIZE 54
int WebPWriteBMP(FILE* fout, const WebPDecBuffer* const buffer) {
const int has_alpha = WebPIsAlphaMode(buffer->colorspace);
const uint32_t width = buffer->width;
const uint32_t height = buffer->height;
const uint8_t* rgba = buffer->u.RGBA.rgba;
const int stride = buffer->u.RGBA.stride;
const uint32_t bytes_per_px = has_alpha ? 4 : 3;
uint32_t y;
const uint32_t line_size = bytes_per_px * width;
const uint32_t bmp_stride = (line_size + 3) & ~3; // pad to 4
const uint32_t total_size = bmp_stride * height + BMP_HEADER_SIZE;
uint8_t bmp_header[BMP_HEADER_SIZE] = { 0 };
if (fout == NULL || buffer == NULL || rgba == NULL) return 0;
// bitmap file header
PutLE16(bmp_header + 0, 0x4d42); // signature 'BM'
PutLE32(bmp_header + 2, total_size); // size including header
PutLE32(bmp_header + 6, 0); // reserved
PutLE32(bmp_header + 10, BMP_HEADER_SIZE); // offset to pixel array
// bitmap info header
PutLE32(bmp_header + 14, 40); // DIB header size
PutLE32(bmp_header + 18, width); // dimensions
PutLE32(bmp_header + 22, -(int)height); // vertical flip!
PutLE16(bmp_header + 26, 1); // number of planes
PutLE16(bmp_header + 28, bytes_per_px * 8); // bits per pixel
PutLE32(bmp_header + 30, 0); // no compression (BI_RGB)
PutLE32(bmp_header + 34, 0); // image size (dummy)
PutLE32(bmp_header + 38, 2400); // x pixels/meter
PutLE32(bmp_header + 42, 2400); // y pixels/meter
PutLE32(bmp_header + 46, 0); // number of palette colors
PutLE32(bmp_header + 50, 0); // important color count
// TODO(skal): color profile
// write header
if (fwrite(bmp_header, sizeof(bmp_header), 1, fout) != 1) {
return 0;
}
// write pixel array
for (y = 0; y < height; ++y) {
if (fwrite(rgba, line_size, 1, fout) != 1) {
return 0;
}
// write padding zeroes
if (bmp_stride != line_size) {
const uint8_t zeroes[3] = { 0 };
if (fwrite(zeroes, bmp_stride - line_size, 1, fout) != 1) {
return 0;
}
}
rgba += stride;
}
return 1;
}
#undef BMP_HEADER_SIZE
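The writer above pads each BMP row to a 4-byte boundary with (line_size + 3) & ~3. A small sketch of the same arithmetic, using a hypothetical BMPStride() helper; this can be handy for pre-computing the expected file size:
#include <stdint.h>
#include <stdio.h>
// Illustrative only: mirrors the stride computation used by WebPWriteBMP().
static uint32_t BMPStride(uint32_t width, int has_alpha) {
  const uint32_t bytes_per_px = has_alpha ? 4 : 3;
  const uint32_t line_size = bytes_per_px * width;
  return (line_size + 3) & ~3u;   // round up to a multiple of 4
}
int main(void) {
  const uint32_t width = 101;     // odd width -> padding needed in the RGB case
  printf("RGB stride:  %u\n", BMPStride(width, 0));   // 304 (303 rounded up)
  printf("RGBA stride: %u\n", BMPStride(width, 1));   // 404 (already aligned)
  return 0;
}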
//------------------------------------------------------------------------------
// TIFF
#define NUM_IFD_ENTRIES 15
#define EXTRA_DATA_SIZE 16
// 10b for signature/header + n * 12b entries + 4b for IFD terminator:
#define EXTRA_DATA_OFFSET (10 + 12 * NUM_IFD_ENTRIES + 4)
#define TIFF_HEADER_SIZE (EXTRA_DATA_OFFSET + EXTRA_DATA_SIZE)
int WebPWriteTIFF(FILE* fout, const WebPDecBuffer* const buffer) {
const int has_alpha = WebPIsAlphaMode(buffer->colorspace);
const uint32_t width = buffer->width;
const uint32_t height = buffer->height;
const uint8_t* rgba = buffer->u.RGBA.rgba;
const int stride = buffer->u.RGBA.stride;
const uint8_t bytes_per_px = has_alpha ? 4 : 3;
// For non-alpha case, we omit tag 0x152 (ExtraSamples).
const uint8_t num_ifd_entries = has_alpha ? NUM_IFD_ENTRIES
: NUM_IFD_ENTRIES - 1;
uint8_t tiff_header[TIFF_HEADER_SIZE] = {
0x49, 0x49, 0x2a, 0x00, // little endian signature
8, 0, 0, 0, // offset to the unique IFD that follows
// IFD (offset = 8). Entries must be written in increasing tag order.
num_ifd_entries, 0, // Number of entries in the IFD (12 bytes each).
0x00, 0x01, 3, 0, 1, 0, 0, 0, 0, 0, 0, 0, // 10: Width (TBD)
0x01, 0x01, 3, 0, 1, 0, 0, 0, 0, 0, 0, 0, // 22: Height (TBD)
0x02, 0x01, 3, 0, bytes_per_px, 0, 0, 0, // 34: BitsPerSample: 8888
EXTRA_DATA_OFFSET + 0, 0, 0, 0,
0x03, 0x01, 3, 0, 1, 0, 0, 0, 1, 0, 0, 0, // 46: Compression: none
0x06, 0x01, 3, 0, 1, 0, 0, 0, 2, 0, 0, 0, // 58: Photometric: RGB
0x11, 0x01, 4, 0, 1, 0, 0, 0, // 70: Strips offset:
TIFF_HEADER_SIZE, 0, 0, 0, // data follows header
0x12, 0x01, 3, 0, 1, 0, 0, 0, 1, 0, 0, 0, // 82: Orientation: topleft
0x15, 0x01, 3, 0, 1, 0, 0, 0, // 94: SamplesPerPixel
bytes_per_px, 0, 0, 0,
0x16, 0x01, 3, 0, 1, 0, 0, 0, 0, 0, 0, 0, // 106: Rows per strip (TBD)
0x17, 0x01, 4, 0, 1, 0, 0, 0, 0, 0, 0, 0, // 118: StripByteCount (TBD)
0x1a, 0x01, 5, 0, 1, 0, 0, 0, // 130: X-resolution
EXTRA_DATA_OFFSET + 8, 0, 0, 0,
0x1b, 0x01, 5, 0, 1, 0, 0, 0, // 142: Y-resolution
EXTRA_DATA_OFFSET + 8, 0, 0, 0,
0x1c, 0x01, 3, 0, 1, 0, 0, 0, 1, 0, 0, 0, // 154: PlanarConfiguration
0x28, 0x01, 3, 0, 1, 0, 0, 0, 2, 0, 0, 0, // 166: ResolutionUnit (inch)
0x52, 0x01, 3, 0, 1, 0, 0, 0, 1, 0, 0, 0, // 178: ExtraSamples: rgbA
0, 0, 0, 0, // 190: IFD terminator
// EXTRA_DATA_OFFSET:
8, 0, 8, 0, 8, 0, 8, 0, // BitsPerSample
72, 0, 0, 0, 1, 0, 0, 0 // 72 pixels/inch, for X/Y-resolution
};
uint32_t y;
if (fout == NULL || buffer == NULL || rgba == NULL) return 0;
// Fill placeholders in IFD:
PutLE32(tiff_header + 10 + 8, width);
PutLE32(tiff_header + 22 + 8, height);
PutLE32(tiff_header + 106 + 8, height);
PutLE32(tiff_header + 118 + 8, width * bytes_per_px * height);
if (!has_alpha) PutLE32(tiff_header + 178, 0); // IFD terminator
// write header
if (fwrite(tiff_header, sizeof(tiff_header), 1, fout) != 1) {
return 0;
}
// write pixel values
for (y = 0; y < height; ++y) {
if (fwrite(rgba, bytes_per_px, width, fout) != width) {
return 0;
}
rgba += stride;
}
return 1;
}
#undef TIFF_HEADER_SIZE
#undef EXTRA_DATA_OFFSET
#undef EXTRA_DATA_SIZE
#undef NUM_IFD_ENTRIES
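The placeholder patches above (offsets 10 + 8, 22 + 8, 106 + 8 and 118 + 8) all target the 4-byte value field of an IFD entry: the header starts with a 10-byte preamble (8-byte TIFF header plus the 2-byte entry count) and each entry occupies 12 bytes, with its value/offset field at byte 8. A small sketch with a hypothetical IFDValueOffset() helper:
#include <stdio.h>
// Illustrative only: byte offset of the value/offset field of the i-th IFD
// entry in the header laid out by WebPWriteTIFF() (10-byte preamble,
// 12 bytes per entry, value field at +8 within an entry).
static int IFDValueOffset(int entry_index) {
  return 10 + 12 * entry_index + 8;
}
int main(void) {
  printf("ImageWidth      -> %d\n", IFDValueOffset(0));  // 18  (10 + 8)
  printf("ImageLength     -> %d\n", IFDValueOffset(1));  // 30  (22 + 8)
  printf("RowsPerStrip    -> %d\n", IFDValueOffset(8));  // 114 (106 + 8)
  printf("StripByteCounts -> %d\n", IFDValueOffset(9));  // 126 (118 + 8)
  return 0;
}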
//------------------------------------------------------------------------------
// Raw Alpha
int WebPWriteAlphaPlane(FILE* fout, const WebPDecBuffer* const buffer) {
if (fout == NULL || buffer == NULL) {
return 0;
} else {
const uint32_t width = buffer->width;
const uint32_t height = buffer->height;
const uint8_t* a = buffer->u.YUVA.a;
const int a_stride = buffer->u.YUVA.a_stride;
uint32_t y;
if (a == NULL) return 0;
fprintf(fout, "P5\n%u %u\n255\n", width, height);
for (y = 0; y < height; ++y) {
if (fwrite(a, width, 1, fout) != 1) return 0;
a += a_stride;
}
return 1;
}
}
//------------------------------------------------------------------------------
// PGM with IMC4 layout
int WebPWritePGM(FILE* fout, const WebPDecBuffer* const buffer) {
if (fout == NULL || buffer == NULL) {
return 0;
} else {
const int width = buffer->width;
const int height = buffer->height;
const WebPYUVABuffer* const yuv = &buffer->u.YUVA;
const uint8_t* src_y = yuv->y;
const uint8_t* src_u = yuv->u;
const uint8_t* src_v = yuv->v;
const uint8_t* src_a = yuv->a;
const int uv_width = (width + 1) / 2;
const int uv_height = (height + 1) / 2;
const int a_height = (src_a != NULL) ? height : 0;
int ok = 1;
int y;
if (src_y == NULL || src_u == NULL || src_v == NULL) return 0;
fprintf(fout, "P5\n%d %d\n255\n",
(width + 1) & ~1, height + uv_height + a_height);
for (y = 0; ok && y < height; ++y) {
ok &= (fwrite(src_y, width, 1, fout) == 1);
if (width & 1) fputc(0, fout); // padding byte
src_y += yuv->y_stride;
}
for (y = 0; ok && y < uv_height; ++y) {
ok &= (fwrite(src_u, uv_width, 1, fout) == 1);
ok &= (fwrite(src_v, uv_width, 1, fout) == 1);
src_u += yuv->u_stride;
src_v += yuv->v_stride;
}
for (y = 0; ok && y < a_height; ++y) {
ok &= (fwrite(src_a, width, 1, fout) == 1);
if (width & 1) fputc(0, fout); // padding byte
src_a += yuv->a_stride;
}
return ok;
}
}
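The PGM written above stacks the planes vertically: the luma plane, then the U and V rows side by side, then the optional alpha plane, with odd widths padded to an even column count. A sketch of the resulting canvas size, using a hypothetical IMC4CanvasSize() helper:
#include <stdio.h>
// Illustrative only: canvas dimensions of the PGM written by WebPWritePGM().
static void IMC4CanvasSize(int width, int height, int has_alpha,
                           int* canvas_w, int* canvas_h) {
  const int uv_height = (height + 1) / 2;
  const int a_height = has_alpha ? height : 0;
  *canvas_w = (width + 1) & ~1;               // padded to an even width
  *canvas_h = height + uv_height + a_height;  // Y, then U|V rows, then A
}
int main(void) {
  int w, h;
  IMC4CanvasSize(33, 17, 0, &w, &h);
  printf("33x17, no alpha -> %dx%d PGM\n", w, h);   // 34x26 (17 + 9)
  IMC4CanvasSize(33, 17, 1, &w, &h);
  printf("33x17, alpha    -> %dx%d PGM\n", w, h);   // 34x43 (17 + 9 + 17)
  return 0;
}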
//------------------------------------------------------------------------------
// Raw YUV(A) planes
int WebPWriteYUV(FILE* fout, const WebPDecBuffer* const buffer) {
if (fout == NULL || buffer == NULL) {
return 0;
} else {
const int width = buffer->width;
const int height = buffer->height;
const WebPYUVABuffer* const yuv = &buffer->u.YUVA;
const uint8_t* src_y = yuv->y;
const uint8_t* src_u = yuv->u;
const uint8_t* src_v = yuv->v;
const uint8_t* src_a = yuv->a;
const int uv_width = (width + 1) / 2;
const int uv_height = (height + 1) / 2;
const int a_height = (src_a != NULL) ? height : 0;
int ok = 1;
int y;
if (src_y == NULL || src_u == NULL || src_v == NULL) return 0;
for (y = 0; ok && y < height; ++y) {
ok &= (fwrite(src_y, width, 1, fout) == 1);
src_y += yuv->y_stride;
}
for (y = 0; ok && y < uv_height; ++y) {
ok &= (fwrite(src_u, uv_width, 1, fout) == 1);
src_u += yuv->u_stride;
}
for (y = 0; ok && y < uv_height; ++y) {
ok &= (fwrite(src_v, uv_width, 1, fout) == 1);
src_v += yuv->v_stride;
}
for (y = 0; ok && y < a_height; ++y) {
ok &= (fwrite(src_a, width, 1, fout) == 1);
src_a += yuv->a_stride;
}
return ok;
}
}
//------------------------------------------------------------------------------
// Generic top-level call
int WebPSaveImage(const WebPDecBuffer* const buffer,
WebPOutputFileFormat format, const char* const out_file) {
FILE* fout = NULL;
int needs_open_file = 1;
const int use_stdout = (out_file != NULL) && !strcmp(out_file, "-");
int ok = 1;
if (buffer == NULL || out_file == NULL) return 0;
#ifdef HAVE_WINCODEC_H
needs_open_file = (format != PNG);
#endif
if (needs_open_file) {
fout = use_stdout ? ImgIoUtilSetBinaryMode(stdout) : fopen(out_file, "wb");
if (fout == NULL) {
fprintf(stderr, "Error opening output file %s\n", out_file);
return 0;
}
}
if (format == PNG ||
format == RGBA || format == BGRA || format == ARGB ||
format == rgbA || format == bgrA || format == Argb) {
#ifdef HAVE_WINCODEC_H
ok &= WebPWritePNG(out_file, use_stdout, buffer);
#else
ok &= WebPWritePNG(fout, buffer);
#endif
} else if (format == PAM) {
ok &= WebPWritePAM(fout, buffer);
} else if (format == PPM || format == RGB || format == BGR) {
ok &= WebPWritePPM(fout, buffer);
} else if (format == RGBA_4444 || format == RGB_565 || format == rgbA_4444) {
ok &= WebPWrite16bAsPGM(fout, buffer);
} else if (format == BMP) {
ok &= WebPWriteBMP(fout, buffer);
} else if (format == TIFF) {
ok &= WebPWriteTIFF(fout, buffer);
} else if (format == RAW_YUV) {
ok &= WebPWriteYUV(fout, buffer);
} else if (format == PGM || format == YUV || format == YUVA) {
ok &= WebPWritePGM(fout, buffer);
} else if (format == ALPHA_PLANE_ONLY) {
ok &= WebPWriteAlphaPlane(fout, buffer);
}
if (fout != NULL && fout != stdout) {
fclose(fout);
}
return ok;
}
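For context, a typical caller (dwebp, roughly) decodes into a WebPDecBuffer and hands it to WebPSaveImage(). A minimal sketch using the public decoding API together with this file's helpers; "in.webp" and "out.png" are placeholder names:
#include <stdio.h>
#include <stdlib.h>
#include "webp/decode.h"
#include "./image_enc.h"
#include "./imageio_util.h"
int main(void) {
  const uint8_t* data = NULL;
  size_t data_size = 0;
  WebPDecoderConfig config;
  int ok = 0;
  if (!ImgIoUtilReadFile("in.webp", &data, &data_size)) return 1;
  if (!WebPInitDecoderConfig(&config)) return 1;
  config.output.colorspace = MODE_RGBA;   // RGBA samples for PNG output
  if (WebPDecode(data, data_size, &config) == VP8_STATUS_OK) {
    ok = WebPSaveImage(&config.output, PNG, "out.png");
  }
  WebPFreeDecBuffer(&config.output);
  free((void*)data);
  return ok ? 0 : 1;
}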


@ -1,96 +0,0 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// All-in-one library to save PNG/JPEG/WebP/TIFF/WIC images.
//
// Author: Skal (pascal.massimino@gmail.com)
#ifndef WEBP_IMAGEIO_IMAGE_ENC_H_
#define WEBP_IMAGEIO_IMAGE_ENC_H_
#include <stdio.h>
#ifdef HAVE_CONFIG_H
#include "webp/config.h"
#endif
#include "webp/types.h"
#include "webp/decode.h"
#ifdef __cplusplus
extern "C" {
#endif
// Output types
typedef enum {
PNG = 0,
PAM,
PPM,
PGM,
BMP,
TIFF,
RAW_YUV,
ALPHA_PLANE_ONLY, // this is for experimenting only
// forced colorspace output (for testing, mostly)
RGB, RGBA, BGR, BGRA, ARGB,
RGBA_4444, RGB_565,
rgbA, bgrA, Argb, rgbA_4444,
YUV, YUVA
} WebPOutputFileFormat;
// General all-purpose call.
// Most formats expect a 'buffer' containing RGBA-like samples, except
// RAW_YUV, YUV and YUVA formats.
// If 'out_file_name' is "-", data is saved to stdout.
// Returns false if an error occurred, true otherwise.
int WebPSaveImage(const WebPDecBuffer* const buffer,
WebPOutputFileFormat format, const char* const out_file_name);
// Save to PNG.
#ifdef HAVE_WINCODEC_H
int WebPWritePNG(const char* out_file_name, int use_stdout,
const struct WebPDecBuffer* const buffer);
#else
int WebPWritePNG(FILE* out_file, const WebPDecBuffer* const buffer);
#endif
// Save to PPM format (RGB, no alpha)
int WebPWritePPM(FILE* fout, const struct WebPDecBuffer* const buffer);
// Save to PAM format (= PPM + alpha)
int WebPWritePAM(FILE* fout, const struct WebPDecBuffer* const buffer);
// Save 16b mode (RGBA4444, RGB565, ...) for debugging purposes.
int WebPWrite16bAsPGM(FILE* fout, const struct WebPDecBuffer* const buffer);
// Save as BMP
int WebPWriteBMP(FILE* fout, const struct WebPDecBuffer* const buffer);
// Save as TIFF
int WebPWriteTIFF(FILE* fout, const struct WebPDecBuffer* const buffer);
// Save the ALPHA plane (only) as a PGM
int WebPWriteAlphaPlane(FILE* fout, const struct WebPDecBuffer* const buffer);
// Save the YUV samples in PGM format (using IMC4 layout).
// See: http://www.fourcc.org/yuv.php#IMC4.
// (very convenient format for viewing the samples, esp. for odd dimensions).
int WebPWritePGM(FILE* fout, const struct WebPDecBuffer* const buffer);
// Save YUV(A) planes sequentially (raw dump)
int WebPWriteYUV(FILE* fout, const struct WebPDecBuffer* const buffer);
// Save 16b mode (RGBA4444, RGB565, ...) as PGM format, for debugging purposes.
int WebPWrite16bAsPGM(FILE* fout, const struct WebPDecBuffer* const buffer);
#ifdef __cplusplus
} // extern "C"
#endif
#endif // WEBP_IMAGEIO_IMAGE_ENC_H_


@ -1,143 +0,0 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// Utility functions used by the image decoders.
//
#include "./imageio_util.h"
#if defined(_WIN32)
#include <fcntl.h> // for _O_BINARY
#include <io.h> // for _setmode()
#endif
#include <stdlib.h>
#include <string.h>
// -----------------------------------------------------------------------------
// File I/O
FILE* ImgIoUtilSetBinaryMode(FILE* file) {
#if defined(_WIN32)
if (_setmode(_fileno(file), _O_BINARY) == -1) {
fprintf(stderr, "Failed to reopen file in O_BINARY mode.\n");
return NULL;
}
#endif
return file;
}
int ImgIoUtilReadFromStdin(const uint8_t** data, size_t* data_size) {
static const size_t kBlockSize = 16384; // default initial size
size_t max_size = 0;
size_t size = 0;
uint8_t* input = NULL;
if (data == NULL || data_size == NULL) return 0;
*data = NULL;
*data_size = 0;
if (!ImgIoUtilSetBinaryMode(stdin)) return 0;
while (!feof(stdin)) {
// We double the buffer size each time and read as much as possible.
const size_t extra_size = (max_size == 0) ? kBlockSize : max_size;
void* const new_data = realloc(input, max_size + extra_size);
if (new_data == NULL) goto Error;
input = (uint8_t*)new_data;
max_size += extra_size;
size += fread(input + size, 1, extra_size, stdin);
if (size < max_size) break;
}
if (ferror(stdin)) goto Error;
*data = input;
*data_size = size;
return 1;
Error:
free(input);
fprintf(stderr, "Could not read from stdin\n");
return 0;
}
int ImgIoUtilReadFile(const char* const file_name,
const uint8_t** data, size_t* data_size) {
int ok;
void* file_data;
size_t file_size;
FILE* in;
const int from_stdin = (file_name == NULL) || !strcmp(file_name, "-");
if (from_stdin) return ImgIoUtilReadFromStdin(data, data_size);
if (data == NULL || data_size == NULL) return 0;
*data = NULL;
*data_size = 0;
in = fopen(file_name, "rb");
if (in == NULL) {
fprintf(stderr, "cannot open input file '%s'\n", file_name);
return 0;
}
fseek(in, 0, SEEK_END);
file_size = ftell(in);
fseek(in, 0, SEEK_SET);
file_data = malloc(file_size);
if (file_data == NULL) {
fclose(in);  // avoid leaking the file handle on allocation failure
return 0;
}
ok = (fread(file_data, file_size, 1, in) == 1);
fclose(in);
if (!ok) {
fprintf(stderr, "Could not read %d bytes of data from file %s\n",
(int)file_size, file_name);
free(file_data);
return 0;
}
*data = (uint8_t*)file_data;
*data_size = file_size;
return 1;
}
int ImgIoUtilWriteFile(const char* const file_name,
const uint8_t* data, size_t data_size) {
int ok;
FILE* out;
const int to_stdout = (file_name == NULL) || !strcmp(file_name, "-");
if (data == NULL) {
return 0;
}
out = to_stdout ? stdout : fopen(file_name, "wb");
if (out == NULL) {
fprintf(stderr, "Error! Cannot open output file '%s'\n", file_name);
return 0;
}
ok = (fwrite(data, data_size, 1, out) == 1);
if (out != stdout) fclose(out);
return ok;
}
// -----------------------------------------------------------------------------
void ImgIoUtilCopyPlane(const uint8_t* src, int src_stride,
uint8_t* dst, int dst_stride, int width, int height) {
while (height-- > 0) {
memcpy(dst, src, width * sizeof(*dst));
src += src_stride;
dst += dst_stride;
}
}
// -----------------------------------------------------------------------------
int ImgIoUtilCheckSizeArgumentsOverflow(uint64_t nmemb, size_t size) {
const uint64_t total_size = nmemb * size;
return (total_size == (size_t)total_size);
}
// -----------------------------------------------------------------------------
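ImgIoUtilCheckSizeArgumentsOverflow() is intended to be called before sizing an allocation from image dimensions, so that the 64-bit product still fits in a size_t. A sketch of the call pattern, with a hypothetical AllocRGBA() helper:
#include <stdint.h>
#include <stdlib.h>
#include "./imageio_util.h"
// Illustrative only: guard an RGBA allocation against size_t overflow.
static uint8_t* AllocRGBA(int width, int height) {
  const uint64_t num_pixels = (uint64_t)width * height;
  if (!ImgIoUtilCheckSizeArgumentsOverflow(num_pixels, 4)) return NULL;
  return (uint8_t*)malloc((size_t)(num_pixels * 4));
}
int main(void) {
  uint8_t* const rgba = AllocRGBA(1920, 1080);
  if (rgba == NULL) return 1;
  free(rgba);
  return 0;
}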


@ -1,61 +0,0 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// Utility functions used by the image decoders.
//
#ifndef WEBP_IMAGEIO_IMAGEIO_UTIL_H_
#define WEBP_IMAGEIO_IMAGEIO_UTIL_H_
#include <stdio.h>
#include "webp/types.h"
#ifdef __cplusplus
extern "C" {
#endif
//------------------------------------------------------------------------------
// File I/O
// Reopen file in binary (O_BINARY) mode.
// Returns 'file' on success, NULL otherwise.
FILE* ImgIoUtilSetBinaryMode(FILE* file);
// Allocates storage for entire file 'file_name' and returns contents and size
// in 'data' and 'data_size'. Returns 1 on success, 0 otherwise. '*data' should
// be deleted using free().
// If 'file_name' is NULL or equal to "-", input is read from stdin by calling
// the function ImgIoUtilReadFromStdin().
int ImgIoUtilReadFile(const char* const file_name,
const uint8_t** data, size_t* data_size);
// Same as ImgIoUtilReadFile(), but reads until EOF from stdin instead.
int ImgIoUtilReadFromStdin(const uint8_t** data, size_t* data_size);
// Write a data segment into a file named 'file_name'. Returns true if ok.
// If 'file_name' is NULL or equal to "-", output is written to stdout.
int ImgIoUtilWriteFile(const char* const file_name,
const uint8_t* data, size_t data_size);
//------------------------------------------------------------------------------
// Copy width x height pixels from 'src' to 'dst' honoring the strides.
void ImgIoUtilCopyPlane(const uint8_t* src, int src_stride,
uint8_t* dst, int dst_stride, int width, int height);
//------------------------------------------------------------------------------
// Returns 0 in case of overflow of nmemb * size.
int ImgIoUtilCheckSizeArgumentsOverflow(uint64_t nmemb, size_t size);
#ifdef __cplusplus
} // extern "C"
#endif
#endif // WEBP_IMAGEIO_IMAGEIO_UTIL_H_
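ImgIoUtilCopyPlane() copies width bytes per row while honoring independent source and destination strides, which is how callers repack padded planes into tight buffers. A small illustrative example:
#include <stdint.h>
#include <stdio.h>
#include "./imageio_util.h"
int main(void) {
  // 4x2 source plane stored with a stride of 6 bytes (2 bytes of row padding).
  const uint8_t src[2 * 6] = {
    1, 2, 3, 4, 0, 0,
    5, 6, 7, 8, 0, 0
  };
  uint8_t dst[2 * 4] = { 0 };   // tightly packed destination (stride 4)
  int i;
  ImgIoUtilCopyPlane(src, 6, dst, 4, 4, 2);
  for (i = 0; i < 8; ++i) printf("%d ", dst[i]);  // prints: 1 2 3 4 5 6 7 8
  printf("\n");
  return 0;
}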


@ -1,190 +0,0 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// WebP decode.
#include "./webpdec.h"
#include <stdio.h>
#include <stdlib.h>
#include "webp/decode.h"
#include "webp/encode.h"
#include "./imageio_util.h"
#include "./metadata.h"
//------------------------------------------------------------------------------
// WebP decoding
static const char* const kStatusMessages[VP8_STATUS_NOT_ENOUGH_DATA + 1] = {
"OK", "OUT_OF_MEMORY", "INVALID_PARAM", "BITSTREAM_ERROR",
"UNSUPPORTED_FEATURE", "SUSPENDED", "USER_ABORT", "NOT_ENOUGH_DATA"
};
static void PrintAnimationWarning(const WebPDecoderConfig* const config) {
if (config->input.has_animation) {
fprintf(stderr,
"Error! Decoding of an animated WebP file is not supported.\n"
" Use webpmux to extract the individual frames or\n"
" vwebp to view this image.\n");
}
}
void PrintWebPError(const char* const in_file, int status) {
fprintf(stderr, "Decoding of %s failed.\n", in_file);
fprintf(stderr, "Status: %d", status);
if (status >= VP8_STATUS_OK && status <= VP8_STATUS_NOT_ENOUGH_DATA) {
fprintf(stderr, "(%s)", kStatusMessages[status]);
}
fprintf(stderr, "\n");
}
int LoadWebP(const char* const in_file,
const uint8_t** data, size_t* data_size,
WebPBitstreamFeatures* bitstream) {
VP8StatusCode status;
WebPBitstreamFeatures local_features;
if (!ImgIoUtilReadFile(in_file, data, data_size)) return 0;
if (bitstream == NULL) {
bitstream = &local_features;
}
status = WebPGetFeatures(*data, *data_size, bitstream);
if (status != VP8_STATUS_OK) {
free((void*)*data);
*data = NULL;
*data_size = 0;
PrintWebPError(in_file, status);
return 0;
}
return 1;
}
//------------------------------------------------------------------------------
VP8StatusCode DecodeWebP(const uint8_t* const data, size_t data_size,
WebPDecoderConfig* const config) {
if (config == NULL) return VP8_STATUS_INVALID_PARAM;
PrintAnimationWarning(config);
return WebPDecode(data, data_size, config);
}
VP8StatusCode DecodeWebPIncremental(
const uint8_t* const data, size_t data_size,
WebPDecoderConfig* const config) {
VP8StatusCode status = VP8_STATUS_OK;
if (config == NULL) return VP8_STATUS_INVALID_PARAM;
PrintAnimationWarning(config);
// Decoding call.
{
WebPIDecoder* const idec = WebPIDecode(data, data_size, config);
if (idec == NULL) {
fprintf(stderr, "Failed during WebPINewDecoder().\n");
return VP8_STATUS_OUT_OF_MEMORY;
} else {
#ifdef WEBP_EXPERIMENTAL_FEATURES
size_t size = 0;
const size_t incr = 2 + (data_size / 20);
while (size < data_size) {
size_t next_size = size + (rand() % incr);
if (next_size > data_size) next_size = data_size;
status = WebPIUpdate(idec, data, next_size);
if (status != VP8_STATUS_OK && status != VP8_STATUS_SUSPENDED) break;
size = next_size;
}
#else
status = WebPIUpdate(idec, data, data_size);
#endif
WebPIDelete(idec);
}
}
return status;
}
// -----------------------------------------------------------------------------
int ReadWebP(const uint8_t* const data, size_t data_size,
WebPPicture* const pic,
int keep_alpha, Metadata* const metadata) {
int ok = 0;
VP8StatusCode status = VP8_STATUS_OK;
WebPDecoderConfig config;
WebPDecBuffer* const output_buffer = &config.output;
WebPBitstreamFeatures* const bitstream = &config.input;
if (data == NULL || data_size == 0 || pic == NULL) return 0;
// TODO(jzern): add Exif/XMP/ICC extraction.
if (metadata != NULL) {
fprintf(stderr, "Warning: metadata extraction from WebP is unsupported.\n");
}
if (!WebPInitDecoderConfig(&config)) {
fprintf(stderr, "Library version mismatch!\n");
return 0;
}
status = WebPGetFeatures(data, data_size, bitstream);
if (status != VP8_STATUS_OK) {
PrintWebPError("input data", status);
return 0;
}
{
const int has_alpha = keep_alpha && bitstream->has_alpha;
if (pic->use_argb) {
output_buffer->colorspace = has_alpha ? MODE_RGBA : MODE_RGB;
} else {
output_buffer->colorspace = has_alpha ? MODE_YUVA : MODE_YUV;
}
status = DecodeWebP(data, data_size, &config);
if (status == VP8_STATUS_OK) {
pic->width = output_buffer->width;
pic->height = output_buffer->height;
if (pic->use_argb) {
const uint8_t* const rgba = output_buffer->u.RGBA.rgba;
const int stride = output_buffer->u.RGBA.stride;
ok = has_alpha ? WebPPictureImportRGBA(pic, rgba, stride)
: WebPPictureImportRGB(pic, rgba, stride);
} else {
pic->colorspace = has_alpha ? WEBP_YUV420A : WEBP_YUV420;
ok = WebPPictureAlloc(pic);
if (!ok) {
status = VP8_STATUS_OUT_OF_MEMORY;
} else {
const WebPYUVABuffer* const yuva = &output_buffer->u.YUVA;
const int uv_width = (pic->width + 1) >> 1;
const int uv_height = (pic->height + 1) >> 1;
ImgIoUtilCopyPlane(yuva->y, yuva->y_stride,
pic->y, pic->y_stride, pic->width, pic->height);
ImgIoUtilCopyPlane(yuva->u, yuva->u_stride,
pic->u, pic->uv_stride, uv_width, uv_height);
ImgIoUtilCopyPlane(yuva->v, yuva->v_stride,
pic->v, pic->uv_stride, uv_width, uv_height);
if (has_alpha) {
ImgIoUtilCopyPlane(yuva->a, yuva->a_stride,
pic->a, pic->a_stride, pic->width, pic->height);
}
}
}
}
}
if (status != VP8_STATUS_OK) {
PrintWebPError("input data", status);
}
WebPFreeDecBuffer(output_buffer);
return ok;
}
// -----------------------------------------------------------------------------
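ReadWebP() is the path cwebp uses to re-import an existing WebP file into a WebPPicture before re-encoding. A minimal sketch of that flow; "in.webp" is a placeholder name:
#include <stdio.h>
#include <stdlib.h>
#include "webp/encode.h"
#include "./webpdec.h"
int main(void) {
  const uint8_t* data = NULL;
  size_t data_size = 0;
  WebPPicture pic;
  int ok = 0;
  if (!WebPPictureInit(&pic)) return 1;
  pic.use_argb = 1;                       // request ARGB import
  if (LoadWebP("in.webp", &data, &data_size, NULL)) {
    ok = ReadWebP(data, data_size, &pic, 1 /* keep_alpha */, NULL);
    free((void*)data);
  }
  if (ok) printf("imported %dx%d picture\n", pic.width, pic.height);
  WebPPictureFree(&pic);
  return ok ? 0 : 1;
}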


@ -1,67 +0,0 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// WebP decode.
#ifndef WEBP_IMAGEIO_WEBPDEC_H_
#define WEBP_IMAGEIO_WEBPDEC_H_
#include "webp/decode.h"
#ifdef __cplusplus
extern "C" {
#endif
struct Metadata;
struct WebPPicture;
//------------------------------------------------------------------------------
// WebP decoding
// Prints an informative error message regarding decode failure of 'in_file'.
// 'status' is treated as a VP8StatusCode and if valid will be printed as a
// text string.
void PrintWebPError(const char* const in_file, int status);
// Reads a WebP from 'in_file', returning the contents and size in 'data' and
// 'data_size'. If not NULL, 'bitstream' is populated using WebPGetFeatures().
// Returns true on success.
int LoadWebP(const char* const in_file,
const uint8_t** data, size_t* data_size,
WebPBitstreamFeatures* bitstream);
// Decodes the WebP contained in 'data'.
// 'config' is a structure previously initialized by WebPInitDecoderConfig().
// 'config->output' should have the desired colorspace selected.
// Returns the decoder status. On success 'config->output' will contain the
// decoded picture.
VP8StatusCode DecodeWebP(const uint8_t* const data, size_t data_size,
WebPDecoderConfig* const config);
// Same as DecodeWebP(), but using the incremental decoder.
VP8StatusCode DecodeWebPIncremental(
const uint8_t* const data, size_t data_size,
WebPDecoderConfig* const config);
//------------------------------------------------------------------------------
// Reads a WebP from 'in_file', returning the decoded output in 'pic'.
// Output is RGBA or YUVA, depending on pic->use_argb value.
// If 'keep_alpha' is true and the WebP has an alpha channel, the output is RGBA
// or YUVA. Otherwise, alpha channel is dropped and output is RGB or YUV.
// Returns true on success.
int ReadWebP(const uint8_t* const data, size_t data_size,
struct WebPPicture* const pic,
int keep_alpha, struct Metadata* const metadata);
#ifdef __cplusplus
} // extern "C"
#endif
#endif // WEBP_IMAGEIO_WEBPDEC_H_


@ -1,11 +1,10 @@
#!/bin/bash
#
# This script generates 'WebP.framework' and 'WebPDecoder.framework'. An iOS
# app can decode WebP images by including 'WebPDecoder.framework' and both
# encode and decode WebP images by including 'WebP.framework'.
# This script generates 'WebP.framework'. An iOS app can decode WebP images
# by including 'WebP.framework'.
#
# Run ./iosbuild.sh to generate the frameworks under the current directory
# (the previous build will be erased if it exists).
# Run ./iosbuild.sh to generate 'WebP.framework' under the current directory
# (previous build will be erased if it exists).
#
# This script is inspired by the build script written by Carson McDonald.
# (http://www.ioncannon.net/programming/1483/using-webp-to-reduce-native-ios-app-size/).
@ -34,12 +33,10 @@ readonly SRCDIR=$(dirname $0)
readonly TOPDIR=$(pwd)
readonly BUILDDIR="${TOPDIR}/iosbuild"
readonly TARGETDIR="${TOPDIR}/WebP.framework"
readonly DECTARGETDIR="${TOPDIR}/WebPDecoder.framework"
readonly DEVELOPER=$(xcode-select --print-path)
readonly PLATFORMSROOT="${DEVELOPER}/Platforms"
readonly LIPO=$(xcrun -sdk iphoneos${SDK} -find lipo)
LIBLIST=''
DECLIBLIST=''
if [[ -z "${SDK}" ]]; then
echo "iOS SDK not available"
@ -53,8 +50,10 @@ else
echo "iOS SDK Version ${SDK}"
fi
rm -rf ${BUILDDIR} ${TARGETDIR} ${DECTARGETDIR}
mkdir -p ${BUILDDIR} ${TARGETDIR}/Headers/ ${DECTARGETDIR}/Headers/
rm -rf ${BUILDDIR}
rm -rf ${TARGETDIR}
mkdir -p ${BUILDDIR}
mkdir -p ${TARGETDIR}/Headers/
if [[ ! -e ${SRCDIR}/configure ]]; then
if ! (cd ${SRCDIR} && sh autogen.sh); then
@ -108,13 +107,12 @@ for PLATFORM in ${PLATFORMS}; do
CFLAGS="${CFLAGS}"
set +x
# run make only in the src/ directory to create libwebp.a/libwebpdecoder.a
# run make only in the src/ directory to create libwebpdecoder.a
cd src/
make V=0
make install
LIBLIST+=" ${ROOTDIR}/lib/libwebp.a"
DECLIBLIST+=" ${ROOTDIR}/lib/libwebpdecoder.a"
LIBLIST+=" ${ROOTDIR}/lib/libwebpdecoder.a"
make clean
cd ..
@ -122,8 +120,5 @@ for PLATFORM in ${PLATFORMS}; do
export PATH=${OLDPATH}
done
cp -a ${SRCDIR}/src/webp/{decode,encode,types}.h ${TARGETDIR}/Headers/
cp -a ${SRCDIR}/src/webp/*.h ${TARGETDIR}/Headers/
${LIPO} -create ${LIBLIST} -output ${TARGETDIR}/WebP
cp -a ${SRCDIR}/src/webp/{decode,types}.h ${DECTARGETDIR}/Headers/
${LIPO} -create ${DECLIBLIST} -output ${DECTARGETDIR}/WebPDecoder


@ -2,8 +2,8 @@
# system, for simple local building of the libraries and tools.
# It will not install the libraries system-wide, but just create the 'cwebp'
# and 'dwebp' tools in the examples/ directory, along with the static
# libraries 'src/libwebp.a', 'src/libwebpdecoder.a', 'src/mux/libwebpmux.a',
# 'src/demux/libwebpdemux.a' and 'extras/libwebpextras.a'.
# libraries 'src/libwebp.a', 'src/libwebpdecoder.a', 'src/mux/libwebpmux.a' and
# 'src/demux/libwebpdemux.a'.
#
# To build the library and examples, use:
# make -f makefile.unix
@ -61,9 +61,6 @@ endif
EXTRA_FLAGS += -DWEBP_USE_THREAD
EXTRA_LIBS += -lpthread
# Control symbol visibility. Comment out if your compiler doesn't support it.
EXTRA_FLAGS += -fvisibility=hidden
# Extra flags to emulate C89 strictness with the full ANSI
EXTRA_FLAGS += -Wextra -Wold-style-definition
EXTRA_FLAGS += -Wmissing-prototypes
@ -71,13 +68,8 @@ EXTRA_FLAGS += -Wmissing-declarations
EXTRA_FLAGS += -Wdeclaration-after-statement
EXTRA_FLAGS += -Wshadow
EXTRA_FLAGS += -Wformat-security -Wformat-nonliteral
# EXTRA_FLAGS += -Wvla
# SSE4.1-specific flags:
ifeq ($(HAVE_SSE41), 1)
EXTRA_FLAGS += -DWEBP_HAVE_SSE41
src/dsp/%_sse41.o: EXTRA_FLAGS += -msse4.1
endif
# EXTRA_FLAGS += -Wvla
# AVX2-specific flags:
ifeq ($(HAVE_AVX2), 1)
@ -89,187 +81,121 @@ endif
# EXTRA_FLAGS += -march=armv7-a -mfloat-abi=hard -mfpu=neon -mtune=cortex-a8
# -> seems to make the overall lib slower: -fno-split-wide-types
# MIPS (MSA) 32-bit build specific flags for mips32r5 (p5600):
# EXTRA_FLAGS += -mips32r5 -mabi=32 -mtune=p5600 -mmsa -mfp64
# EXTRA_FLAGS += -msched-weight -mload-store-pairs
# MIPS (MSA) 64-bit build specific flags for mips64r6 (i6400):
# EXTRA_FLAGS += -mips64r6 -mabi=64 -mtune=i6400 -mmsa -mfp64
# EXTRA_FLAGS += -msched-weight -mload-store-pairs
#### Nothing should normally be changed below this line ####
AR = ar
ARFLAGS = r
CC = gcc
CPPFLAGS = -Isrc/ -Wall
CFLAGS = -O3 -DNDEBUG $(EXTRA_FLAGS)
CC = gcc
INSTALL = install
GROFF = /usr/bin/groff
COL = /usr/bin/col
LDFLAGS = $(EXTRA_LIBS) $(EXTRA_FLAGS) -lm
ANIM_UTIL_OBJS = \
examples/anim_util.o \
DEC_OBJS = \
src/dec/alpha_dec.o \
src/dec/buffer_dec.o \
src/dec/frame_dec.o \
src/dec/idec_dec.o \
src/dec/io_dec.o \
src/dec/quant_dec.o \
src/dec/tree_dec.o \
src/dec/vp8_dec.o \
src/dec/vp8l_dec.o \
src/dec/webp_dec.o \
src/dec/alpha.o \
src/dec/buffer.o \
src/dec/frame.o \
src/dec/idec.o \
src/dec/io.o \
src/dec/quant.o \
src/dec/tree.o \
src/dec/vp8.o \
src/dec/vp8l.o \
src/dec/webp.o \
DEMUX_OBJS = \
src/demux/anim_decode.o \
src/demux/demux.o \
DSP_DEC_OBJS = \
src/dsp/alpha_processing.o \
src/dsp/alpha_processing_mips_dsp_r2.o \
src/dsp/alpha_processing_neon.o \
src/dsp/alpha_processing_sse2.o \
src/dsp/alpha_processing_sse41.o \
src/dsp/cpu.o \
src/dsp/dec.o \
src/dsp/dec_clip_tables.o \
src/dsp/dec_mips32.o \
src/dsp/dec_mips_dsp_r2.o \
src/dsp/dec_msa.o \
src/dsp/dec_neon.o \
src/dsp/dec_sse2.o \
src/dsp/dec_sse41.o \
src/dsp/filters.o \
src/dsp/filters_mips_dsp_r2.o \
src/dsp/filters_msa.o \
src/dsp/filters_neon.o \
src/dsp/filters_sse2.o \
src/dsp/lossless.o \
src/dsp/lossless_mips_dsp_r2.o \
src/dsp/lossless_msa.o \
src/dsp/lossless_mips32.o \
src/dsp/lossless_neon.o \
src/dsp/lossless_sse2.o \
src/dsp/rescaler.o \
src/dsp/rescaler_mips32.o \
src/dsp/rescaler_mips_dsp_r2.o \
src/dsp/rescaler_msa.o \
src/dsp/rescaler_neon.o \
src/dsp/rescaler_sse2.o \
src/dsp/upsampling.o \
src/dsp/upsampling_mips_dsp_r2.o \
src/dsp/upsampling_msa.o \
src/dsp/upsampling_neon.o \
src/dsp/upsampling_sse2.o \
src/dsp/yuv.o \
src/dsp/yuv_mips32.o \
src/dsp/yuv_mips_dsp_r2.o \
src/dsp/yuv_sse2.o \
DSP_ENC_OBJS = \
src/dsp/argb.o \
src/dsp/argb_mips_dsp_r2.o \
src/dsp/argb_sse2.o \
src/dsp/cost.o \
src/dsp/cost_mips32.o \
src/dsp/cost_mips_dsp_r2.o \
src/dsp/cost_sse2.o \
src/dsp/enc.o \
src/dsp/enc_avx2.o \
src/dsp/enc_mips32.o \
src/dsp/enc_mips_dsp_r2.o \
src/dsp/enc_msa.o \
src/dsp/enc_neon.o \
src/dsp/enc_sse2.o \
src/dsp/enc_sse41.o \
src/dsp/lossless_enc.o \
src/dsp/lossless_enc_mips32.o \
src/dsp/lossless_enc_mips_dsp_r2.o \
src/dsp/lossless_enc_msa.o \
src/dsp/lossless_enc_neon.o \
src/dsp/lossless_enc_sse2.o \
src/dsp/lossless_enc_sse41.o \
ENC_OBJS = \
src/enc/alpha_enc.o \
src/enc/analysis_enc.o \
src/enc/backward_references_enc.o \
src/enc/config_enc.o \
src/enc/cost_enc.o \
src/enc/delta_palettization_enc.o \
src/enc/filter_enc.o \
src/enc/frame_enc.o \
src/enc/histogram_enc.o \
src/enc/iterator_enc.o \
src/enc/near_lossless_enc.o \
src/enc/picture_enc.o \
src/enc/picture_csp_enc.o \
src/enc/picture_psnr_enc.o \
src/enc/picture_rescale_enc.o \
src/enc/picture_tools_enc.o \
src/enc/predictor_enc.o \
src/enc/quant_enc.o \
src/enc/syntax_enc.o \
src/enc/token_enc.o \
src/enc/tree_enc.o \
src/enc/vp8l_enc.o \
src/enc/webp_enc.o \
src/enc/alpha.o \
src/enc/analysis.o \
src/enc/backward_references.o \
src/enc/config.o \
src/enc/cost.o \
src/enc/filter.o \
src/enc/frame.o \
src/enc/histogram.o \
src/enc/iterator.o \
src/enc/picture.o \
src/enc/picture_csp.o \
src/enc/picture_psnr.o \
src/enc/picture_rescale.o \
src/enc/picture_tools.o \
src/enc/quant.o \
src/enc/syntax.o \
src/enc/token.o \
src/enc/tree.o \
src/enc/vp8l.o \
src/enc/webpenc.o \
EX_FORMAT_DEC_OBJS = \
imageio/image_dec.o \
imageio/jpegdec.o \
imageio/metadata.o \
imageio/pngdec.o \
imageio/tiffdec.o \
imageio/webpdec.o \
EX_FORMAT_ENC_OBJS = \
imageio/image_enc.o \
examples/jpegdec.o \
examples/metadata.o \
examples/pngdec.o \
examples/tiffdec.o \
examples/webpdec.o \
EX_UTIL_OBJS = \
examples/example_util.o \
GIFDEC_OBJS = \
examples/gifdec.o \
IMAGE_UTIL_OBJS = \
imageio/imageio_util.o \
GIF2WEBP_UTIL_OBJS = \
examples/gif2webp_util.o \
MUX_OBJS = \
src/mux/anim_encode.o \
src/mux/muxedit.o \
src/mux/muxinternal.o \
src/mux/muxread.o \
UTILS_DEC_OBJS = \
src/utils/bit_reader_utils.o \
src/utils/color_cache_utils.o \
src/utils/filters_utils.o \
src/utils/huffman_utils.o \
src/utils/quant_levels_dec_utils.o \
src/utils/random_utils.o \
src/utils/rescaler_utils.o \
src/utils/thread_utils.o \
src/utils/bit_reader.o \
src/utils/color_cache.o \
src/utils/filters.o \
src/utils/huffman.o \
src/utils/quant_levels_dec.o \
src/utils/random.o \
src/utils/rescaler.o \
src/utils/thread.o \
src/utils/utils.o \
UTILS_ENC_OBJS = \
src/utils/bit_writer_utils.o \
src/utils/huffman_encode_utils.o \
src/utils/quant_levels_utils.o \
EXTRA_OBJS = \
extras/extras.o \
extras/quality_estimate.o \
src/utils/bit_writer.o \
src/utils/huffman_encode.o \
src/utils/quant_levels.o \
LIBWEBPDECODER_OBJS = $(DEC_OBJS) $(DSP_DEC_OBJS) $(UTILS_DEC_OBJS)
LIBWEBP_OBJS = $(LIBWEBPDECODER_OBJS) $(ENC_OBJS) $(DSP_ENC_OBJS) \
$(UTILS_ENC_OBJS)
LIBWEBPMUX_OBJS = $(MUX_OBJS)
LIBWEBPDEMUX_OBJS = $(DEMUX_OBJS)
LIBWEBPEXTRA_OBJS = $(EXTRA_OBJS)
HDRS_INSTALLED = \
src/webp/decode.h \
@ -280,91 +206,66 @@ HDRS_INSTALLED = \
src/webp/types.h \
HDRS = \
src/dec/alphai_dec.h \
src/dec/common_dec.h \
src/dec/vp8_dec.h \
src/dec/vp8i_dec.h \
src/dec/vp8li_dec.h \
src/dec/webpi_dec.h \
src/dsp/common_sse2.h \
src/dec/alphai.h \
src/dec/decode_vp8.h \
src/dec/vp8i.h \
src/dec/vp8li.h \
src/dec/webpi.h \
src/dsp/dsp.h \
src/dsp/lossless.h \
src/dsp/lossless_common.h \
src/dsp/mips_macro.h \
src/dsp/msa_macro.h \
src/dsp/neon.h \
src/dsp/yuv.h \
src/enc/backward_references_enc.h \
src/enc/cost_enc.h \
src/enc/delta_palettization_enc.h \
src/enc/histogram_enc.h \
src/enc/vp8i_enc.h \
src/enc/vp8li_enc.h \
src/mux/animi.h \
src/dsp/yuv_tables_sse2.h \
src/enc/backward_references.h \
src/enc/cost.h \
src/enc/histogram.h \
src/enc/vp8enci.h \
src/enc/vp8li.h \
src/mux/muxi.h \
src/utils/bit_reader_utils.h \
src/utils/bit_reader_inl_utils.h \
src/utils/bit_writer_utils.h \
src/utils/color_cache_utils.h \
src/utils/endian_inl_utils.h \
src/utils/filters_utils.h \
src/utils/huffman_utils.h \
src/utils/huffman_encode_utils.h \
src/utils/quant_levels_utils.h \
src/utils/quant_levels_dec_utils.h \
src/utils/random_utils.h \
src/utils/rescaler_utils.h \
src/utils/thread_utils.h \
src/utils/bit_reader.h \
src/utils/bit_writer.h \
src/utils/color_cache.h \
src/utils/filters.h \
src/utils/huffman.h \
src/utils/huffman_encode.h \
src/utils/quant_levels.h \
src/utils/quant_levels_dec.h \
src/utils/random.h \
src/utils/rescaler.h \
src/utils/thread.h \
src/utils/utils.h \
src/webp/format_constants.h \
$(HDRS_INSTALLED) \
OUT_LIBS = examples/libexample_util.a
OUT_LIBS += imageio/libimageio_util.a
OUT_LIBS += imageio/libimagedec.a
OUT_LIBS += imageio/libimageenc.a
OUT_LIBS += src/libwebpdecoder.a
OUT_LIBS += src/libwebp.a
EXTRA_LIB = extras/libwebpextras.a
OUT_LIBS = examples/libexample_util.a src/libwebpdecoder.a src/libwebp.a
OUT_EXAMPLES = examples/cwebp examples/dwebp
EXTRA_EXAMPLES = examples/gif2webp examples/vwebp examples/webpmux \
examples/anim_diff examples/img2webp
OTHER_EXAMPLES = extras/get_disto extras/webp_quality
EXTRA_EXAMPLES = examples/gif2webp examples/vwebp examples/webpmux
OUTPUT = $(OUT_LIBS) $(OUT_EXAMPLES)
ifeq ($(MAKECMDGOALS),clean)
OUTPUT += $(EXTRA_EXAMPLES) $(OTHER_EXAMPLES)
OUTPUT += src/demux/libwebpdemux.a src/mux/libwebpmux.a $(EXTRA_LIB)
OUTPUT += examples/libgifdec.a examples/libanim_util.a
OUTPUT += $(EXTRA_EXAMPLES)
OUTPUT += src/demux/libwebpdemux.a src/mux/libwebpmux.a
OUTPUT += examples/libgif2webp_util.a
endif
ex: $(OUT_EXAMPLES)
all: ex $(EXTRA_EXAMPLES) $(OTHER_EXAMPLES)
extras: $(EXTRA_LIB)
all: ex $(EXTRA_EXAMPLES)
$(EX_FORMAT_DEC_OBJS): %.o: %.h
# special dependencies:
# tree_dec.c/vp8_dec.c/bit_reader_utils.c <->
# bit_reader_inl_utils.h, endian_inl_utils.h
# bit_writer_utils.c <-> endian_inl_utils.h
src/dec/tree_dec.o: src/utils/bit_reader_inl_utils.h
src/dec/tree_dec.o: src/utils/endian_inl_utils.h
src/dec/vp8_dec.o: src/utils/bit_reader_inl_utils.h src/utils/endian_inl_utils.h
src/utils/bit_reader_utils.o: src/utils/bit_reader_inl_utils.h
src/utils/bit_reader_utils.o: src/utils/endian_inl_utils.h
src/utils/bit_writer_utils.o: src/utils/endian_inl_utils.h
# tree.c/vp8.c/bit_reader.c <-> bit_reader_inl.h, endian_inl.h
# bit_writer.c <-> endian_inl.h
src/dec/tree.o: src/utils/bit_reader_inl.h src/utils/endian_inl.h
src/dec/vp8.o: src/utils/bit_reader_inl.h src/utils/endian_inl.h
src/utils/bit_reader.o: src/utils/bit_reader_inl.h src/utils/endian_inl.h
src/utils/bit_writer.o: src/utils/endian_inl.h
%.o: %.c $(HDRS)
$(CC) $(CFLAGS) $(CPPFLAGS) -c $< -o $@
examples/libanim_util.a: $(ANIM_UTIL_OBJS)
examples/libexample_util.a: $(EX_UTIL_OBJS)
examples/libgifdec.a: $(GIFDEC_OBJS)
extras/libwebpextras.a: $(LIBWEBPEXTRA_OBJS)
imageio/libimagedec.a: $(EX_FORMAT_DEC_OBJS)
imageio/libimageenc.a: $(EX_FORMAT_ENC_OBJS)
imageio/libimageio_util.a: $(IMAGE_UTIL_OBJS)
examples/libgif2webp_util.a: $(GIF2WEBP_UTIL_OBJS)
src/libwebpdecoder.a: $(LIBWEBPDECODER_OBJS)
src/libwebp.a: $(LIBWEBP_OBJS)
src/mux/libwebpmux.a: $(LIBWEBPMUX_OBJS)
@ -373,54 +274,28 @@ src/demux/libwebpdemux.a: $(LIBWEBPDEMUX_OBJS)
%.a:
$(AR) $(ARFLAGS) $@ $^
examples/anim_diff: examples/anim_diff.o $(ANIM_UTIL_OBJS) $(GIFDEC_OBJS)
examples/cwebp: examples/cwebp.o
examples/cwebp: examples/cwebp.o $(EX_FORMAT_DEC_OBJS)
examples/dwebp: examples/dwebp.o
examples/gif2webp: examples/gif2webp.o $(GIFDEC_OBJS)
examples/gif2webp: examples/gif2webp.o
examples/vwebp: examples/vwebp.o
examples/webpmux: examples/webpmux.o
examples/img2webp: examples/img2webp.o
examples/anim_diff: examples/libanim_util.a examples/libgifdec.a
examples/anim_diff: src/demux/libwebpdemux.a examples/libexample_util.a
examples/anim_diff: imageio/libimageio_util.a src/libwebp.a
examples/anim_diff: EXTRA_LIBS += $(GIF_LIBS)
examples/anim_diff: EXTRA_FLAGS += -DWEBP_HAVE_GIF
examples/cwebp: examples/libexample_util.a
examples/cwebp: imageio/libimagedec.a
examples/cwebp: imageio/libimageio_util.a
examples/cwebp: src/libwebp.a
examples/cwebp: examples/libexample_util.a src/libwebp.a
examples/cwebp: EXTRA_LIBS += $(CWEBP_LIBS)
examples/dwebp: examples/libexample_util.a
examples/dwebp: imageio/libimagedec.a
examples/dwebp: imageio/libimageenc.a
examples/dwebp: imageio/libimageio_util.a
examples/dwebp: src/libwebp.a
examples/dwebp: examples/libexample_util.a src/libwebpdecoder.a
examples/dwebp: EXTRA_LIBS += $(DWEBP_LIBS)
examples/gif2webp: examples/libexample_util.a imageio/libimageio_util.a
examples/gif2webp: examples/libgifdec.a src/mux/libwebpmux.a src/libwebp.a
examples/gif2webp: examples/libexample_util.a examples/libgif2webp_util.a
examples/gif2webp: src/mux/libwebpmux.a src/libwebp.a
examples/gif2webp: EXTRA_LIBS += $(GIF_LIBS)
examples/gif2webp: EXTRA_FLAGS += -DWEBP_HAVE_GIF
examples/vwebp: examples/libexample_util.a src/demux/libwebpdemux.a
examples/vwebp: imageio/libimageio_util.a src/libwebp.a
examples/vwebp: src/libwebp.a
examples/vwebp: EXTRA_LIBS += $(GL_LIBS)
examples/vwebp: EXTRA_FLAGS += -DWEBP_HAVE_GL
examples/webpmux: examples/libexample_util.a imageio/libimageio_util.a
examples/webpmux: src/mux/libwebpmux.a src/libwebpdecoder.a
examples/img2webp: examples/libexample_util.a imageio/libimageio_util.a
examples/img2webp: imageio/libimagedec.a
examples/img2webp: src/mux/libwebpmux.a src/libwebp.a
examples/img2webp: EXTRA_LIBS += $(CWEBP_LIBS)
examples/webpmux: examples/libexample_util.a src/mux/libwebpmux.a
examples/webpmux: src/libwebpdecoder.a
extras/get_disto: extras/get_disto.o
extras/get_disto: imageio/libimagedec.a imageio/libimageio_util.a src/libwebp.a
extras/get_disto: EXTRA_LIBS += $(CWEBP_LIBS)
extras/webp_quality: extras/webp_quality.o
extras/webp_quality: imageio/libimageio_util.a
extras/webp_quality: $(EXTRA_LIB) src/libwebp.a
$(OUT_EXAMPLES) $(EXTRA_EXAMPLES) $(OTHER_EXAMPLES):
$(OUT_EXAMPLES) $(EXTRA_EXAMPLES):
$(CC) -o $@ $^ $(LDFLAGS)
dist: DESTDIR := dist
@ -434,8 +309,7 @@ dist: all
$(INSTALL) -m644 src/demux/libwebpdemux.a $(DESTDIR)/lib
$(INSTALL) -m644 src/mux/libwebpmux.a $(DESTDIR)/lib
umask 022; \
for m in man/[cdv]webp.1 man/gif2webp.1 man/webpmux.1 \
man/img2webp.1; do \
for m in man/[cdv]webp.1 man/gif2webp.1 man/webpmux.1; do \
basenam=$$(basename $$m .1); \
$(GROFF) -t -e -man -T utf8 $$m \
| $(COL) -bx >$(DESTDIR)/doc/$${basenam}.txt; \
@ -446,8 +320,6 @@ dist: all
clean:
$(RM) $(OUTPUT) *~ \
examples/*.o examples/*~ \
extras/*.o extras/*~ \
imageio/*.o imageio/*~ \
src/dec/*.o src/dec/*~ \
src/demux/*.o src/demux/*~ \
src/dsp/*.o src/dsp/*~ \
@ -456,5 +328,19 @@ clean:
src/utils/*.o src/utils/*~ \
src/webp/*~ man/*~ doc/*~ swig/*~ \
.PHONY: all clean dist ex
superclean: clean
$(RM) -r .git *.log *.cache *~
$(RM) -r .deps */.deps */*/.deps
$(RM) -r .libs */.libs */*/.libs
$(RM) */*.lo */*/*.lo
$(RM) */*.la */*/*.la
$(RM) Makefile */Makefile */*/Makefile
$(RM) Makefile.in */Makefile.in */*/Makefile.in
$(RM) config.log autom4te.cache libtool config.h stamp-h1
$(RM) aclocal.m4 compile
$(RM) config.guess config.h.in config.sub config.status
$(RM) configure depcomp install-sh ltmain.sh missing src/libwebp.pc
$(RM) m4/*
.PHONY: all clean dist ex superclean
.SUFFIXES:


@ -1,5 +1,5 @@
.\" Hey, EMACS: -*- nroff -*-
.TH CWEBP 1 "January 20, 2017"
.TH CWEBP 1 "October 19, 2015"
.SH NAME
cwebp \- compress an image file to a WebP file
.SH SYNOPSIS
@ -23,7 +23,7 @@ Using "\-" as output name will direct output to 'stdout'.
.TP
.BI \-\- " string
Explicitly specify the input file. This option is useful if the input
file starts with a '\-' for instance. This option must appear \fBlast\fP.
file starts with an '\-' for instance. This option must appear \fBlast\fP.
Any other options afterward will be ignored.
.TP
.B \-h, \-help
@ -35,17 +35,6 @@ A summary of all the possible options.
.B \-version
Print the version number (as major.minor.revision) and exit.
.TP
.B \-lossless
Encode the image without any loss. For images with fully transparent area,
the invisible pixel values (R/G/B or Y/U/V) will be preserved only if the
\-exact option is used.
.TP
.BI \-near_lossless " int
Use near\-lossless image preprocessing. This option adjusts pixel values
to help compressibility, but has minimal impact on the visual quality.
It triggers lossless compression mode automatically.
Range is 0 (maximum preprocessing) to 100 (no preprocessing, the default).
.TP
.BI \-q " float
Specify the compression factor for RGB channels between 0 and 100. The default
is 75.
@ -53,31 +42,46 @@ is 75.
In case of lossy compression (default), a small factor produces a smaller file
with lower quality. Best quality is achieved by using a value of 100.
.br
In case of lossless compression (specified by the \fB\-lossless\fP option), a
small factor enables faster compression speed, but produces a larger file.
Maximum compression is achieved by using a value of 100.
.TP
.BI \-z " int
Switch on \fBlossless\fP compression mode with the specified level between 0
and 9, with level 0 being the fastest, 9 being the slowest. Fast mode
produces larger file size than slower ones. A good default is \fB\-z 6\fP.
This option is actually a shortcut for some predefined settings for quality
and method. If options \fB\-q\fP or \fB\-m\fP are subsequently used, they will
invalidate the effect of this option.
In case of lossless compression (specified by the \-lossless option), a small
factor enables faster compression speed, but produces a larger file. Maximum
compression is achieved by using a value of 100.
.\" TODO(jzern): restore post-v0.4.1
.\" .TP
.\" .BI \-z " int
.\" Switch on \fBlossless\fP compression mode with the specified level between 0
.\" and 9, with level 0 being the fastest, 9 being the slowest. Fast mode
.\" produces larger file size than slower ones. A good default is \-z 6.
.\" This option is actually a shortcut for some predefined settings for quality
.\" and method. If options \-q or \-m are subsequently used, they will invalidate
.\" the effect of this \-z option.
.TP
.BI \-alpha_q " int
Specify the compression factor for alpha compression between 0 and 100.
Lossless compression of alpha is achieved using a value of 100, while the lower
values result in a lossy compression. The default is 100.
.TP
.BI \-f " int
Specify the strength of the deblocking filter, between 0 (no filtering)
and 100 (maximum filtering). A value of 0 will turn off any filtering.
Higher value will increase the strength of the filtering process applied
after decoding the picture. The higher the value the smoother the picture will
appear. Typical values are usually in the range of 20 to 50.
.TP
.BI \-preset " string
Specify a set of pre\-defined parameters to suit a particular type of
Specify a set of pre-defined parameters to suit a particular type of
source material. Possible values are: \fBdefault\fP, \fBphoto\fP,
\fBpicture\fP, \fBdrawing\fP, \fBicon\fP, \fBtext\fP. Since
\fB\-preset\fP overwrites the other parameters' values (except the
\fB\-q\fP one), this option should preferably appear first in the
order of the arguments.
.TP
.BI \-sns " int
Specify the amplitude of the spatial noise shaping. Spatial noise shaping
(or \fBsns\fP for short) refers to a general collection of built-in algorithms
used to decide which area of the picture should use relatively less bits,
and where else to better transfer these bits. The possible range goes from
0 (algorithm is off) to 100 (the maximal effect). The default value is 80.
.TP
.BI \-m " int
Specify the compression method to use. This parameter controls the
trade off between encoding speed and the compressed file size and quality.
@ -87,18 +91,14 @@ additional encoding possibilities and decide on the quality gain.
Lower value can result in faster processing time at the expense of
larger file size and lower compression quality.
.TP
.BI \-resize " width height
Resize the source to a rectangle with size \fBwidth\fP x \fBheight\fP.
If either (but not both) of the \fBwidth\fP or \fBheight\fP parameters is 0,
the value will be calculated preserving the aspect\-ratio.
.TP
.BI \-crop " x_position y_position width height
Crop the source to a rectangle with top\-left corner at coordinates
(\fBx_position\fP, \fBy_position\fP) and size \fBwidth\fP x \fBheight\fP.
This cropping area must be fully contained within the source rectangle.
.B \-jpeg_like
Change the internal parameter mapping to better match the expected size
of JPEG compression. This flag will generally produce an output file of
similar size to its JPEG equivalent (for the same \fB\-q\fP setting), but
with less visual distortion.
.TP
.B \-mt
Use multi\-threading for encoding, if possible. This option is only effective
Use multi-threading for encoding, if possible. This option is only effective
when using lossy compression on a source with a transparency channel.
.TP
.B \-low_memory
@ -109,50 +109,13 @@ different in size and distortion. This flag is only effective for methods
some side effects on the bitstream: it forces certain bitstream features
like number of partitions (forced to 1). Note that a more detailed report
of bitstream size is printed by \fBcwebp\fP when using this option.
.SS LOSSY OPTIONS
These options are only effective when doing lossy encoding (the default, with
or without alpha).
.TP
.BI \-size " int
Specify a target size (in bytes) to try and reach for the compressed output.
The compressor will make several passes of partial encoding in order to get as
close as possible to this target. If both \fB\-size\fP and \fB\-psnr\fP
are used, \fB\-size\fP value will prevail.
.TP
.BI \-psnr " float
Specify a target PSNR (in dB) to try and reach for the compressed output.
The compressor will make several passes of partial encoding in order to get as
close as possible to this target. If both \fB\-size\fP and \fB\-psnr\fP
are used, \fB\-size\fP value will prevail.
.TP
.BI \-pass " int
Set a maximum number of passes to use during the dichotomy used by
options \fB\-size\fP or \fB\-psnr\fP. Maximum value is 10, default is 1.
If options \fB\-size\fP or \fB\-psnr\fP were used, but \fB\-pass\fP wasn't
specified, a default value of '6' passes will be used.
.TP
.B \-af
Turns auto\-filter on. This algorithm will spend additional time optimizing
the filtering strength to reach a well\-balanced quality.
.TP
.B \-jpeg_like
Change the internal parameter mapping to better match the expected size
of JPEG compression. This flag will generally produce an output file of
similar size to its JPEG equivalent (for the same \fB\-q\fP setting), but
with less visual distortion.
Turns auto-filter on. This algorithm will spend additional time optimizing
the filtering strength to reach a well-balanced quality.
.TP
Advanced options:
.TP
.BI \-f " int
Specify the strength of the deblocking filter, between 0 (no filtering)
and 100 (maximum filtering). A value of 0 will turn off any filtering.
Higher value will increase the strength of the filtering process applied
after decoding the picture. The higher the value the smoother the picture will
appear. Typical values are usually in the range of 20 to 50.
.SH ADDITIONAL OPTIONS
More advanced options are:
.TP
.BI \-sharpness " int
Specify the sharpness of the filtering (if used).
@ -166,17 +129,6 @@ Use strong filtering (if filtering is being used thanks to the
Disable strong filtering (if filtering is being used thanks to the
\fB\-f\fP option) and use simple filtering instead.
.TP
.B \-sharp_yuv
Use more accurate and sharper RGB->YUV conversion if needed. Note that this
process is slower than the default 'fast' RGB->YUV conversion.
.TP
.BI \-sns " int
Specify the amplitude of the spatial noise shaping. Spatial noise shaping
(or \fBsns\fP for short) refers to a general collection of built\-in algorithms
used to decide which area of the picture should use relatively less bits,
and where else to better transfer these bits. The possible range goes from
0 (algorithm is off) to 100 (the maximal effect). The default value is 50.
.TP
.BI \-segments " int
Change the number of partitions to use during the segmentation of the
sns algorithm. Segments should be in range 1 to 4. Default value is 4.
@ -186,30 +138,107 @@ is used.
.BI \-partition_limit " int
Degrade quality by limiting the number of bits used by some macroblocks.
Range is 0 (no degradation, the default) to 100 (full degradation).
Useful values are usually around 30\-70 for moderately large images.
In the VP8 format, the so\-called control partition has a limit of 512k and
Useful values are usually around 30-70 for moderately large images.
In the VP8 format, the so-called control partition has a limit of 512k and
is used to store the following information: whether the macroblock is skipped,
which segment it belongs to, whether it is coded as intra 4x4 or intra 16x16
mode, and finally the prediction modes to use for each of the sub\-blocks.
mode, and finally the prediction modes to use for each of the sub-blocks.
For a very large image, 512k only leaves room to few bits per 16x16 macroblock.
The absolute minimum is 4 bits per macroblock. Skip, segment, and mode
information can use up almost all these 4 bits (although the case is unlikely),
which is problematic for very large images. The partition_limit factor controls
how frequently the most bit\-costly mode (intra 4x4) will be used. This is
how frequently the most bit-costly mode (intra 4x4) will be used. This is
useful in case the 512k limit is reached and the following message is displayed:
\fIError code: 6 (PARTITION0_OVERFLOW: Partition #0 is too big to fit 512k)\fP.
If using \fB\-partition_limit\fP is not enough to meet the 512k constraint, one
If using \fB-partition_limit\fP is not enough to meet the 512k constraint, one
should use less segments in order to save more header bits per macroblock.
See the \fB\-segments\fP option.
See the \fB-segments\fP option.
.TP
.BI \-size " int
Specify a target size (in bytes) to try and reach for the compressed output.
Compressor will make several pass of partial encoding in order to get as
close as possible to this target.
.TP
.BI \-psnr " float
Specify a target PSNR (in dB) to try and reach for the compressed output.
The compressor will make several passes of partial encoding in order to get as
close as possible to this target.
.TP
.BI \-pass " int
Set a maximum number of passes to use during the dichotomy used by
options \fB\-size\fP or \fB\-psnr\fP. Maximum value is 10.
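For instance, a target-driven encode might combine a PSNR goal with a pass limit (the values and file names are illustrative):
cwebp \-psnr 42 \-pass 8 picture.png \-o picture.webp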
.TP
.BI \-resize " width height
Resize the source to a rectangle with size \fBwidth\fP x \fBheight\fP.
If either (but not both) of the \fBwidth\fP or \fBheight\fP parameters is 0,
the value will be calculated preserving the aspect-ratio.
.TP
.BI \-crop " x_position y_position width height
Crop the source to a rectangle with top-left corner at coordinates
(\fBx_position\fP, \fBy_position\fP) and size \fBwidth\fP x \fBheight\fP.
This cropping area must be fully contained within the source rectangle.
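As an example, cropping and resizing can be combined in a single call; here \-resize 200 0 keeps the aspect ratio, and the coordinates and file names are placeholders:
cwebp \-crop 0 0 400 300 \-resize 200 0 input.png \-o thumb.webp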
.TP
.BI \-s " width height
Specify that the input file actually consists of raw Y'CbCr samples following
the ITU-R BT.601 recommendation, in 4:2:0 linear format.
The luma plane has size \fBwidth\fP x \fBheight\fP.
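For instance, a raw 4:2:0 file could be fed to the encoder as follows (the dimensions and file names are illustrative):
cwebp \-s 640 480 input.yuv \-o output.webp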
.TP
.BI \-map " int
Output additional ASCII-map of encoding information. Possible map values
range from 1 to 6. This is only meant to help debugging.
.TP
.BI \-pre " int
Specify some pre-processing steps. Using a value of '2' will trigger
quality-dependent pseudo-random dithering during RGBA->YUVA conversion
(lossy compression only).
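For example, the pseudo-random dithering step could be enabled during a lossy encode like this (the quality value and file names are illustrative):
cwebp \-pre 2 \-q 60 photo.png \-o photo.webp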
.TP
.BI \-alpha_filter " string
Specify the predictive filtering method for the alpha plane. One of 'none',
\&'fast' or 'best', in increasing complexity and slowness order. Default is
\&'fast'. Internally, alpha filtering is performed using four possible
predictions (none, horizontal, vertical, gradient). The 'best' mode will try
each mode in turn and pick the one which gives the smaller size. The 'fast'
mode will just try to form an a-priori guess without testing all modes.
.TP
.BI \-alpha_method " int
Specify the algorithm used for alpha compression: 0 or 1. Algorithm 0 denotes
no compression, 1 uses WebP lossless format for compression. The default is 1.
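For instance, the slower but denser alpha settings could be requested as follows (the file names are illustrative):
cwebp \-alpha_method 1 \-alpha_filter best sprite.png \-o sprite.webp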
.TP
.B \-alpha_cleanup
Modify unseen RGB values under fully transparent areas, to help compressibility.
The default is off.
.TP
.BI \-blend_alpha " int
This option blends the alpha channel (if present) with the source using the
background color specified in hexadecimal as 0xrrggbb. The alpha channel is
afterward reset to the opaque value 255.
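As an example, blending against a white background might look like this (the color and file names are illustrative):
cwebp \-blend_alpha 0xffffff logo.png \-o logo_flat.webp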
.TP
.B \-noalpha
Using this option will discard the alpha channel.
.TP
.B \-lossless
Encode the image without any loss.
.TP
.BI \-hint " string
Specify the hint about input image type. Possible values are:
\fBphoto\fP, \fBpicture\fP or \fBgraph\fP.
.TP
.BI \-metadata " string
A comma separated list of metadata to copy from the input to the output if
present.
Valid values: \fBall\fP, \fBnone\fP, \fBexif\fP, \fBicc\fP, \fBxmp\fP.
The default is \fBnone\fP.
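For example, to carry over only the color profile and XMP data (the file names are illustrative):
cwebp \-metadata icc,xmp photo.jpg \-o photo.webp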
.SS LOGGING OPTIONS
These options control the level of output:
Note: each input format may not support all combinations.
.TP
.B \-noasm
Disable all assembly optimizations.
.TP
.B \-v
Print extra information (encoding time in particular).
.TP
.B \-print_psnr
Compute and report average PSNR (Peak\-Signal\-To\-Noise ratio).
Compute and report average PSNR (Peak-Signal-To-Noise ratio).
.TP
.B \-print_ssim
Compute and report average SSIM (structural similarity
@ -226,70 +255,14 @@ Report encoding progress in percent.
Do not print anything.
.TP
.B \-short
Only print brief information (output file size and PSNR) for testing purposes.
.TP
.BI \-map " int
Output additional ASCII\-map of encoding information. Possible map values
range from 1 to 6. This is only meant to help debugging.
.SS ADDITIONAL OPTIONS
More advanced options are:
.TP
.BI \-s " width height
Specify that the input file actually consists of raw Y'CbCr samples following
the ITU\-R BT.601 recommendation, in 4:2:0 linear format.
The luma plane has size \fBwidth\fP x \fBheight\fP.
.TP
.BI \-pre " int
Specify some preprocessing steps. Using a value of '2' will trigger
quality\-dependent pseudo\-random dithering during RGBA\->YUVA conversion
(lossy compression only).
.TP
.BI \-alpha_filter " string
Specify the predictive filtering method for the alpha plane. One of 'none',
\&'fast' or 'best', in increasing complexity and slowness order. Default is
\&'fast'. Internally, alpha filtering is performed using four possible
predictions (none, horizontal, vertical, gradient). The 'best' mode will try
each mode in turn and pick the one which gives the smaller size. The 'fast'
mode will just try to form an a priori guess without testing all modes.
.TP
.BI \-alpha_method " int
Specify the algorithm used for alpha compression: 0 or 1. Algorithm 0 denotes
no compression, 1 uses WebP lossless format for compression. The default is 1.
.TP
.B \-exact
Preserve RGB values in transparent area. The default is off, to help
compressibility.
.TP
.BI \-blend_alpha " int
This option blends the alpha channel (if present) with the source using the
background color specified in hexadecimal as 0xrrggbb. The alpha channel is
afterward reset to the opaque value 255.
.TP
.B \-noalpha
Using this option will discard the alpha channel.
.TP
.BI \-hint " string
Specify the hint about input image type. Possible values are:
\fBphoto\fP, \fBpicture\fP or \fBgraph\fP.
.TP
.BI \-metadata " string
A comma separated list of metadata to copy from the input to the output if
present.
Valid values: \fBall\fP, \fBnone\fP, \fBexif\fP, \fBicc\fP, \fBxmp\fP.
The default is \fBnone\fP.
Note: each input format may not support all combinations.
.TP
.B \-noasm
Disable all assembly optimizations.
Only print brief information (output file size and PSNR) for testing purpose.
.SH BUGS
Please report all bugs to the issue tracker:
Please report all bugs to our issue tracker:
https://bugs.chromium.org/p/webp
.br
Patches welcome! See this page to get started:
http://www.webmproject.org/code/contribute/submitting\-patches/
http://www.webmproject.org/code/contribute/submitting-patches/
.SH EXAMPLES
cwebp \-q 50 -lossless picture.png \-o picture_lossless.webp
@ -301,10 +274,9 @@ cwebp \-sns 70 \-f 50 \-size 60000 picture.png \-o picture.webp
cwebp \-o picture.webp \-\- \-\-\-picture.png
.SH AUTHORS
\fBcwebp\fP is a part of libwebp and was written by the WebP team.
\fBcwebp\fP was written by the WebP team.
.br
The latest source tree is available at
https://chromium.googlesource.com/webm/libwebp
The latest source tree is available at http://www.webmproject.org/code
.PP
This manual page was written by Pascal Massimino <pascal.massimino@gmail.com>,
for the Debian project (and may be used by others).

View File

@ -1,5 +1,5 @@
.\" Hey, EMACS: -*- nroff -*-
.TH DWEBP 1 "June 23, 2016"
.TH DWEBP 1 "October 19, 2015"
.SH NAME
dwebp \- decompress a WebP file to an image file
.SH SYNOPSIS
@ -67,11 +67,12 @@ but it will make the decoding faster.
Specify a dithering \fBstrength\fP between 0 and 100. Dithering is a
post-processing effect applied to chroma components in lossy compression.
It helps by smoothing gradients and avoiding banding artifacts.
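For instance, a moderate chroma dithering strength could be requested as follows (the value and file names are illustrative):
dwebp \-dither 50 picture.webp \-o output.png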
.TP
.BI \-alpha_dither
If the compressed file contains a transparency plane that was quantized
during compression, this flag will allow dithering the reconstructed plane
in order to generate smoother transparency gradients.
.\" TODO(jzern): restore post-v0.4.1
.\" .TP
.\" .BI \-alpha_dither
.\" If the compressed file contains a transparency plane that was quantized
.\" during compression, this flag will allow dithering the reconstructed plane
.\" in order to generate smoother transparency gradients.
.TP
.B \-nodither
Disable all dithering (default).
@ -86,20 +87,16 @@ This cropping area must be fully contained within the source rectangle.
The top-left corner will be snapped to even coordinates if needed.
This option is meant to reduce the memory needed for cropping large images.
Note: the cropping is applied \fIbefore\fP any scaling.
.\" TODO(jzern): restore post-v0.4.1
.\" .TP
.\" .B \-flip
.\" Flip decoded image vertically (can be useful for OpenGL textures for instance).
.TP
.B \-flip
Flip decoded image vertically (can be useful for OpenGL textures for instance).
.TP
\fB\-resize\fR, \fB\-scale\fI width height\fR
.BI \-scale " width height
Rescale the decoded picture to dimension \fBwidth\fP x \fBheight\fP. This
option is mostly intended for reducing the memory needed to decode large images,
when only a small version is needed (thumbnail, preview, etc.). Note: scaling
when only a small version is needed (thumbnail, preview, etc.). Note: scaling
is applied \fIafter\fP cropping.
If either (but not both) of the \fBwidth\fP or \fBheight\fP parameters is 0,
the value will be calculated preserving the aspect-ratio.
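For instance, a small preview could be produced directly at decode time (the dimensions and file names are illustrative):
dwebp \-scale 320 240 picture.webp \-o preview.png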
.TP
.B \-quiet
Do not print anything.
.TP
.B \-v
Print extra information (decoding time in particular).
@ -108,7 +105,7 @@ Print extra information (decoding time in particular).
Disable all assembly optimizations.
.SH BUGS
Please report all bugs to the issue tracker:
Please report all bugs to our issue tracker:
https://bugs.chromium.org/p/webp
.br
Patches welcome! See this page to get started:
@ -124,10 +121,9 @@ dwebp \-o output.ppm \-\- \-\-\-picture.webp
cat picture.webp | dwebp \-o \- \-\- \- > output.ppm
.SH AUTHORS
\fBdwebp\fP is a part of libwebp and was written by the WebP team.
\fBdwebp\fP was written by the WebP team.
.br
The latest source tree is available at
https://chromium.googlesource.com/webm/libwebp
The latest source tree is available at http://www.webmproject.org/code
.PP
This manual page was written by Pascal Massimino <pascal.massimino@gmail.com>,
for the Debian project (and may be used by others).

View File

@ -1,5 +1,5 @@
.\" Hey, EMACS: -*- nroff -*-
.TH GIF2WEBP 1 "January 25, 2017"
.TH GIF2WEBP 1 "October 19, 2015"
.SH NAME
gif2webp \- Convert a GIF image to WebP
.SH SYNOPSIS
@ -54,12 +54,6 @@ additional encoding possibilities and decide on the quality gain.
A lower value can result in faster processing time at the expense of a
larger file size and lower compression quality.
.TP
.BI \-min_size
Encode image to achieve smallest size. This disables key frame insertion and
picks the dispose method resulting in smallest output for each frame. It uses
lossless compression by default, but can be combined with \-q, \-m, \-lossy or
\-mixed options.
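For example, the smallest-size mode can be combined with a lossy quality cap as follows (the value and file names are illustrative):
gif2webp \-min_size \-lossy \-q 60 animation.gif \-o animation.webp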
.TP
.BI \-kmin " int
.TP
.BI \-kmax " int
@ -68,9 +62,7 @@ Specify the minimum and maximum distance between consecutive key frames
some key frames into the output animation as needed so that this criterion is
satisfied.
.br
A 'kmax' value of 0 will turn off insertion of key frames. A 'kmax' value of 1
will result in all frames being key frames. 'kmin' value is not taken into
account in both these special cases.
A 'kmin' value of 0 will turn off insertion of key frames.
Typical values are in the range 3 to 30. Default values are kmin = 9,
kmax = 17 for lossless compression and kmin = 3, kmax = 5 for lossy compression.
.br
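For instance, lossy conversion with tighter key frame spacing might look like this (the values and file names are illustrative):
gif2webp \-lossy \-kmin 3 \-kmax 5 animation.gif \-o animation.webp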
@ -118,7 +110,7 @@ Print extra information.
Do not print anything.
.SH BUGS
Please report all bugs to the issue tracker:
Please report all bugs to our issue tracker:
https://bugs.chromium.org/p/webp
.br
Patches welcome! See this page to get started:
@ -136,10 +128,9 @@ gif2webp \-lossy \-f 50 picture.gif \-o picture.webp
gif2webp \-q 70 \-o picture.webp \-\- \-\-\-picture.gif
.SH AUTHORS
\fBgif2webp\fP is a part of libwebp and was written by the WebP team.
\fBgif2webp\fP was written by the WebP team.
.br
The latest source tree is available at
https://chromium.googlesource.com/webm/libwebp
The latest source tree is available at http://www.webmproject.org/code
.PP
This manual page was written by Urvang Joshi <urvang@google.com>, for the
Debian project (and may be used by others).

View File

@ -1,97 +0,0 @@
.\" Hey, EMACS: -*- nroff -*-
.TH IMG2WEBP 1 "January 23, 2017"
.SH NAME
img2webp \- create an animated WebP file from a sequence of input images.
.SH SYNOPSIS
.B img2webp
[file_level_options] [files] [per_frame_options...]
.br
.SH DESCRIPTION
This manual page documents the
.B img2webp
command.
.PP
\fBimg2webp\fP compresses a sequence of images using the animated WebP format.
Input images can either be PNG, JPEG, TIFF or WebP.
.SH FILE-LEVEL OPTIONS
The file-level options are applied at the beginning of the compression process,
before the input frames are read.
.TP
.BI \-o " string
Specify the name of the output WebP file.
.TP
.BI \-min_size
Encode images to achieve smallest size. This disables key frame insertion and
picks the parameters resulting in smallest output for each frame. It uses
lossless compression by default, but can be combined with \-q, \-m, \-lossy or
\-mixed options.
.TP
.BI \-kmin " int
.TP
.BI \-kmax " int
Specify the minimum and maximum distance between consecutive key frames
(independently decodable frames) in the output animation. The tool will insert
some key frames into the output animation as needed so that this criterion is
satisfied.
.br
.B \-mixed
Mixed compression mode: optimize compression of the image by picking either
lossy or lossless compression for each frame heuristically. This global
option disables the local options \fB-lossy\fP and \fB-lossless\fP.
.TP
.BI \-loop " int
Specifies the number of times the animation should loop. Using '0' means
'loop indefinitely'.
.TP
.BI \-v
Be more verbose.
.TP
.B \-h, \-help
A short usage summary.
.SH PER-FRAME OPTIONS
The per-frame options are applied to the images that follow as arguments on the
command line. They can be modified any number of times preceding each particular
input image.
.TP
.BI \-d " int
Specify the image duration in milliseconds.
.TP
.B \-lossless, \-lossy
Compress the next image(s) using lossless or lossy compression mode. The
default mode is lossless.
.TP
.BI \-q " float
Specify the compression factor between 0 and 100. The default is 75.
.TP
.BI \-m " int
Specify the compression method to use. This parameter controls the
trade off between encoding speed and the compressed file size and quality.
Possible values range from 0 to 6. Default value is 4.
.SH EXAMPLE
img2webp -loop 2 in0.png -lossy in1.jpg -d 80 in2.tiff -o out.webp
.br
.SH BUGS
Please report all bugs to the issue tracker:
https://bugs.chromium.org/p/webp
.br
Patches welcome! See this page to get started:
http://www.webmproject.org/code/contribute/submitting\-patches/
.SH AUTHORS
\fBimg2webp\fP is a part of libwebp and was written by the WebP team.
.br
The latest source tree is available at
https://chromium.googlesource.com/webm/libwebp
.PP
This manual page was written by Pascal Massimino <pascal.massimino@gmail.com>,
for the Debian project (and may be used by others).
.SH SEE ALSO
.BR webpmux (1),
.BR gif2webp (1)
.br
Please refer to http://developers.google.com/speed/webp/ for additional
information.

View File

@ -1,5 +1,5 @@
.\" Hey, EMACS: -*- nroff -*-
.TH VWEBP 1 "November 25, 2016"
.TH VWEBP 1 "October 19, 2015"
.SH NAME
vwebp \- decompress a WebP file and display it in a window
.SH SYNOPSIS
@ -33,10 +33,11 @@ Disable in-loop filtering.
Specify a dithering \fBstrength\fP between 0 and 100. Dithering is a
post-processing effect applied to chroma components in lossy compression.
It helps by smoothing gradients and avoiding banding artifacts. Default: 50.
.TP
.BI \-noalphadither
By default, quantized transparency planes are dithered during decompression,
to smooth the gradients. This flag will prevent this dithering.
.\" TODO(jzern): restore post-v0.4.1
.\" .TP
.\" .BI \-noalphadither
.\" By default, quantized transparency planes are dithered during decompression,
.\" to smooth the gradients. This flag will prevent this dithering.
.TP
.B \-mt
Use multi-threading for decoding, if possible.
@ -59,14 +60,11 @@ Toggle use of color profile.
.B 'i'
Overlay file information.
.TP
.B 'd'
Disable blending and disposal process, for debugging purposes.
.TP
.B 'q' / 'Q' / ESC
Quit.
.SH BUGS
Please report all bugs to the issue tracker:
Please report all bugs to our issue tracker:
https://bugs.chromium.org/p/webp
.br
Patches welcome! See this page to get started:
@ -80,10 +78,9 @@ vwebp picture.webp -mt -dither 0
vwebp \-\- \-\-\-picture.webp
.SH AUTHORS
\fBvwebp\fP is a part of libwebp and was written by the WebP team.
\fBvwebp\fP was written by the WebP team.
.br
The latest source tree is available at
https://chromium.googlesource.com/webm/libwebp
The latest source tree is available at http://www.webmproject.org/code
.PP
This manual page was written for the Debian project (and may be used by others).

View File

@ -1,5 +1,5 @@
.\" Hey, EMACS: -*- nroff -*-
.TH WEBPMUX 1 "November 10, 2016"
.TH WEBPMUX 1 "October 19, 2015"
.SH NAME
webpmux \- create animated WebP files from non\-animated WebP images, extract
frames from animated WebP images, and manage XMP/EXIF metadata and ICC profile.
@ -35,13 +35,6 @@ frames from animated WebP images, and manage XMP/EXIF metadata and ICC profile.
.I OUTPUT
.RE
.br
.B webpmux \-duration
.I DURATION OPTIONS
.B [ \-duration ... ]
.I INPUT
.B \-o
.I OUTPUT
.br
.B webpmux \-info
.I INPUT
.br
@ -98,42 +91,6 @@ Strip EXIF metadata.
.B xmp
Strip XMP metadata.
.SS DURATION_OPTIONS (\-duration)
Amend the duration of a specific interval of frames. This option is only
effective on animated WebP and has no effect on a single-frame file.
.TP
.I duration[,start[,end]]
Where:
.br
.B duration
is the duration for the interval in milliseconds (mandatory).
Must be non-negative.
.br
.B start
is the starting frame index of the interval (optional).
.br
.B end
is the ending frame index (inclusive) of the interval (optional).
.TP
The three typical usages of this option are:
.br
.B -duration d
set the duration to 'd' for the whole animation.
.br
.B -duration d,f
set the duration of frame 'f' to 'd'.
.br
.B -duration d,start,end
set the duration to 'd' for the whole [start,end] interval.
.TP
.P
Note that the frames outside of the [start, end] interval will remain untouched.
The 'end' value '0' has the special meaning 'last frame of the animation'.
.TP
.I Reminder:
frame indexing starts at '1'.
.br
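For example, assuming later \-duration options take precedence for their interval, the whole animation could be set to 100 ms per frame while frames 2 to 5 get 250 ms (the file names are illustrative):
webpmux \-duration 100 \-duration 250,2,5 anim.webp \-o adjusted.webp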
.SS FRAME_OPTIONS (\-frame)
Create an animated WebP file from multiple (non\-animated) WebP images.
.TP
@ -171,7 +128,7 @@ Output file in WebP format.
The nature of EXIF, XMP and ICC data is not checked and is assumed to be valid.
.SH BUGS
Please report all bugs to the issue tracker:
Please report all bugs to our issue tracker:
https://bugs.chromium.org/p/webp
.br
Patches welcome! See this page to get started:
@ -238,10 +195,9 @@ webpmux \-get icc \-o image_profile.icc \-\- \-\-\-icc_container.webp
webpmux \-strip icc \-o without_icc.webp \-\- \-\-\-icc_container.webp
.SH AUTHORS
\fBwebpmux\fP is a part of libwebp and was written by the WebP team.
\fBwebpmux\fP is written by the WebP team.
.br
The latest source tree is available at
https://chromium.googlesource.com/webm/libwebp
The latest source tree is available at http://www.webmproject.org/code
.PP
This manual page was written by Vikas Arora <vikaas.arora@gmail.com>,
for the Debian project (and may be used by others).

View File

@ -1,5 +1,5 @@
# The mux and demux libraries depend on libwebp, thus the '.' to force
# the build order so it's available to them.
# The mux and demux libraries depend on libwebp, thus the '.' to force the
# build order so it's available to them.
SUBDIRS = dec enc dsp utils .
if WANT_MUX
SUBDIRS += mux
@ -35,7 +35,7 @@ libwebp_la_LIBADD += utils/libwebputils.la
# other than the ones listed on the command line, i.e., after linking, it will
# not have unresolved symbols. Some platforms (Windows among them) require all
# symbols in shared libraries to be resolved at library creation.
libwebp_la_LDFLAGS = -no-undefined -version-info 7:0:0
libwebp_la_LDFLAGS = -no-undefined -version-info 5:4:0
libwebpincludedir = $(includedir)/webp
pkgconfig_DATA = libwebp.pc
@ -47,7 +47,7 @@ if BUILD_LIBWEBPDECODER
libwebpdecoder_la_LIBADD += dsp/libwebpdspdecode.la
libwebpdecoder_la_LIBADD += utils/libwebputilsdecode.la
libwebpdecoder_la_LDFLAGS = -no-undefined -version-info 3:0:0
libwebpdecoder_la_LDFLAGS = -no-undefined -version-info 1:4:0
pkgconfig_DATA += libwebpdecoder.pc
endif

View File

@ -1,22 +1,21 @@
noinst_LTLIBRARIES = libwebpdecode.la
libwebpdecode_la_SOURCES =
libwebpdecode_la_SOURCES += alpha_dec.c
libwebpdecode_la_SOURCES += alphai_dec.h
libwebpdecode_la_SOURCES += buffer_dec.c
libwebpdecode_la_SOURCES += common_dec.h
libwebpdecode_la_SOURCES += vp8_dec.h
libwebpdecode_la_SOURCES += frame_dec.c
libwebpdecode_la_SOURCES += idec_dec.c
libwebpdecode_la_SOURCES += io_dec.c
libwebpdecode_la_SOURCES += quant_dec.c
libwebpdecode_la_SOURCES += tree_dec.c
libwebpdecode_la_SOURCES += vp8_dec.c
libwebpdecode_la_SOURCES += vp8i_dec.h
libwebpdecode_la_SOURCES += vp8l_dec.c
libwebpdecode_la_SOURCES += vp8li_dec.h
libwebpdecode_la_SOURCES += webp_dec.c
libwebpdecode_la_SOURCES += webpi_dec.h
libwebpdecode_la_SOURCES += alpha.c
libwebpdecode_la_SOURCES += alphai.h
libwebpdecode_la_SOURCES += buffer.c
libwebpdecode_la_SOURCES += decode_vp8.h
libwebpdecode_la_SOURCES += frame.c
libwebpdecode_la_SOURCES += idec.c
libwebpdecode_la_SOURCES += io.c
libwebpdecode_la_SOURCES += quant.c
libwebpdecode_la_SOURCES += tree.c
libwebpdecode_la_SOURCES += vp8.c
libwebpdecode_la_SOURCES += vp8i.h
libwebpdecode_la_SOURCES += vp8l.c
libwebpdecode_la_SOURCES += vp8li.h
libwebpdecode_la_SOURCES += webp.c
libwebpdecode_la_SOURCES += webpi.h
libwebpdecodeinclude_HEADERS =
libwebpdecodeinclude_HEADERS += ../webp/decode.h
@ -24,5 +23,5 @@ libwebpdecodeinclude_HEADERS += ../webp/types.h
noinst_HEADERS =
noinst_HEADERS += ../webp/format_constants.h
libwebpdecode_la_CPPFLAGS = $(AM_CPPFLAGS) $(USE_EXPERIMENTAL_CODE)
libwebpdecode_la_CPPFLAGS = $(USE_EXPERIMENTAL_CODE)
libwebpdecodeincludedir = $(includedir)/webp

src/dec/alpha.c Normal file
View File

@ -0,0 +1,165 @@
// Copyright 2011 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// Alpha-plane decompression.
//
// Author: Skal (pascal.massimino@gmail.com)
#include <stdlib.h>
#include "./alphai.h"
#include "./vp8i.h"
#include "./vp8li.h"
#include "../utils/quant_levels_dec.h"
#include "../utils/utils.h"
#include "../webp/format_constants.h"
//------------------------------------------------------------------------------
// ALPHDecoder object.
ALPHDecoder* ALPHNew(void) {
ALPHDecoder* const dec = (ALPHDecoder*)WebPSafeCalloc(1ULL, sizeof(*dec));
return dec;
}
void ALPHDelete(ALPHDecoder* const dec) {
if (dec != NULL) {
VP8LDelete(dec->vp8l_dec_);
dec->vp8l_dec_ = NULL;
WebPSafeFree(dec);
}
}
//------------------------------------------------------------------------------
// Decoding.
// Initialize alpha decoding by parsing the alpha header and decoding the image
// header for alpha data stored using lossless compression.
// Returns false in case of error in alpha header (data too short, invalid
// compression method or filter, error in lossless header data etc).
static int ALPHInit(ALPHDecoder* const dec, const uint8_t* data,
size_t data_size, int width, int height, uint8_t* output) {
int ok = 0;
const uint8_t* const alpha_data = data + ALPHA_HEADER_LEN;
const size_t alpha_data_size = data_size - ALPHA_HEADER_LEN;
int rsrv;
assert(width > 0 && height > 0);
assert(data != NULL && output != NULL);
dec->width_ = width;
dec->height_ = height;
if (data_size <= ALPHA_HEADER_LEN) {
return 0;
}
dec->method_ = (data[0] >> 0) & 0x03;
dec->filter_ = (data[0] >> 2) & 0x03;
dec->pre_processing_ = (data[0] >> 4) & 0x03;
rsrv = (data[0] >> 6) & 0x03;
if (dec->method_ < ALPHA_NO_COMPRESSION ||
dec->method_ > ALPHA_LOSSLESS_COMPRESSION ||
dec->filter_ >= WEBP_FILTER_LAST ||
dec->pre_processing_ > ALPHA_PREPROCESSED_LEVELS ||
rsrv != 0) {
return 0;
}
if (dec->method_ == ALPHA_NO_COMPRESSION) {
const size_t alpha_decoded_size = dec->width_ * dec->height_;
ok = (alpha_data_size >= alpha_decoded_size);
} else {
assert(dec->method_ == ALPHA_LOSSLESS_COMPRESSION);
ok = VP8LDecodeAlphaHeader(dec, alpha_data, alpha_data_size, output);
}
return ok;
}
// Decodes, unfilters and dequantizes *at least* 'num_rows' rows of alpha
// starting from row number 'row'. It assumes that rows up to (row - 1) have
// already been decoded.
// Returns false in case of bitstream error.
static int ALPHDecode(VP8Decoder* const dec, int row, int num_rows) {
ALPHDecoder* const alph_dec = dec->alph_dec_;
const int width = alph_dec->width_;
const int height = alph_dec->height_;
WebPUnfilterFunc unfilter_func = WebPUnfilters[alph_dec->filter_];
uint8_t* const output = dec->alpha_plane_;
if (alph_dec->method_ == ALPHA_NO_COMPRESSION) {
const size_t offset = row * width;
const size_t num_pixels = num_rows * width;
assert(dec->alpha_data_size_ >= ALPHA_HEADER_LEN + offset + num_pixels);
memcpy(dec->alpha_plane_ + offset,
dec->alpha_data_ + ALPHA_HEADER_LEN + offset, num_pixels);
} else { // alph_dec->method_ == ALPHA_LOSSLESS_COMPRESSION
assert(alph_dec->vp8l_dec_ != NULL);
if (!VP8LDecodeAlphaImageStream(alph_dec, row + num_rows)) {
return 0;
}
}
if (unfilter_func != NULL) {
unfilter_func(width, height, width, row, num_rows, output);
}
if (row + num_rows == dec->pic_hdr_.height_) {
dec->is_alpha_decoded_ = 1;
}
return 1;
}
//------------------------------------------------------------------------------
// Main entry point.
const uint8_t* VP8DecompressAlphaRows(VP8Decoder* const dec,
int row, int num_rows) {
const int width = dec->pic_hdr_.width_;
const int height = dec->pic_hdr_.height_;
if (row < 0 || num_rows <= 0 || row + num_rows > height) {
return NULL; // sanity check.
}
if (row == 0) {
// Initialize decoding.
assert(dec->alpha_plane_ != NULL);
dec->alph_dec_ = ALPHNew();
if (dec->alph_dec_ == NULL) return NULL;
if (!ALPHInit(dec->alph_dec_, dec->alpha_data_, dec->alpha_data_size_,
width, height, dec->alpha_plane_)) {
ALPHDelete(dec->alph_dec_);
dec->alph_dec_ = NULL;
return NULL;
}
// if we allowed use of alpha dithering, check whether it's needed at all
if (dec->alph_dec_->pre_processing_ != ALPHA_PREPROCESSED_LEVELS) {
dec->alpha_dithering_ = 0; // disable dithering
} else {
num_rows = height; // decode everything in one pass
}
}
if (!dec->is_alpha_decoded_) {
int ok = 0;
assert(dec->alph_dec_ != NULL);
ok = ALPHDecode(dec, row, num_rows);
if (ok && dec->alpha_dithering_ > 0) {
ok = WebPDequantizeLevels(dec->alpha_plane_, width, height,
dec->alpha_dithering_);
}
if (!ok || dec->is_alpha_decoded_) {
ALPHDelete(dec->alph_dec_);
dec->alph_dec_ = NULL;
}
if (!ok) return NULL; // Error.
}
// Return a pointer to the current decoded row.
return dec->alpha_plane_ + row * width;
}

View File

@ -1,232 +0,0 @@
// Copyright 2011 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// Alpha-plane decompression.
//
// Author: Skal (pascal.massimino@gmail.com)
#include <stdlib.h>
#include "./alphai_dec.h"
#include "./vp8i_dec.h"
#include "./vp8li_dec.h"
#include "../dsp/dsp.h"
#include "../utils/quant_levels_dec_utils.h"
#include "../utils/utils.h"
#include "../webp/format_constants.h"
//------------------------------------------------------------------------------
// ALPHDecoder object.
// Allocates a new alpha decoder instance.
static ALPHDecoder* ALPHNew(void) {
ALPHDecoder* const dec = (ALPHDecoder*)WebPSafeCalloc(1ULL, sizeof(*dec));
return dec;
}
// Clears and deallocates an alpha decoder instance.
static void ALPHDelete(ALPHDecoder* const dec) {
if (dec != NULL) {
VP8LDelete(dec->vp8l_dec_);
dec->vp8l_dec_ = NULL;
WebPSafeFree(dec);
}
}
//------------------------------------------------------------------------------
// Decoding.
// Initialize alpha decoding by parsing the alpha header and decoding the image
// header for alpha data stored using lossless compression.
// Returns false in case of error in alpha header (data too short, invalid
// compression method or filter, error in lossless header data etc).
static int ALPHInit(ALPHDecoder* const dec, const uint8_t* data,
size_t data_size, const VP8Io* const src_io,
uint8_t* output) {
int ok = 0;
const uint8_t* const alpha_data = data + ALPHA_HEADER_LEN;
const size_t alpha_data_size = data_size - ALPHA_HEADER_LEN;
int rsrv;
VP8Io* const io = &dec->io_;
assert(data != NULL && output != NULL && src_io != NULL);
VP8FiltersInit();
dec->output_ = output;
dec->width_ = src_io->width;
dec->height_ = src_io->height;
assert(dec->width_ > 0 && dec->height_ > 0);
if (data_size <= ALPHA_HEADER_LEN) {
return 0;
}
dec->method_ = (data[0] >> 0) & 0x03;
dec->filter_ = (WEBP_FILTER_TYPE)((data[0] >> 2) & 0x03);
dec->pre_processing_ = (data[0] >> 4) & 0x03;
rsrv = (data[0] >> 6) & 0x03;
if (dec->method_ < ALPHA_NO_COMPRESSION ||
dec->method_ > ALPHA_LOSSLESS_COMPRESSION ||
dec->filter_ >= WEBP_FILTER_LAST ||
dec->pre_processing_ > ALPHA_PREPROCESSED_LEVELS ||
rsrv != 0) {
return 0;
}
// Copy the necessary parameters from src_io to io
VP8InitIo(io);
WebPInitCustomIo(NULL, io);
io->opaque = dec;
io->width = src_io->width;
io->height = src_io->height;
io->use_cropping = src_io->use_cropping;
io->crop_left = src_io->crop_left;
io->crop_right = src_io->crop_right;
io->crop_top = src_io->crop_top;
io->crop_bottom = src_io->crop_bottom;
// No need to copy the scaling parameters.
if (dec->method_ == ALPHA_NO_COMPRESSION) {
const size_t alpha_decoded_size = dec->width_ * dec->height_;
ok = (alpha_data_size >= alpha_decoded_size);
} else {
assert(dec->method_ == ALPHA_LOSSLESS_COMPRESSION);
ok = VP8LDecodeAlphaHeader(dec, alpha_data, alpha_data_size);
}
return ok;
}
// Decodes, unfilters and dequantizes *at least* 'num_rows' rows of alpha
// starting from row number 'row'. It assumes that rows up to (row - 1) have
// already been decoded.
// Returns false in case of bitstream error.
static int ALPHDecode(VP8Decoder* const dec, int row, int num_rows) {
ALPHDecoder* const alph_dec = dec->alph_dec_;
const int width = alph_dec->width_;
const int height = alph_dec->io_.crop_bottom;
if (alph_dec->method_ == ALPHA_NO_COMPRESSION) {
int y;
const uint8_t* prev_line = dec->alpha_prev_line_;
const uint8_t* deltas = dec->alpha_data_ + ALPHA_HEADER_LEN + row * width;
uint8_t* dst = dec->alpha_plane_ + row * width;
assert(deltas <= &dec->alpha_data_[dec->alpha_data_size_]);
if (alph_dec->filter_ != WEBP_FILTER_NONE) {
assert(WebPUnfilters[alph_dec->filter_] != NULL);
for (y = 0; y < num_rows; ++y) {
WebPUnfilters[alph_dec->filter_](prev_line, deltas, dst, width);
prev_line = dst;
dst += width;
deltas += width;
}
} else {
for (y = 0; y < num_rows; ++y) {
memcpy(dst, deltas, width * sizeof(*dst));
prev_line = dst;
dst += width;
deltas += width;
}
}
dec->alpha_prev_line_ = prev_line;
} else { // alph_dec->method_ == ALPHA_LOSSLESS_COMPRESSION
assert(alph_dec->vp8l_dec_ != NULL);
if (!VP8LDecodeAlphaImageStream(alph_dec, row + num_rows)) {
return 0;
}
}
if (row + num_rows >= height) {
dec->is_alpha_decoded_ = 1;
}
return 1;
}
static int AllocateAlphaPlane(VP8Decoder* const dec, const VP8Io* const io) {
const int stride = io->width;
const int height = io->crop_bottom;
const uint64_t alpha_size = (uint64_t)stride * height;
assert(dec->alpha_plane_mem_ == NULL);
dec->alpha_plane_mem_ =
(uint8_t*)WebPSafeMalloc(alpha_size, sizeof(*dec->alpha_plane_));
if (dec->alpha_plane_mem_ == NULL) {
return 0;
}
dec->alpha_plane_ = dec->alpha_plane_mem_;
dec->alpha_prev_line_ = NULL;
return 1;
}
void WebPDeallocateAlphaMemory(VP8Decoder* const dec) {
assert(dec != NULL);
WebPSafeFree(dec->alpha_plane_mem_);
dec->alpha_plane_mem_ = NULL;
dec->alpha_plane_ = NULL;
ALPHDelete(dec->alph_dec_);
dec->alph_dec_ = NULL;
}
//------------------------------------------------------------------------------
// Main entry point.
const uint8_t* VP8DecompressAlphaRows(VP8Decoder* const dec,
const VP8Io* const io,
int row, int num_rows) {
const int width = io->width;
const int height = io->crop_bottom;
assert(dec != NULL && io != NULL);
if (row < 0 || num_rows <= 0 || row + num_rows > height) {
return NULL; // sanity check.
}
if (!dec->is_alpha_decoded_) {
if (dec->alph_dec_ == NULL) { // Initialize decoder.
dec->alph_dec_ = ALPHNew();
if (dec->alph_dec_ == NULL) return NULL;
if (!AllocateAlphaPlane(dec, io)) goto Error;
if (!ALPHInit(dec->alph_dec_, dec->alpha_data_, dec->alpha_data_size_,
io, dec->alpha_plane_)) {
goto Error;
}
// if we allowed use of alpha dithering, check whether it's needed at all
if (dec->alph_dec_->pre_processing_ != ALPHA_PREPROCESSED_LEVELS) {
dec->alpha_dithering_ = 0; // disable dithering
} else {
num_rows = height - row; // decode everything in one pass
}
}
assert(dec->alph_dec_ != NULL);
assert(row + num_rows <= height);
if (!ALPHDecode(dec, row, num_rows)) goto Error;
if (dec->is_alpha_decoded_) { // finished?
ALPHDelete(dec->alph_dec_);
dec->alph_dec_ = NULL;
if (dec->alpha_dithering_ > 0) {
uint8_t* const alpha = dec->alpha_plane_ + io->crop_top * width
+ io->crop_left;
if (!WebPDequantizeLevels(alpha,
io->crop_right - io->crop_left,
io->crop_bottom - io->crop_top,
width, dec->alpha_dithering_)) {
goto Error;
}
}
}
}
// Return a pointer to the current decoded row.
return dec->alpha_plane_ + row * width;
Error:
WebPDeallocateAlphaMemory(dec);
return NULL;
}

View File

@ -14,8 +14,8 @@
#ifndef WEBP_DEC_ALPHAI_H_
#define WEBP_DEC_ALPHAI_H_
#include "./webpi_dec.h"
#include "../utils/filters_utils.h"
#include "./webpi.h"
#include "../utils/filters.h"
#ifdef __cplusplus
extern "C" {
@ -32,18 +32,19 @@ struct ALPHDecoder {
int pre_processing_;
struct VP8LDecoder* vp8l_dec_;
VP8Io io_;
int use_8b_decode_; // Although alpha channel requires only 1 byte per
// pixel, sometimes VP8LDecoder may need to allocate
// 4 bytes per pixel internally during decode.
uint8_t* output_;
const uint8_t* prev_line_; // last output row (or NULL)
int use_8b_decode; // Although alpha channel requires only 1 byte per
// pixel, sometimes VP8LDecoder may need to allocate
// 4 bytes per pixel internally during decode.
};
//------------------------------------------------------------------------------
// internal functions. Not public.
// Deallocate memory associated to dec->alpha_plane_ decoding
void WebPDeallocateAlphaMemory(VP8Decoder* const dec);
// Allocates a new alpha decoder instance.
ALPHDecoder* ALPHNew(void);
// Clears and deallocates an alpha decoder instance.
void ALPHDelete(ALPHDecoder* const dec);
//------------------------------------------------------------------------------

View File

@ -13,8 +13,8 @@
#include <stdlib.h>
#include "./vp8i_dec.h"
#include "./webpi_dec.h"
#include "./vp8i.h"
#include "./webpi.h"
#include "../utils/utils.h"
//------------------------------------------------------------------------------
@ -92,7 +92,7 @@ static VP8StatusCode AllocateBuffer(WebPDecBuffer* const buffer) {
return VP8_STATUS_INVALID_PARAM;
}
if (buffer->is_external_memory <= 0 && buffer->private_memory == NULL) {
if (!buffer->is_external_memory && buffer->private_memory == NULL) {
uint8_t* output;
int uv_stride = 0, a_stride = 0;
uint64_t uv_size = 0, a_size = 0, total_size;
@ -189,14 +189,11 @@ VP8StatusCode WebPAllocateDecBuffer(int w, int h,
h = ch;
}
if (options->use_scaling) {
int scaled_width = options->scaled_width;
int scaled_height = options->scaled_height;
if (!WebPRescalerGetScaledDimensions(
w, h, &scaled_width, &scaled_height)) {
if (options->scaled_width <= 0 || options->scaled_height <= 0) {
return VP8_STATUS_INVALID_PARAM;
}
w = scaled_width;
h = scaled_height;
w = options->scaled_width;
h = options->scaled_height;
}
}
out->width = w;
@ -206,10 +203,12 @@ VP8StatusCode WebPAllocateDecBuffer(int w, int h,
status = AllocateBuffer(out);
if (status != VP8_STATUS_OK) return status;
#if WEBP_DECODER_ABI_VERSION > 0x0203
// Use the stride trick if vertical flip is needed.
if (options != NULL && options->flip) {
status = WebPFlipBuffer(out);
}
#endif
return status;
}
@ -227,7 +226,7 @@ int WebPInitDecBufferInternal(WebPDecBuffer* buffer, int version) {
void WebPFreeDecBuffer(WebPDecBuffer* buffer) {
if (buffer != NULL) {
if (buffer->is_external_memory <= 0) {
if (!buffer->is_external_memory) {
WebPSafeFree(buffer->private_memory);
}
buffer->private_memory = NULL;
@ -256,45 +255,5 @@ void WebPGrabDecBuffer(WebPDecBuffer* const src, WebPDecBuffer* const dst) {
}
}
VP8StatusCode WebPCopyDecBufferPixels(const WebPDecBuffer* const src_buf,
WebPDecBuffer* const dst_buf) {
assert(src_buf != NULL && dst_buf != NULL);
assert(src_buf->colorspace == dst_buf->colorspace);
dst_buf->width = src_buf->width;
dst_buf->height = src_buf->height;
if (CheckDecBuffer(dst_buf) != VP8_STATUS_OK) {
return VP8_STATUS_INVALID_PARAM;
}
if (WebPIsRGBMode(src_buf->colorspace)) {
const WebPRGBABuffer* const src = &src_buf->u.RGBA;
const WebPRGBABuffer* const dst = &dst_buf->u.RGBA;
WebPCopyPlane(src->rgba, src->stride, dst->rgba, dst->stride,
src_buf->width * kModeBpp[src_buf->colorspace],
src_buf->height);
} else {
const WebPYUVABuffer* const src = &src_buf->u.YUVA;
const WebPYUVABuffer* const dst = &dst_buf->u.YUVA;
WebPCopyPlane(src->y, src->y_stride, dst->y, dst->y_stride,
src_buf->width, src_buf->height);
WebPCopyPlane(src->u, src->u_stride, dst->u, dst->u_stride,
(src_buf->width + 1) / 2, (src_buf->height + 1) / 2);
WebPCopyPlane(src->v, src->v_stride, dst->v, dst->v_stride,
(src_buf->width + 1) / 2, (src_buf->height + 1) / 2);
if (WebPIsAlphaMode(src_buf->colorspace)) {
WebPCopyPlane(src->a, src->a_stride, dst->a, dst->a_stride,
src_buf->width, src_buf->height);
}
}
return VP8_STATUS_OK;
}
int WebPAvoidSlowMemory(const WebPDecBuffer* const output,
const WebPBitstreamFeatures* const features) {
assert(output != NULL);
return (output->is_external_memory >= 2) &&
WebPIsPremultipliedMode(output->colorspace) &&
(features != NULL && features->has_alpha);
}
//------------------------------------------------------------------------------

View File

@ -1,54 +0,0 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// Definitions and macros common to encoding and decoding
//
// Author: Skal (pascal.massimino@gmail.com)
#ifndef WEBP_DEC_COMMON_H_
#define WEBP_DEC_COMMON_H_
// intra prediction modes
enum { B_DC_PRED = 0, // 4x4 modes
B_TM_PRED = 1,
B_VE_PRED = 2,
B_HE_PRED = 3,
B_RD_PRED = 4,
B_VR_PRED = 5,
B_LD_PRED = 6,
B_VL_PRED = 7,
B_HD_PRED = 8,
B_HU_PRED = 9,
NUM_BMODES = B_HU_PRED + 1 - B_DC_PRED, // = 10
// Luma16 or UV modes
DC_PRED = B_DC_PRED, V_PRED = B_VE_PRED,
H_PRED = B_HE_PRED, TM_PRED = B_TM_PRED,
B_PRED = NUM_BMODES, // refined I4x4 mode
NUM_PRED_MODES = 4,
// special modes
B_DC_PRED_NOTOP = 4,
B_DC_PRED_NOLEFT = 5,
B_DC_PRED_NOTOPLEFT = 6,
NUM_B_DC_MODES = 7 };
enum { MB_FEATURE_TREE_PROBS = 3,
NUM_MB_SEGMENTS = 4,
NUM_REF_LF_DELTAS = 4,
NUM_MODE_LF_DELTAS = 4, // I4x4, ZERO, *, SPLIT
MAX_NUM_PARTITIONS = 8,
// Probabilities
NUM_TYPES = 4, // 0: i16-AC, 1: i16-DC, 2:chroma-AC, 3:i4-AC
NUM_BANDS = 8,
NUM_CTX = 3,
NUM_PROBAS = 11
};
#endif // WEBP_DEC_COMMON_H_

View File

@ -12,183 +12,13 @@
// Author: Skal (pascal.massimino@gmail.com)
#include <stdlib.h>
#include "./vp8i_dec.h"
#include "./vp8i.h"
#include "../utils/utils.h"
//------------------------------------------------------------------------------
// Main reconstruction function.
static const int kScan[16] = {
0 + 0 * BPS, 4 + 0 * BPS, 8 + 0 * BPS, 12 + 0 * BPS,
0 + 4 * BPS, 4 + 4 * BPS, 8 + 4 * BPS, 12 + 4 * BPS,
0 + 8 * BPS, 4 + 8 * BPS, 8 + 8 * BPS, 12 + 8 * BPS,
0 + 12 * BPS, 4 + 12 * BPS, 8 + 12 * BPS, 12 + 12 * BPS
};
static int CheckMode(int mb_x, int mb_y, int mode) {
if (mode == B_DC_PRED) {
if (mb_x == 0) {
return (mb_y == 0) ? B_DC_PRED_NOTOPLEFT : B_DC_PRED_NOLEFT;
} else {
return (mb_y == 0) ? B_DC_PRED_NOTOP : B_DC_PRED;
}
}
return mode;
}
static void Copy32b(uint8_t* const dst, const uint8_t* const src) {
memcpy(dst, src, 4);
}
static WEBP_INLINE void DoTransform(uint32_t bits, const int16_t* const src,
uint8_t* const dst) {
switch (bits >> 30) {
case 3:
VP8Transform(src, dst, 0);
break;
case 2:
VP8TransformAC3(src, dst);
break;
case 1:
VP8TransformDC(src, dst);
break;
default:
break;
}
}
static void DoUVTransform(uint32_t bits, const int16_t* const src,
uint8_t* const dst) {
if (bits & 0xff) { // any non-zero coeff at all?
if (bits & 0xaa) { // any non-zero AC coefficient?
VP8TransformUV(src, dst); // note we don't use the AC3 variant for U/V
} else {
VP8TransformDCUV(src, dst);
}
}
}
#define ALIGN_MASK (32 - 1)
static void ReconstructRow(const VP8Decoder* const dec,
const VP8ThreadContext* ctx) {
int j;
int mb_x;
const int mb_y = ctx->mb_y_;
const int cache_id = ctx->id_;
uint8_t* const y_dst = dec->yuv_b_ + Y_OFF;
uint8_t* const u_dst = dec->yuv_b_ + U_OFF;
uint8_t* const v_dst = dec->yuv_b_ + V_OFF;
// Initialize left-most block.
for (j = 0; j < 16; ++j) {
y_dst[j * BPS - 1] = 129;
}
for (j = 0; j < 8; ++j) {
u_dst[j * BPS - 1] = 129;
v_dst[j * BPS - 1] = 129;
}
// Init top-left sample on left column too.
if (mb_y > 0) {
y_dst[-1 - BPS] = u_dst[-1 - BPS] = v_dst[-1 - BPS] = 129;
} else {
// we only need to do this init once at block (0,0).
// Afterward, it remains valid for the whole topmost row.
memset(y_dst - BPS - 1, 127, 16 + 4 + 1);
memset(u_dst - BPS - 1, 127, 8 + 1);
memset(v_dst - BPS - 1, 127, 8 + 1);
}
// Reconstruct one row.
for (mb_x = 0; mb_x < dec->mb_w_; ++mb_x) {
const VP8MBData* const block = ctx->mb_data_ + mb_x;
// Rotate in the left samples from previously decoded block. We move four
// pixels at a time for alignment reason, and because of in-loop filter.
if (mb_x > 0) {
for (j = -1; j < 16; ++j) {
Copy32b(&y_dst[j * BPS - 4], &y_dst[j * BPS + 12]);
}
for (j = -1; j < 8; ++j) {
Copy32b(&u_dst[j * BPS - 4], &u_dst[j * BPS + 4]);
Copy32b(&v_dst[j * BPS - 4], &v_dst[j * BPS + 4]);
}
}
{
// bring top samples into the cache
VP8TopSamples* const top_yuv = dec->yuv_t_ + mb_x;
const int16_t* const coeffs = block->coeffs_;
uint32_t bits = block->non_zero_y_;
int n;
if (mb_y > 0) {
memcpy(y_dst - BPS, top_yuv[0].y, 16);
memcpy(u_dst - BPS, top_yuv[0].u, 8);
memcpy(v_dst - BPS, top_yuv[0].v, 8);
}
// predict and add residuals
if (block->is_i4x4_) { // 4x4
uint32_t* const top_right = (uint32_t*)(y_dst - BPS + 16);
if (mb_y > 0) {
if (mb_x >= dec->mb_w_ - 1) { // on rightmost border
memset(top_right, top_yuv[0].y[15], sizeof(*top_right));
} else {
memcpy(top_right, top_yuv[1].y, sizeof(*top_right));
}
}
// replicate the top-right pixels below
top_right[BPS] = top_right[2 * BPS] = top_right[3 * BPS] = top_right[0];
// predict and add residuals for all 4x4 blocks in turn.
for (n = 0; n < 16; ++n, bits <<= 2) {
uint8_t* const dst = y_dst + kScan[n];
VP8PredLuma4[block->imodes_[n]](dst);
DoTransform(bits, coeffs + n * 16, dst);
}
} else { // 16x16
const int pred_func = CheckMode(mb_x, mb_y, block->imodes_[0]);
VP8PredLuma16[pred_func](y_dst);
if (bits != 0) {
for (n = 0; n < 16; ++n, bits <<= 2) {
DoTransform(bits, coeffs + n * 16, y_dst + kScan[n]);
}
}
}
{
// Chroma
const uint32_t bits_uv = block->non_zero_uv_;
const int pred_func = CheckMode(mb_x, mb_y, block->uvmode_);
VP8PredChroma8[pred_func](u_dst);
VP8PredChroma8[pred_func](v_dst);
DoUVTransform(bits_uv >> 0, coeffs + 16 * 16, u_dst);
DoUVTransform(bits_uv >> 8, coeffs + 20 * 16, v_dst);
}
// stash away top samples for next block
if (mb_y < dec->mb_h_ - 1) {
memcpy(top_yuv[0].y, y_dst + 15 * BPS, 16);
memcpy(top_yuv[0].u, u_dst + 7 * BPS, 8);
memcpy(top_yuv[0].v, v_dst + 7 * BPS, 8);
}
}
// Transfer reconstructed samples from yuv_b_ cache to final destination.
{
const int y_offset = cache_id * 16 * dec->cache_y_stride_;
const int uv_offset = cache_id * 8 * dec->cache_uv_stride_;
uint8_t* const y_out = dec->cache_y_ + mb_x * 16 + y_offset;
uint8_t* const u_out = dec->cache_u_ + mb_x * 8 + uv_offset;
uint8_t* const v_out = dec->cache_v_ + mb_x * 8 + uv_offset;
for (j = 0; j < 16; ++j) {
memcpy(y_out + j * dec->cache_y_stride_, y_dst + j * BPS, 16);
}
for (j = 0; j < 8; ++j) {
memcpy(u_out + j * dec->cache_uv_stride_, u_dst + j * BPS, 8);
memcpy(v_out + j * dec->cache_uv_stride_, v_dst + j * BPS, 8);
}
}
}
}
const VP8ThreadContext* ctx); // TODO(skal): remove
//------------------------------------------------------------------------------
// Filtering
@ -282,6 +112,7 @@ static void PrecomputeFilterStrengths(VP8Decoder* const dec) {
VP8FInfo* const info = &dec->fstrengths_[s][i4x4];
int level = base_level;
if (hdr->use_lf_delta_) {
// TODO(skal): only CURRENT is handled for now.
level += hdr->ref_lf_delta_[0];
if (i4x4) {
level += hdr->mode_lf_delta_[0];
@ -316,9 +147,6 @@ static void PrecomputeFilterStrengths(VP8Decoder* const dec) {
//------------------------------------------------------------------------------
// Dithering
// minimal amp that will provide a non-zero dithering effect
#define MIN_DITHER_AMP 4
#define DITHER_AMP_TAB_SIZE 12
static const int kQuantToDitherAmp[DITHER_AMP_TAB_SIZE] = {
// roughly, it's dqm->uv_mat_[1]
@ -349,6 +177,7 @@ void VP8InitDithering(const WebPDecoderOptions* const options,
dec->dither_ = 1;
}
}
#if WEBP_DECODER_ABI_VERSION > 0x0204
// potentially allow alpha dithering
dec->alpha_dithering_ = options->alpha_dithering_strength;
if (dec->alpha_dithering_ > 100) {
@ -356,17 +185,31 @@ void VP8InitDithering(const WebPDecoderOptions* const options,
} else if (dec->alpha_dithering_ < 0) {
dec->alpha_dithering_ = 0;
}
#endif
}
}
// Convert to range: [-2,2] for dither=50, [-4,4] for dither=100
// minimal amp that will provide a non-zero dithering effect
#define MIN_DITHER_AMP 4
#define DITHER_DESCALE 4
#define DITHER_DESCALE_ROUNDER (1 << (DITHER_DESCALE - 1))
#define DITHER_AMP_BITS 8
#define DITHER_AMP_CENTER (1 << DITHER_AMP_BITS)
static void Dither8x8(VP8Random* const rg, uint8_t* dst, int bps, int amp) {
uint8_t dither[64];
int i;
for (i = 0; i < 8 * 8; ++i) {
dither[i] = VP8RandomBits2(rg, VP8_DITHER_AMP_BITS + 1, amp);
int i, j;
for (j = 0; j < 8; ++j) {
for (i = 0; i < 8; ++i) {
// TODO: could be made faster with SSE2
const int bits =
VP8RandomBits2(rg, DITHER_AMP_BITS + 1, amp) - DITHER_AMP_CENTER;
// Convert to range: [-2,2] for dither=50, [-4,4] for dither=100
const int delta = (bits + DITHER_DESCALE_ROUNDER) >> DITHER_DESCALE;
const int v = (int)dst[i] + delta;
dst[i] = (v < 0) ? 0 : (v > 255) ? 255u : (uint8_t)v;
}
dst += bps;
}
VP8DitherCombine8x8(dither, dst, bps);
}
static void DitherRow(VP8Decoder* const dec) {
@ -452,7 +295,7 @@ static int FinishRow(VP8Decoder* const dec, VP8Io* const io) {
if (dec->alpha_data_ != NULL && y_start < y_end) {
// TODO(skal): testing presence of alpha with dec->alpha_data_ is not a
// good idea.
io->a = VP8DecompressAlphaRows(dec, io, y_start, y_end - y_start);
io->a = VP8DecompressAlphaRows(dec, y_start, y_end - y_start);
if (io->a == NULL) {
return VP8SetError(dec, VP8_STATUS_BITSTREAM_ERROR,
"Could not decode alpha data.");
@ -711,7 +554,7 @@ static int AllocateMemory(VP8Decoder* const dec) {
const uint64_t needed = (uint64_t)intra_pred_mode_size
+ top_size + mb_info_size + f_info_size
+ yuv_size + mb_data_size
+ cache_size + alpha_size + WEBP_ALIGN_CST;
+ cache_size + alpha_size + ALIGN_MASK;
uint8_t* mem;
if (needed != (size_t)needed) return 0; // check for overflow
@ -723,7 +566,7 @@ static int AllocateMemory(VP8Decoder* const dec) {
return VP8SetError(dec, VP8_STATUS_OUT_OF_MEMORY,
"no memory during frame initialization.");
}
// down-cast is ok, thanks to WebPSafeMalloc() above.
// down-cast is ok, thanks to WebPSafeAlloc() above.
dec->mem_size_ = (size_t)needed;
}
@ -748,8 +591,8 @@ static int AllocateMemory(VP8Decoder* const dec) {
dec->thread_ctx_.f_info_ += mb_w;
}
mem = (uint8_t*)WEBP_ALIGN(mem);
assert((yuv_size & WEBP_ALIGN_CST) == 0);
mem = (uint8_t*)((uintptr_t)(mem + ALIGN_MASK) & ~ALIGN_MASK);
assert((yuv_size & ALIGN_MASK) == 0);
dec->yuv_b_ = (uint8_t*)mem;
mem += yuv_size;
@ -801,7 +644,7 @@ static void InitIo(VP8Decoder* const dec, VP8Io* io) {
io->a = NULL;
}
int VP8InitFrame(VP8Decoder* const dec, VP8Io* const io) {
int VP8InitFrame(VP8Decoder* const dec, VP8Io* io) {
if (!InitThreadContext(dec)) return 0; // call first. Sets dec->num_caches_.
if (!AllocateMemory(dec)) return 0;
InitIo(dec, io);
@ -810,3 +653,176 @@ int VP8InitFrame(VP8Decoder* const dec, VP8Io* const io) {
}
//------------------------------------------------------------------------------
// Main reconstruction function.
static const int kScan[16] = {
0 + 0 * BPS, 4 + 0 * BPS, 8 + 0 * BPS, 12 + 0 * BPS,
0 + 4 * BPS, 4 + 4 * BPS, 8 + 4 * BPS, 12 + 4 * BPS,
0 + 8 * BPS, 4 + 8 * BPS, 8 + 8 * BPS, 12 + 8 * BPS,
0 + 12 * BPS, 4 + 12 * BPS, 8 + 12 * BPS, 12 + 12 * BPS
};
static int CheckMode(int mb_x, int mb_y, int mode) {
if (mode == B_DC_PRED) {
if (mb_x == 0) {
return (mb_y == 0) ? B_DC_PRED_NOTOPLEFT : B_DC_PRED_NOLEFT;
} else {
return (mb_y == 0) ? B_DC_PRED_NOTOP : B_DC_PRED;
}
}
return mode;
}
static void Copy32b(uint8_t* dst, uint8_t* src) {
memcpy(dst, src, 4);
}
static WEBP_INLINE void DoTransform(uint32_t bits, const int16_t* const src,
uint8_t* const dst) {
switch (bits >> 30) {
case 3:
VP8Transform(src, dst, 0);
break;
case 2:
VP8TransformAC3(src, dst);
break;
case 1:
VP8TransformDC(src, dst);
break;
default:
break;
}
}
static void DoUVTransform(uint32_t bits, const int16_t* const src,
uint8_t* const dst) {
if (bits & 0xff) { // any non-zero coeff at all?
if (bits & 0xaa) { // any non-zero AC coefficient?
VP8TransformUV(src, dst); // note we don't use the AC3 variant for U/V
} else {
VP8TransformDCUV(src, dst);
}
}
}
static void ReconstructRow(const VP8Decoder* const dec,
const VP8ThreadContext* ctx) {
int j;
int mb_x;
const int mb_y = ctx->mb_y_;
const int cache_id = ctx->id_;
uint8_t* const y_dst = dec->yuv_b_ + Y_OFF;
uint8_t* const u_dst = dec->yuv_b_ + U_OFF;
uint8_t* const v_dst = dec->yuv_b_ + V_OFF;
for (mb_x = 0; mb_x < dec->mb_w_; ++mb_x) {
const VP8MBData* const block = ctx->mb_data_ + mb_x;
// Rotate in the left samples from previously decoded block. We move four
// pixels at a time for alignment reason, and because of in-loop filter.
if (mb_x > 0) {
for (j = -1; j < 16; ++j) {
Copy32b(&y_dst[j * BPS - 4], &y_dst[j * BPS + 12]);
}
for (j = -1; j < 8; ++j) {
Copy32b(&u_dst[j * BPS - 4], &u_dst[j * BPS + 4]);
Copy32b(&v_dst[j * BPS - 4], &v_dst[j * BPS + 4]);
}
} else {
for (j = 0; j < 16; ++j) {
y_dst[j * BPS - 1] = 129;
}
for (j = 0; j < 8; ++j) {
u_dst[j * BPS - 1] = 129;
v_dst[j * BPS - 1] = 129;
}
// Init top-left sample on left column too
if (mb_y > 0) {
y_dst[-1 - BPS] = u_dst[-1 - BPS] = v_dst[-1 - BPS] = 129;
}
}
{
// bring top samples into the cache
VP8TopSamples* const top_yuv = dec->yuv_t_ + mb_x;
const int16_t* const coeffs = block->coeffs_;
uint32_t bits = block->non_zero_y_;
int n;
if (mb_y > 0) {
memcpy(y_dst - BPS, top_yuv[0].y, 16);
memcpy(u_dst - BPS, top_yuv[0].u, 8);
memcpy(v_dst - BPS, top_yuv[0].v, 8);
} else if (mb_x == 0) {
// we only need to do this init once at block (0,0).
// Afterward, it remains valid for the whole topmost row.
memset(y_dst - BPS - 1, 127, 16 + 4 + 1);
memset(u_dst - BPS - 1, 127, 8 + 1);
memset(v_dst - BPS - 1, 127, 8 + 1);
}
// predict and add residuals
if (block->is_i4x4_) { // 4x4
uint32_t* const top_right = (uint32_t*)(y_dst - BPS + 16);
if (mb_y > 0) {
if (mb_x >= dec->mb_w_ - 1) { // on rightmost border
memset(top_right, top_yuv[0].y[15], sizeof(*top_right));
} else {
memcpy(top_right, top_yuv[1].y, sizeof(*top_right));
}
}
// replicate the top-right pixels below
top_right[BPS] = top_right[2 * BPS] = top_right[3 * BPS] = top_right[0];
// predict and add residuals for all 4x4 blocks in turn.
for (n = 0; n < 16; ++n, bits <<= 2) {
uint8_t* const dst = y_dst + kScan[n];
VP8PredLuma4[block->imodes_[n]](dst);
DoTransform(bits, coeffs + n * 16, dst);
}
} else { // 16x16
const int pred_func = CheckMode(mb_x, mb_y,
block->imodes_[0]);
VP8PredLuma16[pred_func](y_dst);
if (bits != 0) {
for (n = 0; n < 16; ++n, bits <<= 2) {
DoTransform(bits, coeffs + n * 16, y_dst + kScan[n]);
}
}
}
{
// Chroma
const uint32_t bits_uv = block->non_zero_uv_;
const int pred_func = CheckMode(mb_x, mb_y, block->uvmode_);
VP8PredChroma8[pred_func](u_dst);
VP8PredChroma8[pred_func](v_dst);
DoUVTransform(bits_uv >> 0, coeffs + 16 * 16, u_dst);
DoUVTransform(bits_uv >> 8, coeffs + 20 * 16, v_dst);
}
// stash away top samples for next block
if (mb_y < dec->mb_h_ - 1) {
memcpy(top_yuv[0].y, y_dst + 15 * BPS, 16);
memcpy(top_yuv[0].u, u_dst + 7 * BPS, 8);
memcpy(top_yuv[0].v, v_dst + 7 * BPS, 8);
}
}
// Transfer reconstructed samples from yuv_b_ cache to final destination.
{
const int y_offset = cache_id * 16 * dec->cache_y_stride_;
const int uv_offset = cache_id * 8 * dec->cache_uv_stride_;
uint8_t* const y_out = dec->cache_y_ + mb_x * 16 + y_offset;
uint8_t* const u_out = dec->cache_u_ + mb_x * 8 + uv_offset;
uint8_t* const v_out = dec->cache_v_ + mb_x * 8 + uv_offset;
for (j = 0; j < 16; ++j) {
memcpy(y_out + j * dec->cache_y_stride_, y_dst + j * BPS, 16);
}
for (j = 0; j < 8; ++j) {
memcpy(u_out + j * dec->cache_uv_stride_, u_dst + j * BPS, 8);
memcpy(v_out + j * dec->cache_uv_stride_, v_dst + j * BPS, 8);
}
}
}
}
//------------------------------------------------------------------------------

View File

@ -15,9 +15,9 @@
#include <string.h>
#include <stdlib.h>
#include "./alphai_dec.h"
#include "./webpi_dec.h"
#include "./vp8i_dec.h"
#include "./alphai.h"
#include "./webpi.h"
#include "./vp8i.h"
#include "../utils/utils.h"
// In append mode, buffer allocations increase as multiples of this value.
@ -70,9 +70,7 @@ struct WebPIDecoder {
VP8Io io_;
MemBuffer mem_; // input memory buffer.
WebPDecBuffer output_; // output buffer (when no external one is supplied,
// or if the external one has slow-memory)
WebPDecBuffer* final_output_; // Slow-memory output to copy to eventually.
WebPDecBuffer output_; // output buffer (when no external one is supplied)
size_t chunk_size_; // Compressed VP8/VP8L size extracted from Header.
int last_mb_y_; // last row reached for intra-mode decoding
@ -120,9 +118,9 @@ static void DoRemap(WebPIDecoder* const idec, ptrdiff_t offset) {
if (idec->dec_ != NULL) {
if (!idec->is_lossless_) {
VP8Decoder* const dec = (VP8Decoder*)idec->dec_;
const uint32_t last_part = dec->num_parts_minus_one_;
const int last_part = dec->num_parts_ - 1;
if (offset != 0) {
uint32_t p;
int p;
for (p = 0; p <= last_part; ++p) {
VP8RemapBitReader(dec->parts_ + p, offset);
}
@ -132,11 +130,8 @@ static void DoRemap(WebPIDecoder* const idec, ptrdiff_t offset) {
VP8RemapBitReader(&dec->br_, offset);
}
}
{
const uint8_t* const last_start = dec->parts_[last_part].buf_;
VP8BitReaderSetBuffer(&dec->parts_[last_part], last_start,
mem->buf_ + mem->end_ - last_start);
}
assert(last_part >= 0);
dec->parts_[last_part].buf_end_ = mem->buf_ + mem->end_;
if (NeedCompressedAlpha(idec)) {
ALPHDecoder* const alph_dec = dec->alph_dec_;
dec->alpha_data_ += offset;
@ -245,20 +240,16 @@ static int CheckMemBufferMode(MemBuffer* const mem, MemBufferMode expected) {
// To be called last.
static VP8StatusCode FinishDecoding(WebPIDecoder* const idec) {
#if WEBP_DECODER_ABI_VERSION > 0x0203
const WebPDecoderOptions* const options = idec->params_.options;
WebPDecBuffer* const output = idec->params_.output;
idec->state_ = STATE_DONE;
if (options != NULL && options->flip) {
const VP8StatusCode status = WebPFlipBuffer(output);
if (status != VP8_STATUS_OK) return status;
}
if (idec->final_output_ != NULL) {
WebPCopyDecBufferPixels(output, idec->final_output_); // do the slow-copy
WebPFreeDecBuffer(&idec->output_);
*output = *idec->final_output_;
idec->final_output_ = NULL;
return WebPFlipBuffer(output);
}
#endif
idec->state_ = STATE_DONE;
return VP8_STATUS_OK;
}
@ -386,7 +377,8 @@ static VP8StatusCode CopyParts0Data(WebPIDecoder* const idec) {
}
memcpy(part0_buf, br->buf_, part_size);
mem->part0_buf_ = part0_buf;
VP8BitReaderSetBuffer(br, part0_buf, part_size);
br->buf_ = part0_buf;
br->buf_end_ = part0_buf + part_size;
} else {
// Else: just keep pointers to the partition #0's data in dec_->br_.
}
@ -464,20 +456,19 @@ static VP8StatusCode DecodeRemaining(WebPIDecoder* const idec) {
}
for (; dec->mb_x_ < dec->mb_w_; ++dec->mb_x_) {
VP8BitReader* const token_br =
&dec->parts_[dec->mb_y_ & dec->num_parts_minus_one_];
&dec->parts_[dec->mb_y_ & (dec->num_parts_ - 1)];
MBContext context;
SaveContext(dec, token_br, &context);
if (!VP8DecodeMB(dec, token_br)) {
// We shouldn't fail when MAX_MB data was available
if (dec->num_parts_minus_one_ == 0 &&
MemDataSize(&idec->mem_) > MAX_MB_SIZE) {
if (dec->num_parts_ == 1 && MemDataSize(&idec->mem_) > MAX_MB_SIZE) {
return IDecError(idec, VP8_STATUS_BITSTREAM_ERROR);
}
RestoreContext(&context, dec, token_br);
return VP8_STATUS_SUSPENDED;
}
// Release buffer only if there is only one partition
if (dec->num_parts_minus_one_ == 0) {
if (dec->num_parts_ == 1) {
idec->mem_.start_ = token_br->buf_ - idec->mem_.buf_;
assert(idec->mem_.start_ <= idec->mem_.end_);
}
@ -515,15 +506,9 @@ static VP8StatusCode DecodeVP8LHeader(WebPIDecoder* const idec) {
// Wait until there's enough data for decoding header.
if (curr_size < (idec->chunk_size_ >> 3)) {
dec->status_ = VP8_STATUS_SUSPENDED;
return ErrorStatusLossless(idec, dec->status_);
return VP8_STATUS_SUSPENDED;
}
if (!VP8LDecodeHeader(dec, io)) {
if (dec->status_ == VP8_STATUS_BITSTREAM_ERROR &&
curr_size < idec->chunk_size_) {
dec->status_ = VP8_STATUS_SUSPENDED;
}
return ErrorStatusLossless(idec, dec->status_);
}
// Allocate/verify output buffer now.
@ -542,15 +527,23 @@ static VP8StatusCode DecodeVP8LData(WebPIDecoder* const idec) {
const size_t curr_size = MemDataSize(&idec->mem_);
assert(idec->is_lossless_);
// Switch to incremental decoding if we don't have all the bytes available.
dec->incremental_ = (curr_size < idec->chunk_size_);
// At present the lossless decoder can't decode an image incrementally, so wait
// until all the image data has been aggregated before decoding.
if (curr_size < idec->chunk_size_) {
return VP8_STATUS_SUSPENDED;
}
if (!VP8LDecodeImage(dec)) {
// The decoding is called after all the data-bytes are aggregated. Change
// the error to VP8_BITSTREAM_ERROR in case lossless decoder fails to decode
// all the pixels (VP8_STATUS_SUSPENDED).
if (dec->status_ == VP8_STATUS_SUSPENDED) {
dec->status_ = VP8_STATUS_BITSTREAM_ERROR;
}
return ErrorStatusLossless(idec, dec->status_);
}
assert(dec->status_ == VP8_STATUS_OK || dec->status_ == VP8_STATUS_SUSPENDED);
return (dec->status_ == VP8_STATUS_SUSPENDED) ? dec->status_
: FinishDecoding(idec);
return FinishDecoding(idec);
}
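
The suspend-and-retry contract above is what callers of the public incremental API see: feed more bytes and keep getting VP8_STATUS_SUSPENDED back until the bitstream is complete. A minimal caller-side sketch (editorial illustration only, not part of this change; read_more() is a hypothetical data source):

#include "webp/decode.h"

// Hypothetical callback supplying the next chunk of the bitstream; returns 0 at EOF.
extern int read_more(const uint8_t** chunk, size_t* chunk_size);

static int DecodeIncrementally(void) {
  WebPIDecoder* const idec = WebPINewDecoder(NULL);  // use an internal output buffer
  VP8StatusCode status = VP8_STATUS_SUSPENDED;
  const uint8_t* chunk;
  size_t chunk_size;
  if (idec == NULL) return 0;
  while (status == VP8_STATUS_SUSPENDED && read_more(&chunk, &chunk_size)) {
    status = WebPIAppend(idec, chunk, chunk_size);   // SUSPENDED until enough data
  }
  // Rows decoded so far can be queried via WebPIDecGetRGB()/WebPIDecodedArea().
  WebPIDelete(idec);
  return (status == VP8_STATUS_OK);
}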
// Main decoding loop
@ -583,10 +576,9 @@ static VP8StatusCode IDecode(WebPIDecoder* idec) {
}
//------------------------------------------------------------------------------
// Internal constructor
// Public functions
static WebPIDecoder* NewDecoder(WebPDecBuffer* const output_buffer,
const WebPBitstreamFeatures* const features) {
WebPIDecoder* WebPINewDecoder(WebPDecBuffer* output_buffer) {
WebPIDecoder* idec = (WebPIDecoder*)WebPSafeCalloc(1ULL, sizeof(*idec));
if (idec == NULL) {
return NULL;
@ -602,46 +594,25 @@ static WebPIDecoder* NewDecoder(WebPDecBuffer* const output_buffer,
VP8InitIo(&idec->io_);
WebPResetDecParams(&idec->params_);
if (output_buffer == NULL || WebPAvoidSlowMemory(output_buffer, features)) {
idec->params_.output = &idec->output_;
idec->final_output_ = output_buffer;
if (output_buffer != NULL) {
idec->params_.output->colorspace = output_buffer->colorspace;
}
} else {
idec->params_.output = output_buffer;
idec->final_output_ = NULL;
}
idec->params_.output = (output_buffer != NULL) ? output_buffer
: &idec->output_;
WebPInitCustomIo(&idec->params_, &idec->io_); // Plug the I/O functions.
return idec;
}
//------------------------------------------------------------------------------
// Public functions
WebPIDecoder* WebPINewDecoder(WebPDecBuffer* output_buffer) {
return NewDecoder(output_buffer, NULL);
}
WebPIDecoder* WebPIDecode(const uint8_t* data, size_t data_size,
WebPDecoderConfig* config) {
WebPIDecoder* idec;
WebPBitstreamFeatures tmp_features;
WebPBitstreamFeatures* const features =
(config == NULL) ? &tmp_features : &config->input;
memset(&tmp_features, 0, sizeof(tmp_features));
// Parse the bitstream's features, if requested:
if (data != NULL && data_size > 0) {
if (WebPGetFeatures(data, data_size, features) != VP8_STATUS_OK) {
if (data != NULL && data_size > 0 && config != NULL) {
if (WebPGetFeatures(data, data_size, &config->input) != VP8_STATUS_OK) {
return NULL;
}
}
// Create an instance of the incremental decoder
idec = (config != NULL) ? NewDecoder(&config->output, features)
: NewDecoder(NULL, features);
idec = WebPINewDecoder(config ? &config->output : NULL);
if (idec == NULL) {
return NULL;
}
@ -675,11 +646,11 @@ void WebPIDelete(WebPIDecoder* idec) {
WebPIDecoder* WebPINewRGB(WEBP_CSP_MODE mode, uint8_t* output_buffer,
size_t output_buffer_size, int output_stride) {
const int is_external_memory = (output_buffer != NULL) ? 1 : 0;
const int is_external_memory = (output_buffer != NULL);
WebPIDecoder* idec;
if (mode >= MODE_YUV) return NULL;
if (is_external_memory == 0) { // Overwrite parameters to sane values.
if (!is_external_memory) { // Overwrite parameters to sane values.
output_buffer_size = 0;
output_stride = 0;
} else { // A buffer was passed. Validate the other params.
@ -701,11 +672,11 @@ WebPIDecoder* WebPINewYUVA(uint8_t* luma, size_t luma_size, int luma_stride,
uint8_t* u, size_t u_size, int u_stride,
uint8_t* v, size_t v_size, int v_stride,
uint8_t* a, size_t a_size, int a_stride) {
const int is_external_memory = (luma != NULL) ? 1 : 0;
const int is_external_memory = (luma != NULL);
WebPIDecoder* idec;
WEBP_CSP_MODE colorspace;
if (is_external_memory == 0) { // Overwrite parameters to sane values.
if (!is_external_memory) { // Overwrite parameters to sane values.
luma_size = u_size = v_size = a_size = 0;
luma_stride = u_stride = v_stride = a_stride = 0;
u = v = a = NULL;
@ -813,9 +784,6 @@ static const WebPDecBuffer* GetOutputBuffer(const WebPIDecoder* const idec) {
if (idec->state_ <= STATE_VP8_PARTS0) {
return NULL;
}
if (idec->final_output_ != NULL) {
return NULL; // not yet slow-copied
}
return idec->params_.output;
}
@ -825,7 +793,8 @@ const WebPDecBuffer* WebPIDecodedArea(const WebPIDecoder* idec,
const WebPDecBuffer* const src = GetOutputBuffer(idec);
if (left != NULL) *left = 0;
if (top != NULL) *top = 0;
if (src != NULL) {
// TODO(skal): later include handling of rotations.
if (src) {
if (width != NULL) *width = src->width;
if (height != NULL) *height = idec->params_.last_y;
} else {
@ -890,3 +859,4 @@ int WebPISetIOHooks(WebPIDecoder* const idec,
return 1;
}
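
For the external-memory constructors validated above, a rough usage sketch (not from the diff; it assumes the caller already knows the canvas dimensions, e.g. from WebPGetInfo()):

#include <stdlib.h>
#include "webp/decode.h"

// Hypothetical helper: sets up an incremental decoder that writes RGBA rows
// straight into a caller-owned buffer. The caller frees *out_mem after use.
static WebPIDecoder* NewExternalRGBADecoder(int width, int height,
                                            uint8_t** const out_mem) {
  const int stride = width * 4;                // MODE_RGBA: 4 bytes per pixel
  const size_t size = (size_t)stride * height;
  uint8_t* const mem = (uint8_t*)malloc(size);
  if (mem == NULL) return NULL;
  *out_mem = mem;
  return WebPINewRGB(MODE_RGBA, mem, size, stride);
}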


@ -13,8 +13,8 @@
#include <assert.h>
#include <stdlib.h>
#include "../dec/vp8i_dec.h"
#include "./webpi_dec.h"
#include "../dec/vp8i.h"
#include "./webpi.h"
#include "../dsp/dsp.h"
#include "../dsp/yuv.h"
#include "../utils/utils.h"
@ -55,6 +55,32 @@ static int EmitSampledRGB(const VP8Io* const io, WebPDecParams* const p) {
return io->mb_h;
}
//------------------------------------------------------------------------------
// YUV444 -> RGB conversion
#if 0 // TODO(skal): this is for future rescaling.
static int EmitRGB(const VP8Io* const io, WebPDecParams* const p) {
WebPDecBuffer* output = p->output;
const WebPRGBABuffer* const buf = &output->u.RGBA;
uint8_t* dst = buf->rgba + io->mb_y * buf->stride;
const uint8_t* y_src = io->y;
const uint8_t* u_src = io->u;
const uint8_t* v_src = io->v;
const WebPYUV444Converter convert = WebPYUV444Converters[output->colorspace];
const int mb_w = io->mb_w;
const int last = io->mb_h;
int j;
for (j = 0; j < last; ++j) {
convert(y_src, u_src, v_src, dst, mb_w);
y_src += io->y_stride;
u_src += io->uv_stride;
v_src += io->uv_stride;
dst += buf->stride;
}
return io->mb_h;
}
#endif
//------------------------------------------------------------------------------
// Fancy upsampling
@ -119,24 +145,14 @@ static int EmitFancyRGB(const VP8Io* const io, WebPDecParams* const p) {
//------------------------------------------------------------------------------
static void FillAlphaPlane(uint8_t* dst, int w, int h, int stride) {
int j;
for (j = 0; j < h; ++j) {
memset(dst, 0xff, w * sizeof(*dst));
dst += stride;
}
}
static int EmitAlphaYUV(const VP8Io* const io, WebPDecParams* const p,
int expected_num_lines_out) {
static int EmitAlphaYUV(const VP8Io* const io, WebPDecParams* const p) {
const uint8_t* alpha = io->a;
const WebPYUVABuffer* const buf = &p->output->u.YUVA;
const int mb_w = io->mb_w;
const int mb_h = io->mb_h;
uint8_t* dst = buf->a + io->mb_y * buf->a_stride;
int j;
(void)expected_num_lines_out;
assert(expected_num_lines_out == mb_h);
if (alpha != NULL) {
for (j = 0; j < mb_h; ++j) {
memcpy(dst, alpha, mb_w * sizeof(*dst));
@ -145,7 +161,10 @@ static int EmitAlphaYUV(const VP8Io* const io, WebPDecParams* const p,
}
} else if (buf->a != NULL) {
// the user requested alpha, but there is none, set it to opaque.
FillAlphaPlane(dst, mb_w, mb_h, buf->a_stride);
for (j = 0; j < mb_h; ++j) {
memset(dst, 0xff, mb_w * sizeof(*dst));
dst += buf->a_stride;
}
}
return 0;
}
@ -176,8 +195,7 @@ static int GetAlphaSourceRow(const VP8Io* const io,
return start_y;
}
static int EmitAlphaRGB(const VP8Io* const io, WebPDecParams* const p,
int expected_num_lines_out) {
static int EmitAlphaRGB(const VP8Io* const io, WebPDecParams* const p) {
const uint8_t* alpha = io->a;
if (alpha != NULL) {
const int mb_w = io->mb_w;
@ -188,13 +206,21 @@ static int EmitAlphaRGB(const VP8Io* const io, WebPDecParams* const p,
int num_rows;
const int start_y = GetAlphaSourceRow(io, &alpha, &num_rows);
uint8_t* const base_rgba = buf->rgba + start_y * buf->stride;
uint8_t* const dst = base_rgba + (alpha_first ? 0 : 3);
const int has_alpha = WebPDispatchAlpha(alpha, io->width, mb_w,
num_rows, dst, buf->stride);
(void)expected_num_lines_out;
assert(expected_num_lines_out == num_rows);
// has_alpha is true if there's non-trivial alpha to premultiply with.
if (has_alpha && WebPIsPremultipliedMode(colorspace)) {
uint8_t* dst = base_rgba + (alpha_first ? 0 : 3);
uint32_t alpha_mask = 0xff;
int i, j;
for (j = 0; j < num_rows; ++j) {
for (i = 0; i < mb_w; ++i) {
const uint32_t alpha_value = alpha[i];
dst[4 * i] = alpha_value;
alpha_mask &= alpha_value;
}
alpha += io->width;
dst += buf->stride;
}
// alpha_mask is < 0xff if there's non-trivial alpha to premultiply with.
if (alpha_mask != 0xff && WebPIsPremultipliedMode(colorspace)) {
WebPApplyAlphaMultiply(base_rgba, alpha_first,
mb_w, num_rows, buf->stride);
}
@ -202,8 +228,7 @@ static int EmitAlphaRGB(const VP8Io* const io, WebPDecParams* const p,
return 0;
}
static int EmitAlphaRGBA4444(const VP8Io* const io, WebPDecParams* const p,
int expected_num_lines_out) {
static int EmitAlphaRGBA4444(const VP8Io* const io, WebPDecParams* const p) {
const uint8_t* alpha = io->a;
if (alpha != NULL) {
const int mb_w = io->mb_w;
@ -219,6 +244,7 @@ static int EmitAlphaRGBA4444(const VP8Io* const io, WebPDecParams* const p,
#endif
uint32_t alpha_mask = 0x0f;
int i, j;
for (j = 0; j < num_rows; ++j) {
for (i = 0; i < mb_w; ++i) {
// Fill in the alpha value (converted to 4 bits).
@ -229,8 +255,6 @@ static int EmitAlphaRGBA4444(const VP8Io* const io, WebPDecParams* const p,
alpha += io->width;
alpha_dst += buf->stride;
}
(void)expected_num_lines_out;
assert(expected_num_lines_out == num_rows);
if (alpha_mask != 0x0f && WebPIsPremultipliedMode(colorspace)) {
WebPApplyAlphaMultiply4444(base_rgba, mb_w, num_rows, buf->stride);
}
@ -256,7 +280,7 @@ static int Rescale(const uint8_t* src, int src_stride,
static int EmitRescaledYUV(const VP8Io* const io, WebPDecParams* const p) {
const int mb_h = io->mb_h;
const int uv_mb_h = (mb_h + 1) >> 1;
WebPRescaler* const scaler = p->scaler_y;
WebPRescaler* const scaler = &p->scaler_y;
int num_lines_out = 0;
if (WebPIsAlphaMode(p->output->colorspace) && io->a != NULL) {
// Before rescaling, we premultiply the luma directly into the io->y
@ -267,28 +291,21 @@ static int EmitRescaledYUV(const VP8Io* const io, WebPDecParams* const p) {
io->a, io->width, io->mb_w, mb_h, 0);
}
num_lines_out = Rescale(io->y, io->y_stride, mb_h, scaler);
Rescale(io->u, io->uv_stride, uv_mb_h, p->scaler_u);
Rescale(io->v, io->uv_stride, uv_mb_h, p->scaler_v);
Rescale(io->u, io->uv_stride, uv_mb_h, &p->scaler_u);
Rescale(io->v, io->uv_stride, uv_mb_h, &p->scaler_v);
return num_lines_out;
}
static int EmitRescaledAlphaYUV(const VP8Io* const io, WebPDecParams* const p,
int expected_num_lines_out) {
const WebPYUVABuffer* const buf = &p->output->u.YUVA;
uint8_t* const dst_a = buf->a + p->last_y * buf->a_stride;
static int EmitRescaledAlphaYUV(const VP8Io* const io, WebPDecParams* const p) {
if (io->a != NULL) {
uint8_t* const dst_y = buf->y + p->last_y * buf->y_stride;
const int num_lines_out = Rescale(io->a, io->width, io->mb_h, p->scaler_a);
assert(expected_num_lines_out == num_lines_out);
const WebPYUVABuffer* const buf = &p->output->u.YUVA;
uint8_t* dst_y = buf->y + p->last_y * buf->y_stride;
const uint8_t* src_a = buf->a + p->last_y * buf->a_stride;
const int num_lines_out = Rescale(io->a, io->width, io->mb_h, &p->scaler_a);
if (num_lines_out > 0) { // unmultiply the Y
WebPMultRows(dst_y, buf->y_stride, dst_a, buf->a_stride,
p->scaler_a->dst_width, num_lines_out, 1);
WebPMultRows(dst_y, buf->y_stride, src_a, buf->a_stride,
p->scaler_a.dst_width, num_lines_out, 1);
}
} else if (buf->a != NULL) {
// the user requested alpha, but there is none, set it to opaque.
assert(p->last_y + expected_num_lines_out <= io->scaled_height);
FillAlphaPlane(dst_a, io->scaled_width, expected_num_lines_out,
buf->a_stride);
}
return 0;
}
@ -304,42 +321,31 @@ static int InitYUVRescaler(const VP8Io* const io, WebPDecParams* const p) {
const int uv_in_height = (io->mb_h + 1) >> 1;
const size_t work_size = 2 * out_width; // scratch memory for luma rescaler
const size_t uv_work_size = 2 * uv_out_width; // and for each u/v ones
size_t tmp_size, rescaler_size;
size_t tmp_size;
rescaler_t* work;
WebPRescaler* scalers;
const int num_rescalers = has_alpha ? 4 : 3;
tmp_size = (work_size + 2 * uv_work_size) * sizeof(*work);
if (has_alpha) {
tmp_size += work_size * sizeof(*work);
}
rescaler_size = num_rescalers * sizeof(*p->scaler_y) + WEBP_ALIGN_CST;
p->memory = WebPSafeMalloc(1ULL, tmp_size + rescaler_size);
p->memory = WebPSafeMalloc(1ULL, tmp_size);
if (p->memory == NULL) {
return 0; // memory error
}
work = (rescaler_t*)p->memory;
scalers = (WebPRescaler*)WEBP_ALIGN((const uint8_t*)work + tmp_size);
p->scaler_y = &scalers[0];
p->scaler_u = &scalers[1];
p->scaler_v = &scalers[2];
p->scaler_a = has_alpha ? &scalers[3] : NULL;
WebPRescalerInit(p->scaler_y, io->mb_w, io->mb_h,
WebPRescalerInit(&p->scaler_y, io->mb_w, io->mb_h,
buf->y, out_width, out_height, buf->y_stride, 1,
work);
WebPRescalerInit(p->scaler_u, uv_in_width, uv_in_height,
WebPRescalerInit(&p->scaler_u, uv_in_width, uv_in_height,
buf->u, uv_out_width, uv_out_height, buf->u_stride, 1,
work + work_size);
WebPRescalerInit(p->scaler_v, uv_in_width, uv_in_height,
WebPRescalerInit(&p->scaler_v, uv_in_width, uv_in_height,
buf->v, uv_out_width, uv_out_height, buf->v_stride, 1,
work + work_size + uv_work_size);
p->emit = EmitRescaledYUV;
if (has_alpha) {
WebPRescalerInit(p->scaler_a, io->mb_w, io->mb_h,
WebPRescalerInit(&p->scaler_a, io->mb_w, io->mb_h,
buf->a, out_width, out_height, buf->a_stride, 1,
work + work_size + 2 * uv_work_size);
p->emit_alpha = EmitRescaledAlphaYUV;
@ -355,19 +361,19 @@ static int ExportRGB(WebPDecParams* const p, int y_pos) {
const WebPYUV444Converter convert =
WebPYUV444Converters[p->output->colorspace];
const WebPRGBABuffer* const buf = &p->output->u.RGBA;
uint8_t* dst = buf->rgba + y_pos * buf->stride;
uint8_t* dst = buf->rgba + (p->last_y + y_pos) * buf->stride;
int num_lines_out = 0;
// For RGB rescaling, because of the YUV420, current scan position
// U/V can be +1/-1 line from the Y one. Hence the double test.
while (WebPRescalerHasPendingOutput(p->scaler_y) &&
WebPRescalerHasPendingOutput(p->scaler_u)) {
assert(y_pos + num_lines_out < p->output->height);
assert(p->scaler_u->y_accum == p->scaler_v->y_accum);
WebPRescalerExportRow(p->scaler_y);
WebPRescalerExportRow(p->scaler_u);
WebPRescalerExportRow(p->scaler_v);
convert(p->scaler_y->dst, p->scaler_u->dst, p->scaler_v->dst,
dst, p->scaler_y->dst_width);
while (WebPRescalerHasPendingOutput(&p->scaler_y) &&
WebPRescalerHasPendingOutput(&p->scaler_u)) {
assert(p->last_y + y_pos + num_lines_out < p->output->height);
assert(p->scaler_u.y_accum == p->scaler_v.y_accum);
WebPRescalerExportRow(&p->scaler_y);
WebPRescalerExportRow(&p->scaler_u);
WebPRescalerExportRow(&p->scaler_v);
convert(p->scaler_y.dst, p->scaler_u.dst, p->scaler_v.dst,
dst, p->scaler_y.dst_width);
dst += buf->stride;
++num_lines_out;
}
@ -381,56 +387,57 @@ static int EmitRescaledRGB(const VP8Io* const io, WebPDecParams* const p) {
int num_lines_out = 0;
while (j < mb_h) {
const int y_lines_in =
WebPRescalerImport(p->scaler_y, mb_h - j,
WebPRescalerImport(&p->scaler_y, mb_h - j,
io->y + j * io->y_stride, io->y_stride);
const int u_lines_in =
WebPRescalerImport(&p->scaler_u, uv_mb_h - uv_j,
io->u + uv_j * io->uv_stride, io->uv_stride);
const int v_lines_in =
WebPRescalerImport(&p->scaler_v, uv_mb_h - uv_j,
io->v + uv_j * io->uv_stride, io->uv_stride);
(void)v_lines_in; // remove a gcc warning
assert(u_lines_in == v_lines_in);
j += y_lines_in;
if (WebPRescaleNeededLines(p->scaler_u, uv_mb_h - uv_j)) {
const int u_lines_in =
WebPRescalerImport(p->scaler_u, uv_mb_h - uv_j,
io->u + uv_j * io->uv_stride, io->uv_stride);
const int v_lines_in =
WebPRescalerImport(p->scaler_v, uv_mb_h - uv_j,
io->v + uv_j * io->uv_stride, io->uv_stride);
(void)v_lines_in; // remove a gcc warning
assert(u_lines_in == v_lines_in);
uv_j += u_lines_in;
}
num_lines_out += ExportRGB(p, p->last_y + num_lines_out);
uv_j += u_lines_in;
num_lines_out += ExportRGB(p, num_lines_out);
}
return num_lines_out;
}
static int ExportAlpha(WebPDecParams* const p, int y_pos, int max_lines_out) {
static int ExportAlpha(WebPDecParams* const p, int y_pos) {
const WebPRGBABuffer* const buf = &p->output->u.RGBA;
uint8_t* const base_rgba = buf->rgba + y_pos * buf->stride;
uint8_t* const base_rgba = buf->rgba + (p->last_y + y_pos) * buf->stride;
const WEBP_CSP_MODE colorspace = p->output->colorspace;
const int alpha_first =
(colorspace == MODE_ARGB || colorspace == MODE_Argb);
uint8_t* dst = base_rgba + (alpha_first ? 0 : 3);
int num_lines_out = 0;
const int is_premult_alpha = WebPIsPremultipliedMode(colorspace);
uint32_t non_opaque = 0;
const int width = p->scaler_a->dst_width;
uint32_t alpha_mask = 0xff;
const int width = p->scaler_a.dst_width;
while (WebPRescalerHasPendingOutput(p->scaler_a) &&
num_lines_out < max_lines_out) {
assert(y_pos + num_lines_out < p->output->height);
WebPRescalerExportRow(p->scaler_a);
non_opaque |= WebPDispatchAlpha(p->scaler_a->dst, 0, width, 1, dst, 0);
while (WebPRescalerHasPendingOutput(&p->scaler_a)) {
int i;
assert(p->last_y + y_pos + num_lines_out < p->output->height);
WebPRescalerExportRow(&p->scaler_a);
for (i = 0; i < width; ++i) {
const uint32_t alpha_value = p->scaler_a.dst[i];
dst[4 * i] = alpha_value;
alpha_mask &= alpha_value;
}
dst += buf->stride;
++num_lines_out;
}
if (is_premult_alpha && non_opaque) {
if (is_premult_alpha && alpha_mask != 0xff) {
WebPApplyAlphaMultiply(base_rgba, alpha_first,
width, num_lines_out, buf->stride);
}
return num_lines_out;
}
static int ExportAlphaRGBA4444(WebPDecParams* const p, int y_pos,
int max_lines_out) {
static int ExportAlphaRGBA4444(WebPDecParams* const p, int y_pos) {
const WebPRGBABuffer* const buf = &p->output->u.RGBA;
uint8_t* const base_rgba = buf->rgba + y_pos * buf->stride;
uint8_t* const base_rgba = buf->rgba + (p->last_y + y_pos) * buf->stride;
#ifdef WEBP_SWAP_16BIT_CSP
uint8_t* alpha_dst = base_rgba;
#else
@ -438,18 +445,17 @@ static int ExportAlphaRGBA4444(WebPDecParams* const p, int y_pos,
#endif
int num_lines_out = 0;
const WEBP_CSP_MODE colorspace = p->output->colorspace;
const int width = p->scaler_a->dst_width;
const int width = p->scaler_a.dst_width;
const int is_premult_alpha = WebPIsPremultipliedMode(colorspace);
uint32_t alpha_mask = 0x0f;
while (WebPRescalerHasPendingOutput(p->scaler_a) &&
num_lines_out < max_lines_out) {
while (WebPRescalerHasPendingOutput(&p->scaler_a)) {
int i;
assert(y_pos + num_lines_out < p->output->height);
WebPRescalerExportRow(p->scaler_a);
assert(p->last_y + y_pos + num_lines_out < p->output->height);
WebPRescalerExportRow(&p->scaler_a);
for (i = 0; i < width; ++i) {
// Fill in the alpha value (converted to 4 bits).
const uint32_t alpha_value = p->scaler_a->dst[i] >> 4;
const uint32_t alpha_value = p->scaler_a.dst[i] >> 4;
alpha_dst[2 * i] = (alpha_dst[2 * i] & 0xf0) | alpha_value;
alpha_mask &= alpha_value;
}
@ -462,17 +468,15 @@ static int ExportAlphaRGBA4444(WebPDecParams* const p, int y_pos,
return num_lines_out;
}
static int EmitRescaledAlphaRGB(const VP8Io* const io, WebPDecParams* const p,
int expected_num_out_lines) {
static int EmitRescaledAlphaRGB(const VP8Io* const io, WebPDecParams* const p) {
if (io->a != NULL) {
WebPRescaler* const scaler = p->scaler_a;
int lines_left = expected_num_out_lines;
const int y_end = p->last_y + lines_left;
while (lines_left > 0) {
const int row_offset = scaler->src_y - io->mb_y;
WebPRescalerImport(scaler, io->mb_h + io->mb_y - scaler->src_y,
io->a + row_offset * io->width, io->width);
lines_left -= p->emit_alpha_row(p, y_end - lines_left, lines_left);
WebPRescaler* const scaler = &p->scaler_a;
int j = 0;
int pos = 0;
while (j < io->mb_h) {
j += WebPRescalerImport(scaler, io->mb_h - j,
io->a + j * io->width, io->width);
pos += p->emit_alpha_row(p, pos);
}
}
return 0;
@ -487,9 +491,7 @@ static int InitRGBRescaler(const VP8Io* const io, WebPDecParams* const p) {
const size_t work_size = 2 * out_width; // scratch memory for one rescaler
rescaler_t* work; // rescalers work area
uint8_t* tmp; // tmp storage for scaled YUV444 samples before RGB conversion
size_t tmp_size1, tmp_size2, total_size, rescaler_size;
WebPRescaler* scalers;
const int num_rescalers = has_alpha ? 4 : 3;
size_t tmp_size1, tmp_size2, total_size;
tmp_size1 = 3 * work_size;
tmp_size2 = 3 * out_width;
@ -498,35 +500,25 @@ static int InitRGBRescaler(const VP8Io* const io, WebPDecParams* const p) {
tmp_size2 += out_width;
}
total_size = tmp_size1 * sizeof(*work) + tmp_size2 * sizeof(*tmp);
rescaler_size = num_rescalers * sizeof(*p->scaler_y) + WEBP_ALIGN_CST;
p->memory = WebPSafeMalloc(1ULL, total_size + rescaler_size);
p->memory = WebPSafeMalloc(1ULL, total_size);
if (p->memory == NULL) {
return 0; // memory error
}
work = (rescaler_t*)p->memory;
tmp = (uint8_t*)(work + tmp_size1);
scalers = (WebPRescaler*)WEBP_ALIGN((const uint8_t*)work + total_size);
p->scaler_y = &scalers[0];
p->scaler_u = &scalers[1];
p->scaler_v = &scalers[2];
p->scaler_a = has_alpha ? &scalers[3] : NULL;
WebPRescalerInit(p->scaler_y, io->mb_w, io->mb_h,
WebPRescalerInit(&p->scaler_y, io->mb_w, io->mb_h,
tmp + 0 * out_width, out_width, out_height, 0, 1,
work + 0 * work_size);
WebPRescalerInit(p->scaler_u, uv_in_width, uv_in_height,
WebPRescalerInit(&p->scaler_u, uv_in_width, uv_in_height,
tmp + 1 * out_width, out_width, out_height, 0, 1,
work + 1 * work_size);
WebPRescalerInit(p->scaler_v, uv_in_width, uv_in_height,
WebPRescalerInit(&p->scaler_v, uv_in_width, uv_in_height,
tmp + 2 * out_width, out_width, out_height, 0, 1,
work + 2 * work_size);
p->emit = EmitRescaledRGB;
WebPInitYUV444Converters();
if (has_alpha) {
WebPRescalerInit(p->scaler_a, io->mb_w, io->mb_h,
WebPRescalerInit(&p->scaler_a, io->mb_w, io->mb_h,
tmp + 3 * out_width, out_width, out_height, 0, 1,
work + 3 * work_size);
p->emit_alpha = EmitRescaledAlphaRGB;
@ -567,7 +559,6 @@ static int CustomSetup(VP8Io* io) {
}
} else {
if (is_rgb) {
WebPInitSamplers();
p->emit = EmitSampledRGB; // default
if (io->fancy_upsampling) {
#ifdef FANCY_UPSAMPLING
@ -582,6 +573,8 @@ static int CustomSetup(VP8Io* io) {
p->emit = EmitFancyRGB;
WebPInitUpsamplers();
#endif
} else {
WebPInitSamplers();
}
} else {
p->emit = EmitYUV;
@ -618,7 +611,7 @@ static int CustomPut(const VP8Io* io) {
}
num_lines_out = p->emit(io, p);
if (p->emit_alpha != NULL) {
p->emit_alpha(io, p, num_lines_out);
p->emit_alpha(io, p);
}
p->last_y += num_lines_out;
return 1;


@ -11,7 +11,7 @@
//
// Author: Skal (pascal.massimino@gmail.com)
#include "./vp8i_dec.h"
#include "./vp8i.h"
static WEBP_INLINE int clip(int v, int M) {
return v < 0 ? 0 : v > M ? M : v;


@ -11,13 +11,10 @@
//
// Author: Skal (pascal.massimino@gmail.com)
#include "./vp8i_dec.h"
#include "../utils/bit_reader_inl_utils.h"
#include "./vp8i.h"
#include "../utils/bit_reader_inl.h"
#if !defined(__arm__) && !defined(_M_ARM) && !defined(__aarch64__)
// using a table is ~1-2% slower on ARM. Prefer the coded-tree approach then.
#define USE_GENERIC_TREE
#endif
#ifdef USE_GENERIC_TREE
static const int8_t kYModesIntra4[18] = {
@ -497,12 +494,6 @@ static const uint8_t
};
// Paragraph 9.9
static const int kBands[16 + 1] = {
0, 1, 2, 3, 6, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6, 7,
0 // extra entry as sentinel
};
void VP8ParseProba(VP8BitReader* const br, VP8Decoder* const dec) {
VP8Proba* const proba = &dec->proba_;
int t, b, c, p;
@ -516,9 +507,6 @@ void VP8ParseProba(VP8BitReader* const br, VP8Decoder* const dec) {
}
}
}
for (b = 0; b < 16 + 1; ++b) {
proba->bands_ptr_[t][b] = &proba->bands_[t][kBands[b]];
}
}
dec->use_skip_proba_ = VP8Get(br);
if (dec->use_skip_proba_) {


@ -13,11 +13,11 @@
#include <stdlib.h>
#include "./alphai_dec.h"
#include "./vp8i_dec.h"
#include "./vp8li_dec.h"
#include "./webpi_dec.h"
#include "../utils/bit_reader_inl_utils.h"
#include "./alphai.h"
#include "./vp8i.h"
#include "./vp8li.h"
#include "./webpi.h"
#include "../utils/bit_reader_inl.h"
#include "../utils/utils.h"
//------------------------------------------------------------------------------
@ -26,16 +26,6 @@ int WebPGetDecoderVersion(void) {
return (DEC_MAJ_VERSION << 16) | (DEC_MIN_VERSION << 8) | DEC_REV_VERSION;
}
//------------------------------------------------------------------------------
// Signature and pointer-to-function for GetCoeffs() variants below.
typedef int (*GetCoeffsFunc)(VP8BitReader* const br,
const VP8BandProbas* const prob[],
int ctx, const quant_t dq, int n, int16_t* out);
static volatile GetCoeffsFunc GetCoeffs = NULL;
static void InitGetCoeffs(void);
//------------------------------------------------------------------------------
// VP8Decoder
@ -60,8 +50,7 @@ VP8Decoder* VP8New(void) {
SetOk(dec);
WebPGetWorkerInterface()->Init(&dec->worker_);
dec->ready_ = 0;
dec->num_parts_minus_one_ = 0;
InitGetCoeffs();
dec->num_parts_ = 1;
}
return dec;
}
@ -86,7 +75,10 @@ void VP8Delete(VP8Decoder* const dec) {
int VP8SetError(VP8Decoder* const dec,
VP8StatusCode error, const char* const msg) {
// The oldest error reported takes precedence over the new one.
// TODO: This check would be unnecessary if alpha decompression were separated
// from VP8ProcessRow/FinishRow. This avoids setting 'dec->status_' to
// something other than VP8_STATUS_BITSTREAM_ERROR on alpha decompression
// failure.
if (dec->status_ == VP8_STATUS_OK) {
dec->status_ = error;
dec->error_msg_ = msg;
@ -201,27 +193,25 @@ static VP8StatusCode ParsePartitions(VP8Decoder* const dec,
const uint8_t* sz = buf;
const uint8_t* buf_end = buf + size;
const uint8_t* part_start;
size_t size_left = size;
size_t last_part;
size_t p;
int last_part;
int p;
dec->num_parts_minus_one_ = (1 << VP8GetValue(br, 2)) - 1;
last_part = dec->num_parts_minus_one_;
if (size < 3 * last_part) {
dec->num_parts_ = 1 << VP8GetValue(br, 2);
last_part = dec->num_parts_ - 1;
part_start = buf + last_part * 3;
if (buf_end < part_start) {
// we can't even read the sizes with sz[]! That's a failure.
return VP8_STATUS_NOT_ENOUGH_DATA;
}
part_start = buf + last_part * 3;
size_left -= last_part * 3;
for (p = 0; p < last_part; ++p) {
size_t psize = sz[0] | (sz[1] << 8) | (sz[2] << 16);
if (psize > size_left) psize = size_left;
VP8InitBitReader(dec->parts_ + p, part_start, psize);
part_start += psize;
size_left -= psize;
const uint32_t psize = sz[0] | (sz[1] << 8) | (sz[2] << 16);
const uint8_t* part_end = part_start + psize;
if (part_end > buf_end) part_end = buf_end;
VP8InitBitReader(dec->parts_ + p, part_start, part_end);
part_start = part_end;
sz += 3;
}
VP8InitBitReader(dec->parts_ + last_part, part_start, size_left);
VP8InitBitReader(dec->parts_ + last_part, part_start, buf_end);
return (part_start < buf_end) ? VP8_STATUS_OK :
VP8_STATUS_SUSPENDED; // Init is ok, but there's not enough data
}
@ -284,14 +274,12 @@ int VP8GetHeaders(VP8Decoder* const dec, VP8Io* const io) {
frm_hdr->profile_ = (bits >> 1) & 7;
frm_hdr->show_ = (bits >> 4) & 1;
frm_hdr->partition_length_ = (bits >> 5);
if (frm_hdr->profile_ > 3) {
if (frm_hdr->profile_ > 3)
return VP8SetError(dec, VP8_STATUS_BITSTREAM_ERROR,
"Incorrect keyframe parameters.");
}
if (!frm_hdr->show_) {
if (!frm_hdr->show_)
return VP8SetError(dec, VP8_STATUS_UNSUPPORTED_FEATURE,
"Frame not displayable.");
}
buf += 3;
buf_size -= 3;
}
@ -316,22 +304,15 @@ int VP8GetHeaders(VP8Decoder* const dec, VP8Io* const io) {
dec->mb_w_ = (pic_hdr->width_ + 15) >> 4;
dec->mb_h_ = (pic_hdr->height_ + 15) >> 4;
// Setup default output area (can be later modified during io->setup())
io->width = pic_hdr->width_;
io->height = pic_hdr->height_;
// IMPORTANT! use some sane dimensions in crop_* and scaled_* fields.
// So they can be used interchangeably without always testing for
// 'use_cropping'.
io->use_scaling = 0;
io->use_cropping = 0;
io->crop_top = 0;
io->crop_left = 0;
io->crop_right = io->width;
io->crop_bottom = io->height;
io->use_scaling = 0;
io->scaled_width = io->width;
io->scaled_height = io->height;
io->mb_w = io->width; // sanity check
io->mb_h = io->height; // ditto
@ -347,7 +328,7 @@ int VP8GetHeaders(VP8Decoder* const dec, VP8Io* const io) {
}
br = &dec->br_;
VP8InitBitReader(br, buf, frm_hdr->partition_length_);
VP8InitBitReader(br, buf, buf + frm_hdr->partition_length_);
buf += frm_hdr->partition_length_;
buf_size -= frm_hdr->partition_length_;
@ -390,6 +371,11 @@ int VP8GetHeaders(VP8Decoder* const dec, VP8Io* const io) {
//------------------------------------------------------------------------------
// Residual decoding (Paragraph 13.2 / 13.3)
static const int kBands[16 + 1] = {
0, 1, 2, 3, 6, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6, 7,
0 // extra entry as sentinel
};
static const uint8_t kCat3[] = { 173, 148, 140, 0 };
static const uint8_t kCat4[] = { 176, 155, 140, 135, 0 };
static const uint8_t kCat5[] = { 180, 157, 141, 134, 130, 0 };
@ -433,20 +419,20 @@ static int GetLargeValue(VP8BitReader* const br, const uint8_t* const p) {
}
// Returns the position of the last non-zero coeff plus one
static int GetCoeffsFast(VP8BitReader* const br,
const VP8BandProbas* const prob[],
int ctx, const quant_t dq, int n, int16_t* out) {
const uint8_t* p = prob[n]->probas_[ctx];
static int GetCoeffs(VP8BitReader* const br, const VP8BandProbas* const prob,
int ctx, const quant_t dq, int n, int16_t* out) {
// n is either 0 or 1 here. kBands[n] is not necessary for extracting '*p'.
const uint8_t* p = prob[n].probas_[ctx];
for (; n < 16; ++n) {
if (!VP8GetBit(br, p[0])) {
return n; // previous coeff was last non-zero coeff
}
while (!VP8GetBit(br, p[1])) { // sequence of zero coeffs
p = prob[++n]->probas_[0];
p = prob[kBands[++n]].probas_[0];
if (n == 16) return 16;
}
{ // non zero coeff
const VP8ProbaArray* const p_ctx = &prob[n + 1]->probas_[0];
const VP8ProbaArray* const p_ctx = &prob[kBands[n + 1]].probas_[0];
int v;
if (!VP8GetBit(br, p[2])) {
v = 1;
@ -461,46 +447,6 @@ static int GetCoeffsFast(VP8BitReader* const br,
return 16;
}
// This version of GetCoeffs() uses VP8GetBitAlt(), an alternate version of
// VP8GetBit() targeting specific platforms.
static int GetCoeffsAlt(VP8BitReader* const br,
const VP8BandProbas* const prob[],
int ctx, const quant_t dq, int n, int16_t* out) {
const uint8_t* p = prob[n]->probas_[ctx];
for (; n < 16; ++n) {
if (!VP8GetBitAlt(br, p[0])) {
return n; // previous coeff was last non-zero coeff
}
while (!VP8GetBitAlt(br, p[1])) { // sequence of zero coeffs
p = prob[++n]->probas_[0];
if (n == 16) return 16;
}
{ // non zero coeff
const VP8ProbaArray* const p_ctx = &prob[n + 1]->probas_[0];
int v;
if (!VP8GetBitAlt(br, p[2])) {
v = 1;
p = p_ctx[1];
} else {
v = GetLargeValue(br, p);
p = p_ctx[2];
}
out[kZigzag[n]] = VP8GetSigned(br, v) * dq[n > 0];
}
}
return 16;
}
WEBP_TSAN_IGNORE_FUNCTION static void InitGetCoeffs(void) {
if (GetCoeffs == NULL) {
if (VP8GetCPUInfo != NULL && VP8GetCPUInfo(kSlowSSSE3)) {
GetCoeffs = GetCoeffsAlt;
} else {
GetCoeffs = GetCoeffsFast;
}
}
}
static WEBP_INLINE uint32_t NzCodeBits(uint32_t nz_coeffs, int nz, int dc_nz) {
nz_coeffs <<= 2;
nz_coeffs |= (nz > 3) ? 3 : (nz > 1) ? 2 : dc_nz;
@ -509,8 +455,8 @@ static WEBP_INLINE uint32_t NzCodeBits(uint32_t nz_coeffs, int nz, int dc_nz) {
static int ParseResiduals(VP8Decoder* const dec,
VP8MB* const mb, VP8BitReader* const token_br) {
const VP8BandProbas* (* const bands)[16 + 1] = dec->proba_.bands_ptr_;
const VP8BandProbas* const * ac_proba;
VP8BandProbas (* const bands)[NUM_BANDS] = dec->proba_.bands_;
const VP8BandProbas* ac_proba;
VP8MBData* const block = dec->mb_data_ + dec->mb_x_;
const VP8QuantMatrix* const q = &dec->dqm_[block->segment_];
int16_t* dst = block->coeffs_;
@ -640,7 +586,7 @@ static int ParseFrame(VP8Decoder* const dec, VP8Io* io) {
for (dec->mb_y_ = 0; dec->mb_y_ < dec->br_mb_y_; ++dec->mb_y_) {
// Parse bitstream for this row.
VP8BitReader* const token_br =
&dec->parts_[dec->mb_y_ & dec->num_parts_minus_one_];
&dec->parts_[dec->mb_y_ & (dec->num_parts_ - 1)];
if (!VP8ParseIntraModeRow(&dec->br_, dec)) {
return VP8SetError(dec, VP8_STATUS_NOT_ENOUGH_DATA,
"Premature end-of-partition0 encountered.");
@ -710,7 +656,8 @@ void VP8Clear(VP8Decoder* const dec) {
return;
}
WebPGetWorkerInterface()->End(&dec->worker_);
WebPDeallocateAlphaMemory(dec);
ALPHDelete(dec->alph_dec_);
dec->alph_dec_ = NULL;
WebPSafeFree(dec->mem_);
dec->mem_ = NULL;
dec->mem_size_ = 0;
@ -719,3 +666,4 @@ void VP8Clear(VP8Decoder* const dec) {
}
//------------------------------------------------------------------------------


@ -15,11 +15,10 @@
#define WEBP_DEC_VP8I_H_
#include <string.h> // for memcpy()
#include "./common_dec.h"
#include "./vp8li_dec.h"
#include "../utils/bit_reader_utils.h"
#include "../utils/random_utils.h"
#include "../utils/thread_utils.h"
#include "./vp8li.h"
#include "../utils/bit_reader.h"
#include "../utils/random.h"
#include "../utils/thread.h"
#include "../dsp/dsp.h"
#ifdef __cplusplus
@ -31,10 +30,46 @@ extern "C" {
// version numbers
#define DEC_MAJ_VERSION 0
#define DEC_MIN_VERSION 6
#define DEC_REV_VERSION 0
#define DEC_MIN_VERSION 4
#define DEC_REV_VERSION 4
// YUV-cache parameters. Cache is 32-bytes wide (= one cacheline).
// intra prediction modes
enum { B_DC_PRED = 0, // 4x4 modes
B_TM_PRED,
B_VE_PRED,
B_HE_PRED,
B_RD_PRED,
B_VR_PRED,
B_LD_PRED,
B_VL_PRED,
B_HD_PRED,
B_HU_PRED,
NUM_BMODES = B_HU_PRED + 1 - B_DC_PRED, // = 10
// Luma16 or UV modes
DC_PRED = B_DC_PRED, V_PRED = B_VE_PRED,
H_PRED = B_HE_PRED, TM_PRED = B_TM_PRED,
B_PRED = NUM_BMODES, // refined I4x4 mode
// special modes
B_DC_PRED_NOTOP = 4,
B_DC_PRED_NOLEFT = 5,
B_DC_PRED_NOTOPLEFT = 6,
NUM_B_DC_MODES = 7 };
enum { MB_FEATURE_TREE_PROBS = 3,
NUM_MB_SEGMENTS = 4,
NUM_REF_LF_DELTAS = 4,
NUM_MODE_LF_DELTAS = 4, // I4x4, ZERO, *, SPLIT
MAX_NUM_PARTITIONS = 8,
// Probabilities
NUM_TYPES = 4,
NUM_BANDS = 8,
NUM_CTX = 3,
NUM_PROBAS = 11,
NUM_MV_PROBAS = 19 };
// YUV-cache parameters.
// Constraints are: We need to store one 16x16 block of luma samples (y),
// and two 8x8 chroma blocks (u/v). These are better be 16-bytes aligned,
// in order to be SIMD-friendly. We also need to store the top, left and
@ -56,6 +91,8 @@ extern "C" {
// 'y' = y-samples 'u' = u-samples 'v' = v-samples
// '|' = left sample, '-' = top sample, '+' = top-left sample
// 't' = extra top-right sample for 4x4 modes
// With this layout, BPS (=Bytes Per Scan-line) is one cacheline size.
#define BPS 32 // this is the common stride used by yuv[]
#define YUV_SIZE (BPS * 17 + BPS * 9)
#define Y_SIZE (BPS * 17)
#define Y_OFF (BPS * 1 + 8)
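
To make the layout concrete (an editorial sketch using only the constants in this hunk): because BPS is the fixed scan-line stride of the work area, addressing a luma sample of the current macroblock is plain pointer arithmetic.

// Sketch only, not part of the header. Valid for 0 <= x, y < 16; the top/left
// border samples sit just before Y_OFF in the same buffer.
static WEBP_INLINE uint8_t LumaSampleAt(const uint8_t* const yuv_b,
                                        int x, int y) {
  return yuv_b[Y_OFF + y * BPS + x];
}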
@ -93,6 +130,7 @@ typedef struct {
int8_t filter_strength_[NUM_MB_SEGMENTS]; // filter strength for segments
} VP8SegmentHeader;
// probas associated to one of the contexts
typedef uint8_t VP8ProbaArray[NUM_PROBAS];
@ -105,7 +143,6 @@ typedef struct {
uint8_t segments_[MB_FEATURE_TREE_PROBS];
// Type: 0:Intra16-AC 1:Intra16-DC 2:Chroma 3:Intra4
VP8BandProbas bands_[NUM_TYPES][NUM_BANDS];
const VP8BandProbas* bands_ptr_[NUM_TYPES][16 + 1];
} VP8Proba;
// Filter parameters
@ -209,8 +246,8 @@ struct VP8Decoder {
int tl_mb_x_, tl_mb_y_; // top-left MB that must be in-loop filtered
int br_mb_x_, br_mb_y_; // last bottom-right MB that must be decoded
// number of partitions minus one.
uint32_t num_parts_minus_one_;
// number of partitions.
int num_parts_;
// per-partition boolean decoders.
VP8BitReader parts_[MAX_NUM_PARTITIONS];
@ -258,11 +295,9 @@ struct VP8Decoder {
struct ALPHDecoder* alph_dec_; // alpha-plane decoder object
const uint8_t* alpha_data_; // compressed alpha data (if present)
size_t alpha_data_size_;
int is_alpha_decoded_; // true if alpha_data_ is decoded in alpha_plane_
uint8_t* alpha_plane_mem_; // memory allocated for alpha_plane_
uint8_t* alpha_plane_; // output. Persistent, contains the whole data.
const uint8_t* alpha_prev_line_; // last decoded alpha row (or NULL)
int alpha_dithering_; // derived from decoding options (0=off, 100=full)
int is_alpha_decoded_; // true if alpha_data_ is decoded in alpha_plane_
uint8_t* alpha_plane_; // output. Persistent, contains the whole data.
int alpha_dithering_; // derived from decoding options (0=off, 100=full).
};
//------------------------------------------------------------------------------
@ -282,7 +317,7 @@ int VP8ParseIntraModeRow(VP8BitReader* const br, VP8Decoder* const dec);
void VP8ParseQuant(VP8Decoder* const dec);
// in frame.c
int VP8InitFrame(VP8Decoder* const dec, VP8Io* const io);
int VP8InitFrame(VP8Decoder* const dec, VP8Io* io);
// Call io->setup() and finish setting up scan parameters.
// After this call returns, one must always call VP8ExitCritical() with the
// same parameters. Both functions should be used in pair. Returns VP8_STATUS_OK
@ -308,7 +343,6 @@ int VP8DecodeMB(VP8Decoder* const dec, VP8BitReader* const token_br);
// in alpha.c
const uint8_t* VP8DecompressAlphaRows(VP8Decoder* const dec,
const VP8Io* const io,
int row, int num_rows);
//------------------------------------------------------------------------------

File diff suppressed because it is too large


@ -16,10 +16,10 @@
#define WEBP_DEC_VP8LI_H_
#include <string.h> // for memcpy()
#include "./webpi_dec.h"
#include "../utils/bit_reader_utils.h"
#include "../utils/color_cache_utils.h"
#include "../utils/huffman_utils.h"
#include "./webpi.h"
#include "../utils/bit_reader.h"
#include "../utils/color_cache.h"
#include "../utils/huffman.h"
#ifdef __cplusplus
extern "C" {
@ -43,7 +43,6 @@ struct VP8LTransform {
typedef struct {
int color_cache_size_;
VP8LColorCache color_cache_;
VP8LColorCache saved_color_cache_; // for incremental
int huffman_mask_;
int huffman_subsample_bits_;
@ -51,12 +50,12 @@ typedef struct {
uint32_t *huffman_image_;
int num_htree_groups_;
HTreeGroup *htree_groups_;
HuffmanCode *huffman_tables_;
} VP8LMetadata;
typedef struct VP8LDecoder VP8LDecoder;
struct VP8LDecoder {
VP8StatusCode status_;
VP8LDecodeState action_;
VP8LDecodeState state_;
VP8Io *io_;
@ -67,9 +66,6 @@ struct VP8LDecoder {
uint32_t *argb_cache_; // Scratch buffer for temporary BGRA storage.
VP8LBitReader br_;
int incremental_; // if true, incremental decoding is expected
VP8LBitReader saved_br_; // note: could be local variables too
int saved_last_pixel_;
int width_;
int height_;
@ -100,7 +96,8 @@ struct ALPHDecoder; // Defined in dec/alphai.h.
// Decodes image header for alpha data stored using lossless compression.
// Returns false in case of error.
int VP8LDecodeAlphaHeader(struct ALPHDecoder* const alph_dec,
const uint8_t* const data, size_t data_size);
const uint8_t* const data, size_t data_size,
uint8_t* const output);
// Decodes *at least* 'last_row' rows of alpha. If some of the initial rows are
// already decoded in previous call(s), it will resume decoding from where it


@ -13,10 +13,9 @@
#include <stdlib.h>
#include "./vp8i_dec.h"
#include "./vp8li_dec.h"
#include "./webpi_dec.h"
#include "../utils/utils.h"
#include "./vp8i.h"
#include "./vp8li.h"
#include "./webpi.h"
#include "../webp/mux_types.h" // ALPHA_FLAG
//------------------------------------------------------------------------------
@ -39,11 +38,19 @@
// 20..23 VP8X flags bit-map corresponding to the chunk-types present.
// 24..26 Width of the Canvas Image.
// 27..29 Height of the Canvas Image.
// There can be extra chunks after the "VP8X" chunk (ICCP, ANMF, VP8, VP8L,
// XMP, EXIF ...)
// There can be extra chunks after the "VP8X" chunk (ICCP, FRGM, ANMF, VP8,
// VP8L, XMP, EXIF ...)
// All sizes are in little-endian order.
// Note: chunk data size must be padded to multiple of 2 when written.
static WEBP_INLINE uint32_t get_le24(const uint8_t* const data) {
return data[0] | (data[1] << 8) | (data[2] << 16);
}
static WEBP_INLINE uint32_t get_le32(const uint8_t* const data) {
return (uint32_t)get_le24(data) | (data[3] << 24);
}
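
As a hedged illustration of the byte layout documented above (ReadVP8XCanvasSize() is a hypothetical helper, not part of this file; 'data' points at the start of the RIFF file):

static int ReadVP8XCanvasSize(const uint8_t* const data, size_t data_size,
                              int* const width, int* const height) {
  if (data_size < 30) return 0;          // need bytes 0..29 described above
  *width  = 1 + get_le24(data + 24);     // stored as (canvas width - 1), 24-bit LE
  *height = 1 + get_le24(data + 27);     // stored as (canvas height - 1), 24-bit LE
  return 1;
}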
// Validates the RIFF container (if detected) and skips over it.
// If a RIFF container is detected, returns:
// VP8_STATUS_BITSTREAM_ERROR for invalid header,
@ -63,7 +70,7 @@ static VP8StatusCode ParseRIFF(const uint8_t** const data,
if (memcmp(*data + 8, "WEBP", TAG_SIZE)) {
return VP8_STATUS_BITSTREAM_ERROR; // Wrong image file signature.
} else {
const uint32_t size = GetLE32(*data + TAG_SIZE);
const uint32_t size = get_le32(*data + TAG_SIZE);
// Check that we have at least one chunk (i.e "WEBP" + "VP8?nnnn").
if (size < TAG_SIZE + CHUNK_HEADER_SIZE) {
return VP8_STATUS_BITSTREAM_ERROR;
@ -109,7 +116,7 @@ static VP8StatusCode ParseVP8X(const uint8_t** const data,
if (!memcmp(*data, "VP8X", TAG_SIZE)) {
int width, height;
uint32_t flags;
const uint32_t chunk_size = GetLE32(*data + TAG_SIZE);
const uint32_t chunk_size = get_le32(*data + TAG_SIZE);
if (chunk_size != VP8X_CHUNK_SIZE) {
return VP8_STATUS_BITSTREAM_ERROR; // Wrong chunk size.
}
@ -118,9 +125,9 @@ static VP8StatusCode ParseVP8X(const uint8_t** const data,
if (*data_size < vp8x_size) {
return VP8_STATUS_NOT_ENOUGH_DATA; // Insufficient data.
}
flags = GetLE32(*data + 8);
width = 1 + GetLE24(*data + 12);
height = 1 + GetLE24(*data + 15);
flags = get_le32(*data + 8);
width = 1 + get_le24(*data + 12);
height = 1 + get_le24(*data + 15);
if (width * (uint64_t)height >= MAX_IMAGE_AREA) {
return VP8_STATUS_BITSTREAM_ERROR; // image is too large
}
@ -174,7 +181,7 @@ static VP8StatusCode ParseOptionalChunks(const uint8_t** const data,
return VP8_STATUS_NOT_ENOUGH_DATA;
}
chunk_size = GetLE32(buf + TAG_SIZE);
chunk_size = get_le32(buf + TAG_SIZE);
if (chunk_size > MAX_CHUNK_PAYLOAD) {
return VP8_STATUS_BITSTREAM_ERROR; // Not a valid chunk size.
}
@ -240,7 +247,7 @@ static VP8StatusCode ParseVP8Header(const uint8_t** const data_ptr,
if (is_vp8 || is_vp8l) {
// Bitstream contains VP8/VP8L header.
const uint32_t size = GetLE32(data + TAG_SIZE);
const uint32_t size = get_le32(data + TAG_SIZE);
if ((riff_size >= minimal_size) && (size > riff_size - minimal_size)) {
return VP8_STATUS_BITSTREAM_ERROR; // Inconsistent size information.
}
@ -289,6 +296,7 @@ static VP8StatusCode ParseHeadersInternal(const uint8_t* data,
int found_riff = 0;
int found_vp8x = 0;
int animation_present = 0;
int fragments_present = 0;
const int have_all_data = (headers != NULL) ? headers->have_all_data : 0;
VP8StatusCode status;
@ -317,6 +325,7 @@ static VP8StatusCode ParseHeadersInternal(const uint8_t* data,
return status; // Wrong VP8X / insufficient data.
}
animation_present = !!(flags & ANIMATION_FLAG);
fragments_present = !!(flags & FRAGMENTS_FLAG);
if (!found_riff && found_vp8x) {
// Note: This restriction may be removed in the future, if it becomes
// necessary to send VP8X chunk to the decoder.
@ -328,7 +337,8 @@ static VP8StatusCode ParseHeadersInternal(const uint8_t* data,
image_width = canvas_width;
image_height = canvas_height;
if (found_vp8x && animation_present && headers == NULL) {
if (found_vp8x && (animation_present || fragments_present) &&
headers == NULL) {
status = VP8_STATUS_OK;
goto ReturnWidthHeight; // Just return features from VP8X header.
}
@ -359,7 +369,7 @@ static VP8StatusCode ParseHeadersInternal(const uint8_t* data,
return VP8_STATUS_BITSTREAM_ERROR;
}
if (format != NULL && !animation_present) {
if (format != NULL && !(animation_present || fragments_present)) {
*format = hdrs.is_lossless ? 2 : 1;
}
@ -412,8 +422,7 @@ static VP8StatusCode ParseHeadersInternal(const uint8_t* data,
}
VP8StatusCode WebPParseHeaders(WebPHeaderStructure* const headers) {
// status is marked volatile as a workaround for a clang-3.8 (aarch64) bug
volatile VP8StatusCode status;
VP8StatusCode status;
int has_animation = 0;
assert(headers != NULL);
// fill out headers, ignore width/height/has_alpha.
@ -510,13 +519,13 @@ static VP8StatusCode DecodeInto(const uint8_t* const data, size_t data_size,
if (status != VP8_STATUS_OK) {
WebPFreeDecBuffer(params->output);
} else {
if (params->options != NULL && params->options->flip) {
// This restores the original stride values if options->flip was used
// during the call to WebPAllocateDecBuffer above.
status = WebPFlipBuffer(params->output);
}
}
#if WEBP_DECODER_ABI_VERSION > 0x0203
if (params->options != NULL && params->options->flip) {
status = WebPFlipBuffer(params->output);
}
#endif
return status;
}
@ -758,24 +767,9 @@ VP8StatusCode WebPDecode(const uint8_t* data, size_t data_size,
}
WebPResetDecParams(&params);
params.options = &config->options;
params.output = &config->output;
if (WebPAvoidSlowMemory(params.output, &config->input)) {
// decoding to slow memory: use a temporary in-mem buffer to decode into.
WebPDecBuffer in_mem_buffer;
WebPInitDecBuffer(&in_mem_buffer);
in_mem_buffer.colorspace = config->output.colorspace;
in_mem_buffer.width = config->input.width;
in_mem_buffer.height = config->input.height;
params.output = &in_mem_buffer;
status = DecodeInto(data, data_size, &params);
if (status == VP8_STATUS_OK) { // do the slow-copy
status = WebPCopyDecBufferPixels(&in_mem_buffer, &config->output);
}
WebPFreeDecBuffer(&in_mem_buffer);
} else {
status = DecodeInto(data, data_size, &params);
}
params.options = &config->options;
status = DecodeInto(data, data_size, &params);
return status;
}
@ -814,17 +808,15 @@ int WebPIoInitFromOptions(const WebPDecoderOptions* const options,
// Scaling
io->use_scaling = (options != NULL) && (options->use_scaling > 0);
if (io->use_scaling) {
int scaled_width = options->scaled_width;
int scaled_height = options->scaled_height;
if (!WebPRescalerGetScaledDimensions(w, h, &scaled_width, &scaled_height)) {
if (options->scaled_width <= 0 || options->scaled_height <= 0) {
return 0;
}
io->scaled_width = scaled_width;
io->scaled_height = scaled_height;
io->scaled_width = options->scaled_width;
io->scaled_height = options->scaled_height;
}
// Filter
io->bypass_filtering = (options != NULL) && options->bypass_filtering;
io->bypass_filtering = options && options->bypass_filtering;
// Fancy upsampler
#ifdef FANCY_UPSAMPLING
@ -841,3 +833,4 @@ int WebPIoInitFromOptions(const WebPDecoderOptions* const options,
}
//------------------------------------------------------------------------------


@ -18,18 +18,15 @@
extern "C" {
#endif
#include "../utils/rescaler_utils.h"
#include "./vp8_dec.h"
#include "../utils/rescaler.h"
#include "./decode_vp8.h"
//------------------------------------------------------------------------------
// WebPDecParams: Decoding output parameters. Transient internal object.
typedef struct WebPDecParams WebPDecParams;
typedef int (*OutputFunc)(const VP8Io* const io, WebPDecParams* const p);
typedef int (*OutputAlphaFunc)(const VP8Io* const io, WebPDecParams* const p,
int expected_num_out_lines);
typedef int (*OutputRowFunc)(WebPDecParams* const p, int y_pos,
int max_out_lines);
typedef int (*OutputRowFunc)(WebPDecParams* const p, int y_pos);
struct WebPDecParams {
WebPDecBuffer* output; // output buffer.
@ -38,12 +35,12 @@ struct WebPDecParams {
int last_y; // coordinate of the line that was last output
const WebPDecoderOptions* options; // if not NULL, use alt decoding features
WebPRescaler* scaler_y, *scaler_u, *scaler_v, *scaler_a; // rescalers
// rescalers
WebPRescaler scaler_y, scaler_u, scaler_v, scaler_a;
void* memory; // overall scratch memory for the output work.
OutputFunc emit; // output RGB or YUV samples
OutputAlphaFunc emit_alpha; // output alpha channel
OutputFunc emit_alpha; // output alpha channel
OutputRowFunc emit_alpha_row; // output one line of rescaled alpha values
};
@ -107,23 +104,13 @@ VP8StatusCode WebPAllocateDecBuffer(int width, int height,
VP8StatusCode WebPFlipBuffer(WebPDecBuffer* const buffer);
// Copy 'src' into 'dst' buffer, making sure 'dst' is not marked as owner of the
// memory (still held by 'src'). No pixels are copied.
// memory (still held by 'src').
void WebPCopyDecBuffer(const WebPDecBuffer* const src,
WebPDecBuffer* const dst);
// Copy and transfer ownership from src to dst (beware of parameter order!)
void WebPGrabDecBuffer(WebPDecBuffer* const src, WebPDecBuffer* const dst);
// Copy pixels from 'src' into a *preallocated* 'dst' buffer. Returns
// VP8_STATUS_INVALID_PARAM if the 'dst' is not set up correctly for the copy.
VP8StatusCode WebPCopyDecBufferPixels(const WebPDecBuffer* const src,
WebPDecBuffer* const dst);
// Returns true if decoding will be slow with the current configuration
// and bitstream features.
int WebPAvoidSlowMemory(const WebPDecBuffer* const output,
const WebPBitstreamFeatures* const features);
//------------------------------------------------------------------------------
#ifdef __cplusplus


@ -1,7 +1,7 @@
lib_LTLIBRARIES = libwebpdemux.la
libwebpdemux_la_SOURCES =
libwebpdemux_la_SOURCES += anim_decode.c demux.c
libwebpdemux_la_SOURCES += demux.c
libwebpdemuxinclude_HEADERS =
libwebpdemuxinclude_HEADERS += ../webp/demux.h
@ -9,6 +9,6 @@ libwebpdemuxinclude_HEADERS += ../webp/mux_types.h
libwebpdemuxinclude_HEADERS += ../webp/types.h
libwebpdemux_la_LIBADD = ../libwebp.la
libwebpdemux_la_LDFLAGS = -no-undefined -version-info 2:2:0
libwebpdemux_la_LDFLAGS = -no-undefined -version-info 1:2:0
libwebpdemuxincludedir = $(includedir)/webp
pkgconfig_DATA = libwebpdemux.pc


@ -1,454 +0,0 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// AnimDecoder implementation.
//
#ifdef HAVE_CONFIG_H
#include "../webp/config.h"
#endif
#include <assert.h>
#include <string.h>
#include "../utils/utils.h"
#include "../webp/decode.h"
#include "../webp/demux.h"
#define NUM_CHANNELS 4
typedef void (*BlendRowFunc)(uint32_t* const, const uint32_t* const, int);
static void BlendPixelRowNonPremult(uint32_t* const src,
const uint32_t* const dst, int num_pixels);
static void BlendPixelRowPremult(uint32_t* const src, const uint32_t* const dst,
int num_pixels);
struct WebPAnimDecoder {
WebPDemuxer* demux_; // Demuxer created from given WebP bitstream.
WebPDecoderConfig config_; // Decoder config.
// Note: we use a pointer to a function blending multiple pixels at a time to
// allow possible inlining of the per-pixel blending function.
BlendRowFunc blend_func_; // Pointer to the chosen blend row function.
WebPAnimInfo info_; // Global info about the animation.
uint8_t* curr_frame_; // Current canvas (not disposed).
uint8_t* prev_frame_disposed_; // Previous canvas (properly disposed).
int prev_frame_timestamp_; // Previous frame timestamp (milliseconds).
WebPIterator prev_iter_; // Iterator object for previous frame.
int prev_frame_was_keyframe_; // True if previous frame was a keyframe.
int next_frame_; // Index of the next frame to be decoded
// (starting from 1).
};
static void DefaultDecoderOptions(WebPAnimDecoderOptions* const dec_options) {
dec_options->color_mode = MODE_RGBA;
dec_options->use_threads = 0;
}
int WebPAnimDecoderOptionsInitInternal(WebPAnimDecoderOptions* dec_options,
int abi_version) {
if (dec_options == NULL ||
WEBP_ABI_IS_INCOMPATIBLE(abi_version, WEBP_DEMUX_ABI_VERSION)) {
return 0;
}
DefaultDecoderOptions(dec_options);
return 1;
}
static int ApplyDecoderOptions(const WebPAnimDecoderOptions* const dec_options,
WebPAnimDecoder* const dec) {
WEBP_CSP_MODE mode;
WebPDecoderConfig* config = &dec->config_;
assert(dec_options != NULL);
mode = dec_options->color_mode;
if (mode != MODE_RGBA && mode != MODE_BGRA &&
mode != MODE_rgbA && mode != MODE_bgrA) {
return 0;
}
dec->blend_func_ = (mode == MODE_RGBA || mode == MODE_BGRA)
? &BlendPixelRowNonPremult
: &BlendPixelRowPremult;
WebPInitDecoderConfig(config);
config->output.colorspace = mode;
config->output.is_external_memory = 1;
config->options.use_threads = dec_options->use_threads;
// Note: config->output.u.RGBA is set at the time of decoding each frame.
return 1;
}
WebPAnimDecoder* WebPAnimDecoderNewInternal(
const WebPData* webp_data, const WebPAnimDecoderOptions* dec_options,
int abi_version) {
WebPAnimDecoderOptions options;
WebPAnimDecoder* dec = NULL;
if (webp_data == NULL ||
WEBP_ABI_IS_INCOMPATIBLE(abi_version, WEBP_DEMUX_ABI_VERSION)) {
return NULL;
}
// Note: calloc() so that the pointer members are initialized to NULL.
dec = (WebPAnimDecoder*)WebPSafeCalloc(1ULL, sizeof(*dec));
if (dec == NULL) goto Error;
if (dec_options != NULL) {
options = *dec_options;
} else {
DefaultDecoderOptions(&options);
}
if (!ApplyDecoderOptions(&options, dec)) goto Error;
dec->demux_ = WebPDemux(webp_data);
if (dec->demux_ == NULL) goto Error;
dec->info_.canvas_width = WebPDemuxGetI(dec->demux_, WEBP_FF_CANVAS_WIDTH);
dec->info_.canvas_height = WebPDemuxGetI(dec->demux_, WEBP_FF_CANVAS_HEIGHT);
dec->info_.loop_count = WebPDemuxGetI(dec->demux_, WEBP_FF_LOOP_COUNT);
dec->info_.bgcolor = WebPDemuxGetI(dec->demux_, WEBP_FF_BACKGROUND_COLOR);
dec->info_.frame_count = WebPDemuxGetI(dec->demux_, WEBP_FF_FRAME_COUNT);
// Note: calloc() because we fill frame with zeroes as well.
dec->curr_frame_ = (uint8_t*)WebPSafeCalloc(
dec->info_.canvas_width * NUM_CHANNELS, dec->info_.canvas_height);
if (dec->curr_frame_ == NULL) goto Error;
dec->prev_frame_disposed_ = (uint8_t*)WebPSafeCalloc(
dec->info_.canvas_width * NUM_CHANNELS, dec->info_.canvas_height);
if (dec->prev_frame_disposed_ == NULL) goto Error;
WebPAnimDecoderReset(dec);
return dec;
Error:
WebPAnimDecoderDelete(dec);
return NULL;
}
int WebPAnimDecoderGetInfo(const WebPAnimDecoder* dec, WebPAnimInfo* info) {
if (dec == NULL || info == NULL) return 0;
*info = dec->info_;
return 1;
}
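
The public API that this implementation backs is driven roughly as follows (caller-side sketch, assuming 'webp_data' already holds the complete animation; the default options decode to RGBA, as set in DefaultDecoderOptions() above):

#include "webp/demux.h"

static int PlayAnimation(const WebPData* const webp_data) {
  WebPAnimInfo info;
  WebPAnimDecoder* const dec = WebPAnimDecoderNew(webp_data, NULL);
  if (dec == NULL || !WebPAnimDecoderGetInfo(dec, &info)) {
    WebPAnimDecoderDelete(dec);
    return 0;
  }
  while (WebPAnimDecoderHasMoreFrames(dec)) {
    uint8_t* frame_rgba;    // canvas-sized buffer owned by the decoder
    int timestamp_ms;
    if (!WebPAnimDecoderGetNext(dec, &frame_rgba, &timestamp_ms)) break;
    // ... display frame_rgba (info.canvas_width x info.canvas_height) at
    // timestamp_ms ...
  }
  WebPAnimDecoderDelete(dec);
  return 1;
}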
// Returns true if the frame covers the full canvas.
static int IsFullFrame(int width, int height, int canvas_width,
int canvas_height) {
return (width == canvas_width && height == canvas_height);
}
// Clear the canvas to transparent.
static int ZeroFillCanvas(uint8_t* buf, uint32_t canvas_width,
uint32_t canvas_height) {
const uint64_t size =
(uint64_t)canvas_width * canvas_height * NUM_CHANNELS * sizeof(*buf);
if (size != (size_t)size) return 0;
memset(buf, 0, (size_t)size);
return 1;
}
// Clear given frame rectangle to transparent.
static void ZeroFillFrameRect(uint8_t* buf, int buf_stride, int x_offset,
int y_offset, int width, int height) {
int j;
assert(width * NUM_CHANNELS <= buf_stride);
buf += y_offset * buf_stride + x_offset * NUM_CHANNELS;
for (j = 0; j < height; ++j) {
memset(buf, 0, width * NUM_CHANNELS);
buf += buf_stride;
}
}
// Copy width * height pixels from 'src' to 'dst'.
static int CopyCanvas(const uint8_t* src, uint8_t* dst,
uint32_t width, uint32_t height) {
const uint64_t size = (uint64_t)width * height * NUM_CHANNELS;
if (size != (size_t)size) return 0;
assert(src != NULL && dst != NULL);
memcpy(dst, src, (size_t)size);
return 1;
}
// Returns true if the current frame is a key-frame.
static int IsKeyFrame(const WebPIterator* const curr,
const WebPIterator* const prev,
int prev_frame_was_key_frame,
int canvas_width, int canvas_height) {
if (curr->frame_num == 1) {
return 1;
} else if ((!curr->has_alpha || curr->blend_method == WEBP_MUX_NO_BLEND) &&
IsFullFrame(curr->width, curr->height,
canvas_width, canvas_height)) {
return 1;
} else {
return (prev->dispose_method == WEBP_MUX_DISPOSE_BACKGROUND) &&
(IsFullFrame(prev->width, prev->height, canvas_width,
canvas_height) ||
prev_frame_was_key_frame);
}
}
// Blend a single channel of 'src' over 'dst', given their alpha channel values.
// 'src' and 'dst' are assumed to be NOT pre-multiplied by alpha.
static uint8_t BlendChannelNonPremult(uint32_t src, uint8_t src_a,
uint32_t dst, uint8_t dst_a,
uint32_t scale, int shift) {
const uint8_t src_channel = (src >> shift) & 0xff;
const uint8_t dst_channel = (dst >> shift) & 0xff;
const uint32_t blend_unscaled = src_channel * src_a + dst_channel * dst_a;
assert(blend_unscaled < (1ULL << 32) / scale);
return (blend_unscaled * scale) >> 24;
}
// Blend 'src' over 'dst' assuming they are NOT pre-multiplied by alpha.
static uint32_t BlendPixelNonPremult(uint32_t src, uint32_t dst) {
const uint8_t src_a = (src >> 24) & 0xff;
if (src_a == 0) {
return dst;
} else {
const uint8_t dst_a = (dst >> 24) & 0xff;
// This is the approximate integer arithmetic for the actual formula:
// dst_factor_a = (dst_a * (255 - src_a)) / 255.
const uint8_t dst_factor_a = (dst_a * (256 - src_a)) >> 8;
const uint8_t blend_a = src_a + dst_factor_a;
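// 'scale' below is 2^24 / blend_a: multiplying by it and shifting right by
// 24 in BlendChannelNonPremult() stands in for dividing each blended channel
// by the blended alpha value.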
const uint32_t scale = (1UL << 24) / blend_a;
const uint8_t blend_r =
BlendChannelNonPremult(src, src_a, dst, dst_factor_a, scale, 0);
const uint8_t blend_g =
BlendChannelNonPremult(src, src_a, dst, dst_factor_a, scale, 8);
const uint8_t blend_b =
BlendChannelNonPremult(src, src_a, dst, dst_factor_a, scale, 16);
assert(src_a + dst_factor_a < 256);
return (blend_r << 0) |
(blend_g << 8) |
(blend_b << 16) |
((uint32_t)blend_a << 24);
}
}
// Blend 'num_pixels' in 'src' over 'dst' assuming they are NOT pre-multiplied
// by alpha.
static void BlendPixelRowNonPremult(uint32_t* const src,
const uint32_t* const dst, int num_pixels) {
int i;
for (i = 0; i < num_pixels; ++i) {
const uint8_t src_alpha = (src[i] >> 24) & 0xff;
if (src_alpha != 0xff) {
src[i] = BlendPixelNonPremult(src[i], dst[i]);
}
}
}
// Individually multiply each channel in 'pix' by 'scale'.
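// The multiplication is done two channels at a time (SWAR): the 0x00FF00FF
// mask splits the pixel into alternating bytes held in 16-bit lanes, so each
// 8-bit channel times 'scale' (<= 256) stays inside its lane before the
// shift/remask step recombines the result.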
static WEBP_INLINE uint32_t ChannelwiseMultiply(uint32_t pix, uint32_t scale) {
uint32_t mask = 0x00FF00FF;
uint32_t rb = ((pix & mask) * scale) >> 8;
uint32_t ag = ((pix >> 8) & mask) * scale;
return (rb & mask) | (ag & ~mask);
}
// Blend 'src' over 'dst' assuming they are pre-multiplied by alpha.
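// With premultiplied inputs the 'over' operator simplifies to
//   out = src + dst * (1 - src_alpha),
// where ChannelwiseMultiply() approximates the (1 - src_alpha) factor as
// (256 - src_a) / 256 on all four channels at once.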
static uint32_t BlendPixelPremult(uint32_t src, uint32_t dst) {
const uint8_t src_a = (src >> 24) & 0xff;
return src + ChannelwiseMultiply(dst, 256 - src_a);
}
// Blend 'num_pixels' in 'src' over 'dst' assuming they are pre-multiplied by
// alpha.
static void BlendPixelRowPremult(uint32_t* const src, const uint32_t* const dst,
int num_pixels) {
int i;
for (i = 0; i < num_pixels; ++i) {
const uint8_t src_alpha = (src[i] >> 24) & 0xff;
if (src_alpha != 0xff) {
src[i] = BlendPixelPremult(src[i], dst[i]);
}
}
}
// Returns two ranges (<left, width> pairs) at row 'canvas_y' that belong to
// 'src' but not 'dst'. A range is empty if its corresponding width is 0.
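// Example: with src spanning x = [10, 30) and dst spanning x = [15, 25) on an
// overlapping row, the ranges are <10, 5> (left of dst) and <25, 5> (right of
// dst); on a row that misses dst entirely, the whole src span is returned as
// the first range.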
static void FindBlendRangeAtRow(const WebPIterator* const src,
const WebPIterator* const dst, int canvas_y,
int* const left1, int* const width1,
int* const left2, int* const width2) {
const int src_max_x = src->x_offset + src->width;
const int dst_max_x = dst->x_offset + dst->width;
const int dst_max_y = dst->y_offset + dst->height;
assert(canvas_y >= src->y_offset && canvas_y < (src->y_offset + src->height));
*left1 = -1;
*width1 = 0;
*left2 = -1;
*width2 = 0;
if (canvas_y < dst->y_offset || canvas_y >= dst_max_y ||
src->x_offset >= dst_max_x || src_max_x <= dst->x_offset) {
*left1 = src->x_offset;
*width1 = src->width;
return;
}
if (src->x_offset < dst->x_offset) {
*left1 = src->x_offset;
*width1 = dst->x_offset - src->x_offset;
}
if (src_max_x > dst_max_x) {
*left2 = dst_max_x;
*width2 = src_max_x - dst_max_x;
}
}
int WebPAnimDecoderGetNext(WebPAnimDecoder* dec,
uint8_t** buf_ptr, int* timestamp_ptr) {
WebPIterator iter;
uint32_t width;
uint32_t height;
int is_key_frame;
int timestamp;
BlendRowFunc blend_row;
if (dec == NULL || buf_ptr == NULL || timestamp_ptr == NULL) return 0;
if (!WebPAnimDecoderHasMoreFrames(dec)) return 0;
width = dec->info_.canvas_width;
height = dec->info_.canvas_height;
blend_row = dec->blend_func_;
// Get compressed frame.
if (!WebPDemuxGetFrame(dec->demux_, dec->next_frame_, &iter)) {
return 0;
}
timestamp = dec->prev_frame_timestamp_ + iter.duration;
// Initialize.
is_key_frame = IsKeyFrame(&iter, &dec->prev_iter_,
dec->prev_frame_was_keyframe_, width, height);
if (is_key_frame) {
if (!ZeroFillCanvas(dec->curr_frame_, width, height)) {
goto Error;
}
} else {
if (!CopyCanvas(dec->prev_frame_disposed_, dec->curr_frame_,
width, height)) {
goto Error;
}
}
// Decode.
{
const uint8_t* in = iter.fragment.bytes;
const size_t in_size = iter.fragment.size;
const size_t out_offset =
(iter.y_offset * width + iter.x_offset) * NUM_CHANNELS;
WebPDecoderConfig* const config = &dec->config_;
WebPRGBABuffer* const buf = &config->output.u.RGBA;
buf->stride = NUM_CHANNELS * width;
buf->size = buf->stride * iter.height;
buf->rgba = dec->curr_frame_ + out_offset;
if (WebPDecode(in, in_size, config) != VP8_STATUS_OK) {
goto Error;
}
}
// During the decoding of the current frame, we may have set some pixels to be
// transparent (i.e. alpha < 255). However, the value of each of these pixels
// should have been determined by blending it against the value of that pixel
// in the previous frame if the blending method is WEBP_MUX_BLEND.
if (iter.frame_num > 1 && iter.blend_method == WEBP_MUX_BLEND &&
!is_key_frame) {
if (dec->prev_iter_.dispose_method == WEBP_MUX_DISPOSE_NONE) {
int y;
// Blend transparent pixels with pixels in previous canvas.
for (y = 0; y < iter.height; ++y) {
const size_t offset =
(iter.y_offset + y) * width + iter.x_offset;
blend_row((uint32_t*)dec->curr_frame_ + offset,
(uint32_t*)dec->prev_frame_disposed_ + offset, iter.width);
}
} else {
int y;
assert(dec->prev_iter_.dispose_method == WEBP_MUX_DISPOSE_BACKGROUND);
// We need to blend a transparent pixel with its value just after
// initialization. That is, blend it with:
// * Fully transparent pixel if it belongs to prevRect <-- No-op.
// * The pixel in the previous canvas otherwise <-- Need alpha-blending.
for (y = 0; y < iter.height; ++y) {
const int canvas_y = iter.y_offset + y;
int left1, width1, left2, width2;
FindBlendRangeAtRow(&iter, &dec->prev_iter_, canvas_y, &left1, &width1,
&left2, &width2);
if (width1 > 0) {
const size_t offset1 = canvas_y * width + left1;
blend_row((uint32_t*)dec->curr_frame_ + offset1,
(uint32_t*)dec->prev_frame_disposed_ + offset1, width1);
}
if (width2 > 0) {
const size_t offset2 = canvas_y * width + left2;
blend_row((uint32_t*)dec->curr_frame_ + offset2,
(uint32_t*)dec->prev_frame_disposed_ + offset2, width2);
}
}
}
}
// Update info of the previous frame and dispose it for the next iteration.
dec->prev_frame_timestamp_ = timestamp;
WebPDemuxReleaseIterator(&dec->prev_iter_);
dec->prev_iter_ = iter;
dec->prev_frame_was_keyframe_ = is_key_frame;
CopyCanvas(dec->curr_frame_, dec->prev_frame_disposed_, width, height);
if (dec->prev_iter_.dispose_method == WEBP_MUX_DISPOSE_BACKGROUND) {
ZeroFillFrameRect(dec->prev_frame_disposed_, width * NUM_CHANNELS,
dec->prev_iter_.x_offset, dec->prev_iter_.y_offset,
dec->prev_iter_.width, dec->prev_iter_.height);
}
++dec->next_frame_;
// All OK, fill in the values.
*buf_ptr = dec->curr_frame_;
*timestamp_ptr = timestamp;
return 1;
Error:
WebPDemuxReleaseIterator(&iter);
return 0;
}
int WebPAnimDecoderHasMoreFrames(const WebPAnimDecoder* dec) {
if (dec == NULL) return 0;
return (dec->next_frame_ <= (int)dec->info_.frame_count);
}
void WebPAnimDecoderReset(WebPAnimDecoder* dec) {
if (dec != NULL) {
dec->prev_frame_timestamp_ = 0;
WebPDemuxReleaseIterator(&dec->prev_iter_);
memset(&dec->prev_iter_, 0, sizeof(dec->prev_iter_));
dec->prev_frame_was_keyframe_ = 0;
dec->next_frame_ = 1;
}
}
const WebPDemuxer* WebPAnimDecoderGetDemuxer(const WebPAnimDecoder* dec) {
if (dec == NULL) return NULL;
return dec->demux_;
}
void WebPAnimDecoderDelete(WebPAnimDecoder* dec) {
if (dec != NULL) {
WebPDemuxReleaseIterator(&dec->prev_iter_);
WebPDemuxDelete(dec->demux_);
WebPSafeFree(dec->curr_frame_);
WebPSafeFree(dec->prev_frame_disposed_);
WebPSafeFree(dec);
}
}
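For orientation, the functions above implement the public WebPAnimDecoder API. Below is a minimal caller sketch, assuming the usual wrappers declared in webp/demux.h (WebPAnimDecoderOptionsInit, WebPAnimDecoderNew, which forwards to WebPAnimDecoderNewInternal above, plus WebPAnimDecoderGetInfo, WebPAnimDecoderHasMoreFrames, WebPAnimDecoderGetNext and WebPAnimDecoderDelete); error handling is kept to a minimum:

#include <stddef.h>
#include <stdint.h>
#include "webp/demux.h"

// Decode every frame of an animated WebP held in 'data'/'data_size'.
static int DecodeAllFrames(const uint8_t* data, size_t data_size) {
  WebPData webp_data;
  WebPAnimDecoderOptions opts;
  WebPAnimDecoder* dec;
  WebPAnimInfo info;
  webp_data.bytes = data;
  webp_data.size = data_size;
  WebPAnimDecoderOptionsInit(&opts);  // defaults to RGBA output, no threads
  dec = WebPAnimDecoderNew(&webp_data, &opts);
  if (dec == NULL || !WebPAnimDecoderGetInfo(dec, &info)) {
    WebPAnimDecoderDelete(dec);  // NULL-safe, see WebPAnimDecoderDelete above
    return 0;
  }
  while (WebPAnimDecoderHasMoreFrames(dec)) {
    uint8_t* frame_rgba;  // full canvas, owned by 'dec'
    int timestamp_ms;
    if (!WebPAnimDecoderGetNext(dec, &frame_rgba, &timestamp_ms)) break;
    // 'frame_rgba' holds info.canvas_width x info.canvas_height RGBA pixels
    // and stays valid until the next GetNext()/Reset()/Delete() call.
  }
  WebPAnimDecoderDelete(dec);
  return 1;
}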

View File

@ -24,7 +24,7 @@
#include "../webp/format_constants.h"
#define DMUX_MAJ_VERSION 0
#define DMUX_MIN_VERSION 3
#define DMUX_MIN_VERSION 2
#define DMUX_REV_VERSION 2
typedef struct {
@ -47,7 +47,8 @@ typedef struct Frame {
int duration_;
WebPMuxAnimDispose dispose_method_;
WebPMuxAnimBlend blend_method_;
int frame_num_;
int is_fragment_; // this is a frame fragment (and not a full frame).
int frame_num_; // the referent frame number for use in assembling fragments.
int complete_; // img_components_ contains a full image.
ChunkData img_components_[2]; // 0=VP8{,L} 1=ALPH
struct Frame* next_;
@ -192,19 +193,6 @@ static int AddFrame(WebPDemuxer* const dmux, Frame* const frame) {
return 1;
}
static void SetFrameInfo(size_t start_offset, size_t size,
int frame_num, int complete,
const WebPBitstreamFeatures* const features,
Frame* const frame) {
frame->img_components_[0].offset_ = start_offset;
frame->img_components_[0].size_ = size;
frame->width_ = features->width;
frame->height_ = features->height;
frame->has_alpha_ |= features->has_alpha;
frame->frame_num_ = frame_num;
frame->complete_ = complete;
}
// Store image bearing chunks to 'frame'.
static ParseStatus StoreFrame(int frame_num, uint32_t min_size,
MemBuffer* const mem, Frame* const frame) {
@ -260,8 +248,13 @@ static ParseStatus StoreFrame(int frame_num, uint32_t min_size,
return PARSE_ERROR;
}
++image_chunks;
SetFrameInfo(chunk_start_offset, chunk_size, frame_num,
status == PARSE_OK, &features, frame);
frame->img_components_[0].offset_ = chunk_start_offset;
frame->img_components_[0].size_ = chunk_size;
frame->width_ = features.width;
frame->height_ = features.height;
frame->has_alpha_ |= features.has_alpha;
frame->frame_num_ = frame_num;
frame->complete_ = (status == PARSE_OK);
Skip(mem, payload_available);
} else {
goto Done;
@ -344,6 +337,42 @@ static ParseStatus ParseAnimationFrame(
return status;
}
#ifdef WEBP_EXPERIMENTAL_FEATURES
// Parse a 'FRGM' chunk and any image bearing chunks that immediately follow.
// 'fragment_chunk_size' is the previously validated, padded chunk size.
static ParseStatus ParseFragment(WebPDemuxer* const dmux,
uint32_t fragment_chunk_size) {
const int frame_num = 1; // All fragments belong to the 1st (and only) frame.
const int is_fragmented = !!(dmux->feature_flags_ & FRAGMENTS_FLAG);
const uint32_t frgm_payload_size = fragment_chunk_size - FRGM_CHUNK_SIZE;
int added_fragment = 0;
MemBuffer* const mem = &dmux->mem_;
Frame* frame;
ParseStatus status =
NewFrame(mem, FRGM_CHUNK_SIZE, fragment_chunk_size, &frame);
if (status != PARSE_OK) return status;
frame->is_fragment_ = 1;
frame->x_offset_ = 2 * ReadLE24s(mem);
frame->y_offset_ = 2 * ReadLE24s(mem);
// Store a fragment only if the 'fragments' flag is set and there is some
// data available.
status = StoreFrame(frame_num, frgm_payload_size, mem, frame);
if (status != PARSE_ERROR && is_fragmented && frame->frame_num_ > 0) {
added_fragment = AddFrame(dmux, frame);
if (!added_fragment) {
status = PARSE_ERROR;
} else {
dmux->num_frames_ = 1;
}
}
if (!added_fragment) WebPSafeFree(frame);
return status;
}
#endif // WEBP_EXPERIMENTAL_FEATURES
// General chunk storage, starting with the header at 'start_offset', allowing
// the user to request the payload via a fourcc string. 'size' includes the
// header and the unpadded payload size.
@ -484,6 +513,12 @@ static ParseStatus ParseVP8XChunks(WebPDemuxer* const dmux) {
status = ParseAnimationFrame(dmux, chunk_size_padded);
break;
}
#ifdef WEBP_EXPERIMENTAL_FEATURES
case MKFOURCC('F', 'R', 'G', 'M'): {
status = ParseFragment(dmux, chunk_size_padded);
break;
}
#endif
case MKFOURCC('I', 'C', 'C', 'P'): {
store_chunk = !!(dmux->feature_flags_ & ICCP_FLAG);
goto Skip;
@ -571,6 +606,8 @@ static int IsValidSimpleFormat(const WebPDemuxer* const dmux) {
// If 'exact' is true, check that the image resolution matches the canvas.
// If 'exact' is false, check that the x/y offsets do not exceed the canvas.
// TODO(jzern): this is insufficient in the fragmented image case if the
// expectation is that the fragments completely cover the canvas.
static int CheckFrameBounds(const Frame* const frame, int exact,
int canvas_width, int canvas_height) {
if (exact) {
@ -590,6 +627,7 @@ static int CheckFrameBounds(const Frame* const frame, int exact,
static int IsValidExtendedFormat(const WebPDemuxer* const dmux) {
const int is_animation = !!(dmux->feature_flags_ & ANIMATION_FLAG);
const int is_fragmented = !!(dmux->feature_flags_ & FRAGMENTS_FLAG);
const Frame* f = dmux->frames_;
if (dmux->state_ == WEBP_DEMUX_PARSING_HEADER) return 1;
@ -597,17 +635,22 @@ static int IsValidExtendedFormat(const WebPDemuxer* const dmux) {
if (dmux->canvas_width_ <= 0 || dmux->canvas_height_ <= 0) return 0;
if (dmux->loop_count_ < 0) return 0;
if (dmux->state_ == WEBP_DEMUX_DONE && dmux->frames_ == NULL) return 0;
if (dmux->feature_flags_ & ~ALL_VALID_FLAGS) return 0; // invalid bitstream
#ifndef WEBP_EXPERIMENTAL_FEATURES
if (is_fragmented) return 0;
#endif
while (f != NULL) {
const int cur_frame_set = f->frame_num_;
int frame_count = 0;
int frame_count = 0, fragment_count = 0;
// Check frame properties.
// Check frame properties and, if the image is composed of fragments, that
// each entry in the frame set is indeed a fragment.
for (; f != NULL && f->frame_num_ == cur_frame_set; f = f->next_) {
const ChunkData* const image = f->img_components_;
const ChunkData* const alpha = f->img_components_ + 1;
if (is_fragmented && !f->is_fragment_) return 0;
if (!is_fragmented && f->is_fragment_) return 0;
if (!is_animation && f->frame_num_ > 1) return 0;
if (f->complete_) {
@ -632,13 +675,16 @@ static int IsValidExtendedFormat(const WebPDemuxer* const dmux) {
}
if (f->width_ > 0 && f->height_ > 0 &&
!CheckFrameBounds(f, !is_animation,
!CheckFrameBounds(f, !(is_animation || is_fragmented),
dmux->canvas_width_, dmux->canvas_height_)) {
return 0;
}
fragment_count += f->is_fragment_;
++frame_count;
}
if (!is_fragmented && frame_count > 1) return 0;
if (fragment_count > 0 && frame_count != fragment_count) return 0;
}
return 1;
}
@ -657,41 +703,6 @@ static void InitDemux(WebPDemuxer* const dmux, const MemBuffer* const mem) {
dmux->mem_ = *mem;
}
static ParseStatus CreateRawImageDemuxer(MemBuffer* const mem,
WebPDemuxer** demuxer) {
WebPBitstreamFeatures features;
const VP8StatusCode status =
WebPGetFeatures(mem->buf_, mem->buf_size_, &features);
*demuxer = NULL;
if (status != VP8_STATUS_OK) {
return (status == VP8_STATUS_NOT_ENOUGH_DATA) ? PARSE_NEED_MORE_DATA
: PARSE_ERROR;
}
{
WebPDemuxer* const dmux = (WebPDemuxer*)WebPSafeCalloc(1ULL, sizeof(*dmux));
Frame* const frame = (Frame*)WebPSafeCalloc(1ULL, sizeof(*frame));
if (dmux == NULL || frame == NULL) goto Error;
InitDemux(dmux, mem);
SetFrameInfo(0, mem->buf_size_, 1 /*frame_num*/, 1 /*complete*/, &features,
frame);
if (!AddFrame(dmux, frame)) goto Error;
dmux->state_ = WEBP_DEMUX_DONE;
dmux->canvas_width_ = frame->width_;
dmux->canvas_height_ = frame->height_;
dmux->feature_flags_ |= frame->has_alpha_ ? ALPHA_FLAG : 0;
dmux->num_frames_ = 1;
assert(IsValidSimpleFormat(dmux));
*demuxer = dmux;
return PARSE_OK;
Error:
WebPSafeFree(dmux);
WebPSafeFree(frame);
return PARSE_ERROR;
}
}
WebPDemuxer* WebPDemuxInternal(const WebPData* data, int allow_partial,
WebPDemuxState* state, int version) {
const ChunkParser* parser;
@ -708,15 +719,6 @@ WebPDemuxer* WebPDemuxInternal(const WebPData* data, int allow_partial,
if (!InitMemBuffer(&mem, data->bytes, data->size)) return NULL;
status = ReadHeader(&mem);
if (status != PARSE_OK) {
// If parsing of the WebP file header fails, attempt to handle a raw
// VP8/VP8L frame. Note 'allow_partial' is ignored in this case.
if (status == PARSE_ERROR) {
status = CreateRawImageDemuxer(&mem, &dmux);
if (status == PARSE_OK) {
if (state != NULL) *state = WEBP_DEMUX_DONE;
return dmux;
}
}
if (state != NULL) {
*state = (status == PARSE_NEED_MORE_DATA) ? WEBP_DEMUX_PARSING_HEADER
: WEBP_DEMUX_PARSE_ERROR;
@ -788,6 +790,8 @@ uint32_t WebPDemuxGetI(const WebPDemuxer* dmux, WebPFormatFeature feature) {
// -----------------------------------------------------------------------------
// Frame iteration
// Find the first 'frame_num' frame. There may be multiple such frames in a
// fragmented frame.
static const Frame* GetFrame(const WebPDemuxer* const dmux, int frame_num) {
const Frame* f;
for (f = dmux->frames_; f != NULL; f = f->next_) {
@ -796,6 +800,21 @@ static const Frame* GetFrame(const WebPDemuxer* const dmux, int frame_num) {
return f;
}
// Returns fragment 'fragment_num' and the total count.
static const Frame* GetFragment(
const Frame* const frame_set, int fragment_num, int* const count) {
const int this_frame = frame_set->frame_num_;
const Frame* f = frame_set;
const Frame* fragment = NULL;
int total;
for (total = 0; f != NULL && f->frame_num_ == this_frame; f = f->next_) {
if (++total == fragment_num) fragment = f;
}
*count = total;
return fragment;
}
static const uint8_t* GetFramePayload(const uint8_t* const mem_buf,
const Frame* const frame,
size_t* const data_size) {
@ -822,27 +841,34 @@ static const uint8_t* GetFramePayload(const uint8_t* const mem_buf,
// Create a whole 'frame' from VP8 (+ alpha) or lossless.
static int SynthesizeFrame(const WebPDemuxer* const dmux,
const Frame* const frame,
WebPIterator* const iter) {
const Frame* const first_frame,
int fragment_num, WebPIterator* const iter) {
const uint8_t* const mem_buf = dmux->mem_.buf_;
int num_fragments;
size_t payload_size = 0;
const uint8_t* const payload = GetFramePayload(mem_buf, frame, &payload_size);
const Frame* const fragment =
GetFragment(first_frame, fragment_num, &num_fragments);
const uint8_t* const payload =
GetFramePayload(mem_buf, fragment, &payload_size);
if (payload == NULL) return 0;
assert(frame != NULL);
assert(first_frame != NULL);
iter->frame_num = frame->frame_num_;
iter->frame_num = first_frame->frame_num_;
iter->num_frames = dmux->num_frames_;
iter->x_offset = frame->x_offset_;
iter->y_offset = frame->y_offset_;
iter->width = frame->width_;
iter->height = frame->height_;
iter->has_alpha = frame->has_alpha_;
iter->duration = frame->duration_;
iter->dispose_method = frame->dispose_method_;
iter->blend_method = frame->blend_method_;
iter->complete = frame->complete_;
iter->fragment_num = fragment_num;
iter->num_fragments = num_fragments;
iter->x_offset = fragment->x_offset_;
iter->y_offset = fragment->y_offset_;
iter->width = fragment->width_;
iter->height = fragment->height_;
iter->has_alpha = fragment->has_alpha_;
iter->duration = fragment->duration_;
iter->dispose_method = fragment->dispose_method_;
iter->blend_method = fragment->blend_method_;
iter->complete = fragment->complete_;
iter->fragment.bytes = payload;
iter->fragment.size = payload_size;
// TODO(jzern): adjust offsets for 'FRGM's embedded in 'ANMF's
return 1;
}
@ -856,7 +882,7 @@ static int SetFrame(int frame_num, WebPIterator* const iter) {
frame = GetFrame(dmux, frame_num);
if (frame == NULL) return 0;
return SynthesizeFrame(dmux, frame, iter);
return SynthesizeFrame(dmux, frame, 1, iter);
}
int WebPDemuxGetFrame(const WebPDemuxer* dmux, int frame, WebPIterator* iter) {
@ -878,6 +904,17 @@ int WebPDemuxPrevFrame(WebPIterator* iter) {
return SetFrame(iter->frame_num - 1, iter);
}
int WebPDemuxSelectFragment(WebPIterator* iter, int fragment_num) {
if (iter != NULL && iter->private_ != NULL && fragment_num > 0) {
const WebPDemuxer* const dmux = (WebPDemuxer*)iter->private_;
const Frame* const frame = GetFrame(dmux, iter->frame_num);
if (frame == NULL) return 0;
return SynthesizeFrame(dmux, frame, fragment_num, iter);
}
return 0;
}
void WebPDemuxReleaseIterator(WebPIterator* iter) {
(void)iter;
}
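As a rough sketch of how the demux side above is driven (assuming the public API in webp/demux.h; WebPDemuxSelectFragment and the fragment_num/num_fragments iterator fields are only meaningful in builds with WEBP_EXPERIMENTAL_FEATURES, per the hunks above):

#include "webp/demux.h"

// Visit every frame of 'webp_data' and, for fragmented images, every fragment.
static void VisitFrames(const WebPData* webp_data) {
  WebPDemuxer* const demux = WebPDemux(webp_data);
  WebPIterator iter;
  if (demux == NULL) return;
  if (WebPDemuxGetFrame(demux, 1, &iter)) {
    do {
      // iter.fragment holds the compressed payload of the current frame
      // (fragment #1 when FRGM chunks are present).
      int frag;
      for (frag = 2; frag <= iter.num_fragments; ++frag) {
        // Experimental: switch 'iter' to the payload of fragment 'frag'.
        if (!WebPDemuxSelectFragment(&iter, frag)) break;
      }
    } while (WebPDemuxNextFrame(&iter));
    WebPDemuxReleaseIterator(&iter);
  }
  WebPDemuxDelete(demux);
}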

View File

@ -1,41 +0,0 @@
#define APSTUDIO_READONLY_SYMBOLS
#include "winres.h"
#undef APSTUDIO_READONLY_SYMBOLS
#if !defined(AFX_RESOURCE_DLL) || defined(AFX_TARG_ENU)
LANGUAGE LANG_ENGLISH, SUBLANG_ENGLISH_US
VS_VERSION_INFO VERSIONINFO
FILEVERSION 0,3,0,2
PRODUCTVERSION 0,3,0,2
FILEFLAGSMASK 0x3fL
#ifdef _DEBUG
FILEFLAGS 0x1L
#else
FILEFLAGS 0x0L
#endif
FILEOS 0x40004L
FILETYPE 0x2L
FILESUBTYPE 0x0L
BEGIN
BLOCK "StringFileInfo"
BEGIN
BLOCK "040904b0"
BEGIN
VALUE "CompanyName", "Google, Inc."
VALUE "FileDescription", "libwebpdemux DLL"
VALUE "FileVersion", "0.3.2"
VALUE "InternalName", "libwebpdemux.dll"
VALUE "LegalCopyright", "Copyright (C) 2017"
VALUE "OriginalFilename", "libwebpdemux.dll"
VALUE "ProductName", "WebP Image Demuxer"
VALUE "ProductVersion", "0.3.2"
END
END
BLOCK "VarFileInfo"
BEGIN
VALUE "Translation", 0x409, 1200
END
END
#endif // English (United States) resources

View File

@ -1,8 +1,5 @@
noinst_LTLIBRARIES = libwebpdsp.la libwebpdsp_avx2.la
noinst_LTLIBRARIES += libwebpdsp_sse2.la libwebpdspdecode_sse2.la
noinst_LTLIBRARIES += libwebpdsp_sse41.la libwebpdspdecode_sse41.la
noinst_LTLIBRARIES += libwebpdsp_neon.la libwebpdspdecode_neon.la
noinst_LTLIBRARIES += libwebpdsp_msa.la libwebpdspdecode_msa.la
if BUILD_LIBWEBPDECODER
noinst_LTLIBRARIES += libwebpdspdecode.la
@ -13,141 +10,63 @@ commondir = $(includedir)/webp
COMMON_SOURCES =
COMMON_SOURCES += alpha_processing.c
COMMON_SOURCES += alpha_processing_mips_dsp_r2.c
COMMON_SOURCES += common_sse2.h
COMMON_SOURCES += cpu.c
COMMON_SOURCES += dec.c
COMMON_SOURCES += dec_clip_tables.c
COMMON_SOURCES += dec_mips32.c
COMMON_SOURCES += dec_mips_dsp_r2.c
COMMON_SOURCES += dec_neon.c
COMMON_SOURCES += dsp.h
COMMON_SOURCES += filters.c
COMMON_SOURCES += filters_mips_dsp_r2.c
COMMON_SOURCES += lossless.c
COMMON_SOURCES += lossless.h
COMMON_SOURCES += lossless_common.h
COMMON_SOURCES += lossless_mips_dsp_r2.c
COMMON_SOURCES += mips_macro.h
COMMON_SOURCES += rescaler.c
COMMON_SOURCES += rescaler_mips32.c
COMMON_SOURCES += rescaler_mips_dsp_r2.c
COMMON_SOURCES += lossless_mips32.c
COMMON_SOURCES += lossless_neon.c
COMMON_SOURCES += neon.h
COMMON_SOURCES += upsampling.c
COMMON_SOURCES += upsampling_mips_dsp_r2.c
COMMON_SOURCES += upsampling_neon.c
COMMON_SOURCES += yuv.c
COMMON_SOURCES += yuv.h
COMMON_SOURCES += yuv_mips32.c
COMMON_SOURCES += yuv_mips_dsp_r2.c
ENC_SOURCES =
ENC_SOURCES += argb.c
ENC_SOURCES += argb_mips_dsp_r2.c
ENC_SOURCES += cost.c
ENC_SOURCES += cost_mips32.c
ENC_SOURCES += cost_mips_dsp_r2.c
ENC_SOURCES += enc.c
ENC_SOURCES += enc_mips32.c
ENC_SOURCES += enc_mips_dsp_r2.c
ENC_SOURCES += lossless_enc.c
ENC_SOURCES += lossless_enc_mips32.c
ENC_SOURCES += lossless_enc_mips_dsp_r2.c
ENC_SOURCES += enc_neon.c
libwebpdsp_avx2_la_SOURCES =
libwebpdsp_avx2_la_SOURCES += enc_avx2.c
libwebpdsp_avx2_la_CPPFLAGS = $(libwebpdsp_la_CPPFLAGS)
libwebpdsp_avx2_la_CFLAGS = $(AM_CFLAGS) $(AVX2_FLAGS)
libwebpdspdecode_sse41_la_SOURCES =
libwebpdspdecode_sse41_la_SOURCES += alpha_processing_sse41.c
libwebpdspdecode_sse41_la_SOURCES += dec_sse41.c
libwebpdspdecode_sse41_la_CPPFLAGS = $(libwebpdsp_la_CPPFLAGS)
libwebpdspdecode_sse41_la_CFLAGS = $(AM_CFLAGS) $(SSE41_FLAGS)
libwebpdspdecode_sse2_la_SOURCES =
libwebpdspdecode_sse2_la_SOURCES += alpha_processing_sse2.c
libwebpdspdecode_sse2_la_SOURCES += dec_sse2.c
libwebpdspdecode_sse2_la_SOURCES += filters_sse2.c
libwebpdspdecode_sse2_la_SOURCES += lossless_sse2.c
libwebpdspdecode_sse2_la_SOURCES += rescaler_sse2.c
libwebpdspdecode_sse2_la_SOURCES += upsampling_sse2.c
libwebpdspdecode_sse2_la_SOURCES += yuv_sse2.c
libwebpdspdecode_sse2_la_SOURCES += yuv_tables_sse2.h
libwebpdspdecode_sse2_la_CPPFLAGS = $(libwebpdsp_sse2_la_CPPFLAGS)
libwebpdspdecode_sse2_la_CFLAGS = $(libwebpdsp_sse2_la_CFLAGS)
libwebpdspdecode_neon_la_SOURCES =
libwebpdspdecode_neon_la_SOURCES += alpha_processing_neon.c
libwebpdspdecode_neon_la_SOURCES += dec_neon.c
libwebpdspdecode_neon_la_SOURCES += filters_neon.c
libwebpdspdecode_neon_la_SOURCES += lossless_neon.c
libwebpdspdecode_neon_la_SOURCES += neon.h
libwebpdspdecode_neon_la_SOURCES += rescaler_neon.c
libwebpdspdecode_neon_la_SOURCES += upsampling_neon.c
libwebpdspdecode_neon_la_CPPFLAGS = $(libwebpdsp_neon_la_CPPFLAGS)
libwebpdspdecode_neon_la_CFLAGS = $(libwebpdsp_neon_la_CFLAGS)
libwebpdspdecode_msa_la_SOURCES =
libwebpdspdecode_msa_la_SOURCES += dec_msa.c
libwebpdspdecode_msa_la_SOURCES += filters_msa.c
libwebpdspdecode_msa_la_SOURCES += lossless_msa.c
libwebpdspdecode_msa_la_SOURCES += msa_macro.h
libwebpdspdecode_msa_la_SOURCES += rescaler_msa.c
libwebpdspdecode_msa_la_SOURCES += upsampling_msa.c
libwebpdspdecode_msa_la_CPPFLAGS = $(libwebpdsp_msa_la_CPPFLAGS)
libwebpdspdecode_msa_la_CFLAGS = $(libwebpdsp_msa_la_CFLAGS)
libwebpdsp_sse2_la_SOURCES =
libwebpdsp_sse2_la_SOURCES += argb_sse2.c
libwebpdsp_sse2_la_SOURCES += cost_sse2.c
libwebpdsp_sse2_la_SOURCES += enc_sse2.c
libwebpdsp_sse2_la_SOURCES += lossless_enc_sse2.c
libwebpdsp_sse2_la_CPPFLAGS = $(libwebpdsp_la_CPPFLAGS)
libwebpdsp_sse2_la_CFLAGS = $(AM_CFLAGS) $(SSE2_FLAGS)
libwebpdsp_sse2_la_LIBADD = libwebpdspdecode_sse2.la
libwebpdsp_sse41_la_SOURCES =
libwebpdsp_sse41_la_SOURCES += enc_sse41.c
libwebpdsp_sse41_la_SOURCES += lossless_enc_sse41.c
libwebpdsp_sse41_la_CPPFLAGS = $(libwebpdsp_la_CPPFLAGS)
libwebpdsp_sse41_la_CFLAGS = $(AM_CFLAGS) $(SSE41_FLAGS)
libwebpdsp_sse41_la_LIBADD = libwebpdspdecode_sse41.la
libwebpdsp_neon_la_SOURCES =
libwebpdsp_neon_la_SOURCES += enc_neon.c
libwebpdsp_neon_la_SOURCES += lossless_enc_neon.c
libwebpdsp_neon_la_CPPFLAGS = $(libwebpdsp_la_CPPFLAGS)
libwebpdsp_neon_la_CFLAGS = $(AM_CFLAGS) $(NEON_FLAGS)
libwebpdsp_neon_la_LIBADD = libwebpdspdecode_neon.la
libwebpdsp_msa_la_SOURCES =
libwebpdsp_msa_la_SOURCES += enc_msa.c
libwebpdsp_msa_la_SOURCES += lossless_enc_msa.c
libwebpdsp_msa_la_CPPFLAGS = $(libwebpdsp_la_CPPFLAGS)
libwebpdsp_msa_la_CFLAGS = $(AM_CFLAGS)
libwebpdsp_msa_la_LIBADD = libwebpdspdecode_msa.la
libwebpdsp_la_SOURCES = $(COMMON_SOURCES) $(ENC_SOURCES)
noinst_HEADERS =
noinst_HEADERS += ../dec/vp8_dec.h
noinst_HEADERS += ../dec/decode_vp8.h
noinst_HEADERS += ../webp/decode.h
libwebpdsp_la_CPPFLAGS =
libwebpdsp_la_CPPFLAGS += $(AM_CPPFLAGS)
libwebpdsp_la_CPPFLAGS += $(USE_EXPERIMENTAL_CODE) $(USE_SWAP_16BIT_CSP)
libwebpdsp_la_CPPFLAGS = $(USE_EXPERIMENTAL_CODE) $(USE_SWAP_16BIT_CSP)
libwebpdsp_la_LDFLAGS = -lm
libwebpdsp_la_LIBADD =
libwebpdsp_la_LIBADD += libwebpdsp_avx2.la libwebpdsp_sse2.la
libwebpdsp_la_LIBADD += libwebpdsp_sse41.la
libwebpdsp_la_LIBADD += libwebpdsp_neon.la
libwebpdsp_la_LIBADD += libwebpdsp_msa.la
libwebpdsp_la_LIBADD = libwebpdsp_avx2.la libwebpdsp_sse2.la
if BUILD_LIBWEBPDECODER
libwebpdspdecode_la_SOURCES = $(COMMON_SOURCES)
libwebpdspdecode_la_CPPFLAGS = $(libwebpdsp_la_CPPFLAGS)
libwebpdspdecode_la_LDFLAGS = $(libwebpdsp_la_LDFLAGS)
libwebpdspdecode_la_LIBADD =
libwebpdspdecode_la_LIBADD += libwebpdspdecode_sse2.la
libwebpdspdecode_la_LIBADD += libwebpdspdecode_sse41.la
libwebpdspdecode_la_LIBADD += libwebpdspdecode_neon.la
libwebpdspdecode_la_LIBADD += libwebpdspdecode_msa.la
libwebpdspdecode_la_LIBADD = libwebpdspdecode_sse2.la
endif

View File

@ -134,7 +134,7 @@ static WEBP_INLINE uint32_t GetScale(uint32_t a, int inverse) {
#endif // USE_TABLES_FOR_ALPHA_MULT
void WebPMultARGBRowC(uint32_t* const ptr, int width, int inverse) {
static void MultARGBRow(uint32_t* const ptr, int width, int inverse) {
int x;
for (x = 0; x < width; ++x) {
const uint32_t argb = ptr[x];
@ -154,8 +154,8 @@ void WebPMultARGBRowC(uint32_t* const ptr, int width, int inverse) {
}
}
void WebPMultRowC(uint8_t* const ptr, const uint8_t* const alpha,
int width, int inverse) {
static void MultRow(uint8_t* const ptr, const uint8_t* const alpha,
int width, int inverse) {
int x;
for (x = 0; x < width; ++x) {
const uint32_t a = alpha[x];
@ -284,41 +284,9 @@ static void ApplyAlphaMultiply_16b(uint8_t* rgba4444,
#endif
}
static int DispatchAlpha_C(const uint8_t* alpha, int alpha_stride,
int width, int height,
uint8_t* dst, int dst_stride) {
uint32_t alpha_mask = 0xff;
int i, j;
for (j = 0; j < height; ++j) {
for (i = 0; i < width; ++i) {
const uint32_t alpha_value = alpha[i];
dst[4 * i] = alpha_value;
alpha_mask &= alpha_value;
}
alpha += alpha_stride;
dst += dst_stride;
}
return (alpha_mask != 0xff);
}
static void DispatchAlphaToGreen_C(const uint8_t* alpha, int alpha_stride,
int width, int height,
uint32_t* dst, int dst_stride) {
int i, j;
for (j = 0; j < height; ++j) {
for (i = 0; i < width; ++i) {
dst[i] = alpha[i] << 8; // leave A/R/B channels zero'd.
}
alpha += alpha_stride;
dst += dst_stride;
}
}
static int ExtractAlpha_C(const uint8_t* argb, int argb_stride,
int width, int height,
uint8_t* alpha, int alpha_stride) {
static int ExtractAlpha(const uint8_t* argb, int argb_stride,
int width, int height,
uint8_t* alpha, int alpha_stride) {
uint8_t alpha_mask = 0xff;
int i, j;
@ -334,62 +302,32 @@ static int ExtractAlpha_C(const uint8_t* argb, int argb_stride,
return (alpha_mask == 0xff);
}
static void ExtractGreen_C(const uint32_t* argb, uint8_t* alpha, int size) {
int i;
for (i = 0; i < size; ++i) alpha[i] = argb[i] >> 8;
}
void (*WebPApplyAlphaMultiply)(uint8_t*, int, int, int, int);
void (*WebPApplyAlphaMultiply4444)(uint8_t*, int, int, int);
int (*WebPDispatchAlpha)(const uint8_t*, int, int, int, uint8_t*, int);
void (*WebPDispatchAlphaToGreen)(const uint8_t*, int, int, int, uint32_t*, int);
int (*WebPExtractAlpha)(const uint8_t*, int, int, int, uint8_t*, int);
void (*WebPExtractGreen)(const uint32_t* argb, uint8_t* alpha, int size);
//------------------------------------------------------------------------------
// Init function
extern void WebPInitAlphaProcessingMIPSdspR2(void);
extern void WebPInitAlphaProcessingSSE2(void);
extern void WebPInitAlphaProcessingSSE41(void);
extern void WebPInitAlphaProcessingNEON(void);
static volatile VP8CPUInfo alpha_processing_last_cpuinfo_used =
(VP8CPUInfo)&alpha_processing_last_cpuinfo_used;
WEBP_TSAN_IGNORE_FUNCTION void WebPInitAlphaProcessing(void) {
void WebPInitAlphaProcessing(void) {
if (alpha_processing_last_cpuinfo_used == VP8GetCPUInfo) return;
WebPMultARGBRow = WebPMultARGBRowC;
WebPMultRow = WebPMultRowC;
WebPMultARGBRow = MultARGBRow;
WebPMultRow = MultRow;
WebPApplyAlphaMultiply = ApplyAlphaMultiply;
WebPApplyAlphaMultiply4444 = ApplyAlphaMultiply_16b;
WebPDispatchAlpha = DispatchAlpha_C;
WebPDispatchAlphaToGreen = DispatchAlphaToGreen_C;
WebPExtractAlpha = ExtractAlpha_C;
WebPExtractGreen = ExtractGreen_C;
WebPExtractAlpha = ExtractAlpha;
// If defined, use CPUInfo() to overwrite some pointers with faster versions.
if (VP8GetCPUInfo != NULL) {
#if defined(WEBP_USE_SSE2)
if (VP8GetCPUInfo(kSSE2)) {
WebPInitAlphaProcessingSSE2();
#if defined(WEBP_USE_SSE41)
if (VP8GetCPUInfo(kSSE4_1)) {
WebPInitAlphaProcessingSSE41();
}
#endif
}
#endif
#if defined(WEBP_USE_NEON)
if (VP8GetCPUInfo(kNEON)) {
WebPInitAlphaProcessingNEON();
}
#endif
#if defined(WEBP_USE_MIPS_DSP_R2)
if (VP8GetCPUInfo(kMIPSdspR2)) {
WebPInitAlphaProcessingMIPSdspR2();
}
#endif
}
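For context, the hunk above shows libwebp's run-time dispatch pattern: WebPInitAlphaProcessing() installs the plain C implementations into exported function pointers and then lets VP8GetCPUInfo() swap in SIMD variants. A hypothetical internal caller, sketched under that assumption (the extern declarations mirror the pointers visible in the hunk; GetAlphaPlane itself is made up for illustration):

#include <stdint.h>

// Normally declared in the library's dsp header; repeated here so the sketch
// stands alone.
extern void WebPInitAlphaProcessing(void);
extern int (*WebPExtractAlpha)(const uint8_t*, int, int, int, uint8_t*, int);

// Hypothetical helper: extract one alpha byte per pixel into 'alpha'.
static int GetAlphaPlane(const uint8_t* argb, int argb_stride,
                         int width, int height,
                         uint8_t* alpha, int alpha_stride) {
  WebPInitAlphaProcessing();  // idempotent: re-entry returns early
  // Per the ExtractAlpha() return above, the result is true when the image is
  // fully opaque (every alpha value was 0xff).
  return WebPExtractAlpha(argb, argb_stride, width, height,
                          alpha, alpha_stride);
}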

Some files were not shown because too many files have changed in this diff.