Compare commits


449 Commits

Author SHA1 Message Date
68e7901da5 update ChangeLog
Change-Id: If51472e72adaec0a198a8b09becb8be192153ca8
2013-12-20 00:49:40 -08:00
256e4333e1 update NEWS description with new general features
Change-Id: I22be12d843e84f80965dc5f2cb16ba2475ce16ad
2013-12-20 00:36:47 -08:00
296253407c Merge "gif2webp: don't use C99 %zu" into 0.4.0 2013-12-20 00:06:33 -08:00
3b9f9dd07b gif2webp: don't use C99 %zu
this would require a PRIuS or similar macro for proper platform
compatibility (Visual Studio, for instance, would need variants of %lu)

Change-Id: I2b9fcb1639db024775fb47dbcf79a2240f3d98f2
2013-12-19 18:52:48 -08:00
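For reference, a minimal sketch of the kind of PRIuS-style wrapper the message
alludes to; the macro definition below is illustrative, not code from the tree:

    #include <stdio.h>
    #include <stddef.h>

    /* Illustrative only: a PRIuS-like format macro for printing size_t.
     * C99 toolchains can use "zu" directly; older Visual Studio versions
     * need an "Iu"/"lu"-style variant instead. */
    #if defined(_MSC_VER)
    #define PRIuS "Iu"
    #else
    #define PRIuS "zu"
    #endif

    int main(void) {
      const size_t frame_count = 42;
      printf("number of frames: %" PRIuS "\n", frame_count);
      return 0;
    }
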
b5b2e3c7f5 cwebp: fix metadata output w/lossy+alpha
Change-Id: Ic96da6523fc73a7b67c68ef8a4c9d12081f8a535
2013-12-19 10:17:08 -08:00
ad26df1a05 makefile.unix: clean up libgif2webp_util.a
Change-Id: Iee8650ce91f65f2db568926773d44441633459f3
2013-12-18 23:26:18 -08:00
c3b4557064 update Changelog
Change-Id: I60525ce8dedf0843578c1881f6e140098e01211c
2013-12-18 19:36:44 -08:00
ca84112120 Merge "bump version to 0.4.0" into 0.4.0 2013-12-18 19:33:24 -08:00
8c524db84c bump version to 0.4.0
libwebp{,decoder} - 0.4.0
libwebp libtool - 5.0.0
libwebpdecoder libtool - 1.0.0

mux/demux - 0.2.0
libtool - 1.0.0

Change-Id: Idbd067f95a6af2f0057d6a63ab43176fcdbb767d
2013-12-18 19:20:00 -08:00
eec2398cec update AUTHORS & .mailmap
Change-Id: I5419731cd7ac6a06e42ef12433286a97e8e13bb4
2013-12-18 19:09:11 -08:00
b9bbf6a1a0 update NEWS for 0.4.0
Change-Id: I90327d5169b1dba8245c427a03064d562aa26cc9
2013-12-18 19:05:46 -08:00
c72e08119a Merge "dec/webp.c: don't wait for data before reporting w/h" 2013-12-18 18:47:43 -08:00
5ad653145a dec/frame.c: fix formatting
since 26d842e NEON speed up

+ drop a duplicate 'and' from a comment

Change-Id: I710f46f83b80161064910c7efc16788b88c089fe
2013-12-18 17:16:31 -08:00
f7fc4bc89b dec/webp.c: don't wait for data before reporting w/h
this partially reverts
f626fe2 Detect canvas and image size mismatch in decoder.

the original change would cause calls to e.g., WebPGetInfo to fail until
a portion of the image chunk was available. With lossy+alpha this meant
waiting for the entire ALPH chunk to be received.
this change restores the original behavior -- reporting the values from
VP8X if available -- while retaining some of the added canvas/image size
checks if the image data is available

Change-Id: I6295b00a2e2d0d4d8847371756af347e4a80bc0e
2013-12-18 17:09:04 -08:00
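The behavior restored here matters to callers of WebPGetInfo(), which can
report the canvas width/height from the VP8X chunk before any image data has
arrived; a small usage sketch (error handling trimmed):

    #include <stddef.h>
    #include <stdio.h>
    #include "webp/decode.h"

    /* Sketch: query dimensions from a (possibly still partial) WebP buffer.
     * WebPGetInfo() returns 0 when the available header data is not yet
     * sufficient to determine the dimensions. */
    static void PrintDimensions(const uint8_t* const data, size_t data_size) {
      int width = 0, height = 0;
      if (WebPGetInfo(data, data_size, &width, &height)) {
        printf("canvas: %d x %d\n", width, height);
      } else {
        printf("not enough data to report width/height yet\n");
      }
    }
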
66a32af5e1 Merge "NEON speed up" 2013-12-18 14:17:19 -08:00
26d842eb8f NEON speed up
add TransformDC special case, and make the switch function inlined.
Recovers a few of the CPU lost during the addition of TransformAC3
(only on ARM)

Change-Id: I21c1f0c6a9cb9d1dfc1e307b4f473a2791273bd6
2013-12-18 22:32:58 +01:00
f307f98b21 Merge "webpmux: let -- stop parameter parsing" 2013-12-18 11:05:43 -08:00
fe051da772 Merge "README: add a section on gif2webp" 2013-12-18 11:02:55 -08:00
6fd2bd626f Merge "manpage pedantry" 2013-12-18 10:57:27 -08:00
4af19007a4 README: add a section on gif2webp
Change-Id: Ie701da16223df7482f7b017150242ba2d381d0b5
2013-12-18 10:49:49 -08:00
6f36ade926 manpage pedantry
from man-pages (7):
provides a comma-separated list of related man pages, ordered by section
number and then alphabetically by name, possibly followed by other
related pages or documents. Do not terminate this with a period.

Change-Id: I6763b3b3a89f3c2e9ed5c1a4e9d05fb9ce85dd40
2013-12-17 23:30:34 -08:00
f9016cb964 README: update dwebp options
Change-Id: I25300e374215578d091557380ce3dc599d58bd49
2013-12-17 21:51:41 -08:00
b4fa0a47ea webpmux: let -- stop parameter parsing
this enables webpmux to accept input files starting with '-' when using
-get/-set/-strip; -info & -frame expect an input file as one of their
parameters so no changes are necessary there.

Change-Id: I154eb6dc388258a7fb743ec76ba869bf9589be1b
2013-12-17 20:50:46 -08:00
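A sketch of the conventional "--" handling described above, with illustrative
option names; everything after a bare "--" is taken as a file name even if it
starts with '-':

    #include <string.h>

    int main(int argc, char* argv[]) {
      int parse_options = 1;   /* becomes 0 once "--" is seen */
      int i;
      for (i = 1; i < argc; ++i) {
        if (parse_options && !strcmp(argv[i], "--")) {
          parse_options = 0;                 /* stop option parsing here */
        } else if (parse_options && argv[i][0] == '-') {
          /* handle options such as -get / -set / -strip ... */
        } else {
          /* treat argv[i] as an input file, e.g. "-odd-name.webp" */
        }
      }
      return 0;
    }
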
a9a20acfa6 gif2webp: Add a multi-threaded encode option
Change-Id: If16a5f434e0ab2965a7ac9392633ecaf8c4aff46
2013-12-17 17:01:59 -08:00
495bef413d fix bug in TrellisQuantize
the *quantized* level should be clipped to 2047, not the
original coeff.
(a similar problem was fixed in the regular quantize function
quite some time ago)

Change-Id: I2fd2f8d94561ff0204e60535321ab41a565e8f85
2013-12-17 11:08:01 -08:00
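Schematically, the fix ensures the clipping happens on the quantized level
rather than on the input coefficient; the helper below is illustrative, not
the tree's TrellisQuantize():

    #include <stdint.h>

    #define MAX_LEVEL 2047   /* largest coefficient level the bitstream can code */

    /* Illustrative quantization step: the *quantized* level is what must be
     * clipped to MAX_LEVEL, not the original coefficient. */
    static int QuantizeCoeff(int coeff, uint32_t iq, uint32_t bias, int shift) {
      const int sign = (coeff < 0);
      const uint32_t abs_coeff = (uint32_t)(sign ? -coeff : coeff);
      int level = (int)((abs_coeff * iq + bias) >> shift);
      if (level > MAX_LEVEL) level = MAX_LEVEL;   /* clip the level, post-quantization */
      return sign ? -level : level;
    }
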
605a712701 simplify __cplusplus ifdef
drop c_plusplus which is from a quite ancient pre-standard compiler

Change-Id: I9e357b3292a6b52b14c2641ba11f4f872c04b7fb
2013-12-16 20:16:02 -08:00
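In effect the public headers keep only the standard guard; a header skeleton
showing the simplified form (the header name is a placeholder):

    /* sketch_example.h -- placeholder header name */
    #ifndef SKETCH_EXAMPLE_H_
    #define SKETCH_EXAMPLE_H_

    #ifdef __cplusplus       /* the legacy 'c_plusplus' spelling is gone */
    extern "C" {
    #endif

    int ExampleFunction(int value);   /* prototypes keep C linkage from C++ */

    #ifdef __cplusplus
    }    /* extern "C" */
    #endif

    #endif  /* SKETCH_EXAMPLE_H_ */
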
33109f990a Merge "drop: ifdef __cplusplus checks from C files" 2013-12-16 20:13:09 -08:00
7f9de0b948 Merge changes I994a5587,I8467bb71,I13b50688,I1e2c9c7b
* changes:
  gif2webp: let -- stop parameter parsing
  vwebp: let -- stop parameter parsing
  cwebp: let -- stop parameter parsing
  dwebp: let -- stop parameter parsing
2013-12-16 16:53:27 -08:00
5459030be0 gif2webp: let -- stop parameter parsing
this enables gif2webp to accept input files starting with '-'

Change-Id: I994a5587e16e3af83ca45bfda8caf2dc39068b6c
2013-12-16 15:15:20 -08:00
a4b0aa06ee vwebp: let -- stop parameter parsing
this enables vwebp to accept input files starting with '-'

Change-Id: I8467bb719ba745b2aa14a5c5ce2ff392cd9601d5
2013-12-16 15:14:00 -08:00
98af68fe26 cwebp: let -- stop parameter parsing
this enables cwebp to accept input files starting with '-'

Change-Id: I13b506886c3df8a4e8fb350f3d365eaf7e072a4a
2013-12-16 15:13:54 -08:00
a33831e21a dwebp: let -- stop parameter parsing
this enables dwebp to accept input files starting with '-'

Change-Id: I1e2c9c7bc2963d77be039c99e72f744560f0b0a2
2013-12-16 15:13:32 -08:00
363012497b add some checks on error paths
malloc failure could lead to free'ing non-initialized pointers

Change-Id: I8156aac1fce9a47825cfd0d88eb2bd8c38a630d5
2013-12-16 13:33:45 -08:00
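The class of bug addressed here is freeing pointers that were never
initialized when an earlier allocation fails; a minimal sketch of the
defensive pattern (illustrative names):

    #include <stdint.h>
    #include <stdlib.h>

    /* Initialize every pointer before the first allocation so the error path
     * can free them unconditionally (free(NULL) is a no-op). */
    static int UseBuffers(size_t size) {
      int ok = 0;
      uint8_t* a = NULL;
      uint8_t* b = NULL;
      a = (uint8_t*)malloc(size);
      if (a == NULL) goto End;
      b = (uint8_t*)malloc(size);
      if (b == NULL) goto End;
      /* ... work with a and b ... */
      ok = 1;
     End:
      free(a);
      free(b);
      return ok;
    }
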
ce4c713904 Merge "autoconf: add --disable-wic" 2013-12-16 10:45:11 -08:00
5227d99146 drop: ifdef __cplusplus checks from C files
the prototypes are already marked in the headers

Change-Id: I172fe742200c939ca32a70a2299809b8baf9b094
2013-12-13 11:42:13 -08:00
f64535591c dwebp.1: fix typo
ICM4 -> IMC4

Change-Id: I065a1ac0dfd0b60efa782bc1a4df1adcb28b90e4
2013-12-12 20:38:23 -08:00
f91034f241 Merge "cwebp: print metadata stats when no output file is given" 2013-12-12 19:28:35 -08:00
d49345533f gif2webp: Backward compatibility for giflib version <= 4.1.3
Fixes https://code.google.com/p/webp/issues/detail?id=172

We use 'int' instead of 'GifWord', which was introduced in version
4.1.4.

Tested OK on 4.1.3
(http://sourceforge.net/projects/giflib/files/giflib-4.x/giflib%204.1.3/)
and 5.0.4

Change-Id: I77628b0f9175a91eb3a22e64fc55dd00633add1f
2013-12-11 08:46:38 -08:00
4c617d3286 gif2webp: Disable output of ICC profile by default
This is to discourage generation of animated WebP images with ICC profile,
as the real-world use-case for such images is rare at best.

Output of ICC/XMP metadata is now controlled by the '-metadata' option.

Change-Id: I8e3e29878c32bf46cbc661f50661bac602603c43
2013-12-10 14:53:06 -08:00
73b731fb42 introduce a special quantization function for WHT
WHT is somewhat a special case: no sharpen[] bias, etc.
Will be useful in a later CL when precision of input is changed.

Change-Id: I851b06deb94abdfc1ef00acafb8aa731801b4299
2013-12-10 14:21:47 +01:00
41c0cc4b9a Make Forward WHT transform use 32bit fixed-point calculation
This is in preparation for a future change where input will
be 16bit instead of 12bit

No speed diff observed.

Note that the NEON implementation was using 32bit calc already.

Change-Id: If06935db5c56a77fc9cefcb2dec617483f5f62b4
2013-12-10 06:10:52 +01:00
a3359f5d2c Only compute quantization params once
(all quantization params #1..#15 are the same)

Change-Id: If04058bd89fe2677b5b118ee4e1bcce88f0e4bf5
2013-12-10 05:36:23 +01:00
7049043775 cwebp: print metadata stats when no output file is given
this is more consistent with the encode stats

Change-Id: I5793062b3b29f5cb891461d6faaa53b43c0a4482
2013-12-09 20:15:54 -08:00
d513bb62bc * fix off-by-one zthresh calculation
* remove the sharpening for non luma-AC coeffs
* adjust the bias a little bit to compensate for this

Using the multiply-by-reciprocal doesn't always give the same result
as the exact divide, given the QFIX fixed-point precision we use.
-> removed a few now-unneeded SSE2 instructions (and checked for
bit-exactness using -noasm)

Change-Id: Ib68057cbdd69c4e589af56a01a8e7085db762c24
2013-12-09 13:56:04 +01:00
ad9dec0c24 Merge "cosmetics: dwebp: fix local function name format" 2013-12-07 13:38:54 -08:00
f737f0376a Merge "dwebp: remove a dead store" 2013-12-07 13:38:42 -08:00
3c3a70da6d Merge "makefile.unix: install binaries in $(DESTDIR)/bin/" 2013-12-07 13:38:28 -08:00
150b655f74 Merge "Android.mk: add some release compile flags" 2013-12-07 13:38:15 -08:00
dbebd33be7 cosmetics: dwebp: fix local function name format
error_function -> PNGErrorFunction

Change-Id: Ic6c0e676cc47bca2c1a1e0d8e95639e780982b1b
2013-12-07 13:26:05 -08:00
2774995185 dwebp: remove a dead store
Change-Id: I48205472e99e997702d25c39f1b4d7a4699c15d6
2013-12-07 13:22:22 -08:00
a01e04fee0 autoconf: add --disable-wic
this allows windows builds to prefer libjpeg, libpng etc. for import and
export

Change-Id: Ibe1648b68a3f7b5016044e82530843dea46a94a4
2013-12-07 11:56:34 -08:00
5009b22746 makefile.unix: install binaries in $(DESTDIR)/bin/
Change-Id: I6c8f87aaa185fc043b80800430d99deaa1e5966b
2013-12-06 20:40:12 -08:00
bab30fcab5 Merge "fix -print_psnr / ssim options" 2013-12-05 14:04:37 -08:00
ebef7fb307 fix -print_psnr / ssim options
original/compressed pictures were not converted to an adequate YUVA
colorspace before computing the distortion.

Change-Id: I37775e9b7dbd6eca16c38e235e1df325858d36a1
2013-12-05 14:03:59 -08:00
cb63785545 Merge "fix bug due to overzealous check in WebPPictureYUVAToARGB()" 2013-12-04 00:54:35 -08:00
8189885b6d Merge "EstimateBestFilter: use an int to iterate WEBP_FILTER_TYPE" 2013-12-03 23:32:41 -08:00
4ad7d33510 Android.mk: add some release compile flags
-ffunction-sections / -fdata-sections
can improve final binary size when used with --gc-sections, speed impact
untested

Change-Id: I37f4b5da2f34acede7965c2da2e1b97125473adc
2013-12-03 23:00:49 -08:00
c12e2369d8 cosmetics: fix a few typos
Change-Id: I73b1900b2d960c4c57ef7078df137c776b321a1b
2013-12-03 22:36:29 -08:00
6f104034a1 fix bug due to overzealous check in WebPPictureYUVAToARGB()
This tests prevented views to be converted to ARGB

https://code.google.com/p/webp/issues/detail?id=178

Change-Id: I5ba66da2791e6f1d2bfd8c55b5fffe6955263374
2013-12-02 15:23:12 +01:00
3f6c35c6f3 EstimateBestFilter: use an int to iterate WEBP_FILTER_TYPE
this change allows the code to be built with a C++ compiler without the
addition of an operator++

Change-Id: I2f2fae720b9772abfc3c540bb2e3bf9107d96cc9
2013-11-27 20:10:45 -08:00
cc55790e37 Merge changes I8bb7a4dc,I2c180051,I021a014f,I8a224a62
* changes:
  mux: add some missing casts
  enc/vp8l: add a missing cast
  idec: add some missing casts
  ErrorStatusLossless: correct return type
2013-11-27 17:06:35 -08:00
c536afb57b Merge "cosmetics: fix some typos" 2013-11-27 17:04:00 -08:00
cbdd3e6e53 add a -dither dithering option to the decoder
Even at high quality setting, the U/V quantizer step is limited
to 4, which can lead to banding on gradients.
This option allows some randomness to be selectively applied to
potentially flattened-out U/V blocks, attenuating the banding.

This option is off by default in 'dwebp', but set to -dither 50
by default in 'vwebp'.

Note: depending on the number of blocks selectively dithered,
we can have up to a 10% slow-down in decoding speed, it seems.

Change-Id: Icc2446007f33ddacb60b3a80a9e63f2d5ad162de
2013-11-27 00:57:51 -08:00
e812401299 Updated iosbuild.sh for XCode 5.x
Change-Id: I4bf545c4f394c28842cbb85ac70214d2e5477ae0
2013-11-26 20:08:02 -08:00
4931c3294b cosmetics: fix some typos
Change-Id: I0d6efebd817815139db5ae87236fd8911df4d53c
2013-11-26 19:21:14 -08:00
05aacf77c2 mux: add some missing casts
+ fix a return value
+ fix a param type

Change-Id: I8bb7a4dc4c76b11140f8693c909aeb10f660e6e5
2013-11-25 20:53:15 -08:00
617d93480e enc/vp8l: add a missing cast
Change-Id: I2c1800516eb4573ae2599866ace10017b865f23a
2013-11-25 20:48:13 -08:00
46db286572 idec: add some missing casts
Change-Id: I021a014f1f3a597f37ba8d9f4006c8cb4100723c
2013-11-25 20:47:35 -08:00
b524e3369c ErrorStatusLossless: correct return type
int -> VP8StatusCode

Change-Id: I8a224a622e98d401a6e401047780fa6b25cb0ae4
2013-11-25 20:45:53 -08:00
cb261f790f fix a descaling bug for vertical/horizontal U/V interpolation
RGBToU/V calls expect two extra precision bits; they were only
given one by the SUM2H and SUM2V macros.

For rounding coherency, also changed SUM1 macro.

Change-Id: I05f96a46f5d4f17b830d0420eaf79b066cdf78d4
2013-11-19 11:24:09 +01:00
bcb3955c41 Merge changes I48968468,I181bc736
* changes:
  demux: split chunk parsing from ParseVP8X
  demux: add a tail pointer for chunks
2013-11-18 17:03:33 -08:00
73f52133a1 gif2webp: Add a mixed compression mode
When the '-mixed' option is given, each frame is heuristically chosen to be
encoded using lossy or lossless compression.

The heuristic is based on the number of colors in the image:
- If num_colors <= 31, pick lossless compression
- If num_colors >= 194, pick lossy compression
- Otherwise, try both and pick the one that compresses better.

Change-Id: I908c73493ddc38e8db35b7b1959300569e6d3a97
2013-11-17 18:04:07 -08:00
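The color-count thresholds above translate into a small decision helper; a
sketch with illustrative names (the middle range compares the sizes obtained
by actually trying both modes):

    #include <stddef.h>

    typedef enum { ENCODE_LOSSLESS, ENCODE_LOSSY } EncodeMode;

    /* Heuristic from the message: few colors -> lossless, many -> lossy,
     * otherwise keep whichever of the two trial encodings came out smaller. */
    static EncodeMode PickMode(int num_colors,
                               size_t lossless_size, size_t lossy_size) {
      if (num_colors <= 31) return ENCODE_LOSSLESS;
      if (num_colors >= 194) return ENCODE_LOSSY;
      return (lossless_size <= lossy_size) ? ENCODE_LOSSLESS : ENCODE_LOSSY;
    }
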
6198715e89 demux: split chunk parsing from ParseVP8X
makes the header and chunk parsing portions slightly more digestible

Change-Id: I48968468b2529506b830e96a2f13e8d29976179b
2013-11-16 11:06:43 -08:00
d2e3f4e6b5 demux: add a tail pointer for chunks
a large number of non-frame chunks could slow the parse

Change-Id: I181bc73626e92263c3c5b2a6dc2bf6e6a0481d52
2013-11-16 10:57:09 -08:00
87cffcc3c9 demux: cosmetics: s/has_frames/is_animation/
animation matches the flag name
s/has_fragments/is_fragmented/ to match.

Change-Id: I86c834e19bab6db6610ea2ccf900ab3ebffafa8a
2013-11-16 10:32:56 -08:00
e18e66779b demux: strictly enforce the animation flag
if the flag is incorrectly set in a VP8L or VP8/ALPH single image file
the demux will now fail.

Change-Id: Id4d5f2d3f6922a29b442c5d3d0acbe3d679e468a
2013-11-14 16:01:36 -08:00
c4f39f4a31 demux: cosmetics: remove a useless break
+ reword a comment

Change-Id: I52b64abcde2c8322b59c594f2f75b509ed367bff
2013-11-13 21:08:07 -08:00
61cb884d79 demux: (non-exp) fail if the fragmented flag is set
otherwise make sure that all frames are marked as a fragment. there's
still some work to do with validation if fragments are expected to cover
the entire canvas.

Change-Id: Id59e95ac01b9340ba8c6039b0c3b65484b91c42f
2013-11-12 21:50:27 -08:00
ff379db317 few % speedup of lossless encoding
mostly visible for method 4 and up

Change-Id: I1561d871bc055ec5f7998eb193d927927d3f2add
2013-11-12 00:09:45 +01:00
df3649a287 remove all disabled code related to P-frames
it's drifting out of sync, and won't be used anyway...

Change-Id: I931b288e1c8480bf3bccd685b3a356bb6bd8e10b
2013-11-04 11:52:05 +01:00
6d0cb3debf Merge "gif2webp: kmin = 0 should suppress key-frame addition." 2013-11-01 12:22:26 -07:00
36555983f0 gif2webp: kmin = 0 should suppress key-frame addition.
This is to conform to man/gif2webp.1
Earlier, one needed to give both '-kmin 0' and '-kmax 0' for this to
work.

Also, suppress further warnings for kmin = 0 and/or kmax = 0 case.

Change-Id: I6f5eeb609aeffc159d0252a40a5734162f7e4e7d
2013-11-01 12:01:59 -07:00
7708e60908 Merge "detect flatness in blocks and favor DC prediction" 2013-11-01 11:24:09 -07:00
06b1503eff Merge "add comment about the kLevelsFromDelta[][] LUT generation" 2013-10-31 21:37:55 -07:00
5935259cd0 add comment about the kLevelsFromDelta[][] LUT generation
Change-Id: Id1a91932bf05aeee421761667af351ef2200c334
2013-10-31 21:35:55 -07:00
e3312ea681 detect flatness in blocks and favor DC prediction
this avoids local-minima that look bad, even if the distortion
looks low (e.g. gradients, sky,...). Mostly visible in the q=50-80 range.
Output size is mostly unchanged.

Change-Id: I425b600ec45420db409911367cda375870bc2c63
2013-11-01 00:47:04 +01:00
ebc9b1eedf Merge "VPLBitReader bugfix: Catch error if bit_pos > LBITS too." 2013-10-31 16:38:15 -07:00
96ad0e0aef VPLBitReader bugfix: Catch error if bit_pos > LBITS too.
Earlier we were only testing for bit_pos == LBITS. But this is not
sufficient, as bit_pos can jump from < LBITS to > LBITS.

This was resulting in some bit-stream truncation errors not being
caught.

Note: Not a security bug though, as br->pos wasn't incremented in such
cases, and so we weren't reading beyond the buffer.

Change-Id: Idadcdcbc6a5713f8fac3470f907fa37a63074836
2013-10-30 16:33:36 -07:00
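In essence, the end-of-stream test needs '>=' instead of '=='; a schematic
version (the LBITS value and struct layout are illustrative):

    /* Schematic end-of-stream check for the lossless bit reader. */
    #define LBITS 64   /* illustrative: bits the reader's accumulator can hold */

    typedef struct { int bit_pos; int eos; } BitReaderSketch;

    static void CheckEndOfStream(BitReaderSketch* const br) {
      /* before: (br->bit_pos == LBITS) missed positions that jumped past LBITS */
      if (br->bit_pos >= LBITS) {
        br->eos = 1;   /* flag the truncation so decoding reports the error */
      }
    }
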
a014e9c9cd tune quantization biases toward higher precision
* raise U/V quantization bias to more neutral values
* also raise the non-zero AC bias for Y1/Y2 matrices

(we need all the precision we can get for U/V levels, which are often empty)
This will increase quality in the higher range (q >= 90) mostly.
File size is expected to rise a little (5-7%), and SSIM accordingly, of course.

Change-Id: I8a9ffdb6d8fb6dadb959e3fd392e66dc5aaed64e
2013-10-30 23:57:23 +01:00
1e898619cb add helpful PrintBlockInfo() function
(protected under a DEBUG_BLOCK compile flag)

Change-Id: Icb8da02dbd00e5cf856c314943c212f1c9578d9b
2013-10-30 19:25:27 +01:00
596a6d73ce make use of 'extern' consistent in function declarations
Change-Id: I18e050db3111e52acfe97da09cdf1860f3e15936
2013-10-30 03:23:21 -07:00
c8d48c6e0d Merge "extract random utils to their own file util/random.[ch]" 2013-10-30 02:07:28 -07:00
98aa33cf1e extract random utils to their own file util/random.[ch]
they'll be used for decoding too, probably.

Change-Id: Id9cbb250c74fc0e876d4ea46b1b3dbf8356d6725
2013-10-30 02:00:33 -07:00
432a723e0b Merge "swig: add basic go bindings" 2013-10-30 01:03:32 -07:00
fab618b5fb Merge "rename libwebp.i -> libwebp.swig" 2013-10-30 01:02:05 -07:00
e4e7fcd69b swig: add basic go bindings
WebPGetDecoderVersion
WebPGetInfo

Change-Id: Icb66207c2f492c9d20642544c0b31cc92b91cdcc
2013-10-29 21:44:20 +01:00
d3408720d8 Merge "fast auto-determined filtering strength" 2013-10-29 12:47:58 -07:00
f8bfd5cd1e fast auto-determined filtering strength
kLevelsFromDelta[sharpness][delta] is an inverse look-up table
that tells the minimum filtering strength needed to trigger the
filtering of a step with amplitude 'delta'. We use this table
in various situations:

a) when computing the initial (/global) filtering
strength for each segment. We look at the quantization
step and deduce the proper filtering strength needed
to smooth out this quantization noise (taking the -f option
into account).

b) during intra16 calculation, when a block ends up
very empty (only DC coeffs are non-zero, all ACs have
vanished). We'll rely on the in-loop filtering to
restore the smoothness (if the source was smooth and
gradient-like. That's why we look at the distortion too before
triggering the filtering).

Step b) goes _in addition_ to a), potentially raising
the filtering strength if blockiness is likely.

Change-Id: Icaeca93ef21da195b079e6587a44d9edfc8e9efa
2013-10-29 20:13:29 +01:00
ac0bf951ca small clean-up in ExpandMatrix()
Change-Id: Ib06cb1658a6548f06bb7320310b3864881b606a7
2013-10-29 19:58:57 +01:00
1939607e7f rename libwebp.i -> libwebp.swig
.swig provides better tool compatibility

Change-Id: I35587b84dd32e12e8cedad14b36ac518b63896bc
2013-10-29 19:51:04 +01:00
43148b6cd2 filtering: precompute ilimit and hev_threshold
no speed change, just simplifying the logic

Change-Id: I518800494428596733d4fbae69072049828aec3c
2013-10-28 13:37:33 +01:00
18f992ec0f simplify f_inner calculation a little
by incorporating the is_4x4 flag at init

Change-Id: I042e04aacb15181db0bf86f3212c880087519189
2013-10-28 01:49:09 -07:00
241d11f141 add missing const
Change-Id: Id1c767d21d52197ed2e4497005eb9c4795c602f0
2013-10-25 20:34:14 +02:00
86c0031eb2 add a 'format' field to WebPBitstreamFeatures
Change-Id: I79a688e4c34fb77527127bbdf4bc844efa6aa9a4
2013-10-25 20:34:06 +02:00
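With this field, a caller of WebPGetFeatures() can distinguish lossy from
lossless input; a usage sketch:

    #include <stddef.h>
    #include <stdio.h>
    #include "webp/decode.h"

    /* Sketch: report the bitstream format of a WebP buffer.
     * features.format: 0 = undefined/mixed, 1 = lossy, 2 = lossless. */
    static void PrintFormat(const uint8_t* const data, size_t data_size) {
      WebPBitstreamFeatures features;
      if (WebPGetFeatures(data, data_size, &features) == VP8_STATUS_OK) {
        printf("%dx%d, has_alpha=%d, format=%d\n",
               features.width, features.height,
               features.has_alpha, features.format);
      }
    }
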
dde91fde96 Demux: Correct the extended format validation
Earlier "f = f->next_" was executing for both inner and outer loop, thus
skipping validation of some frames.

Change-Id: Ice5cdb4ff5da78384aa0573addd3a5e5efa0b10c
2013-10-23 17:25:23 -07:00
5d6c5bd2a5 add entry for '-resize' option in cwebp's man
Change-Id: I04b2853825527b03cfa73377004b1a2a40b4c94f
2013-10-23 02:22:15 -07:00
7c098d1814 Use some gamma-curve range compression when computing U/V average
This helps for discolorated chroma-subsampled edges.

Change-Id: I1d8ce87b66cb7e8b3572e6722905beabf0f50554
2013-10-18 21:28:05 +02:00
0b2b05049f Use deterministic random-dithering during RGB->YUV conversion
-> helps debanding (sky, gradients, etc.)

This dithering can only be triggered when using -preset photo
or -pre 2 (as a preprocessing). Everything is unchanged otherwise.

Note that this change is likely to make the perceived PSNR/SSIM drop
since we're altering the input internally.

Change-Id: Id8d4326245d9b828141de162c94ba381b1fa5813
2013-10-17 22:36:49 +02:00
8a2fa099cc Add a second multi-thread method
method 1 grouping: [parse + reconstruction] // [filtering + output]
method 2 grouping: [parse] // [reconstruction+filtering + output]

Depending on some heuristics (see VP8ThreadMethod()), we
can pick one of the other when -mt flag (or option.use_threads)
is selected.

Conservatively, we always use method #2 for now until the heuristic
is refined (so, timing should be the same as before this patch)

+ replace 'use_threads' by 'mt_method'
+ define MIN_WIDTH_FOR_THREADS constant
+ fix comment alignment

Change-Id: I11a756dea9070d6e21b1a9481d357a1e8aa0663e
2013-10-15 23:58:31 +02:00
7d6f2da075 Merge "up to 20% faster multi-threaded decoding" 2013-10-15 14:51:12 -07:00
266f63ea89 Merge "libwebp.jar: build w/Java 1.6 for Android compat" 2013-10-15 02:04:41 -07:00
0532149c8a up to 20% faster multi-threaded decoding
Mostly visible for large images.
Reconstruction+filtering is now done in parallel to bitstream-parsing.

Change-Id: I4cc4483d803b255f4d97a2fcd9158b1c291dd900
2013-10-15 00:25:21 +02:00
38efdc2e9c Simplify the gif2webp tool: move the optimization details to util
Specifically:
- Merge OptimizeAndEncodeFrame with WebPFrameCacheAddFrame: they use the same
  if-else structure.
- Move maintenance of 'prev_canvas' and 'curr_canvas' to util.
- Move ReduceTransparency() and FlattenPixels() calls to SetFrame(): This is in
  preparation for the next patch, which will try lossless encoding for
  each frame, even when the '-lossy' option is given.
- Make most methods static inside util.

No changes to output expected.

Change-Id: I1f65af25246665508cb20f0f6e338f9aaba9367b
2013-10-14 14:39:46 -07:00
de899516c7 libwebp.jar: build w/Java 1.6 for Android compat
broken since:
 a5c297c swig/java: reduce wrapper function code duplication

this was a part of v0.3.1, but not v0.3.0.

Change-Id: I001d4bd0a7a1aa1b2d267bc63bc1d8226bff00c1
2013-10-11 17:59:37 +02:00
cb22155201 Decode a full row of bitstream before reconstructing
Needs more memory but allows for future parallelization.
Noticeably faster on ARM, slightly faster on x86

also: remove dec->filter_row_ unnecessary field

Change-Id: I044a808839b4e000c838a477e3e8688820436d9a
2013-10-10 21:29:58 +02:00
dca8a4d315 Merge "NEON/simple loopfilter: avoid q4-q7 registers" 2013-10-10 01:58:41 -07:00
9e84d901d2 Merge "NEON/TransformWHT: avoid q4-q7 registers" 2013-10-09 09:32:59 -07:00
fc10249b36 NEON/simple loopfilter: avoid q4-q7 registers
very tiny speed improvement

Change-Id: I3024f120feb7275ce20bfff21af31ea8650a5a03
2013-10-09 18:17:31 +02:00
2f09d63e30 NEON/TransformWHT: avoid q4-q7 registers
very tiny speed improvement

Change-Id: Iace78b9038af412d0a794845ff19f54afa88ccdc
2013-10-09 18:17:23 +02:00
77585a2be6 Merge "use a macrofunc for setting NzCoeffs bits" 2013-10-09 03:00:49 -07:00
d155507cb9 Merge "use HINT_GRAPH as image_hint for gif source" 2013-10-09 02:58:15 -07:00
9c561646ac Merge "only print GIF_DISPOSE_WARNING once" 2013-10-09 02:56:45 -07:00
05879865da use HINT_GRAPH as image_hint for gif source
quite rarely, it gives a different output
than without the HINT, but that's often for
a smaller size (tested with default -m and -m 6)

Change-Id: I51d221ab61f8e007983325031345728e8d80b241
2013-10-09 11:55:51 +02:00
0b28d7ab08 use a macrofunc for setting NzCoeffs bits
(avoids code dup)

Change-Id: I776f065538e562673ca08f3bc43c7167d13254d9
2013-10-09 11:46:32 +02:00
f9bbc2a034 Special-case sparse transform
If the number of non-zero coeffs is <= 3, use a
simplified transform for luma.

Change-Id: I78a1252704228d21720d4bc1221252c84338d9c8
2013-10-08 22:05:38 +02:00
00125196f3 gif2webp: detect and flatten uniformly similar blocks
helps during lossless compression.

10% average saving, but that's mostly on what was previously
'difficult' cases, where the gain is ~30-50% actually.
Non-difficult cases are mostly unchanged.
Tested over ~7k random web gifs.

Change-Id: I09db4560e4ab09105d1cad28e6dbf83842eda8e9
2013-10-08 15:04:52 +02:00
0deaf0fa44 only print GIF_DISPOSE_WARNING once
Change-Id: Iece235a6ee767cc2e3866bb6c9bb1d56d9a9ffb9
2013-10-08 15:02:33 +02:00
6a8c0eb718 Merge "small optimization in segment-smoothing loop" 2013-10-08 04:13:37 -07:00
f7146bc1e6 small optimization in segment-smoothing loop
probably not much of a speed difference

Change-Id: I08c41d82c3c2eb5ff9ec9ca9d81af2bb09b362de
2013-10-07 07:44:51 -07:00
5a7533ceb3 small gif2webp fix
the old kmin value was printed instead of the new one.

Change-Id: I0dcf23a29b7ce6bad08013a1980c5ce027b1bfe9
2013-10-04 21:13:35 +02:00
4df0c89e24 Merge changes Ic697660c,I27285521
* changes:
  Android.mk: add a dwebp target
  Android.mk: update build flags
2013-10-04 05:02:13 -07:00
5b2e6bd3e8 Android.mk: add a dwebp target
Change-Id: Ic697660c1a5f7185d2ad00934b314b44870cec00
2013-10-04 11:26:37 +02:00
f910a84ea5 Android.mk: update build flags
- split out release specific flags
- set LOCAL_ARM_MODE to arm

Change-Id: I272855216583d6c8d0a4106e8b3fde46aa59dfa9
2013-10-04 11:18:09 +02:00
63f9aba4b3 special-case WHT transform when there's only DC
happens surprisingly often at low quality, so we might
as well hard-code a simplified TransformWHT() directly.

Change-Id: Ib7a858ef74e8f334bd59d6512bf5bd3e455c5459
2013-10-02 14:27:36 +02:00
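When only the DC coefficient of the WHT is present, the whole inverse
transform collapses to one rounded value copied into each block's DC slot; a
sketch of that shortcut, assuming the usual layout of one DC every 16 output
coefficients:

    #include <stdint.h>

    /* Sketch: inverse WHT when only in[0] is non-zero -- every one of the 16
     * luma blocks gets the same rounded DC, so the full transform is skipped. */
    static void TransformWHT_DCOnly(const int16_t* const in, int16_t* out) {
      const int dc = (in[0] + 3) >> 3;   /* same rounding as the full transform */
      int i;
      for (i = 0; i < 16; ++i) out[i * 16] = dc;
    }
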
80911aef38 Merge "7-8% faster decoding by rewriting GetCoeffs()" 2013-10-02 01:51:53 -07:00
606c4304c4 gif2webp: Improved compression for lossy animated WebP
We reduce transparency by turning some transparent pixels into
corresponding RGB values from previous canvas.

This improves compression by about 23%.

Change-Id: I02d70a43a1d0906ac09a7e2dc510be3b2d38f593
2013-09-30 16:48:39 -07:00
fb887f7fe6 gif2webp: Different kmin/kmax defaults for lossy and lossless
These were the best values found considering the compression gain and
decoding speed.

Change-Id: Iddae4c5b78c6aa42b1f8a034d1c1b93843071a81
2013-09-30 14:25:58 -07:00
2a98136667 7-8% faster decoding by rewriting GetCoeffs()
Change-Id: Ib7c27e985d3b5222e8fa1f98cec462458caa9541
2013-09-30 23:17:34 +02:00
92d47e4ca9 improve VP8L signature detection by checking the version bits too
Change-Id: I20bea00b9582d7ea8c7b643616c78f717ce1bdf2
2013-09-27 18:17:23 +02:00
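The stricter detection also validates the 3-bit version field stored after the
width/height/alpha bits; in essence the check becomes (a sketch, with the
header size and bit positions following the VP8L layout):

    #include <stddef.h>
    #include <stdint.h>

    #define VP8L_MAGIC_BYTE 0x2f   /* signature byte of a VP8L payload */

    /* Sketch: accept only buffers whose signature byte matches *and* whose
     * 3 version bits (top bits of the 5th header byte) are zero. */
    static int CheckVP8LSignature(const uint8_t* const data, size_t size) {
      return (size >= 5 &&
              data[0] == VP8L_MAGIC_BYTE &&
              (data[4] >> 5) == 0);   /* version bits must be 0 */
    }
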
5cd43e4316 Add -incremental option to dwebp
useful for testing. Not listed in the man or README, since
it's only useful for testing the incremental decoding
(e.g. measuring the timing difference compared to non-incremental)

Change-Id: I8df8046e031d21006242babb5bcac09f8ff9f710
2013-09-19 13:20:45 +02:00
54b8e3f6e2 webpmux: DisplayInfo(): remove unnecessary error checks.
As WebPMuxCreate() would have succeeded by then, many of the calls
cannot fail.

Change-Id: I3220c59e9aa47c948da9f8741b4098c47b91a8fc
2013-09-18 11:44:24 -07:00
40ae3520b1 fix memleak in WebPIDelete()
happens when decoding is partial (past Partition0), without error and
interrupted by calling WebPIDelete()

WebPIDelete() needs to call VP8ExitCritical() to free in-flight resources

Change-Id: Id4faef1b92f7edd8c17d642c58860e70dd570506
2013-09-17 15:45:43 -07:00
d9662658d9 mux.h doc: WebPMuxGetFrame() can return WEBP_MUX_MEMORY_ERROR too.
Change-Id: Icc0305f162d1f5d15ad1b4713ac08e210c85c306
2013-09-17 14:37:12 -07:00
0e6747f88d webpmux -info: display dimensions and has_alpha per frame
Also, more readable dispose and blend methods.

Change-Id: I318431f94dcdee4c5da296a48e6f3762aa254c1f
2013-09-16 15:29:24 -07:00
d78a82c407 Sanity check for underflow
Change-Id: I52dbdcbd02c344c6260f196afad30005d14d3ee0
2013-09-16 13:35:13 -07:00
8498f4bfe2 Merge "remove -Wshadow warnings" 2013-09-16 13:19:39 -07:00
e89c6fc867 Avoid a potential memleak
Change-Id: I76f30fb40743a989bce25b40238cf0db55bd07e0
2013-09-16 13:12:33 -07:00
3ebe175781 Merge "break down the proba 4D-array into some handy structs" 2013-09-14 03:14:30 -07:00
6a44550a8c break down the proba 4D-array into some handy structs
Makes it easy to later add derived satellite fields...

Change-Id: I445767ea78cc788d11aec367479e74e485fdabe5
2013-09-14 03:12:09 -07:00
2f5e893400 remove -Wshadow warnings
"warning: declaration of ‘index’ shadows a global declaration"

Change-Id: I65f079aa4cd300cd1b3887a040b75c6b0f36ca1d
2013-09-14 02:02:09 -07:00
bf3a29b302 Merge "add proper WEBP_HAVE_GIF and WEBP_HAVE_GL flags" 2013-09-14 01:57:36 -07:00
2b0a759335 Merge "fix some warnings from static analysis" 2013-09-13 13:00:12 -07:00
22dd07cee9 mux.h: Some doc corrections
Change-Id: Ia2840a489d87001de241a9a8fbc88a127af55ff9
2013-09-13 12:51:36 -07:00
79ff03460a add proper WEBP_HAVE_GIF and WEBP_HAVE_GL flags
...and make gif2webp and vwebp compile without them.

Makefile.vc still to be updated...
Meanwhile the CL environment variable can be supplemented with

set CL=/DWEBP_HAVE_GL /IC:\opt\freeglut\include

Change-Id: I37a60b8c32aafd125bffa98b6cc9f57c022ebbd0
2013-09-13 12:46:41 -07:00
d51f45f047 fix some warnings from static analysis
http://code.google.com/p/webp/issues/detail?id=138

Change-Id: I21470e965357cc14eab356e2c477c7846ff76ef2
2013-09-13 11:33:30 +02:00
d134307b7f fix conversion warning on MSVC
"src\enc\frame.c(88) : warning C4244: '=' : conversion from 'const double' to 'float', possible loss of data"

Change-Id: I143cb0bb6b69e1b8befe9b4f24b71adbc28095c2
2013-09-12 18:57:51 -07:00
d538cea8c2 gif2webp: Support a 'min' and 'max' key frame interval
Change-Id: If910c8849d7210e7227db0f45affa26fce7da102
2013-09-12 13:41:09 -07:00
80b54e1c69 allow search with token buffer loop and fix PARTITION0 problem
The convergence algo is noticeably faster and more accurate.

Try it with: 'cwebp -size xxxxx -pass 8 ...' or 'cwebp -psnr 39 -pass 8 ...'
for instance

Allow full-looping with TokenBuffer case, and make the non-TokenBuffer
case match too.

In case Partition0 is likely to overflow, retry encoding with harder
limits on max_i4_header_bits_.

This CL should make -partition_limit option somewhat useless,
since the fix made automatically (albeit in a non-optimal way yet).

Change-Id: I46fde3564188b13b89d4cb69f847a5f24b8c735b
2013-09-11 21:15:28 +02:00
b7d4e04255 add VP8EstimateTokenSize()
estimates final size of coded tokens given a set of probabilities

Change-Id: Ia5a459422557d98b78b3cd8e1a88cb30835825b6
2013-09-11 10:08:49 +02:00
10fddf53bb enc/quant.c: silence a warning
score_t -> int: rd_i4.H contains a value from a uint16_t
lookup

Change-Id: I7227de2dfab74b4f796abbc47955197ffa0e6110
2013-09-11 00:04:11 -07:00
399cd4568b Merge "fix compile error on ARM/gcc" 2013-09-10 15:12:30 -07:00
9f24519e82 encoder: misc rate-related fixes
* fix VP8FixedCostsI4[] table
   (the constant cost '211' was erroneously included)
* use the rd-score for '211' correctly (calling SetRDScore() for good)
* count partition0 bits separately during rd-opt

No meaningful difference in rd-curve.

Change-Id: I6c49a150cf28928d9a92c32fff097600d7145ca4
2013-09-10 00:25:32 -07:00
c663bb214a Merge "simplify VP8IteratorSaveBoundary() arg passing" 2013-09-06 14:21:42 -07:00
fa46b31269 Demux.h: Correct a method name reference
Change-Id: I5638a4e40d9fcb44860952028f8b5ef2ea78621d
2013-09-06 11:26:00 -07:00
f8398c9dab fix compile error on ARM/gcc
use of uint8_t type was causing error like:
src/dsp/upsampling.c:223:1: internal compiler error: in vect_determine_vectorization_factor, at tree-vect-loop.c:349

with gcc 4.6.3

Change-Id: Ieb6189a1375c47fc4ff992e6c09b34a7f1f605da
2013-09-06 03:07:28 -07:00
f691f0e461 simplify VP8IteratorSaveBoundary() arg passing
we only need to save yuv_out_, so no need for the arg

Change-Id: I7bad5d910e81ed2eda5c9787821fd1cfe905bd92
2013-09-06 02:11:16 -07:00
42542be855 up to 6% faster encoding with clang compiler
mostly by revamping the main loop of GetResidualCost() and avoiding some branches

Change-Id: Ib05763e18a6bf46c82dc3d5d1d8eb65e99474207
2013-09-05 10:38:22 -07:00
93402f02db multi-threaded segment analysis
When -mt is used, the analysis pass will be split in two
and each halves performed in parallel. This gives a 5%-9% speed-up.

This was a good occasion to revamp the iterator and analysis-loop
code. As a result, the default (non-mt) behaviour is a tad (~1%) faster.

Change-Id: Id0828c2ebe2e968db8ca227da80af591d6a4055f
2013-09-05 09:13:36 +02:00
7e2d65950f Merge "remove the PACK() bit-packing tricks" 2013-09-04 23:55:41 -07:00
c13fecf908 remove the PACK() bit-packing tricks
was too smart for its own good :)
This is more ARM-friendly, since it removes a mult.

Change-Id: If146034c8efa2e71e3eaaf1230cb553884a42ebb
2013-09-05 08:53:36 +02:00
2fd091c9ae Merge "use NULL for lf_stats_ testing, not bool" 2013-09-04 07:18:42 -07:00
b11c9d6284 dwebp: use default dct_method
No significant speed diff was observed.
Avoids idct mismatch artifacts.

Change-Id: I9ca64d165d313f23b4aa231e7a97a20213ff86c7
2013-09-03 15:48:41 -07:00
4bb8465f8c Merge "(de)mux.h: wrap pseudo-code in /* */" 2013-09-03 15:47:42 -07:00
cfb56b1707 make -pass option work with token buffers
-pass 2 can be useful sometimes. More passes usually don't help more.
This change is a step toward being able to re-code the whole picture
with varying parameter (when token buffer is used).

Change-Id: Ia2538e2069a53c080e2ad248c18a1e04623a9304
2013-09-03 23:37:42 +02:00
5416aab479 (de)mux.h: wrap pseudo-code in /* */
easier to copy / read as a block

Change-Id: Ia22689a13afd8ea2325dcdf432e35fc802d8177a
2013-09-03 14:31:02 -07:00
35dba337a3 use NULL for lf_stats_ testing, not bool
Change-Id: I14c6341bce1c745b7f2d1b790f2d4f8441d01be6
2013-09-03 08:45:59 -07:00
733a7faae4 enc->Iterator memory cleanup
* move yuv_in_/out_* scratch buffers to iterator
* add y_top_/uv_top_ shortcuts in iterator

That's ~3k of stack size instead of heap.
But it allows having several iterators work in parallel.

Change-Id: I6a437c0f2ef1e5d398c1d6a2fd4974fa0869f0c1
2013-08-31 23:38:11 +02:00
e81fac86dd Add support for "no blend" in webpmux binary
Change-Id: I6d07b120626317df73f1a6f026931c5b9485a340
2013-08-26 18:04:52 -07:00
3b80bc4859 gif2webp: Separate out each step into a method
We now have ReadFrame(), OptimizeFrame(), EncodeFrame() and DisposeFrame()
methods.

Change-Id: I522834bad18dd6a7a3ddac7c00dfd829c48248f8
2013-08-22 11:14:51 -07:00
bef7e9ccd1 Add doc precision about demux object keeping pointers to data.
Change-Id: I3d2139f975eedcce36606e586e0cbd6fa7d207e6
2013-08-21 11:09:37 -07:00
61405a143d dwebp: enable stdout output with WIC
Change-Id: Ieb73a414784480945bba6cb2687468517e24e755
2013-08-20 19:59:30 -07:00
6eabb88637 Merge "Animated WebP: add "do no blend" option to spec" 2013-08-20 19:14:17 -07:00
be20decb5c fix compilation for BITS 24
in_bits is const. Trying to apply bswap on it, one gets the error message:
error: read-only variable 'in_bits' used as 'asm' output

Change-Id: I0bef494b822c83d8ea87b1938b0e486d94de4742
2013-08-20 18:55:00 -07:00
e58cc13706 Merge "dwebp: s/unsigned char/uint8_t/" 2013-08-20 15:03:46 -07:00
72501d4309 dwebp: s/unsigned char/uint8_t/
Change-Id: I579e9b0a4d03998ca1dd8683d83d30d852551e45
2013-08-20 14:57:35 -07:00
2c9633e8e0 Merge "gif2webp: Insert independent frames at regular intervals." 2013-08-20 13:59:50 -07:00
f0d6a14b64 gif2webp: Insert independent frames at regular intervals.
We use the 'do not blend' option for creating independent frames.
We also mark the already independent frames as 'do not blend'.

This bounds the maximum number of frames that need to be decoded to
decode a given frame, thus leading to a much better decoding performance.

Change-Id: I7cef98af2b53751ec36993fd2bd54f7f4c4aad2b
2013-08-20 11:47:48 -07:00
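A much-simplified sketch of the interval rule: after kmax frames without an
independent frame, the next one is encoded as a key frame and marked 'do not
blend' (the real cache also uses kmin to pick a cheaper insertion point; names
here are illustrative):

    /* Simplified key-frame scheduling sketch (ignores the kmin search). */
    typedef struct {
      int frames_since_key_frame;
      int kmax;                    /* maximum allowed distance between key frames */
    } KeyFrameCounterSketch;

    static int ShouldBeKeyFrame(KeyFrameCounterSketch* const c) {
      ++c->frames_since_key_frame;
      if (c->frames_since_key_frame >= c->kmax) {
        c->frames_since_key_frame = 0;
        return 1;   /* encode independently and mark the frame 'do not blend' */
      }
      return 0;
    }
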
b25a6fbfdc yuv.h: fix indent
Change-Id: I0c0bd5f7f71bc44e10134bd4f788769ec25cec1f
2013-08-19 18:06:15 -07:00
ede3602e5d Merge "cosmetics: fix indent" 2013-08-19 17:56:21 -07:00
3a65122a53 dwebp: fix stdout related output
logic was incorrect for the error output

Change-Id: I89e665f8065490e12e1bf3d3008087d42daee16b
2013-08-19 17:52:33 -07:00
388a7249c9 cosmetics: fix indent
Change-Id: Iad0fce79886bed0d61ddf2510ce133a5355ebc1f
2013-08-19 17:51:04 -07:00
4c7322c86f Merge "dsp: msvc compatibility" 2013-08-19 17:42:16 -07:00
d50c7e3275 Merge "5-7% faster SSE2 versions of YUV->RGB conversion functions" 2013-08-19 17:27:44 -07:00
b8ab784745 Merge "simplify upsampler calls: only allow 'bottom' to be NULL" 2013-08-19 17:27:36 -07:00
df6cebfa9e 5-7% faster SSE2 versions of YUV->RGB conversion functions
The C-version gets ~7-8% slower in order to match the SSE2
output exactly. The old (now off-by-1) code is kept under
the WEBP_YUV_USE_TABLE flag for reference.

(note that calc rounding precision is slightly better ~= +0.02dB)

on ARM-neon, we somehow recover the ~4% speed that was lost by mimicking
the initial C-version (see https://gerrit.chromium.org/gerrit/#/c/41610)

Change-Id: Ia4363c5ed9b4c9edff5d932b002e57bb7814bf6f
2013-08-19 17:05:58 -07:00
ad6ac32d7c simplify upsampler calls: only allow 'bottom' to be NULL
If 'top' was meant to be NULL, then bottom and top can be
swapped. Logic is simpler.

+ fix compilation in non-FANCY_UPSAMPLING mode

Change-Id: I7c62bbb59454017f072c0945d1ff2d24d89286ff
2013-08-19 16:47:51 -07:00
a5e8afafcb output to stdout if file name is "-"
Doesn't work with WIC

+ redirect some info messages from stdout to stderr
+ fix the error reporting upon output-writing error

Change-Id: I92b8bd7a15e656a3f3cdfbf56299f024e39453f8
2013-08-19 13:30:27 -07:00
f358450feb dsp: msvc compatibility
intrin.h is available after VS2003

patch from the FreeImage project

Change-Id: I58a18a0db00e247f871d05e3ba99772704f0e079
2013-08-16 20:46:16 -07:00
43a7c8ebee Merge "cosmetics" 2013-08-15 11:24:30 -07:00
4c5f19c148 Merge "bit_reader.h: cosmetics" 2013-08-15 11:11:03 -07:00
f72fab7045 cosmetics
Change-Id: Ib6fa2a7e1db8c5c48607c7097520ffca34a6cb66
2013-08-15 04:16:37 -07:00
14dd5e781a fix const-ness
Change-Id: I09c85fdb90b4380c8fc03c8b5652bb9cf29b2021
2013-08-15 00:21:15 -07:00
b20aec4949 Merge "Support for 'do not blend' option in vwebp" 2013-08-14 20:01:38 -07:00
dcf652223a Support for 'do not blend' option in vwebp
Change-Id: I563e192fa727816d11d8647c559ec407205ef40b
2013-08-14 19:51:46 -07:00
d5bad03328 Animated WebP: add "do no blend" option to spec
Marking certain frames as "do not blend" helps avoid alpha-blending
at decode/render time.

It also helps inserting I-frames (frames which can be independently
decoded) into the animation.

Change-Id: Iaa222805db88d2f1c81104ce9d882e7c7ff8cfdb
2013-08-14 11:50:15 -07:00
a2f5f73de3 Merge "Support for "Do not blend" in mux and demux libraries" 2013-08-14 09:27:58 -07:00
e081f2f359 Pack code & extra_bits to Struct (VP8LPrefixCode).
Also created variant VP8LPrefixEncodeBits that returns the
code & extra_bits only.
There's no impact on compression density and compression speed.

Change-Id: I2cafdd3438ac9270cd72ad9d57b383cdddfdfa4c
2013-08-12 11:56:42 -07:00
6284854bd5 Support for "Do not blend" in mux and demux libraries
Change-Id: I9566a8e2d059fe1ebd9ca99c7e13311bf3f8f281
2013-08-12 11:49:00 -07:00
f486aaa9f8 Merge "slightly faster ParseIntraMode" 2013-08-09 02:17:16 -07:00
d17186328c slightly faster ParseIntraMode
+ cosmetics

Change-Id: Icb906d5f84f025ef9e04b71a73801a22cc990ee5
2013-08-09 02:15:21 -07:00
3ceca8ad31 bit_reader.h: cosmetics
- use const where applicable
- drop unnecessary string.h include

Change-Id: I560eef84fe17d3925768f6817c02ea79604c4379
2013-08-06 14:25:26 -07:00
69257f70df Create LUT for PrefixEncode.
This speeds up lossless compression by 5%.
Change-Id: Ifd114b1d9850dc3aac74593809e7d48529d35e3d
2013-08-05 10:20:18 -07:00
988b70844e add WebPWorkerExecute() for convenient bypass
This is mainly for re-using the worker structs without using the
thread.

Change-Id: I8e1be29e53874ef425b15c192fb68036b4c0a359
2013-08-02 12:20:15 -07:00
06e24987e7 Merge "VP8EncIterator clean-up" 2013-08-02 00:40:02 -07:00
de4d4ad598 VP8EncIterator clean-up
- remove unused fields from iterator
- introduce VP8IteratorSetRow() too
- rename 'done_' to 'countdown_'
- bring y_left_/u_left_/v_left_ from VP8Encoder

Change-Id: Idc1c15743157936e4cbb7002ebb5cc3c90e7f92a
2013-08-01 23:05:54 -07:00
7bbe95293f Merge "cosmetics: thread.c: drop a redundant comment" 2013-07-31 23:56:06 -07:00
da41148560 cosmetics: thread.c: drop a redundant comment
+ fix #if/#else/#endif comment

Change-Id: I76bfd3b123ed181897ed0feba721d5c1a3a2b0d7
2013-07-31 22:55:51 -07:00
feb4b6e6b3 thread.h: #ifdef when checking WEBP_USE_THREAD
prevents a warning with -Wundef and is consistent with thread.c

Change-Id: I60fa337c3b9daeea7302e86802402cb943cdb262
2013-07-31 22:50:57 -07:00
8924a3a704 thread.c: drop WebPWorker prefix from static funcs
Change-Id: I7cd39c9e41bbf11157c488aff18631ef17fde464
2013-07-31 19:22:44 -07:00
1aed8f2afc Merge "fix indent" 2013-07-26 17:08:37 -07:00
4038ed154d fix indent
Change-Id: I7f226ec4276e5e1b46896086a7d96cb67f36de6a
2013-07-26 17:03:19 -07:00
1693fd9b16 Demux: A new state WEBP_DEMUX_PARSE_ERROR
WebPDemuxPartial() returns NULL for both of the following cases:
- There was a parsing error.
- It doesn't have enough data to start parsing.

Now, one can differentiate between these two cases by checking the value
of 'state' returned by WebPDemuxPartial().

Change-Id: Ia2377f0c516b3fcfae475c0662c4932d2eddcd0b
2013-07-26 14:35:46 -07:00
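This lets callers of WebPDemuxPartial() tell a broken bitstream apart from one
that simply needs more bytes; a usage sketch:

    #include <stddef.h>
    #include <stdint.h>
    #include "webp/demux.h"

    /* Sketch: -1 = real parse error, 0 = feed more data, 1 = demuxer ready. */
    static int TryDemux(const uint8_t* const buf, size_t size) {
      const WebPData data = { buf, size };
      WebPDemuxState state;
      WebPDemuxer* const demux = WebPDemuxPartial(&data, &state);
      if (demux == NULL) {
        return (state == WEBP_DEMUX_PARSE_ERROR) ? -1 : 0;
      }
      /* ... iterate frames with WebPDemuxGetFrame() ... */
      WebPDemuxDelete(demux);
      return 1;
    }
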
8dcae8b3cf fix rescaling-with-alpha inaccuracy
(still missing YUVA decoding case for now)

https://code.google.com/p/webp/issues/detail?id=160

Change-Id: If723b4a5c0a303d0853ec9d839f995adce056095
2013-07-26 12:10:26 -07:00
11249abfc7 Merge changes I9b4dc36c,I4e0eef4d
* changes:
  Mux: support parsing unknown chunks within a frame/fragment.
  Stricter check for presence of alpha when writing lossless images
2013-07-22 17:05:27 -07:00
52508a1fe4 Mux: support parsing unknown chunks within a frame/fragment.
Change-Id: I9b4dc36c5ccc4b46f60cd64c1ee21008e20c8b95
2013-07-22 17:00:41 -07:00
05db0572f8 WebPMuxSetChunk: remove unused variable
and unnecessary call to ChunkGetIndexFromFourCC()

Change-Id: Ic1ed53750a3be43df827a6fb9a4b9ff3eb25c6c7
2013-07-19 13:03:25 -07:00
8ba1bf61a0 Stricter check for presence of alpha when writing lossless images
Earlier, all lossless images were assumed to contain alpha.
Now, we use the 'alpha_is_used' bit from the VP8L bitstream to determine the
same.

Detecting an absence of alpha can sometimes lead to much more efficient
rendering, especially for animated images.

Related: refine mux code to read width/height/has_alpha information only once
per frame/fragment. This avoids frequent calls to VP8(L)GetInfo().

Change-Id: I4e0eef4db7d94425396c7dff6ca5599d5bca8297
2013-07-19 11:55:09 -07:00
a03c3516cb Demux: WebPIterator now also denotes if the frame has alpha.
Change-Id: Ia300385a49c1ee5afa8f114f2560ee8d1c7664bb
2013-07-18 19:41:38 -07:00
6df743a33e Decoder: handle fragments case correctly too.
Exit early from ParseHeaderInternal() in case of fragmented image too.

Change-Id: I3e24be2c869cc2d87c43b705257f21be81127dbd
2013-07-18 19:23:57 -07:00
faa4b07eda Support for unknown chunks in mux library
WebPMuxSetChunk/WebPMuxGetChunk/WebPMuxDeleteChunk now correctly handle
unknown chunks.

Change-Id: I2b75106ef08260f2bce03eb1782924b620643746
2013-07-17 16:34:50 -07:00
7d60bbc6d9 Speed up HashChainFindCopy function.
Speed up HashChainFindCopy by optimizing on number of calls to
FindMatchLength method.
This change speeds up the lossless & lossy (Alpha) encoding by 20%
without loss of compression density.
At method=3, lossy (Alpha) compression speed (and density) remains
unchanged, as at those settings the costly Backward Refs method is not
called.

Change-Id: Ia1797148e9e4ee2787011837fa248afbae2242cb
2013-07-16 19:58:18 -07:00
6674014077 Speedup Alpha plane encoding.
Disable costly 'BackwardReferencesTraceBackwards' for encoding Alpha plane.
Increase the threshold for triggering 'BackwardReferencesTraceBackwards' to
quality 25 and above. Also lower the Alpha quality (at method 3) to be
less than this threshold (25).

Change-Id: Ic29fb2e6943472c564223df9fe099b19ccda0f31
2013-07-12 11:02:13 -07:00
b7346a1ed2 0.1 % speedup to decoding
Change-Id: If2add8a8b6f339f1d4b6a9581ce8d3d0f792d372
2013-07-09 18:47:06 -07:00
c606182edd webp-container-spec: Tighten language added by last
Change-Id: Id7719ba7f1ddfd7b54a1985771b2811318a7c0d7
2013-07-09 18:45:39 -07:00
a34a502989 pngdec: output error messages from libpng
Change-Id: I1981562eeab2ad07eb797cea589a033943e7f21d
2013-07-09 17:33:21 -07:00
e84c625d92 Merge "Detect canvas and image size mismatch in decoder." 2013-07-03 00:51:26 -07:00
f626fe2e9f Detect canvas and image size mismatch in decoder.
Will probably have to be re-visited when FRGM are
switched on.

Change-Id: I51c15e4c3887ab8f04d92ec783deca274cd09f41
2013-07-02 16:19:08 -07:00
f5fbdee0ba demux: stricter image bounds check
- validate x/y offset vs. canvas size
- with single images verify image res. == canvas res.

Change-Id: I737451d7f75c0c43c88180a903d5227f1d90cec1
2013-07-02 16:17:28 -07:00
30c8158a53 add extra assert in Huffman decode code
Change-Id: Ic0372d30827f11b727d63eac6c5eac01d1e9870d
2013-06-29 01:36:53 -07:00
8967b9f37e SSE2 for lossless decoding (critical) functions.
This speeds up WebP lossless decoding by 20%. In particular, the
photographic images get 35% speedup.

Change-Id: Idb94750342a140ec05df52c07e12be4bba335adc
2013-06-27 11:42:45 -07:00
699d80ea6b Jump-lookup for Huffman coding
speeds up those codes that are not part of the main lookup.
This gives a 10 % speedup for a photographic image.

Change-Id: Ief54b0ad77db790a01314402ad351b40ac9a7be4
2013-06-27 08:59:55 +02:00
c34307abda fix some VS9 warnings about type conversion
Change-Id: Ia6db5f70cecb1d0c29536d710a22cc5107e242ea
2013-06-27 08:49:01 +02:00
eeada35cd6 pngdec: add missing include
strcmp -> string.h

Change-Id: I07392d07cbad420f5d4f3abfd3ffde8626f0c0f2
2013-06-25 23:32:19 -07:00
54b65108ad gif2webp: If aligning to even offsets, extra pixels should be transparent
This is to ensure that the output WebP image is consistent with original
GIF when displayed against any background color.

Change-Id: I14218848153eb40358aa4ce331b2543d2fc2e86c
2013-06-24 15:18:35 -07:00
0bcf5ce39a Merge "remove a malloc() in case we're using only FILTER_NONE for alpha" 2013-06-24 12:55:40 -07:00
2c07143b9d remove a malloc() in case we're using only FILTER_NONE for alpha
+ some revamp and cleanup of the alpha-filter trial loop
+ EncodeAlphaInternal() now just takes a FilterTrial param

Change-Id: Ief84385083b1cba02678bbcd3dbf707245ee962f
2013-06-24 02:07:52 -07:00
a4d5f59d9e Faster lossless decoding
Specialize and simplify the alpha-decoding case, which is used when:
 - no color-cache is used
 - all red/blue/alpha values are the same (and hence their Huffman tree has
 only 1 symbol. We don't need to consume any bits for reading these).

 + revamped the loop to use size_t and offsets instead of pointers.

 ~2-3% faster on Unix (gcc) but up to 25% faster lossy+alpha decoding
 on Mac (llvm) and ARM.

Change-Id: I43c9688d1e4811cab0ecf0108a5b8f45781083e6
2013-06-24 09:34:30 +02:00
fd53bb758e Merge "alternate LUT-base reverse-bits code" 2013-06-24 00:32:39 -07:00
d1c166ef3f Merge "Container spec: a clarification on background color." 2013-06-21 00:53:12 -07:00
fdb9177917 Rename a method
Change-Id: Idc5f4ea1019a828a5fa1fbd81804fa51cd7e8673
2013-06-21 00:35:39 -07:00
5e9675329c Container spec: a clarification on background color.
Change-Id: Ifeb565bd162abfa03dfcbf8b576657fe83956a6c
2013-06-20 17:38:08 -07:00
30e77d0f66 Merge branch '0.3.0'
* 0.3.0: (57 commits)
  update ChangeLog
  Regression fix for alpha channels using color cache:
  wicdec: silence a format warning
  muxedit: silence some uninitialized warnings
  update ChangeLog
  update NEWS
  bump version to 0.3.1
  Revert "add WebPBlendAlpha() function to blend colors against background"
  Simplify forward-WHT + SSE2 version
  probe input file and quick-check for WebP format.
  configure: improve gl/glut library test
  update copyright text
  configure: remove use of AS_VAR_APPEND
  fix EXIF parsing in PNG
  add doc precision for WebPPictureCopy() and WebPPictureView()
  remove datatype qualifier for vmnv
  fix a memory leak in gif2webp
  fix two minor memory leaks in webpmux
  remove some cruft from swig/libwebp.jar
  README: update swig notes
  ...

Conflicts:
	NEWS
	examples/gif2webp.c
	src/dec/alpha.c
	src/dec/idec.c
	src/dec/vp8l.c
	src/enc/alpha.c
	src/enc/vp8l.c

Change-Id: Ib202fad7825a090c3b3a5169acd171369cface47
2013-06-20 14:47:48 -07:00
1b631e291a alternate LUT-base reverse-bits code
Not faster on Unix/x86. Left the code for reference though.

Change-Id: I7d419e9835e7746b85b8f8cc6f2af92db0eb8b03
2013-06-20 19:58:11 +02:00
24cc307ae3 ~20% faster lossless decoding
We use help from small LUTs for Huffman decoding.

Change-Id: I58db4ecd37282822094519f2aa14cf544beba975
2013-06-20 09:41:29 +02:00
313d853fa9 Speedup for decoding lossless WebP photographs:
use bit_pos instead num_bit to update the bit-reader
37.69 MB/s -> 39.79 MB/s, 5.6 %

Change-Id: Ica476cc3e2fc2db084d6961bea8586b050bb221d
2013-06-18 17:26:52 +02:00
24ee098a00 change the bytes_per_pixels_ field into more evocative use_8b_decode
+ split AllocateInternalBuffers() into two 32b/8b variants instead of
trying to do everything in one function.

Change-Id: I35cac9fcd990a2194c95da4b2a4046ca3a514343
2013-06-18 17:24:14 +02:00
2a04b034f7 update ChangeLog
Change-Id: Idea3464bbcb28896179c99488e7b96a4341b508a
2013-06-17 16:53:22 -07:00
7288950b88 Regression fix for alpha channels using color cache:
Considering the fact that insert to/lookup from the color cache is always 32
bit, use DecodeImageData() variant in that case.

Conflicts:
	src/dec/vp8l.c

Change-Id: I6c665a6cfbd9bd10651c1e82fa54e687cbd54a2b
(cherry picked from commit a37eff47d6)
2013-06-17 16:29:09 -07:00
2e377b53b0 wicdec: silence a format warning
from x86_64-w64-mingw32-gcc

examples/wicdec.c: In function ‘ExtractICCP’:
examples/wicdec.c:131:21: warning: format ‘%u’ expects argument of type
‘unsigned int’, but argument 4 has type ‘size_t’ [-Wformat]

Change-Id: I6642dae62265a2276ae9ac96dd8ce6f1e2d37ca5
(cherry picked from commit ffae9f31e8)
2013-06-17 15:43:33 -07:00
ad9e42a6fe muxedit: silence some uninitialized warnings
src/mux/muxedit.c:490: warning: 'x_offset' may be used uninitialized in this function
src/mux/muxedit.c:490: warning: 'y_offset' may be used uninitialized in this function

Change-Id: I4fd27f717e59a556354d0560b633d0edafe7a4d8
(cherry picked from commit 14cd5c6c40)
2013-06-17 15:43:33 -07:00
3307c16327 Don't set alpha-channel to 0xff for alpha->green uplift
usually saves ~4 bytes on average (but, up to 10 -or even 16- sometimes).

Change-Id: Ib500e1a35471a2f3da453ffc8c7e95d28b8d34fe
2013-06-18 00:03:32 +02:00
5130770cd3 Merge "wicdec: silence a format warning" 2013-06-17 14:45:01 -07:00
a37eff47d6 Regression fix for alpha channels using color cache:
Considering the fact that insert to/lookup from the color cache is always 32
bit, use DecodeImageData() variant in that case.

Change-Id: I6c665a6cfbd9bd10651c1e82fa54e687cbd54a2b
2013-06-17 03:49:30 -07:00
241cf99bc0 Merge "muxedit: silence some uninitialized warnings" 2013-06-16 23:23:20 -07:00
c8f9c84d5e Regression fix for alpha unfiltering:
For vertical filter, correctly initialize 'preds' when not starting at
row 0.

Change-Id: I69697d73dde8a2f701f376ff3986d09fa6f7fee3
2013-06-16 02:00:44 -07:00
14cd5c6c40 muxedit: silence some uninitialized warnings
src/mux/muxedit.c:490: warning: 'x_offset' may be used uninitialized in this function
src/mux/muxedit.c:490: warning: 'y_offset' may be used uninitialized in this function

Change-Id: I4fd27f717e59a556354d0560b633d0edafe7a4d8
2013-06-15 14:30:22 -07:00
a368db81c6 dec/vp8l: quiet vs9 x64 type conversion warning
src\dec\vp8l.c(816) : warning C4244: '=' : conversion from '__int64' to
'int', possible loss of data
src\dec\vp8l.c(817) : warning C4244: '=' : conversion from '__int64' to
'int', possible loss of data

Change-Id: I1d376d5dea909395bff8741aba16e8eed83a6e8f
2013-06-14 19:24:54 -07:00
ffae9f31e8 wicdec: silence a format warning
from x86_64-w64-mingw32-gcc

examples/wicdec.c: In function ‘ExtractICCP’:
examples/wicdec.c:131:21: warning: format ‘%u’ expects argument of type
‘unsigned int’, but argument 4 has type ‘size_t’ [-Wformat]

Change-Id: I6642dae62265a2276ae9ac96dd8ce6f1e2d37ca5
2013-06-14 18:44:59 -07:00
8cf0701eb0 Alpha encoding: never filter in case of NO_COMPRESSION
This is because, filtering would never affect compression density for
this case.

Change-Id: I4bb14d3eb7da0a3805fda140eb1dfbf9ccc134f5
2013-06-14 13:34:25 -07:00
825e73b1a6 update ChangeLog
Change-Id: I2e67e5d80cb8f8e4453cee45144bcc01e9a3efeb
2013-06-12 23:32:59 -07:00
abf6f6915f update NEWS
Change-Id: Ie831ecc6995679acaf238686e2f287bfaa0221a7
2013-06-12 23:25:09 -07:00
5a92c1a5e9 bump version to 0.3.1
libwebp{,decoder} - 0.3.1
libwebp libtool - 4.3.0 (compatible release)
libwebpdecoder libtool - 0.1.0 (compatible release)

mux/demux - 0.1.1
libtool - 0.1.0 (compatible release)

Change-Id: Icc8329a6bcd9eea5a715ea83f1535a66d6ba4b58
2013-06-12 23:19:13 -07:00
86daf77c47 store top Y/U/V samples in packed fashion
More cache-line friendly than storing them split.

Change-Id: Ifb23cc3518ff1b5c37afe007558d4278868d75ea
2013-06-13 06:01:27 +02:00
67bc353e6d Revert "add WebPBlendAlpha() function to blend colors against background"
This reverts commit dcbb1ca54a.

Dropping this for now to maintain compatibility for 0.3.1.

Change-Id: I44e032a072d317bb67e1439c42cff923e603038f
2013-06-12 15:02:09 -07:00
068db59e26 Intertwined decoding of alpha and RGB
This will reduce the time to first decoded pixel.

Change-Id: I07b900c0ed4af3aac806b2731e11cd18ec16d016
2013-06-12 14:10:59 -07:00
38cc011408 Simplify forward-WHT + SSE2 version
no precision loss observed
speed is not really faster (0.5% at max), as forward-WHT isn't called often.

also: replaced an "int << 3" (undefined by the C spec) with an "int * 8"
( supersedes https://gerrit.chromium.org/gerrit/#/c/48739/ )

Change-Id: I2d980ec2f20f4ff6be5636105ff4f1c70ffde401
(cherry picked from commit 9c4ce971a8)
2013-06-12 13:48:11 -07:00
3fa595a571 Support decoding upto given row in DECODE_DATA_FUNC
This is in preparation for supporting Intertwined decoding of RGB and
alpha.

Change-Id: Ie1905d1212f4a00718a64dca020be3f4b1b870ec
2013-06-11 16:00:58 -07:00
520f005f85 DequantizeLevels(): Add 'row' and 'num_rows' args
Change-Id: I0032e25c30f34362105d942f2dbb9ac75200f911
2013-06-11 15:59:21 -07:00
47374b8273 Alpha unfilter for given set of rows
Support reconstruction of small number of rows at a time.

Change-Id: Ief1bc78c7ad011ec6df856551f3beb5f907fd8e0
2013-06-11 15:57:42 -07:00
f32097e0df probe input file and quick-check for WebP format.
Error message is clearer than 'can't create demux object'.

Change-Id: Iec008601892f7cd8399e1948751747ac23305eef
(cherry picked from commit 830f72b7e9)
2013-06-11 15:03:23 -07:00
a2aed1d08c configure: improve gl/glut library test
add a check for a libGL function (glOrtho) in addition to glutMainLoop
when establishing the need for libGL at link time.

fixes vwebp link failure on ubuntu 13.04+

Change-Id: I537e9a5cab5cf4cd8875e06268d2107f377e625e
(cherry picked from commit 2ccf58d648)
2013-06-11 15:03:23 -07:00
c7e89cbb02 update copyright text
rather than symlink the webm/vpx terms, use the same header as libvpx to
reference in-tree files

based on the discussion in:
https://codereview.chromium.org/12771026/

Change-Id: Ia3067ecddefaa7ee01550136e00f7b3f086d4af4
(cherry picked from commit d640614d54)
2013-06-11 15:03:22 -07:00
a00380d2ed configure: remove use of AS_VAR_APPEND
This wasn't used often and benefits were likely minimal. Dropping it
outright is a bit simpler than adding a compatibility ifdef.

provides some compatibility with older versions of autoconf.
tested with autoconf 2.59/automake 1.7/aclocal 1.7

Change-Id: Ifed892346cf2329597985704830a96fc58d65607
(cherry picked from commit 9326a56f8d)
2013-06-11 15:03:22 -07:00
a94a88dd62 fix EXIF parsing in PNG
'exiftool' puts an 'APP1' chunk for exif, e.g.:
https://metacpan.org/source/EXIFTOOL/Image-ExifTool-5.87/lib/Image/ExifTool/PNG.pm#L305

Change-Id: I313d3e6945898526b8a4baf3d9016a2591a1a817
(cherry picked from commit bec11092ca)
2013-06-11 15:03:22 -07:00
a71e5d84e9 add doc precision for WebPPictureCopy() and WebPPictureView()
output picture object is overwritten, not free'd or destroyed.

Change-Id: Ibb47ab444063e7ad90ff3d296260807ffe7ddbf9
(cherry picked from commit 23d28e216d)
2013-06-11 15:03:21 -07:00
8287012ec7 remove datatype qualifier for vmnv
this fix is for clang (LLVM v4.2). gcc was fine.

Change-Id: Id4076cda84813f6f9548a01775b094cff22b4be9
(cherry picked from commit 3fe91635df)
2013-06-11 15:03:21 -07:00
e190843029 fix a memory leak in gif2webp
(rgba->yuv allocates memory)
Also fixed a few warnings and cleaned up the code.

Change-Id: Id904ad3ad8802ea9fc3d34247d27193dfa7b0b99
(cherry picked from commit 764fdffaac)
2013-06-11 15:03:21 -07:00
0b18b9eef6 fix two minor memory leaks in webpmux
(only occur in case of error)

Change-Id: Icab69bb364b77f8eae6cae91047354c27e610602
(cherry picked from commit 3e59a74d72)
2013-06-11 15:03:21 -07:00
db5095d5b6 remove some cruft from swig/libwebp.jar
picked up a few unnecessary classes from a dirty tree in the last commit

Change-Id: I98be16a0bc8716476ce440da542d113f254aee78
(cherry picked from commit 325d15ff30)
2013-06-11 15:03:21 -07:00
850e956f9b README: update swig notes
add python, required version notes

Change-Id: Iec2e94075f6cf54455ce5a658f9b7258109f4d01
(cherry picked from commit 4a7627c215)
2013-06-11 15:03:21 -07:00
bddd9b0a93 swig/python: add minimal documentation
uses autodoc to display the function arguments rather than the
inscrutable va_args (*args).

Change-Id: Iec2ff8276c1533b14c3032836d822fbdae632521
(cherry picked from commit 825b64db53)
2013-06-11 15:00:48 -07:00
d573a8d53f swig: add python encode support
wraps the simple interface similar to java.

Change-Id: Ib922bbcae322b2345b6dce5dee08faad705a77fd
(cherry picked from commit 14677e11d4)
2013-06-11 15:00:47 -07:00
6b931875ac swig/java: reduce wrapper function code duplication
define a macro to emit the wrapper code

Change-Id: I672416016162d6d9ce6f455d224044e0837e3ace
(cherry picked from commit a5c297c842)
2013-06-11 15:00:47 -07:00
6fe536f4ba swig/java: rework uint8_t typemap
reuse the declarations from arrays_java.i for signed char to make an
explicit uint8_t mapping. this avoids sign conversion build warnings.

Change-Id: Icfb5b865cf1fd404e89f2cd889111f0a94e3c604
(cherry picked from commit ad4a367dba)
2013-06-11 15:00:47 -07:00
a2ea46439e Fix the bug in ApplyPalette.
The auto-infer logic for detecting the 'Alpha' use case
(via the check '(palette[i] & 0x00ff00ffu) != 0') fails
for this corner-case image with all black pixels (rgb = 0)
and differing Alpha values.

-> switch to generic use-LUT detection

Change-Id: I982a8b28c8bcc43e3dc68ac358f978a4bcc14c36
(cherry picked from commit afa3450c11)
2013-06-11 15:00:47 -07:00
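
A small, self-contained illustration of why the old heuristic misfires on such an image (the palette values below are made up for the example):

    #include <stdio.h>
    #include <stdint.h>

    int main(void) {
      /* all-black pixels that differ only in alpha: no red/blue bits are set */
      const uint32_t palette[] = { 0x00000000u, 0x40000000u,
                                   0x80000000u, 0xff000000u };
      int i, uses_red_or_blue = 0;
      for (i = 0; i < 4; ++i) {
        if ((palette[i] & 0x00ff00ffu) != 0) uses_red_or_blue = 1;
      }
      printf("%d\n", uses_red_or_blue);  /* prints 0: the check never fires */
      return 0;
    }
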
7bb28d2a55 webp/lossless: fix big endian BGRA output
Change-Id: I3d4b3d21f561cb526dbe7697a31ea847d3e8b2c1
(cherry picked from commit 2ca83968ae)
2013-06-11 15:00:47 -07:00
f036d4bfa6 Speed up ApplyPalette for ARGB pixels.
Added a 1-pixel cache for palette colors for faster lookup.
This speeds up images that require ApplyPalette by 6.5% for lossless
compression.

Change-Id: Id0c5174d797ffabdb09905c2ba76e60601b686f8
(cherry picked from commit 742110ccce)
2013-06-11 15:00:46 -07:00
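
A minimal sketch of the one-pixel cache idea (the function name and signature are hypothetical, not the actual ApplyPalette code): remembering the last color looked up lets runs of identical pixels skip the palette search entirely.

    #include <stdint.h>

    /* assumes 'color' is always present in the palette */
    static int FindPaletteIndex(uint32_t color, const uint32_t palette[],
                                int palette_size,
                                uint32_t* last_color, int* last_index) {
      int i;
      if (color == *last_color) return *last_index;   /* 1-pixel cache hit */
      for (i = 0; i < palette_size; ++i) {
        if (palette[i] == color) break;               /* linear search on miss */
      }
      *last_color = color;
      *last_index = i;
      return i;
    }
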
8112c8cf54 remove some warnings:
* "declaration of ‘index’ shadows a global declaration [-Wshadow]"
* "signed and unsigned type in conditional expression [-Wsign-compare]"

Change-Id: I891182d919b18b6c84048486e0385027bd93b57d
(cherry picked from commit 87a4fca25f)
2013-06-11 15:00:46 -07:00
cc128e0bfc Further reduce memory to decode lossy+alpha images
Earlier, such images were using roughly 9 * width * height bytes for
decoding. Now they take 6 * width * height bytes.

Change-Id: Ie4a681ca5074d96d64f30b2597fafdca648dd8f7
(cherry picked from commit 64c844863a)
2013-06-11 15:00:46 -07:00
07db70d20f fix for big-endian
(Issue #150: https://code.google.com/p/webp/issues/detail?id=150)

Change-Id: Iad46d375a8c5eabae37cde8f55b3e7448601f264
(cherry picked from commit 4437061735)
2013-06-11 15:00:46 -07:00
eda8a7dec5 gif2webp: Fix signed/unsigned comparison mismatch
Change-Id: I355f0614424276550db71b24e5bb1948e5c6894c
(cherry picked from commit 043e1ae4bd)
2013-06-11 15:00:46 -07:00
31f346fe0c Makefile.vc: fix libwebpdemux dll variable typo
Fixes issue #149

Patch by: Jason Stevens  (cypher497 at gmail dot com)

Change-Id: I65cceaad37d22b96e5e92cb78f859fc0b7c38b67
(cherry picked from commit 3eeedae1bc)
2013-06-11 15:00:45 -07:00
6c76d28e4b swig: add python (decode) support
similar to Java, simple interface only

Change-Id: I8a3d344e5d89f73627e4e0cb2067512260d46fdd
(cherry picked from commit f980faf417)
2013-06-11 15:00:45 -07:00
b4f5bb6ca3 swig: cosmetics
normalize formatting
- update decode prototypes
- match project function name style

Change-Id: Ib481b5602171b72dbb1a5d462e6d5166e9b8566e
(cherry picked from commit 7f5f42bb36)
2013-06-11 15:00:45 -07:00
498d4dd634 WebP-Lossless encoding improvements.
Lossy (with Alpha) image compression gets a 2.3X speedup.
Compressing lossless images is 20%-40% faster now.

Change-Id: I41f0225838b48ae5c60b1effd1b0de72fecb3ae6
(cherry picked from commit 8eae188a62)
2013-06-11 15:00:45 -07:00
26e7244221 swig: ifdef some Java specific code
no implementation change

Change-Id: I077c707e1f6293188e6fa11ba24757009a709f77
(cherry picked from commit c7247c4c68)
2013-06-11 15:00:45 -07:00
8ecec68652 configure: add warning related flags
adds TEST_AND_ADD_CFLAGS function
uses AM_CFLAGS to allow CFLAGS override

Change-Id: I9352aec6e5d905a41d832bf5ad0c8dcd154f7e97
(cherry picked from commit bba4c2b2a6)
2013-06-11 15:00:44 -07:00
e676b04309 configure: add GLUT detection; build vwebp
Change-Id: I7f0964db2d04c22ff9ec274e8cd1cbed7379a165
(cherry picked from commit 0e513f7ae3)
2013-06-11 15:00:44 -07:00
b0ffc43700 Alpha decoding: significantly reduce memory usage
Simply get rid of an intermediate buffer of size width x height, by
using the fact that stride == width in this case.

Change-Id: I92376a2561a3beb6e723e8bcf7340c7f348e02c2
(cherry picked from commit edccd19436)
2013-06-11 15:00:44 -07:00
20aa7a8dd5 configure: add --enable-everything
Change-Id: Ie1b3abd42459de7f789fe985759c465c2a196727
(cherry picked from commit 3cafcc9a8d)
2013-06-11 15:00:44 -07:00
b8307cc08b configure.ac: add some helper macros
library check related variable maintenance -> *_INCLUDES / *_LIBS

CLEAR_LIBVARS / LIBCHECK_PROLOGUE / LIBCHECK_EPILOGUE

Change-Id: I72e292dc1f69b02f69a26639308f247db0471e2b
(cherry picked from commit 4ef1447792)
2013-06-11 15:00:44 -07:00
980e7ae951 Remove the gcc compilation comments
They went out of sync some time ago and are
no longer really required, since the same targets
can be built from makefile.unix.

Change-Id: Ica2dcf5c55f44365598f832f55204d123d7aa601
(cherry picked from commit a4e1cdbbe8)
2013-06-11 15:00:44 -07:00
7f25ff99fd gif2webp: Fix ICC and XMP support
Change-Id: Ib5aafef388bd191610e4cc2f8180f35cd454f1d3
(cherry picked from commit b26e5ad540)
2013-06-11 15:00:43 -07:00
d8e5321144 Add missing name to AUTHORS
Change-Id: I00092e5bb676b48abc05b94080b589b48c911c82
(cherry picked from commit 46089b207d)
2013-06-11 15:00:43 -07:00
11edf5e24b Demux: Fix a potential memleak
Change-Id: Ic0dcac010da088b791c130be4abacdd8c31e92cf
(cherry picked from commit 94328d6457)
2013-06-11 15:00:43 -07:00
c7b92184df don't forward declare enums
doing so is not part of ISO C; removes some pedantic warnings

Change-Id: I739ad8c5cacc133e2546e9f45c0db9d92fb93d7e
(cherry picked from commit 96e948d7b0)
2013-06-11 15:00:43 -07:00
7a650c6ad6 prevent signed int overflow in left shift ops
force unsigned when shifting by 24.

Change-Id: Ie229d252e2e4078107cd705b09397e686a321ffd
(cherry picked from commit f4f90880a8)
2013-06-11 15:00:43 -07:00
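
For context, a small self-contained example of the pattern being fixed (a hypothetical helper, not the patched code): without the cast, the uint8_t operand is promoted to int, and shifting it into the top byte overflows signed int.

    #include <stdint.h>

    static uint32_t MakeARGB(uint8_t a, uint8_t r, uint8_t g, uint8_t b) {
      /* (uint32_t)a forces unsigned arithmetic; 'a << 24' alone promotes 'a'
       * to int and overflows (undefined behaviour) whenever a >= 0x80. */
      return ((uint32_t)a << 24) | ((uint32_t)r << 16) |
             ((uint32_t)g << 8) | (uint32_t)b;
    }
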
31bea32408 add precision about dynamic output reallocation with IDecoder
The output surface CAN be changed in between calls to
WebPIUpdate() or WebPIAppend(), but with precautions.

Change-Id: I899afbd95738a6a8e0e7000f8daef3e74c99ddd8
(cherry picked from commit ff885bfe1f)
2013-06-11 15:00:43 -07:00
c22877f70f Add incremental support for extended format files
This applies to images with optional chunks (e.g. images with an ALPH chunk,
an ICCP chunk, etc.). Before this, incremental decoding used to work like
non-incremental decoding for such files, that is, no rows were decoded
until all data was available.

The change is in 2 parts:
- During optional chunk parsing, don't wait for the full VP8/VP8L chunk.
- Remap 'alpha_data' pointer whenever a new buffer is allocated/used in
WebPIAppend() and WebPIUpdate().

Change-Id: I6cfd6ca1f334b9c6610fcbf662cd85fa494f2a91
(cherry picked from commit ead4d47859)
2013-06-11 15:00:42 -07:00
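
As a reminder of how the incremental API is driven, a minimal sketch using the public decode.h interface (error handling and pixel retrieval are elided; with this change, rows of extended-format files become available while data is still arriving):

    #include <stdio.h>
    #include <webp/decode.h>

    static int DecodeIncrementally(FILE* in) {
      WebPIDecoder* const idec = WebPINewDecoder(NULL);  /* internal output buffer */
      uint8_t chunk[4096];
      size_t len;
      VP8StatusCode status = VP8_STATUS_NOT_ENOUGH_DATA;
      if (idec == NULL) return 0;
      while ((len = fread(chunk, 1, sizeof(chunk), in)) > 0) {
        status = WebPIAppend(idec, chunk, len);
        if (status != VP8_STATUS_SUSPENDED && status != VP8_STATUS_OK) break;
      }
      WebPIDelete(idec);
      return status == VP8_STATUS_OK;
    }
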
5051245f3a Makefile.vc: have 'all' target build everything
default is still the core examples as makefile.unix

Change-Id: Ica3fe6123f4359aefa130b39d2b0739b65e34c0b
(cherry picked from commit 69d0f92658)
2013-06-11 15:00:42 -07:00
8191decae9 Makefile.vc: flags cleanup
- drop some unnecessary link flags
- use lib.exe directly for creating libraries
- factorize /nologo and use it consistently

Change-Id: Ie76119bc051e9bc53e4d6bba1a0a3f124f9062fc
(cherry picked from commit 52967498b3)
2013-06-11 15:00:42 -07:00
b9d747351a Makefile.vc: drop /FD flag
breaks under wine; from MSDN:
/FD is only used by the development environment, and it should not be
used from the command line or a build script.

Change-Id: I180c9813e721b163cc645b9b7f14fe36556019d3
(cherry picked from commit c61baf0c10)
2013-06-11 15:00:42 -07:00
5568dbcfe6 update gitignore
*.a, new examples and new automake-1.12 file (ar-lib)

Change-Id: I28d7bc59a2977a7c5959940936e3d13a71dd149c
(cherry picked from commit 3a15125d2f)
2013-06-11 15:00:42 -07:00
f4c7b6547b WebPEncode: An additional check.
Start VP8EncLoop/VP8EncTokenLoop only if VP8EncStartAlpha succeeded.

Change-Id: Id1faca3e6def88102329ae2b4974bd4d6d4c4a7a
(cherry picked from commit 67708d6701)
2013-06-11 15:00:42 -07:00
1fb04bec99 pngdec: Avoid a double-free.
Earlier, at line #275, if ok == 0, it would have triggered a double free
of 'rgb'.

Change-Id: Iaee1f35824a66f6e4b488e523416f73b87c5ec30
(cherry picked from commit b68912af2c)
2013-06-11 15:00:42 -07:00
dcbb1ca54a add WebPBlendAlpha() function to blend colors against background
new option: -blend_alpha 0xrrggbb
also: don't force picture.use_argb value for lossless. Instead,
delay the YUVA<->ARGB conversion till WebPEncode() is called.
This makes the blending more accurate when the source is ARGB
and lossy compression is used (YUVA).
This has an effect on cropping/rescaling. E.g. for PNG, these
are now done in ARGB colorspace instead of YUV when lossy compression
is used.

Change-Id: I18571f1b1179881737a8dbd23ad0aa8cddae3c6b
(cherry picked from commit e7d9548c9b)
2013-06-11 15:00:41 -07:00
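
For reference, blending a pixel against an opaque background color is standard alpha compositing; a minimal per-channel sketch (an illustration, not the actual WebPBlendAlpha() implementation):

    #include <stdint.h>

    /* result = alpha * foreground + (1 - alpha) * background, in 8-bit math */
    static uint8_t BlendChannel(uint8_t fg, uint8_t bg, uint8_t alpha) {
      return (uint8_t)((fg * alpha + bg * (255 - alpha) + 127) / 255);
    }
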
bc9f5fbe0f configure.ac: add AM_PROG_AR for automake >= 1.12
fixes:
automake-1.12/am/ltlibrary.am: warning: 'libwebp.la': linking libtool libraries using a non-POSIX
automake-1.12/am/ltlibrary.am: archiver requires 'AM_PROG_AR' in 'configure.ac'

Change-Id: I223f93e5f075aaf23cfefceef55e2ab8eeb34ccd
(cherry picked from commit ed4dc71769)
2013-06-11 15:00:41 -07:00
bf867bf296 Tuned cross_color parameter (step) for lower qual
Tuned the cross_color transform parameter (step) for lower quality
levels. This change gives a speedup of 20% at lower qualities (25) and 10% at
the moderate quality level (50), with a loss of 0.25% in compression density.
Also removed the TODO for the cross_color transform; observed good correlation
of this transform with the predict transform.

Change-Id: I8a1044e9f24e6a5f84295c030fd444d0eec7d154
2013-06-11 12:15:07 -07:00
90e2ec5a1b Merge "probe input file and quick-check for WebP format." 2013-06-10 12:29:58 -07:00
7180d7ffbd Merge "update copyright text" 2013-06-10 12:23:44 -07:00
830f72b7e9 probe input file and quick-check for WebP format.
Error message is clearer than 'can't create demux object'.

Change-Id: Iec008601892f7cd8399e1948751747ac23305eef
2013-06-10 05:46:22 -07:00
2ccf58d648 configure: improve gl/glut library test
add a check for a libGL function (glOrtho) in addition to glutMainLoop
when establishing the need for libGL at link time.

fixes vwebp link failure on ubuntu 13.04+

Change-Id: I537e9a5cab5cf4cd8875e06268d2107f377e625e
2013-06-07 20:15:53 -07:00
d640614d54 update copyright text
rather than symlink the webm/vpx terms, use the same header as libvpx to
reference in-tree files

based on the discussion in:
https://codereview.chromium.org/12771026/

Change-Id: Ia3067ecddefaa7ee01550136e00f7b3f086d4af4
2013-06-06 23:09:14 -07:00
c2113ad4f9 Merge "configure: remove use of AS_VAR_APPEND" 2013-06-06 00:23:29 -07:00
9326a56f8d configure: remove use of AS_VAR_APPEND
This wasn't used often and benefits were likely minimal. Dropping it
outright is a bit simpler than adding a compatibility ifdef.

provides some compatibility with older versions of autoconf.
tested with autoconf 2.59/automake 1.7/aclocal 1.7

Change-Id: Ifed892346cf2329597985704830a96fc58d65607
2013-06-06 00:14:02 -07:00
ea63d61937 fix a type warning on VS9 x86
"warning C4244: 'function' : conversion from 'uint64_t' to 'size_t', possible loss of data"

Change-Id: Ibd9f6a24993518d658d08127d616a17d7b99e0e4
2013-06-05 10:14:04 +02:00
bec11092ca fix EXIF parsing in PNG
'exiftool' puts an 'APP1' chunk for exif, e.g.:
https://metacpan.org/source/EXIFTOOL/Image-ExifTool-5.87/lib/Image/ExifTool/PNG.pm#L305

Change-Id: I313d3e6945898526b8a4baf3d9016a2591a1a817
2013-06-05 10:07:48 +02:00
b6e65f3d35 Merge "fix warnings for vs9 x64" 2013-06-03 22:27:03 -07:00
438946dcc6 fix warnings for vs9 x64
Change-Id: Id022d48a0a898a8bf7dce437b078da6c98afd75c
2013-06-01 22:48:08 -07:00
f4710e3b89 collect macroblock reconstruction data in VP8MBData struct
This is to better separate bitstream parsing from reconstruction.

Change-Id: I872b58e9940c4b14f72ebee50fba545468ff754c
2013-05-31 22:38:12 +02:00
23d28e216d add doc precision for WebPPictureCopy() and WebPPictureView()
output picture object is overwritten, not free'd or destroyed.

Change-Id: Ibb47ab444063e7ad90ff3d296260807ffe7ddbf9
2013-05-31 10:41:19 +02:00
518f2cd738 cosmetics: gif2webp: fix indent
Change-Id: I9b7aaefb33b4101bcb572577ce17fb3953599c2b
2013-05-29 16:00:05 -07:00
af358e68ed Merge "remove datatype qualifier for vmnv" 2013-05-23 06:12:06 -07:00
3fe91635df remove datatype qualifier for vmnv
this fix is for clang (LLVM v4.2). gcc was fine.

Change-Id: Id4076cda84813f6f9548a01775b094cff22b4be9
2013-05-23 13:52:24 +02:00
764fdffaac fix a memory leak in gif2webp
(rgba->yuv allocates memory)
Also fixed a few warnings and cleaned up the code.

Change-Id: Id904ad3ad8802ea9fc3d34247d27193dfa7b0b99
2013-05-22 23:49:24 +02:00
3e59a74d72 fix two minor memory leaks in webpmux
(only occur in case of error)

Change-Id: Icab69bb364b77f8eae6cae91047354c27e610602
2013-05-22 00:58:53 +02:00
47b9862f30 Merge "README: update swig notes" 2013-05-21 15:09:30 -07:00
325d15ff30 remove some cruft from swig/libwebp.jar
picked up a few unnecessary classes from a dirty tree in the last commit

Change-Id: I98be16a0bc8716476ce440da542d113f254aee78
2013-05-21 15:01:15 -07:00
4a7627c215 README: update swig notes
add python, required version notes

Change-Id: Iec2e94075f6cf54455ce5a658f9b7258109f4d01
2013-05-21 14:58:32 -07:00
5da81e3383 Merge "swig/python: add minimal documentation" 2013-05-21 14:49:46 -07:00
f39e08f2e3 Merge "swig: add python encode support" 2013-05-21 14:48:36 -07:00
6ca4a3e385 Merge "swig/java: reduce wrapper function code duplication" 2013-05-21 14:44:16 -07:00
8f8702b034 Merge "swig/java: rework uint8_t typemap" 2013-05-21 14:42:58 -07:00
91413be2f9 reduce memory for VP8MB and remove bitfields use
~1% faster on ARMv7

Change-Id: I3e3524e0c25ebd31a04269aae0d304932f1a781a
2013-05-21 23:20:04 +02:00
7413394e7f Fix the memory leak in ApplyFilters.
Change-Id: Iba1b1adf3088ea9c43e4f602a93e77450f6c6170
2013-05-21 14:00:26 -07:00
2053c2cff2 simplify the alpha-filter testing loop
Change-Id: Iacebae749c37edc87a3c94c76cd589a2565ee642
2013-05-21 10:29:27 +02:00
825b64db53 swig/python: add minimal documentation
uses autodoc to display the function arguments rather than the
inscrutable va_args (*args).

Change-Id: Iec2ff8276c1533b14c3032836d822fbdae632521
2013-05-20 17:14:51 -07:00
14677e11d4 swig: add python encode support
wraps the simple interface similar to java.

Change-Id: Ib922bbcae322b2345b6dce5dee08faad705a77fd
2013-05-20 17:14:50 -07:00
a5c297c842 swig/java: reduce wrapper function code duplication
define a macro to emit the wrapper code

Change-Id: I672416016162d6d9ce6f455d224044e0837e3ace
2013-05-20 17:13:27 -07:00
ad4a367dba swig/java: rework uint8_t typemap
reuse the declarations from arrays_java.i for signed char to make an
explicit uint8_t mapping. this avoids sign conversion build warnings.

Change-Id: Icfb5b865cf1fd404e89f2cd889111f0a94e3c604
2013-05-20 13:16:14 -07:00
0d25876bad use uint8_t for inv_palette[]
Change-Id: I5005ce68d89bfb657d46ad8acc4368c29fa0c4fd
2013-05-18 17:13:58 +02:00
afa3450c11 Fix the bug in ApplyPalette.
The auto-infer logic for detecting the 'Alpha' use case
(via the check '(palette[i] & 0x00ff00ffu) != 0') fails
for this corner-case image with all black pixels (rgb = 0)
and differing Alpha values.

-> switch to generic use-LUT detection

Change-Id: I982a8b28c8bcc43e3dc68ac358f978a4bcc14c36
2013-05-18 17:03:18 +02:00
2d6ac422cf Merge "webp/lossless: fix big endian BGRA output" 2013-05-17 00:36:15 -07:00
2ca83968ae webp/lossless: fix big endian BGRA output
Change-Id: I3d4b3d21f561cb526dbe7697a31ea847d3e8b2c1
2013-05-17 00:32:01 -07:00
742110ccce Speed up ApplyPalette for ARGB pixels.
Added a 1-pixel cache for palette colors for faster lookup.
This speeds up images that require ApplyPalette by 6.5% for lossless
compression.

Change-Id: Id0c5174d797ffabdb09905c2ba76e60601b686f8
2013-05-16 15:44:21 -07:00
2451e47dca misc code cleanup
* remove dec->skip_
* fix some naming

(no speed diff observed)

Change-Id: I12545ef79d29dd6f893c344d8fb171b0a8c7cc46
2013-05-15 20:03:15 +02:00
83db404390 Merge "swig: add python (decode) support" 2013-05-15 00:42:36 -07:00
eeeea8b530 Merge "swig: cosmetics" 2013-05-15 00:42:08 -07:00
d5f9b8f383 Merge "libwebp: fix vp8 encoder mem alloc offsetting" 2013-05-15 00:41:14 -07:00
d8edd83551 libwebp: fix vp8 encoder mem alloc offsetting
'mem' was being offset once by DO_ALIGN() and then shifted by 'nz_size', which
would end up accounting for more than ALIGN_CST and exceeding the allocation.

broken since:
  9bf3129 align VP8Encoder::nz_ allocation

Change-Id: I04a4e0bbf80d909253ce057f8550ed98e0cf1054
2013-05-15 00:31:23 -07:00
8983b83ee1 remove use of bit-fields in VP8FInfo
(in favor of just plain uint8_t's)

Change-Id: I6187587a4d8a9f5c304a132d98ec42ce24fd244a
2013-05-15 09:21:30 +02:00
87a4fca25f remove some warnings:
* "declaration of ‘index’ shadows a global declaration [-Wshadow]"
* "signed and unsigned type in conditional expression [-Wsign-compare]"

Change-Id: I891182d919b18b6c84048486e0385027bd93b57d
2013-05-14 22:28:32 +02:00
ba8f74e229 Merge "fix for big-endian" 2013-05-14 01:58:06 -07:00
a65067fa77 Merge "Further reduce memory to decode lossy+alpha images" 2013-05-14 01:56:54 -07:00
64c844863a Further reduce memory to decode lossy+alpha images
Earlier, such images were using roughly 9 * width * height bytes for
decoding. Now they take 6 * width * height bytes.

Change-Id: Ie4a681ca5074d96d64f30b2597fafdca648dd8f7
2013-05-13 16:24:49 -07:00
332130b9b3 Mux: make a few methods static
Change-Id: I8a8b0b403116c89933e84c93502a8230026f819e
2013-05-13 13:26:33 -07:00
4437061735 fix for big-endian
(Issue #150: https://code.google.com/p/webp/issues/detail?id=150)

Change-Id: Iad46d375a8c5eabae37cde8f55b3e7448601f264
2013-05-13 10:19:29 -07:00
5199eab516 Merge "add uncompressed TIFF output support" 2013-05-13 08:59:48 -07:00
a3aede9739 add uncompressed TIFF output support
new option: 'dwebp -tiff ...'
This is a very simple uncompressed-tiff writing method.

Change-Id: Ie2182c8498bce570de3cde363abe1099e18596cb
2013-05-11 01:46:24 -07:00
f975b67f66 Merge "gif2webp: Fix signed/unsigned comparison mismatch" 2013-05-11 00:53:20 -07:00
5fbc734b90 Merge "GetFeatures: Detect invalid VP8X/VP8/VP8L data" 2013-05-11 00:51:39 -07:00
d5060c873c Merge "mux.h: A comment fix + some consistency fixes" 2013-05-11 00:41:35 -07:00
352d0dee99 GetFeatures: Detect invalid VP8X/VP8/VP8L data
This facilitates early error detection during decode/render.
Also, related refactoring.

Change-Id: Ia6c7cd91dec202a2a68dae2118f5981cf1eaa83d
2013-05-10 14:27:11 -07:00
3ef79fefec Cosmetic: "width * height"
Change-Id: I567c0d95355160a9f6721f949b38e2b8b6270b7a
2013-05-10 13:39:58 -07:00
043e1ae4bd gif2webp: Fix signed/unsigned comparison mismatch
Change-Id: I355f0614424276550db71b24e5bb1948e5c6894c
2013-05-10 13:37:20 -07:00
5818cff770 mux.h: A comment fix + some consistency fixes
Change-Id: I0aee1090322bac3ae3dabf9a48661cbb6de3ca52
2013-05-10 13:35:16 -07:00
1153f888c9 Merge "swig: ifdef some Java specific code" 2013-05-10 02:00:02 -07:00
3eeedae1bc Makefile.vc: fix libwebpdemux dll variable typo
Fixes issue #149

Patch by: Jason Stevens  (cypher497 at gmail dot com)

Change-Id: I65cceaad37d22b96e5e92cb78f859fc0b7c38b67
2013-05-09 23:49:39 -07:00
f980faf417 swig: add python (decode) support
similar to Java, simple interface only

Change-Id: I8a3d344e5d89f73627e4e0cb2067512260d46fdd
2013-05-09 23:29:35 -07:00
7f5f42bb36 swig: cosmetics
normalize formatting
- update decode prototypes
- match project function name style

Change-Id: Ib481b5602171b72dbb1a5d462e6d5166e9b8566e
2013-05-08 18:00:30 -07:00
8eae188a62 WebP-Lossless encoding improvements.
Lossy (with Alpha) image compression gets a 2.3X speedup.
Compressing lossless images is 20%-40% faster now.

Change-Id: I41f0225838b48ae5c60b1effd1b0de72fecb3ae6
2013-05-08 17:22:11 -07:00
c7247c4c68 swig: ifdef some Java specific code
no implementation change

Change-Id: I077c707e1f6293188e6fa11ba24757009a709f77
2013-05-08 17:13:40 -07:00
4cb234d5b5 Merge "Mux: make ValidateForSingleImage() method static" 2013-05-08 01:54:28 -07:00
ed6f53086b Merge "Add GetCanvasSize() method to mux" 2013-05-08 01:53:22 -07:00
1d530c9a7e Mux: make ValidateForSingleImage() method static
Change-Id: I96ac5e3be26b8e8ecd9f055501a5feb7710bc324
2013-05-07 12:57:51 -07:00
bba4c2b2a6 configure: add warning related flags
adds TEST_AND_ADD_CFLAGS function
uses AM_CFLAGS to allow CFLAGS override

Change-Id: I9352aec6e5d905a41d832bf5ad0c8dcd154f7e97
2013-05-07 12:48:12 -07:00
fffefd18c3 Add GetCanvasSize() method to mux
Change-Id: If910f5024f4c301a92e6c2e8ee9c315a103c5df7
2013-05-07 12:47:48 -07:00
732da8d0f0 Merge "configure: add GLUT detection; build vwebp" 2013-05-07 12:15:50 -07:00
0e513f7ae3 configure: add GLUT detection; build vwebp
Change-Id: I7f0964db2d04c22ff9ec274e8cd1cbed7379a165
2013-05-07 11:42:15 -07:00
55d1c150ea Merge "Alpha decoding: significantly reduce memory usage" 2013-05-07 09:14:56 -07:00
13d99fb58b Merge "configure: add --enable-everything" 2013-05-07 08:50:57 -07:00
2bf698fe49 Merge "configure.ac: add some helper macros" 2013-05-07 08:49:21 -07:00
edccd19436 Alpha decoding: significantly reduce memory usage
Simply get rid of an intermediate buffer of size width x height, by
using the fact that stride == width in this case.

Change-Id: I92376a2561a3beb6e723e8bcf7340c7f348e02c2
2013-05-02 18:24:46 -07:00
3cafcc9a8d configure: add --enable-everything
Change-Id: Ie1b3abd42459de7f789fe985759c465c2a196727
2013-05-01 18:25:00 -07:00
4ef1447792 configure.ac: add some helper macros
library check related variable maintenance -> *_INCLUDES / *_LIBS

CLEAR_LIBVARS / LIBCHECK_PROLOGUE / LIBCHECK_EPILOGUE

Change-Id: I72e292dc1f69b02f69a26639308f247db0471e2b
2013-05-01 18:24:49 -07:00
a4e1cdbbe8 Remove the gcc compilation comments
They went out of sync some time ago and are
no longer really required, since the same targets
can be built from makefile.unix.

Change-Id: Ica2dcf5c55f44365598f832f55204d123d7aa601
2013-05-01 15:01:37 -07:00
6393fe4b7c Cosmetic fixes
Change-Id: I7d5a337077ba1d0406769190fef9b47146ed8b24
2013-04-26 15:55:42 -07:00
9c4ce971a8 Simplify forward-WHT + SSE2 version
no precision loss observed
speed is not significantly faster (0.5% at most), as the forward WHT isn't called often.

also: replaced an "int << 3" (undefined by the C spec for negative values) with an "int * 8"
(supersedes https://gerrit.chromium.org/gerrit/#/c/48739/)

Change-Id: I2d980ec2f20f4ff6be5636105ff4f1c70ffde401
2013-04-26 08:57:18 +02:00
878b9da5bf fix missed optim
it's not often the case, but it can happen that chroma has non-zero
coeffs while luma doesn't. In such a case, we should skip luma right away

Change-Id: I9515573ffaec8aad8b069d2c02ffbda4a6eff97c
2013-04-25 22:46:12 +02:00
00046171a5 VP8GetInfo(): Check for zero width or height.
Change-Id: I0bf40621ed0776e1a185ad8abab5a914a3d29d69
2013-04-25 12:08:40 -07:00
9bf312938f align VP8Encoder::nz_ allocation
prevents unaligned uint32_t load/store

Change-Id: I3f5e1b434a7452f618009d5e4bbe4f3260e3e321
2013-04-25 02:55:39 -07:00
5da165cfad fix CheckMode() signature
should have been 'const VP8Decoder* const dec', but actually
we just need to pass mb_x_ and mb_y_

Change-Id: I21ca0d67ab33302d6eaa45698d53ed6c2de76981
2013-04-24 14:33:44 +02:00
0ece07dcb1 Merge "explicitly pad bitfields to 32-bits" 2013-04-22 13:13:29 -07:00
9dbc9d1909 explicitly pad bitfields to 32-bits
suggested by fbarchard@chromium; may affect alignment in some cases

Change-Id: I63552eb1a0f9e81754d30ce6e9b1cfe5748bdbc9
2013-04-22 13:00:28 -07:00
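
A minimal illustration of the idea (the struct and field names are made up for the example): padding the bitfields out to a full 32 bits makes the storage unit explicit instead of leaving it to the compiler.

    typedef struct {
      unsigned int is_intra : 1;
      unsigned int skip     : 1;
      unsigned int segment  : 3;
      unsigned int pad      : 27;   /* explicit padding up to 32 bits */
    } ExampleMBInfo;
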
5369a80fd4 Merge "prevent signed int overflow in left shift ops" 2013-04-19 00:13:16 -07:00
70e3971260 Merge "cosmetics: remove unnecessary ';'s" 2013-04-19 00:11:10 -07:00
d3136ce27d Merge "don't forward declare enums" 2013-04-19 00:10:39 -07:00
b26e5ad540 gif2webp: Fix ICC and XMP support
Change-Id: Ib5aafef388bd191610e4cc2f8180f35cd454f1d3
2013-04-18 14:19:25 -07:00
46089b207d Add missing name to AUTHORS
Change-Id: I00092e5bb676b48abc05b94080b589b48c911c82
2013-04-18 11:29:36 +02:00
94328d6457 Demux: Fix a potential memleak
Change-Id: Ic0dcac010da088b791c130be4abacdd8c31e92cf
2013-04-16 14:13:45 -07:00
96e948d7b0 don't forward declare enums
doing so is not part of ISO C; removes some pedantic warnings

Change-Id: I739ad8c5cacc133e2546e9f45c0db9d92fb93d7e
2013-04-13 11:08:42 -07:00
f4f90880a8 prevent signed int overflow in left shift ops
force unsigned when shifting by 24.

Change-Id: Ie229d252e2e4078107cd705b09397e686a321ffd
2013-04-13 10:57:31 -07:00
0261545e0b cosmetics: remove unnecessary ';'s
Change-Id: I5fefd9a5b2fe3795c2b5d785c30335b85bac0b43
2013-04-13 10:49:35 -07:00
7ebdf110af Merge "Fix few missing comparisons to NULL" 2013-04-13 10:42:51 -07:00
1579989e7b Fix few missing comparisons to NULL
Change-Id: I0d2ff8e8b507d17e80669b2b59fd5b017af995ed
2013-04-13 10:36:33 -07:00
ea1b21cfdb Cleaned up VP8GetHeaders() so that it parses only frame header
Removed a call to WebPParseHeaders() inside VP8GetHeaders(). This was not needed
anyway, as all call flows already call WebPParseHeaders() before calling
VP8GetHeaders().

This avoids duplicate calls to WebPParseHeaders().

Change-Id: Icb2d618bd26c44220d956c17a69c9c45a62d5237
2013-04-11 11:35:18 -07:00
b66caee410 dwebp: add support for BMP output
Supports alpha output too.

Change-Id: I42ea0bf7b6bbb420f0e537d372a3b13fe00528a9
2013-04-09 18:52:34 -07:00
ff885bfe1f add precision about dynamic output reallocation with IDecoder
The output surface CAN be changed in between calls to
WebPIUpdate() or WebPIAppend(), but with precautions.

Change-Id: I899afbd95738a6a8e0e7000f8daef3e74c99ddd8
2013-04-09 18:05:11 -07:00
79241d5a0d Merge "Makefile.vc: have 'all' target build everything" 2013-04-09 14:02:22 -07:00
ac1c729bd8 Merge "Makefile.vc: flags cleanup" 2013-04-08 17:21:24 -07:00
118a055c3d Merge "Makefile.vc: drop /FD flag" 2013-04-08 17:20:32 -07:00
ecad0109d5 Merge "update gitignore" 2013-04-08 17:20:02 -07:00
a681b4f4e5 Rename PRE_VP8 state to WEBP_HEADER
Also, rename state VP8_FRAME_HEADER to VP8_HEADER (to be consistent with
VP8L_HEADER).

Change-Id: Ief2d2f483e36d37f00d8d0db87026ad059f27327
2013-04-08 14:46:05 -07:00
ead4d47859 Add incremental support for extended format files
This applies to images with optional chunks (e.g. images with an ALPH chunk,
an ICCP chunk, etc.). Before this, incremental decoding used to work like
non-incremental decoding for such files, that is, no rows were decoded
until all data was available.

The change is in 2 parts:
- During optional chunk parsing, don't wait for the full VP8/VP8L chunk.
- Remap 'alpha_data' pointer whenever a new buffer is allocated/used in
WebPIAppend() and WebPIUpdate().

Change-Id: I6cfd6ca1f334b9c6610fcbf662cd85fa494f2a91
2013-04-08 14:29:14 -07:00
69d0f92658 Makefile.vc: have 'all' target build everything
default is still the core examples as makefile.unix

Change-Id: Ica3fe6123f4359aefa130b39d2b0739b65e34c0b
2013-04-05 19:21:01 -07:00
52967498b3 Makefile.vc: flags cleanup
- drop some unnecessary link flags
- use lib.exe directly for creating libraries
- factorize /nologo and use it consistently

Change-Id: Ie76119bc051e9bc53e4d6bba1a0a3f124f9062fc
2013-04-05 18:47:18 -07:00
c61baf0c10 Makefile.vc: drop /FD flag
breaks under wine; from MSDN:
/FD is only used by the development environment, and it should not be
used from the command line or a build script.

Change-Id: I180c9813e721b163cc645b9b7f14fe36556019d3
2013-04-05 14:54:05 -07:00
3a15125d2f update gitignore
*.a, new examples and new automake-1.12 file (ar-lib)

Change-Id: I28d7bc59a2977a7c5959940936e3d13a71dd149c
2013-04-05 14:51:02 -07:00
5167ca4759 Merge "WebPEncode: An additional check. Start VP8EncLoop/VP8EncTokenLoop only if VP8EncStartAlpha succeeded." 2013-04-05 12:00:46 -07:00
67708d6701 WebPEncode: An additional check.
Start VP8EncLoop/VP8EncTokenLoop only if VP8EncStartAlpha succeeded.

Change-Id: Id1faca3e6def88102329ae2b4974bd4d6d4c4a7a
2013-04-05 11:33:44 -07:00
b68912af2c pngdec: Avoid a double-free.
Earlier, at line #275, if ok == 0, it would have triggered a double free
of 'rgb'.

Change-Id: Iaee1f35824a66f6e4b488e523416f73b87c5ec30
2013-04-05 11:24:59 -07:00
82abbe12fb Merge "configure.ac: add AM_PROG_AR for automake >= 1.12" 2013-04-04 20:09:00 -07:00
e7d9548c9b add WebPBlendAlpha() function to blend colors against background
new option: -blend_alpha 0xrrggbb
also: don't force picture.use_argb value for lossless. Instead,
delay the YUVA<->ARGB conversion till WebPEncode() is called.
This makes the blending more accurate when the source is ARGB
and lossy compression is used (YUVA).
This has an effect on cropping/rescaling. E.g. for PNG, these
are now done in ARGB colorspace instead of YUV when lossy compression
is used.

Change-Id: I18571f1b1179881737a8dbd23ad0aa8cddae3c6b
2013-04-02 19:14:30 -07:00
ed4dc71769 configure.ac: add AM_PROG_AR for automake >= 1.12
fixes:
automake-1.12/am/ltlibrary.am: warning: 'libwebp.la': linking libtool libraries using a non-POSIX
automake-1.12/am/ltlibrary.am: archiver requires 'AM_PROG_AR' in 'configure.ac'

Change-Id: I223f93e5f075aaf23cfefceef55e2ab8eeb34ccd
2013-04-02 18:45:02 -07:00
df4a406d8d Merge branch '0.3.0'
* 0.3.0: (65 commits)
  Update ChangeLog
  Cosmetic fixes
  misc style fix
  add missing YUVA->ARGB automatic conversion in WebPEncode()
  Container spec: Clarify frame disposal
  container doc: add a note about the 'ANMF' payload
  Container spec: clarify the background color field
  container doc: move RIFF description to own section
  libwebp/mux: fix double free
  use WebPDataCopy() instead of re-coding it.
  demux: keep a frame tail pointer; used in AddFrame
  add doc precision about WebPParseHeaders() return codes
  gif2webp: Bgcolor fix for a special case
  fix bad saturation order in QuantizeBlock
  vwebp/animation: fix background dispose
  Makefile.vc: fix dynamic builds
  update ChangeLog
  examples: don't use C99 %zu
  update ChangeLog
  update NEWS
  ...

Conflicts:
	src/webp/format_constants.h

Change-Id: Ie659644d3ea5592cde64ec3af90a00cd17838247
2013-04-01 19:11:52 -07:00
e9a7990bc5 Cosmetic fixes
Change-Id: Ia878115086edc3fdfee3f0ca76e5e74ea5906f21
2013-03-29 14:21:56 -07:00
2aaa423b20 Merge "add missing YUVA->ARGB automatic conversion in WebPEncode()" 2013-03-29 11:39:04 -07:00
07d87bda1b add missing YUVA->ARGB automatic conversion in WebPEncode()
user can now call WebPEncode() with any YUVA or ARGB format, for
lossy or lossless compression

also: simplified error reporting, which is done in WebPPictureARGBToYUVA()
and WebPPictureYUVAToARGB()

Change-Id: Ifb68909217175bcf5a050e5c68d06de9849468f7
2013-03-29 04:31:37 -07:00
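
For context, a minimal sketch of handing an ARGB source straight to the encoder through the public encode.h API (most error handling elided; 'writer' is assumed to have been set up with WebPMemoryWriterInit()):

    #include <webp/encode.h>

    static int EncodeRGBA(const uint8_t* rgba, int width, int height, int stride,
                          WebPMemoryWriter* writer) {
      WebPConfig config;
      WebPPicture pic;
      int ok;
      if (!WebPConfigInit(&config) || !WebPPictureInit(&pic)) return 0;
      pic.width = width;
      pic.height = height;
      pic.use_argb = 1;                      /* keep the source as ARGB */
      if (!WebPPictureImportRGBA(&pic, rgba, stride)) return 0;
      pic.writer = WebPMemoryWrite;
      pic.custom_ptr = writer;
      ok = WebPEncode(&config, &pic);        /* converts to YUVA internally if needed */
      WebPPictureFree(&pic);
      return ok;
    }
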
142c46291e misc style fix
Change-Id: Ib764cb09bd78ab6e72c60f495d55b752ad4dbe4d
2013-03-29 03:13:43 -07:00
145 changed files with 14761 additions and 4259 deletions

.gitignore

@@ -1,8 +1,9 @@
*.l[ao]
*.o
*.[ao]
.deps
.libs
/aclocal.m4
/ar-lib
/autom4te.cache
/compile
/config.*
@@ -16,7 +17,9 @@
/stamp-h1
Makefile
Makefile.in
examples/[cd]webp
examples/[cdv]webp
examples/gif2webp
examples/webpmux
/output
/doc/output
*.idb

.mailmap

@@ -1,6 +1,7 @@
<johann.koenig@duck.com> <johannkoenig@google.com>
Mikołaj Zalewski <mikolajz@google.com>
Pascal Massimino <pascal.massimino@gmail.com>
<pascal.massimino@gmail.com> <skal@google.com>
Vikas Arora <vikasa@google.com>
<vikasa@google.com> <vikasa@gmail.com>
<vikasa@google.com> <vikaas.arora@gmail.com>

AUTHORS

@@ -1,4 +1,6 @@
Contributors:
- Charles Munger (clm at google dot com)
- Christian Duvivier (cduvivier at google dot com)
- James Zern (jzern at google dot com)
- Jan Engelhardt (jengelh at medozas dot de)
- Johann (johann dot koenig at duck dot com)

Android.mk

@@ -1,6 +1,14 @@
LOCAL_PATH:= $(call my-dir)
LOCAL_PATH := $(call my-dir)
WEBP_CFLAGS := -Wall -DANDROID -DHAVE_MALLOC_H -DHAVE_PTHREAD -DWEBP_USE_THREAD
ifeq ($(APP_OPTIM),release)
WEBP_CFLAGS += -finline-functions -frename-registers -ffast-math -s \
-ffunction-sections -fdata-sections
endif
include $(CLEAR_VARS)
LOCAL_SRC_FILES := \
src/dec/alpha.c \
src/dec/buffer.c \
@@ -39,6 +47,7 @@ LOCAL_SRC_FILES := \
src/enc/tree.c \
src/enc/vp8l.c \
src/enc/webpenc.c \
src/utils/alpha_processing.c \
src/utils/bit_reader.c \
src/utils/bit_writer.c \
src/utils/color_cache.c \
@@ -47,17 +56,17 @@ LOCAL_SRC_FILES := \
src/utils/huffman_encode.c \
src/utils/quant_levels.c \
src/utils/quant_levels_dec.c \
src/utils/random.c \
src/utils/rescaler.c \
src/utils/thread.c \
src/utils/utils.c \
LOCAL_CFLAGS := -Wall -DANDROID -DHAVE_MALLOC_H -DHAVE_PTHREAD \
-DWEBP_USE_THREAD \
-finline-functions -frename-registers -ffast-math \
-s -fomit-frame-pointer -Isrc/webp
LOCAL_CFLAGS := $(WEBP_CFLAGS)
LOCAL_C_INCLUDES += $(LOCAL_PATH)/src
# prefer arm over thumb mode for performance gains
LOCAL_ARM_MODE := arm
ifeq ($(TARGET_ARCH_ABI),armeabi-v7a)
# Setting LOCAL_ARM_NEON will enable -mfpu=neon which may cause illegal
# instructions to be generated for armv7a code. Instead target the neon code
@@ -68,8 +77,22 @@ ifeq ($(TARGET_ARCH_ABI),armeabi-v7a)
endif
LOCAL_STATIC_LIBRARIES := cpufeatures
LOCAL_MODULE:= webp
LOCAL_MODULE := webp
include $(BUILD_STATIC_LIBRARY)
include $(CLEAR_VARS)
LOCAL_SRC_FILES := \
examples/dwebp.c \
examples/example_util.c \
LOCAL_CFLAGS := $(WEBP_CFLAGS)
LOCAL_C_INCLUDES := $(LOCAL_PATH)/src
LOCAL_STATIC_LIBRARIES := webp
LOCAL_MODULE := dwebp
include $(BUILD_EXECUTABLE)
$(call import-module,android/cpufeatures)

ChangeLog

@@ -1,7 +1,456 @@
256e433 update NEWS description with new general features
2962534 Merge "gif2webp: don't use C99 %zu" into 0.4.0
3b9f9dd gif2webp: don't use C99 %zu
b5b2e3c cwebp: fix metadata output w/lossy+alpha
ad26df1 makefile.unix: clean up libgif2webp_util.a
c3b4557 update Changelog
ca84112 Merge "bump version to 0.4.0" into 0.4.0
8c524db bump version to 0.4.0
eec2398 update AUTHORS & .mailmap
b9bbf6a update NEWS for 0.4.0
c72e081 Merge "dec/webp.c: don't wait for data before reporting w/h"
5ad6531 dec/frame.c: fix formatting
f7fc4bc dec/webp.c: don't wait for data before reporting w/h
66a32af Merge "NEON speed up"
26d842e NEON speed up
f307f98 Merge "webpmux: let -- stop parameter parsing"
fe051da Merge "README: add a section on gif2webp"
6fd2bd6 Merge "manpage pedantry"
4af1900 README: add a section on gif2webp
6f36ade manpage pedantry
f9016cb README: update dwebp options
b4fa0a4 webpmux: let -- stop parameter parsing
a9a20ac gif2webp: Add a multi-threaded encode option
495bef4 fix bug in TrellisQuantize
605a712 simplify __cplusplus ifdef
33109f9 Merge "drop: ifdef __cplusplus checks from C files"
7f9de0b Merge changes I994a5587,I8467bb71,I13b50688,I1e2c9c7b
5459030 gif2webp: let -- stop parameter parsing
a4b0aa0 vwebp: let -- stop parameter parsing
98af68f cwebp: let -- stop parameter parsing
a33831e dwebp: let -- stop parameter parsing
3630124 add some checks on error paths
ce4c713 Merge "autoconf: add --disable-wic"
5227d99 drop: ifdef __cplusplus checks from C files
f645355 dwebp.1: fix typo
f91034f Merge "cwebp: print metadata stats when no output file is given"
d493455 gif2webp: Backward compatibility for giflib version <= 4.1.3
4c617d3 gif2webp: Disable output of ICC profile by default
73b731f introduce a special quantization function for WHT
41c0cc4 Make Forward WHT transform use 32bit fixed-point calculation
a3359f5 Only compute quantization params once
7049043 cwebp: print metadata stats when no output file is given
d513bb6 * fix off-by-one zthresh calculation * remove the sharpening for non luma-AC coeffs * adjust the bias a little bit to compensate for this
ad9dec0 Merge "cosmetics: dwebp: fix local function name format"
f737f03 Merge "dwebp: remove a dead store"
3c3a70d Merge "makefile.unix: install binaries in $(DESTDIR)/bin/"
150b655 Merge "Android.mk: add some release compile flags"
dbebd33 cosmetics: dwebp: fix local function name format
2774995 dwebp: remove a dead store
a01e04f autoconf: add --disable-wic
5009b22 makefile.unix: install binaries in $(DESTDIR)/bin/
bab30fc Merge "fix -print_psnr / ssim options"
ebef7fb fix -print_psnr / ssim options
cb63785 Merge "fix bug due to overzealous check in WebPPictureYUVAToARGB()"
8189885 Merge "EstimateBestFilter: use an int to iterate WEBP_FILTER_TYPE"
4ad7d33 Android.mk: add some release compile flags
c12e236 cosmetics: fix a few typos
6f10403 fix bug due to overzealous check in WebPPictureYUVAToARGB()
3f6c35c EstimateBestFilter: use an int to iterate WEBP_FILTER_TYPE
cc55790 Merge changes I8bb7a4dc,I2c180051,I021a014f,I8a224a62
c536afb Merge "cosmetics: fix some typos"
cbdd3e6 add a -dither dithering option to the decoder
e812401 Updated iosbuild.sh for XCode 5.x
4931c32 cosmetics: fix some typos
05aacf7 mux: add some missing casts
617d934 enc/vp8l: add a missing cast
46db286 idec: add some missing casts
b524e33 ErrorStatusLossless: correct return type
cb261f7 fix a descaling bug for vertical/horizontal U/V interpolation
bcb3955 Merge changes I48968468,I181bc736
73f5213 gif2webp: Add a mixed compression mode
6198715 demux: split chunk parsing from ParseVP8X
d2e3f4e demux: add a tail pointer for chunks
87cffcc demux: cosmetics: s/has_frames/is_animation/
e18e667 demux: strictly enforce the animation flag
c4f39f4 demux: cosmetics: remove a useless break
61cb884 demux: (non-exp) fail if the fragmented flag is set
ff379db few % speedup of lossless encoding
df3649a remove all disabled code related to P-frames
6d0cb3d Merge "gif2webp: kmin = 0 should suppress key-frame addition."
3655598 gif2webp: kmin = 0 should suppress key-frame addition.
7708e60 Merge "detect flatness in blocks and favor DC prediction"
06b1503 Merge "add comment about the kLevelsFromDelta[][] LUT generation"
5935259 add comment about the kLevelsFromDelta[][] LUT generation
e3312ea detect flatness in blocks and favor DC prediction
ebc9b1e Merge "VPLBitReader bugfix: Catch error if bit_pos > LBITS too."
96ad0e0 VPLBitReader bugfix: Catch error if bit_pos > LBITS too.
a014e9c tune quantization biases toward higher precision
1e89861 add helpful PrintBlockInfo() function
596a6d7 make use of 'extern' consistent in function declarations
c8d48c6 Merge "extract random utils to their own file util/random.[ch]"
98aa33c extract random utils to their own file util/random.[ch]
432a723 Merge "swig: add basic go bindings"
fab618b Merge "rename libwebp.i -> libwebp.swig"
e4e7fcd swig: add basic go bindings
d340872 Merge "fast auto-determined filtering strength"
f8bfd5c fast auto-determined filtering strength
ac0bf95 small clean-up in ExpandMatrix()
1939607 rename libwebp.i -> libwebp.swig
43148b6 filtering: precompute ilimit and hev_threshold
18f992e simplify f_inner calculation a little
241d11f add missing const
86c0031 add a 'format' field to WebPBitstreamFeatures
dde91fd Demux: Correct the extended format validation
5d6c5bd add entry for '-resize' option in cwebp's man
7c098d1 Use some gamma-curve range compression when computing U/V average
0b2b050 Use deterministic random-dithering during RGB->YUV conversion
8a2fa09 Add a second multi-thread method
7d6f2da Merge "up to 20% faster multi-threaded decoding"
266f63e Merge "libwebp.jar: build w/Java 1.6 for Android compat"
0532149 up to 20% faster multi-threaded decoding
38efdc2 Simplify the gif2webp tool: move the optimization details to util
de89951 libwebp.jar: build w/Java 1.6 for Android compat
cb22155 Decode a full row of bitstream before reconstructing
dca8a4d Merge "NEON/simple loopfilter: avoid q4-q7 registers"
9e84d90 Merge "NEON/TransformWHT: avoid q4-q7 registers"
fc10249 NEON/simple loopfilter: avoid q4-q7 registers
2f09d63 NEON/TransformWHT: avoid q4-q7 registers
77585a2 Merge "use a macrofunc for setting NzCoeffs bits"
d155507 Merge "use HINT_GRAPH as image_hint for gif source"
9c56164 Merge "only print GIF_DISPOSE_WARNING once"
0587986 use HINT_GRAPH as image_hint for gif source
0b28d7a use a macrofunc for setting NzCoeffs bits
f9bbc2a Special-case sparse transform
0012519 gif2webp: detect and flatten uniformly similar blocks
0deaf0f only print GIF_DISPOSE_WARNING once
6a8c0eb Merge "small optimization in segment-smoothing loop"
f7146bc small optimization in segment-smoothing loop
5a7533c small gif2webp fix
4df0c89 Merge changes Ic697660c,I27285521
5b2e6bd Android.mk: add a dwebp target
f910a84 Android.mk: update build flags
63f9aba special-case WHT transform when there's only DC
80911ae Merge "7-8% faster decoding by rewriting GetCoeffs()"
606c430 gif2webp: Improved compression for lossy animated WebP
fb887f7 gif2webp: Different kmin/kmax defaults for lossy and lossless
2a98136 7-8% faster decoding by rewriting GetCoeffs()
92d47e4 improve VP8L signature detection by checking the version bits too
5cd43e4 Add -incremental option to dwebp
54b8e3f webpmux: DisplayInfo(): remove unnecessary error checks.
40ae352 fix memleak in WebPIDelete()
d966265 mux.h doc: WebPMuxGetFrame() can return WEBP_MUX_MEMORY_ERROR too.
0e6747f webpmux -info: display dimensions and has_alpha per frame
d78a82c Sanity check for underflow
8498f4b Merge "remove -Wshadow warnings"
e89c6fc Avoid a potential memleak
3ebe175 Merge "break down the proba 4D-array into some handy structs"
6a44550 break down the proba 4D-array into some handy structs
2f5e893 remove -Wshadow warnings
bf3a29b Merge "add proper WEBP_HAVE_GIF and WEBP_HAVE_GL flags"
2b0a759 Merge "fix some warnings from static analysis"
22dd07c mux.h: Some doc corrections
79ff034 add proper WEBP_HAVE_GIF and WEBP_HAVE_GL flags
d51f45f fix some warnings from static analysis
d134307 fix conversion warning on MSVC
d538cea gif2webp: Support a 'min' and 'max' key frame interval
80b54e1 allow search with token buffer loop and fix PARTITION0 problem
b7d4e04 add VP8EstimateTokenSize()
10fddf5 enc/quant.c: silence a warning
399cd45 Merge "fix compile error on ARM/gcc"
9f24519 encoder: misc rate-related fixes
c663bb2 Merge "simplify VP8IteratorSaveBoundary() arg passing"
fa46b31 Demux.h: Correct a method name reference
f8398c9 fix compile error on ARM/gcc
f691f0e simplify VP8IteratorSaveBoundary() arg passing
42542be up to 6% faster encoding with clang compiler
93402f0 multi-threaded segment analysis
7e2d659 Merge "remove the PACK() bit-packing tricks"
c13fecf remove the PACK() bit-packing tricks
2fd091c Merge "use NULL for lf_stats_ testing, not bool"
b11c9d6 dwebp: use default dct_method
4bb8465 Merge "(de)mux.h: wrap pseudo-code in /* */"
cfb56b1 make -pass option work with token buffers
5416aab (de)mux.h: wrap pseudo-code in /* */
35dba33 use NULL for lf_stats_ testing, not bool
733a7fa enc->Iterator memory cleanup
e81fac8 Add support for "no blend" in webpmux binary
3b80bc4 gif2webp: Separate out each step into a method
bef7e9c Add doc precision about demux object keeping pointers to data.
61405a1 dwebp: enable stdout output with WIC
6eabb88 Merge "Animated WebP: add "do no blend" option to spec"
be20dec fix compilation for BITS 24
e58cc13 Merge "dwebp: s/unsigned char/uint8_t/"
72501d4 dwebp: s/unsigned char/uint8_t/
2c9633e Merge "gif2webp: Insert independent frames at regular intervals."
f0d6a14 gif2webp: Insert independent frames at regular intervals.
b25a6fb yuv.h: fix indent
ede3602 Merge "cosmetics: fix indent"
3a65122 dwebp: fix stdout related output
388a724 cosmetics: fix indent
4c7322c Merge "dsp: msvc compatibility"
d50c7e3 Merge "5-7% faster SSE2 versions of YUV->RGB conversion functions"
b8ab784 Merge "simplify upsampler calls: only allow 'bottom' to be NULL"
df6cebf 5-7% faster SSE2 versions of YUV->RGB conversion functions
ad6ac32 simplify upsampler calls: only allow 'bottom' to be NULL
a5e8afa output to stdout if file name is "-"
f358450 dsp: msvc compatibility
43a7c8e Merge "cosmetics"
4c5f19c Merge "bit_reader.h: cosmetics"
f72fab7 cosmetics
14dd5e7 fix const-ness
b20aec4 Merge "Support for 'do not blend' option in vwebp"
dcf6522 Support for 'do not blend' option in vwebp
d5bad03 Animated WebP: add "do no blend" option to spec
a2f5f73 Merge "Support for "Do not blend" in mux and demux libraries"
e081f2f Pack code & extra_bits to Struct (VP8LPrefixCode).
6284854 Support for "Do not blend" in mux and demux libraries
f486aaa Merge "slightly faster ParseIntraMode"
d171863 slightly faster ParseIntraMode
3ceca8a bit_reader.h: cosmetics
69257f7 Create LUT for PrefixEncode.
988b708 add WebPWorkerExecute() for convenient bypass
06e2498 Merge "VP8EncIterator clean-up"
de4d4ad VP8EncIterator clean-up
7bbe952 Merge "cosmetics: thread.c: drop a redundant comment"
da41148 cosmetics: thread.c: drop a redundant comment
feb4b6e thread.h: #ifdef when checking WEBP_USE_THREAD
8924a3a thread.c: drop WebPWorker prefix from static funcs
1aed8f2 Merge "fix indent"
4038ed1 fix indent
1693fd9 Demux: A new state WEBP_DEMUX_PARSE_ERROR
8dcae8b fix rescaling-with-alpha inaccuracy
11249ab Merge changes I9b4dc36c,I4e0eef4d
52508a1 Mux: support parsing unknown chunks within a frame/fragment.
05db057 WebPMuxSetChunk: remove unused variable
8ba1bf6 Stricter check for presence of alpha when writing lossless images
a03c351 Demux: WebPIterator now also denotes if the frame has alpha.
6df743a Decoder: handle fragments case correctly too.
faa4b07 Support for unknown chunks in mux library
7d60bbc Speed up HashChainFindCopy function.
6674014 Speedup Alpha plane encoding.
b7346a1 0.1 % speedup to decoding
c606182 webp-container-spec: Tighten language added by last
a34a502 pngdec: output error messages from libpng
e84c625 Merge "Detect canvas and image size mismatch in decoder."
f626fe2 Detect canvas and image size mismatch in decoder.
f5fbdee demux: stricter image bounds check
30c8158 add extra assert in Huffman decode code
8967b9f SSE2 for lossless decoding (critical) functions.
699d80e Jump-lookup for Huffman coding
c34307a fix some VS9 warnings about type conversion
eeada35 pngdec: add missing include
54b6510 gif2webp: If aligning to even offsets, extra pixels should be transparent
0bcf5ce Merge "remove a malloc() in case we're using only FILTER_NONE for alpha"
2c07143 remove a malloc() in case we're using only FILTER_NONE for alpha
a4d5f59 Faster lossless decoding
fd53bb7 Merge "alternate LUT-base reverse-bits code"
d1c166e Merge "Container spec: a clarification on background color."
fdb9177 Rename a method
5e96753 Container spec: a clarification on background color.
30e77d0 Merge branch '0.3.0'
1b631e2 alternate LUT-base reverse-bits code
24cc307 ~20% faster lossless decoding
313d853 Speedup for decoding lossless WebP photographs:
24ee098 change the bytes_per_pixels_ field into more evocative use_8b_decode
2a04b03 update ChangeLog (tag: v0.3.1-rc2, tag: v0.3.1)
7288950 Regression fix for alpha channels using color cache:
2e377b5 wicdec: silence a format warning
ad9e42a muxedit: silence some uninitialized warnings
3307c16 Don't set alpha-channel to 0xff for alpha->green uplift
5130770 Merge "wicdec: silence a format warning"
a37eff4 Regression fix for alpha channels using color cache:
241cf99 Merge "muxedit: silence some uninitialized warnings"
c8f9c84 Regression fix for alpha unfiltering:
14cd5c6 muxedit: silence some uninitialized warnings
a368db8 dec/vp8l: quiet vs9 x64 type conversion warning
ffae9f3 wicdec: silence a format warning
8cf0701 Alpha encoding: never filter in case of NO_COMPRESSION
825e73b update ChangeLog (tag: v0.3.1-rc1)
abf6f69 update NEWS
5a92c1a bump version to 0.3.1
86daf77 store top Y/U/V samples in packed fashion
67bc353 Revert "add WebPBlendAlpha() function to blend colors against background"
068db59 Intertwined decoding of alpha and RGB
38cc011 Simplify forward-WHT + SSE2 version
3fa595a Support decoding up to a given row in DECODE_DATA_FUNC
520f005 DequantizeLevels(): Add 'row' and 'num_rows' args
47374b8 Alpha unfilter for given set of rows
f32097e probe input file and quick-check for WebP format.
a2aed1d configure: improve gl/glut library test
c7e89cb update copyright text
a00380d configure: remove use of AS_VAR_APPEND
a94a88d fix EXIF parsing in PNG
a71e5d8 add doc precision for WebPPictureCopy() and WebPPictureView()
8287012 remove datatype qualifier for vmnv
e190843 fix a memory leak in gif2webp
0b18b9e fix two minor memory leaks in webpmux
db5095d remove some cruft from swig/libwebp.jar
850e956 README: update swig notes
bddd9b0 swig/python: add minimal documentation
d573a8d swig: add python encode support
6b93187 swig/java: reduce wrapper function code duplication
6fe536f swig/java: rework uint8_t typemap
a2ea464 Fix the bug in ApplyPalette.
7bb28d2 webp/lossless: fix big endian BGRA output
f036d4b Speed up ApplyPalette for ARGB pixels.
8112c8c remove some warnings:
cc128e0 Further reduce memory to decode lossy+alpha images
07db70d fix for big-endian
eda8a7d gif2webp: Fix signed/unsigned comparison mismatch
31f346f Makefile.vc: fix libwebpdemux dll variable typo
6c76d28 swig: add python (decode) support
b4f5bb6 swig: cosmetics
498d4dd WebP-Lossless encoding improvements.
26e7244 swig: ifdef some Java specific code
8ecec68 configure: add warning related flags
e676b04 configure: add GLUT detection; build vwebp
b0ffc43 Alpha decoding: significantly reduce memory usage
20aa7a8 configure: add --enable-everything
b8307cc configure.ac: add some helper macros
980e7ae Remove the gcc compilation comments
7f25ff9 gif2webp: Fix ICC and XMP support
d8e5321 Add missing name to AUTHORS
11edf5e Demux: Fix a potential memleak
c7b9218 don't forward declare enums
7a650c6 prevent signed int overflow in left shift ops
31bea32 add precision about dynamic output reallocation with IDecoder
c22877f Add incremental support for extended format files
5051245 Makefile.vc: have 'all' target build everything
8191dec Makefile.vc: flags cleanup
b9d7473 Makefile.vc: drop /FD flag
5568dbc update gitignore
f4c7b65 WebPEncode: An additional check. Start VP8EncLoop/VP8EncTokenLoop only if VP8EncStartAlpha succeeded.
1fb04be pngdec: Avoid a double-free.
dcbb1ca add WebPBlendAlpha() function to blend colors against background
bc9f5fb configure.ac: add AM_PROG_AR for automake >= 1.12
bf867bf Tuned cross_color parameter (step) for lower qual
90e2ec5 Merge "probe input file and quick-check for WebP format."
7180d7f Merge "update copyright text"
830f72b probe input file and quick-check for WebP format.
2ccf58d configure: improve gl/glut library test
d640614 update copyright text
c2113ad Merge "configure: remove use of AS_VAR_APPEND"
9326a56 configure: remove use of AS_VAR_APPEND
ea63d61 fix a type warning on VS9 x86
bec1109 fix EXIF parsing in PNG
b6e65f3 Merge "fix warnings for vs9 x64"
438946d fix warnings for vs9 x64
f4710e3 collect macroblock reconstruction data in VP8MBData struct
23d28e2 add doc precision for WebPPictureCopy() and WebPPictureView()
518f2cd cosmetics: gif2webp: fix indent
af358e6 Merge "remove datatype qualifier for vmnv"
3fe9163 remove datatype qualifier for vmnv
764fdff fix a memory leak in gif2webp
3e59a74 fix two minor memory leaks in webpmux
47b9862 Merge "README: update swig notes"
325d15f remove some cruft from swig/libwebp.jar
4a7627c README: update swig notes
5da81e3 Merge "swig/python: add minimal documentation"
f39e08f Merge "swig: add python encode support"
6ca4a3e Merge "swig/java: reduce wrapper function code duplication"
8f8702b Merge "swig/java: rework uint8_t typemap"
91413be reduce memory for VP8MB and remove bitfields use
7413394 Fix the memory leak in ApplyFilters.
2053c2c simplify the alpha-filter testing loop
825b64d swig/python: add minimal documentation
14677e1 swig: add python encode support
a5c297c swig/java: reduce wrapper function code duplication
ad4a367 swig/java: rework uint8_t typemap
0d25876 use uint8_t for inv_palette[]
afa3450 Fix the bug in ApplyPalette.
2d6ac42 Merge "webp/lossless: fix big endian BGRA output"
2ca8396 webp/lossless: fix big endian BGRA output
742110c Speed up ApplyPalette for ARGB pixels.
2451e47 misc code cleanup
83db404 Merge "swig: add python (decode) support"
eeeea8b Merge "swig: cosmetics"
d5f9b8f Merge "libwebp: fix vp8 encoder mem alloc offsetting"
d8edd83 libwebp: fix vp8 encoder mem alloc offsetting
8983b83 remove use of bit-fields in VP8FInfo
87a4fca remove some warnings:
ba8f74e Merge "fix for big-endian"
a65067f Merge "Further reduce memory to decode lossy+alpha images"
64c8448 Further reduce memory to decode lossy+alpha images
332130b Mux: make a few methods static
4437061 fix for big-endian
5199eab Merge "add uncompressed TIFF output support"
a3aede9 add uncompressed TIFF output support
f975b67 Merge "gif2webp: Fix signed/unsigned comparison mismatch"
5fbc734 Merge "GetFeatures: Detect invalid VP8X/VP8/VP8L data"
d5060c8 Merge "mux.h: A comment fix + some consistency fixes"
352d0de GetFeatures: Detect invalid VP8X/VP8/VP8L data
3ef79fe Cosmetic: "width * height"
043e1ae gif2webp: Fix signed/unsigned comparison mismatch
5818cff mux.h: A comment fix + some consistency fixes
1153f88 Merge "swig: ifdef some Java specific code"
3eeedae Makefile.vc: fix libwebpdemux dll variable typo
f980faf swig: add python (decode) support
7f5f42b swig: cosmetics
8eae188 WebP-Lossless encoding improvements.
c7247c4 swig: ifdef some Java specific code
4cb234d Merge "Mux: make ValidateForSingleImage() method static"
ed6f530 Merge "Add GetCanvasSize() method to mux"
1d530c9 Mux: make ValidateForSingleImage() method static
bba4c2b configure: add warning related flags
fffefd1 Add GetCanvasSize() method to mux
732da8d Merge "configure: add GLUT detection; build vwebp"
0e513f7 configure: add GLUT detection; build vwebp
55d1c15 Merge "Alpha decoding: significantly reduce memory usage"
13d99fb Merge "configure: add --enable-everything"
2bf698f Merge "configure.ac: add some helper macros"
edccd19 Alpha decoding: significantly reduce memory usage
3cafcc9 configure: add --enable-everything
4ef1447 configure.ac: add some helper macros
a4e1cdb Remove the gcc compilation comments
6393fe4 Cosmetic fixes
9c4ce97 Simplify forward-WHT + SSE2 version
878b9da fix missed optim
0004617 VP8GetInfo(): Check for zero width or height.
9bf3129 align VP8Encoder::nz_ allocation
5da165c fix CheckMode() signature
0ece07d Merge "explicitly pad bitfields to 32-bits"
9dbc9d1 explicitly pad bitfields to 32-bits
5369a80 Merge "prevent signed int overflow in left shift ops"
70e3971 Merge "cosmetics: remove unnecessary ';'s"
d3136ce Merge "don't forward declare enums"
b26e5ad gif2webp: Fix ICC and XMP support
46089b2 Add missing name to AUTHORS
94328d6 Demux: Fix a potential memleak
96e948d don't forward declare enums
f4f9088 prevent signed int overflow in left shift ops
0261545 cosmetics: remove unnecessary ';'s
7ebdf11 Merge "Fix few missing comparisons to NULL"
1579989 Fix few missing comparisons to NULL
ea1b21c Cleaned up VP8GetHeaders() so that it parses only frame header
b66caee dwebp: add support for BMP output
ff885bf add precision about dynamic output reallocation with IDecoder
79241d5 Merge "Makefile.vc: have 'all' target build everything"
ac1c729 Merge "Makefile.vc: flags cleanup"
118a055 Merge "Makefile.vc: drop /FD flag"
ecad010 Merge "update gitignore"
a681b4f Rename PRE_VP8 state to WEBP_HEADER
ead4d47 Add incremental support for extended format files
69d0f92 Makefile.vc: have 'all' target build everything
5296749 Makefile.vc: flags cleanup
c61baf0 Makefile.vc: drop /FD flag
3a15125 update gitignore
5167ca4 Merge "WebPEncode: An additional check. Start VP8EncLoop/VP8EncTokenLoop only if VP8EncStartAlpha succeeded."
67708d6 WebPEncode: An additional check. Start VP8EncLoop/VP8EncTokenLoop only if VP8EncStartAlpha succeeded.
b68912a pngdec: Avoid a double-free.
82abbe1 Merge "configure.ac: add AM_PROG_AR for automake >= 1.12"
e7d9548 add WebPBlendAlpha() function to blend colors against background
ed4dc71 configure.ac: add AM_PROG_AR for automake >= 1.12
df4a406 Merge branch '0.3.0'
1e0d4b8 Update ChangeLog (tag: v0.3.0-rc7, tag: v0.3.0)
d52b405 Cosmetic fixes
6cb4a61 misc style fix
68111ab add missing YUVA->ARGB automatic conversion in WebPEncode()
e9a7990 Cosmetic fixes
403bfe8 Container spec: Clarify frame disposal
2aaa423 Merge "add missing YUVA->ARGB automatic conversion in WebPEncode()"
07d87bd add missing YUVA->ARGB automatic conversion in WebPEncode()
142c462 misc style fix
3e7a13a Merge "Container spec: clarify the background color field" into 0.3.0
14af774 container doc: add a note about the 'ANMF' payload
cc635ef Container spec: clarify the background color field
@ -18,7 +467,7 @@ a5ebd14 gif2webp: Bgcolor fix for a special case
3c8eb9a fix bad saturation order in QuantizeBlock
04c7a2e vwebp/animation: fix background dispose
81a5069 Makefile.vc: fix dynamic builds
5f25c39 update ChangeLog
5f25c39 update ChangeLog (tag: v0.3.0-rc6)
14d42af examples: don't use C99 %zu
5ccf1fe update ChangeLog
2560c24 update NEWS
@ -324,7 +773,7 @@ a61a824 Merge "Add NULL check in chunk APIs"
a077072 mux struct naming
6c66dde Merge "Tune Lossless encoder"
ab5ea21 Tune Lossless encoder
74fefc8 Update ChangeLog (v0.2.1, origin/0.2.0)
74fefc8 Update ChangeLog (tag: v0.2.1, origin/0.2.0, 0.2.0)
92f8059 Rename some chunks:
3bb4bbe Merge "Mux API change:"
d0c79f0 Mux API change:
@ -394,7 +843,7 @@ c7eb457 make VP8DspInitNEON() public
ab3234a Create WebPMuxFrameInfo struct for Mux APIs
e3990fd Alignment fixes
e55fbd6 Merge branch '0.2.0'
4238bc0 Update ChangeLog (v0.2.0)
4238bc0 Update ChangeLog (tag: v0.2.0)
c655380 dec/io.c: cosmetics
fe1958f RGBA4444: harmonize lossless/lossy alpha values
681cb30 fix RGBA4444 output w/fancy upsampling
@ -405,7 +854,7 @@ f56e98f Alignment fix
a0a4885 Lossless decoder fix for a special transform order
62dd9bb Update encoding heuristic w.r.t palette colors.
6f4272b remove unused ApplyInverseTransform()
93bf0fa Update ChangeLog (v0.2.0-rc1)
93bf0fa Update ChangeLog (tag: v0.2.0-rc1)
5934fc5 update AUTHORS
014a711 update NEWS
43b0d61 add support for ARGB -> YUVA conversion for lossless decoder
@ -448,7 +897,7 @@ cbee59e Merge commit 'v0.1.99'
3bc3f7c Merge "dwebp: add PAM output support" into 0.2.0
d919ed0 dwebp: add PAM output support
85e215d README/manpages/configure: update website link
c3a207b Update ChangeLog (v0.1.99)
c3a207b Update ChangeLog (tag: v0.1.99)
d1fd782 Merge "add extra precision about default values and behaviour" into 0.2.0
efc826e add extra precision about default values and behaviour
9f29635 header/doc clean up
@ -1073,7 +1522,7 @@ f3bf4c7 Added Mux Container Spec & README for MUX-API.
9f761cf Changed function signature for WebPMuxCreate
5f31b5e Merge "Add Mux library for manipulating WebP container."
2315785 Add Mux library for manipulating WebP container.
7e198ab update ChangeLog (v0.1.3)
7e198ab update ChangeLog (tag: v0.1.3)
dfc9c1e Harmonize the dates
28ad70c Fix PNG decoding bug
846e93c Update AUTHORS & add .mailmap
@ -1214,7 +1663,7 @@ cfbf88a add SSE2 functions. ~2x faster encoding on average.
e7ff3f9 merge two ITransforms together when applicable and change the TTransform to return the sum directly.
ca55413 fix WebPIDecGetRGB() to accept any RGB(A) mode, not just MODE_RGB
8aa50ef fix some 'man' typos
d3f3bdd update ChangeLog (v0.1.2)
d3f3bdd update ChangeLog (tag: v0.1.2)
d7e9a69 update contributor list
261abb8 add a 'superclean' section
276ae82 Remove files not mean to be in git, and update .gitignore


@ -24,18 +24,18 @@ PLATFORM_LDFLAGS = /SAFESEH
#############################################################
## Nothing more to do below this line!
MT = mt.exe
CCNODBG = cl.exe /nologo /O2 /DNDEBUG
CCDEBUG = cl.exe /nologo /Od /Gm /Zi /D_DEBUG /RTC1
CFLAGS = /Isrc /nologo /W3 /EHsc /FD /c /GS
NOLOGO = /nologo
CCNODBG = cl.exe $(NOLOGO) /O2 /DNDEBUG
CCDEBUG = cl.exe $(NOLOGO) /Od /Gm /Zi /D_DEBUG /RTC1
CFLAGS = /Isrc $(NOLOGO) /W3 /EHsc /c /GS
CFLAGS = $(CFLAGS) /DWIN32 /D_CRT_SECURE_NO_WARNINGS /DWIN32_LEAN_AND_MEAN
CFLAGS = $(CFLAGS) /DHAVE_WINCODEC_H /DWEBP_USE_THREAD
LDFLAGS = /LARGEADDRESSAWARE /MANIFEST /NXCOMPAT /DYNAMICBASE
LDFLAGS = $(LDFLAGS) $(PLATFORM_LDFLAGS)
LNKDLL = link.exe /DLL
LNKLIB = link.exe /lib
LNKEXE = link.exe
LFLAGS = /nologo /machine:$(ARCH)
LNKDLL = link.exe /DLL $(NOLOGO)
LNKEXE = link.exe $(NOLOGO)
LNKLIB = lib.exe $(NOLOGO)
MT = mt.exe $(NOLOGO)
CFGSET = FALSE
!IF "$(OBJDIR)" == ""
@ -108,7 +108,7 @@ CC = $(CC) /I$(DIROBJ) /FI$(DLLINC) $(RTLIB) /DWEBP_DLL
LIBWEBPDECODER = $(DIRLIB)\$(LIBWEBPDECODER_BASENAME)_dll.lib
LIBWEBP = $(DIRLIB)\$(LIBWEBP_BASENAME)_dll.lib
LIBWEBPMUX = $(DIRLIB)\$(LIBWEBPMUX_BASENAME)_dll.lib
LIBWEBPDEMUX = $(DIRLIB)\$(LIBWEBPMDEMUX_BASENAME)_dll.lib
LIBWEBPDEMUX = $(DIRLIB)\$(LIBWEBPDEMUX_BASENAME)_dll.lib
LIBWEBP_PDBNAME = $(DIROBJ)\$(LIBWEBP_BASENAME)_dll.pdb
CFGSET = TRUE
!ENDIF
@ -130,7 +130,8 @@ CFGSET = TRUE
!MESSAGE - clean - perform a clean for CFG
!MESSAGE - experimental - build CFG with experimental
!MESSAGE . features enabled.
!MESSAGE - (empty) or all - build all targets for CFG
!MESSAGE - (empty) - build libwebp-based targets for CFG
!MESSAGE - all - build (de)mux-based targets for CFG
!MESSAGE
!MESSAGE RTLIBCFG controls the runtime library linkage - 'static' or 'dynamic'.
!MESSAGE OBJDIR is the path where you like to build (obj, bins, etc.),
@ -216,12 +217,14 @@ MUX_OBJS = \
$(DIROBJ)\mux\muxread.obj \
UTILS_DEC_OBJS = \
$(DIROBJ)\utils\alpha_processing.obj \
$(DIROBJ)\utils\bit_reader.obj \
$(DIROBJ)\utils\color_cache.obj \
$(DIROBJ)\utils\filters.obj \
$(DIROBJ)\utils\huffman.obj \
$(DIROBJ)\utils\quant_levels_dec.obj \
$(DIROBJ)\utils\rescaler.obj \
$(DIROBJ)\utils\random.obj \
$(DIROBJ)\utils\thread.obj \
$(DIROBJ)\utils\utils.obj \
@ -238,8 +241,10 @@ LIBWEBPDEMUX_OBJS = $(DEMUX_OBJS) $(LIBWEBPDEMUX_OBJS)
OUT_LIBS = $(LIBWEBPDECODER) $(LIBWEBP)
OUT_EXAMPLES = $(DIRBIN)\cwebp.exe $(DIRBIN)\dwebp.exe
EXTRA_EXAMPLES = $(DIRBIN)\vwebp.exe $(DIRBIN)\webpmux.exe
all: $(OUT_LIBS) $(OUT_EXAMPLES)
ex: $(OUT_LIBS) $(OUT_EXAMPLES)
all: ex $(EXTRA_EXAMPLES)
$(DIRBIN)\cwebp.exe: $(DIROBJ)\examples\cwebp.obj $(EX_FORMAT_DEC_OBJS)
$(DIRBIN)\dwebp.exe: $(DIROBJ)\examples\dwebp.obj
$(DIRBIN)\vwebp.exe: $(DIROBJ)\examples\vwebp.obj
@ -247,6 +252,7 @@ $(DIRBIN)\vwebp.exe: $(EX_UTIL_OBJS) $(LIBWEBPDEMUX) $(LIBWEBP)
$(DIRBIN)\webpmux.exe: $(DIROBJ)\examples\webpmux.obj $(LIBWEBPMUX)
$(DIRBIN)\webpmux.exe: $(EX_UTIL_OBJS) $(LIBWEBP)
$(OUT_EXAMPLES): $(EX_UTIL_OBJS) $(LIBWEBP)
$(EX_UTIL_OBJS) $(EX_FORMAT_DEC_OBJS): $(OUTPUT_DIRS)
experimental:
$(MAKE) /f Makefile.vc \
@ -278,7 +284,7 @@ clean::
@-erase /s $(DIROBJ)\$(DLLC) $(DIROBJ)\$(DLLINC) 2> NUL
!ELSE
$(LIBWEBPDECODER) $(LIBWEBP) $(LIBWEBPMUX) $(LIBWEBPDEMUX):
$(LNKLIB) /out:$@ $(LFLAGS) $**
$(LNKLIB) /out:$@ $**
-xcopy $(DIROBJ)\*.pdb $(DIRLIB) /y
!ENDIF

NEWS

@ -1,3 +1,24 @@
- 12/19/13: version 0.4.0
* improved gif2webp tool
* numerous fixes, compression improvement and speed-up
* dither option added to decoder (dwebp -dither 50 ...)
* improved multi-threaded modes (-mt option)
* improved filtering strength determination
* New function: WebPMuxGetCanvasSize
* BMP and TIFF format output added to 'dwebp'
* Significant memory reduction for decoding lossy images with alpha.
* Intertwined decoding of RGB and alpha for a shorter
time-to-first-decoded-pixel.
* WebPIterator has a new member 'has_alpha' denoting whether the frame
contains transparency.
* Container spec amended with new 'blending method' for animation.
- 6/13/13: version 0.3.1
This is a binary compatible release.
* Add incremental decoding support for images containing ALPH and ICCP chunks.
* Python bindings via swig for the simple encode/decode interfaces similar to
Java.
- 3/20/13: version 0.3.0
This is a binary compatible release.
* WebPINewRGB/WebPINewYUVA accept being passed a NULL output buffer
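The 0.4.0 entry above introduces WebPMuxGetCanvasSize(). A minimal sketch of how the call can be used, assuming a complete WebP file has already been loaded into memory (error handling trimmed to the essentials):

#include <stdio.h>
#include "webp/mux.h"

// Sketch: query the canvas size of an (animated) WebP file whose
// contents are already in memory in 'data'/'data_size'.
static int PrintCanvasSize(const uint8_t* data, size_t data_size) {
  WebPData bitstream;
  WebPMux* mux;
  int width = 0, height = 0;
  bitstream.bytes = data;
  bitstream.size = data_size;
  mux = WebPMuxCreate(&bitstream, 0);   // 0 = do not copy the input data
  if (mux == NULL) return 0;
  if (WebPMuxGetCanvasSize(mux, &width, &height) != WEBP_MUX_OK) {
    WebPMuxDelete(mux);
    return 0;
  }
  printf("canvas: %d x %d\n", width, height);
  WebPMuxDelete(mux);
  return 1;
}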

README

@ -4,7 +4,7 @@
\__\__/\____/\_____/__/ ____ ___
/ _/ / \ \ / _ \/ _/
/ \_/ / / \ \ __/ \__
\____/____/\_____/_____/____/v0.3.0
\____/____/\_____/_____/____/v0.4.0
Description:
============
@ -80,8 +80,8 @@ more options.
SWIG bindings:
--------------
To generate language bindings from swig/libwebp.i swig-1.3
(http://www.swig.org) is required. 2.0 may work, but has not been tested.
To generate language bindings from swig/libwebp.swig at least swig-1.3
(http://www.swig.org) is required.
Currently the following functions are mapped:
Decode:
@ -104,12 +104,20 @@ Encode:
WebPEncodeLosslessRGB
WebPEncodeLosslessBGR
See swig/README for more detailed build instructions.
Java bindings:
To build the swig-generated JNI wrapper code at least JDK-1.5 (or equivalent)
is necessary for enum support. The output is intended to be a shared object /
DLL that can be loaded via System.loadLibrary("webp_jni").
Python bindings:
To build the swig-generated Python extension code at least Python 2.6 is
required. Python < 2.6 may build with some minor changes to libwebp.swig or the
generated code, but is untested.
Encoding tool:
==============
@ -170,6 +178,10 @@ options:
-alpha_filter <string> . predictive filtering for alpha plane.
One of: none, fast (default) or best.
-alpha_cleanup ......... Clean RGB values in transparent area.
-blend_alpha <hex> ..... Blend colors against background color
expressed as RGB values written in
hexadecimal, e.g. 0xc0e0d0 for red=0xc0
green=0xe0 and blue=0xd0.
-noalpha ............... discard any transparency information.
-lossless .............. Encode image losslessly.
-hint <string> ......... Specify image characteristics hint.
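An illustrative invocation of the -blend_alpha option described above (file names are placeholders):
$ cwebp -blend_alpha 0xc0e0d0 input.png -o output.webp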
@ -243,18 +255,23 @@ Decodes the WebP image file to PNG format [Default]
Use following options to convert into alternate image formats:
-pam ......... save the raw RGBA samples as a color PAM
-ppm ......... save the raw RGB samples as a color PPM
-bmp ......... save as uncompressed BMP format
-tiff ........ save as uncompressed TIFF format
-pgm ......... save the raw YUV samples as a grayscale PGM
file with IMC4 layout.
-yuv ......... save the raw YUV samples in flat layout.
file with IMC4 layout
-yuv ......... save the raw YUV samples in flat layout
Other options are:
-version .... print version number and exit.
-nofancy ..... don't use the fancy YUV420 upscaler.
-nofilter .... disable in-loop filtering.
-nodither .... disable dithering.
-dither <d> .. dithering strength (in 0..100)
-mt .......... use multi-threading
-crop <x> <y> <w> <h> ... crop output with the given rectangle
-scale <w> <h> .......... scale the output (*after* any cropping)
-alpha ....... only save the alpha plane.
-incremental . use incremental decoding (useful for tests)
-h ....... this help message.
-v ....... verbose (e.g. print encoding/decoding times)
-noasm ....... disable all assembly optimizations.
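An illustrative invocation combining the new BMP output and dithering options listed above (file names are placeholders):
$ dwebp input.webp -dither 50 -bmp -o output.bmp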
@ -275,6 +292,7 @@ Options are:
-noicc ....... don't use the icc profile if present.
-nofancy ..... don't use the fancy YUV420 upscaler.
-nofilter .... disable in-loop filtering.
-dither <int> dithering strength (0..100). Default=50.
-mt .......... use multi-threading.
-info ........ print info.
-h ....... this help message.
@ -310,6 +328,43 @@ $ make -f makefile.unix examples/vwebp
> nmake /f Makefile.vc CFG=release-static \
../obj/x64/release-static/bin/vwebp.exe
Animated GIF conversion:
========================
Animated GIF files can be converted to WebP files with animation using the
gif2webp utility available under examples/. The files can then be viewed using
vwebp.
Usage:
gif2webp [options] gif_file -o webp_file
options:
-h / -help ............ this help
-lossy ................. Encode image using lossy compression.
-mixed ................. For each frame in the image, pick lossy
or lossless compression heuristically.
-q <float> ............. quality factor (0:small..100:big)
-m <int> ............... compression method (0=fast, 6=slowest)
-kmin <int> ............ Min distance between key frames
-kmax <int> ............ Max distance between key frames
-f <int> ............... filter strength (0=off..100)
-metadata <string> ..... comma separated list of metadata to
copy from the input to the output if present.
Valid values: all, none, icc, xmp (default)
-mt .................... use multi-threading if available
-version ............... print version number and exit.
-v ..................... verbose.
-quiet ................. don't print anything.
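An illustrative conversion using a few of the options above (file names are placeholders; see the Building notes below):
$ gif2webp -q 70 -m 6 -mixed animation.gif -o animation.webp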
Building:
---------
With the libgif development files installed, gif2webp can be built using
makefile.unix:
$ make -f makefile.unix examples/gif2webp
or using autoconf:
$ ./configure --enable-everything
$ make
Encoding API:
=============


@ -1,7 +1,7 @@
 __ __ ____ ____ ____ __ __ _ __ __
/ \\/ \/ _ \/ _ \/ _ \/ \ \/ \___/_ / _\
\ / __/ _ \ __/ / / (_/ /__
\__\__/\_____/_____/__/ \__//_/\_____/__/___/v0.1.0
\__\__/\_____/_____/__/ \__//_/\_____/__/___/v0.2.0
Description:
@ -56,11 +56,12 @@ STRIP_OPTIONS:
FRAME_OPTIONS(i):
Create animation.
file_i +di+xi+yi+mi
file_i +di+[xi+yi[+mi[bi]]]
where: 'file_i' is the i'th animation frame (WebP format),
'di' is the pause duration before next frame.
'xi','yi' specify the image offset for this frame.
'mi' is the dispose method for this frame (0 or 1).
'bi' is the blending method for this frame (+b or -b).
LOOP_COUNT:
Number of times to repeat the animation.
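As an illustration of the frame syntax above (the exact flags follow webpmux's usage text, so treat the invocation as a sketch), a two-frame animation where the second frame is offset and carries explicit dispose and blend settings could be assembled with:
$ webpmux -frame 1.webp +100 -frame 2.webp +100+50+50+1+b -loop 10 -o anim.webp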


@ -1,19 +1,67 @@
AC_INIT([libwebp], [0.3.0],
AC_INIT([libwebp], [0.4.0],
[http://code.google.com/p/webp/issues],,
[http://developers.google.com/speed/webp])
AC_CANONICAL_TARGET
AM_INIT_AUTOMAKE([-Wall foreign subdir-objects])
dnl === automake >= 1.12 requires this for 'unusual archivers' support.
dnl === it must occur before LT_INIT (AC_PROG_LIBTOOL).
m4_ifdef([AM_PROG_AR], [AM_PROG_AR])
AC_PROG_LIBTOOL
AM_PROG_CC_C_O
dnl === Enable less verbose output when building.
m4_ifdef([AM_SILENT_RULES], [AM_SILENT_RULES([yes])])
dnl === SET_IF_UNSET(shell_var, value)
dnl === Set the shell variable 'shell_var' to 'value' if it is unset.
AC_DEFUN([SET_IF_UNSET], [test "${$1+set}" = "set" || $1=$2])
AC_ARG_ENABLE([everything],
AS_HELP_STRING([--enable-everything],
[Enable all optional targets. These can still be
disabled with --disable-target]),
[SET_IF_UNSET([enable_libwebpdecoder], [$enableval])
SET_IF_UNSET([enable_libwebpdemux], [$enableval])
SET_IF_UNSET([enable_libwebpmux], [$enableval])])
AC_ARG_WITH([pkgconfigdir], AS_HELP_STRING([--with-pkgconfigdir=DIR],
[Path to the pkgconfig directory @<:@LIBDIR/pkgconfig@:>@]),
[pkgconfigdir="$withval"], [pkgconfigdir='${libdir}/pkgconfig'])
AC_SUBST([pkgconfigdir])
dnl === TEST_AND_ADD_CFLAGS(flag)
dnl === Checks whether $CC supports 'flag' and adds it to AM_CFLAGS on success.
AC_DEFUN([TEST_AND_ADD_CFLAGS],
[SAVED_CFLAGS="$CFLAGS"
CFLAGS="-Werror $1"
AC_MSG_CHECKING([whether $CC supports $1])
dnl Note AC_LANG_PROGRAM([]) uses an old-style main definition.
AC_COMPILE_IFELSE([AC_LANG_SOURCE([int main(void) { return 0; }])],
[AC_MSG_RESULT([yes])]
dnl Simply append the variable avoiding a
dnl compatibility ifdef for AS_VAR_APPEND as this
dnl variable shouldn't grow all that large.
[AM_CFLAGS="$AM_CFLAGS $1"],
[AC_MSG_RESULT([no])])
CFLAGS="$SAVED_CFLAGS"])
TEST_AND_ADD_CFLAGS([-Wall])
TEST_AND_ADD_CFLAGS([-Wdeclaration-after-statement])
TEST_AND_ADD_CFLAGS([-Wextra])
TEST_AND_ADD_CFLAGS([-Wmissing-declarations])
TEST_AND_ADD_CFLAGS([-Wmissing-prototypes])
TEST_AND_ADD_CFLAGS([-Wold-style-definition])
TEST_AND_ADD_CFLAGS([-Wshadow])
TEST_AND_ADD_CFLAGS([-Wunused-but-set-variable])
TEST_AND_ADD_CFLAGS([-Wunused])
TEST_AND_ADD_CFLAGS([-Wvla])
AC_SUBST([AM_CFLAGS])
dnl === CLEAR_LIBVARS([var_pfx])
dnl === Clears <var_pfx>_{INCLUDES,LIBS}.
AC_DEFUN([CLEAR_LIBVARS], [$1_INCLUDES=""; $1_LIBS=""])
dnl === WITHLIB_OPTION([opt_pfx], [outvar_pfx])
dnl === Defines --with-<opt_pfx>{include,lib}dir options which set
dnl === the variables <outvar_pfx>_{INCLUDES,LIBS}.
@ -27,6 +75,24 @@ AC_DEFUN([WITHLIB_OPTION],
[use $2 libraries from DIR]),
[$2_LIBS="-L$withval"])])
dnl === LIBCHECK_PROLOGUE([var_pfx])
dnl === Caches the current values of CPPFLAGS/LIBS in SAVED_* then
dnl === prepends the current values with <var_pfx>_{INCLUDES,LIBS}.
AC_DEFUN([LIBCHECK_PROLOGUE],
[SAVED_CPPFLAGS=$CPPFLAGS
SAVED_LIBS=$LIBS
CPPFLAGS="$$1_INCLUDES $CPPFLAGS"
LIBS="$$1_LIBS $LIBS"])
dnl === LIBCHECK_EPILOGUE([var_pfx])
dnl === Restores the values of CPPFLAGS/LIBS from SAVED_* and exports
dnl === <var_pfx>_{INCLUDES,LIBS} with AC_SUBST.
AC_DEFUN([LIBCHECK_EPILOGUE],
[AC_SUBST($1_LIBS)
AC_SUBST($1_INCLUDES)
CPPFLAGS=$SAVED_CPPFLAGS
LIBS=$SAVED_LIBS])
dnl === Check for pthread support
AC_ARG_ENABLE([threading],
AS_HELP_STRING([--disable-threading],
@ -44,10 +110,110 @@ if test "$enable_threading" = "yes"; then
fi
AC_MSG_NOTICE([checking if threading is enabled... ${enable_threading-no}])
dnl === check for OpenGL/GLUT support ===
CLEAR_LIBVARS([GL])
WITHLIB_OPTION([gl], [GL])
LIBCHECK_PROLOGUE([GL])
glut_cflags="none"
glut_ldflags="none"
case $host_os in
darwin*)
# Special case for OSX builds. Append these to give the user a chance to
# override with --with-gl*
glut_cflags="$glut_cflags|-framework GLUT -framework OpenGL"
glut_ldflags="$glut_ldflags|-framework GLUT -framework OpenGL"
;;
esac
GLUT_SAVED_CPPFLAGS="$CPPFLAGS"
SAVED_IFS="$IFS"
IFS="|"
for flag in $glut_cflags; do
# restore IFS immediately as the autoconf macros may need the default.
IFS="$SAVED_IFS"
unset ac_cv_header_GL_glut_h
unset ac_cv_header_OpenGL_glut_h
case $flag in
none) ;;
*) CPPFLAGS="$flag $CPPFLAGS";;
esac
AC_CHECK_HEADERS([GL/glut.h GLUT/glut.h OpenGL/glut.h],
[glut_headers=yes;
test "$flag" = "none" || GL_INCLUDES="$CPPFLAGS";
break])
CPPFLAGS="$GLUT_SAVED_CPPFLAGS"
test "$glut_headers" = "yes" && break
done
IFS="$SAVED_IFS"
if test "$glut_headers" = "yes"; then
AC_LANG_PUSH([C])
GLUT_SAVED_LDFLAGS="$LDFLAGS"
SAVED_IFS="$IFS"
IFS="|"
for flag in $glut_ldflags; do
# restore IFS immediately as the autoconf macros may need the default.
IFS="$SAVED_IFS"
unset ac_cv_search_glBegin
case $flag in
none) ;;
*) LDFLAGS="$flag $LDFLAGS";;
esac
# find libGL
GL_SAVED_LIBS="$LIBS"
AC_SEARCH_LIBS([glBegin], [GL OpenGL])
LIBS="$GL_SAVED_LIBS"
# A direct link to libGL may not be necessary on e.g., linux.
GLUT_SAVED_LIBS="$LIBS"
for lib in "" "-lglut" "-lglut $ac_cv_search_glBegin"; do
LIBS="$lib"
AC_LINK_IFELSE(
[AC_LANG_PROGRAM([
#ifdef __cplusplus
# define EXTERN_C extern "C"
#else
# define EXTERN_C
#endif
EXTERN_C char glOrtho();
EXTERN_C char glutMainLoop();
],[
glOrtho();
glutMainLoop();
])
],
AC_DEFINE(WEBP_HAVE_GL, [1],
[Set to 1 if OpenGL is supported])
[glut_support=yes], []
)
if test "$glut_support" = "yes"; then
GL_LIBS="$LDFLAGS $lib"
break
fi
done
LIBS="$GLUT_SAVED_LIBS"
LDFLAGS="$GLUT_SAVED_LDFLAGS"
test "$glut_support" = "yes" && break
done
IFS="$SAVED_IFS"
AC_LANG_POP
fi
LIBCHECK_EPILOGUE([GL])
if test "$glut_support" = "yes" -a "$enable_libwebpdemux" = "yes"; then
build_vwebp=yes
fi
AM_CONDITIONAL([BUILD_VWEBP], [test "$build_vwebp" = "yes"])
dnl === check for PNG support ===
PNG_INCLUDES=""
PNG_LIBS=""
CLEAR_LIBVARS([PNG])
AC_PATH_PROGS(LIBPNG_CONFIG,
[libpng-config libpng15-config libpng14-config libpng12-config])
if test -n "$LIBPNG_CONFIG"; then
@ -61,11 +227,7 @@ fi
WITHLIB_OPTION([png], [PNG])
SAVED_CPPFLAGS=$CPPFLAGS
SAVED_LIBS=$LIBS
CPPFLAGS="$PNG_INCLUDES $CPPFLAGS"
LIBS="$PNG_LIBS $LIBS"
LIBCHECK_PROLOGUE([PNG])
AC_CHECK_HEADER(png.h,
AC_SEARCH_LIBS(png_get_libpng_ver, [png],
[test "$ac_cv_search_png_get_libpng_ver" = "none required" \
@ -85,23 +247,14 @@ AC_CHECK_HEADER(png.h,
PNG_INCLUDES=""
],
)
AC_SUBST(PNG_LIBS)
AC_SUBST(PNG_INCLUDES)
CPPFLAGS=$SAVED_CPPFLAGS
LIBS=$SAVED_LIBS
LIBCHECK_EPILOGUE([PNG])
dnl === check for JPEG support ===
JPEG_INCLUDES=""
JPEG_LIBS=""
CLEAR_LIBVARS([JPEG])
WITHLIB_OPTION([jpeg], [JPEG])
SAVED_CPPFLAGS=$CPPFLAGS
SAVED_LIBS=$LIBS
CPPFLAGS="$JPEG_INCLUDES $CPPFLAGS"
LIBS="$JPEG_LIBS $LIBS"
LIBCHECK_PROLOGUE([JPEG])
AC_CHECK_HEADER(jpeglib.h,
AC_CHECK_LIB(jpeg, jpeg_set_defaults,
[JPEG_LIBS="$JPEG_LIBS -ljpeg"
@ -114,23 +267,14 @@ AC_CHECK_HEADER(jpeglib.h,
[$MATH_LIBS]),
AC_MSG_WARN(jpeg library not available - no jpeglib.h)
)
AC_SUBST(JPEG_LIBS)
AC_SUBST(JPEG_INCLUDES)
CPPFLAGS=$SAVED_CPPFLAGS
LIBS=$SAVED_LIBS
LIBCHECK_EPILOGUE([JPEG])
dnl === check for TIFF support ===
TIFF_INCLUDES=""
TIFF_LIBS=""
CLEAR_LIBVARS([TIFF])
WITHLIB_OPTION([tiff], [TIFF])
SAVED_CPPFLAGS=$CPPFLAGS
SAVED_LIBS=$LIBS
CPPFLAGS="$TIFF_INCLUDES $CPPFLAGS"
LIBS="$TIFF_LIBS $LIBS"
LIBCHECK_PROLOGUE([TIFF])
AC_CHECK_HEADER(tiffio.h,
AC_CHECK_LIB(tiff, TIFFGetVersion,
[TIFF_LIBS="$TIFF_LIBS -ltiff"
@ -143,37 +287,26 @@ AC_CHECK_HEADER(tiffio.h,
[$MATH_LIBS]),
AC_MSG_WARN(tiff library not available - no tiffio.h)
)
AC_SUBST(TIFF_LIBS)
AC_SUBST(TIFF_INCLUDES)
CPPFLAGS=$SAVED_CPPFLAGS
LIBS=$SAVED_LIBS
LIBCHECK_EPILOGUE([TIFF])
dnl === check for GIF support ===
GIF_INCLUDES=""
GIF_LIBS=""
CLEAR_LIBVARS([GIF])
WITHLIB_OPTION([gif], [GIF])
SAVED_CPPFLAGS=$CPPFLAGS
SAVED_LIBS=$LIBS
CPPFLAGS="$GIF_INCLUDES $CPPFLAGS"
LIBS="$GIF_LIBS $LIBS"
LIBCHECK_PROLOGUE([GIF])
AC_CHECK_HEADER(gif_lib.h,
AC_CHECK_LIB([gif], [DGifOpenFileHandle],
[GIF_LIBS="$GIF_LIBS -lgif"
AC_DEFINE(WEBP_HAVE_GIF, [1],
[Set to 1 if GIF library is installed])
gif_support=yes
],
AC_MSG_WARN(Optional gif library not found),
[$MATH_LIBS]),
AC_MSG_WARN(gif library not available - no gif_lib.h)
)
AC_SUBST(GIF_LIBS)
AC_SUBST(GIF_INCLUDES)
CPPFLAGS=$SAVED_CPPFLAGS
LIBS=$SAVED_LIBS
LIBCHECK_EPILOGUE([GIF])
if test "$gif_support" = "yes" -a \
"$enable_libwebpmux" = "yes"; then
@ -183,7 +316,13 @@ AM_CONDITIONAL([BUILD_GIF2WEBP], [test "${build_gif2webp}" = "yes"])
dnl === check for WIC support ===
if test "$target_os" = "mingw32"; then
AC_ARG_ENABLE([wic],
AS_HELP_STRING([--disable-wic],
[Disable Windows Imaging Component (WIC) detection.
@<:@default=auto@:>@]),,
[enable_wic=yes])
if test "$target_os" = "mingw32" -a "$enable_wic" = "yes"; then
AC_CHECK_HEADERS([wincodec.h shlwapi.h windows.h])
if test "$ac_cv_header_wincodec_h" = "yes"; then
AC_MSG_CHECKING(for Windows Imaging Component support)
@ -302,18 +441,20 @@ libwebpdemux: ${enable_libwebpdemux-no}
libwebpmux: ${enable_libwebpmux-no}
Tools:
cwebp: yes
cwebp : yes
Input format support
====================
JPEG : ${jpeg_support-no}
PNG : ${png_support-no}
TIFF : ${tiff_support-no}
WIC : ${wic_support-no}
dwebp: yes
dwebp : yes
Output format support
=====================
PNG : ${png_support-no}
WIC : ${wic_support-no}
gif2webp: ${build_gif2webp-no}
webpmux: ${enable_libwebpmux-no}
GIF support : ${gif_support-no}
gif2webp : ${build_gif2webp-no}
webpmux : ${enable_libwebpmux-no}
vwebp : ${build_vwebp-no}
])


@ -382,13 +382,17 @@ animation.
Background Color: 32 bits (_uint32_)
: The default background color of the canvas in \[Blue, Green, Red, Alpha\]
byte order. This color is used to fill the unused space on the canvas around the
frames, as well as the transparent pixels of the first frame. Background color
is also used when disposal method is `1`.
byte order. This color MAY be used to fill the unused space on the canvas around
the frames, as well as the transparent pixels of the first frame. Background
color is also used when disposal method is `1`.
**Note**: Viewers that have a preferred background against which to present the
images (web browsers, for example) should ignore this value and use their
preferred background color instead.
**Note**:
* Background color MAY contain a transparency value (alpha), even if the
_Alpha_ flag in [VP8X chunk](#extended_header) is unset.
* Viewer applications SHOULD treat the background color value as a hint, and
are not required to use it.
Loop Count: 16 bits (_uint16_)
@ -415,7 +419,7 @@ If the _Animation flag_ is not set, then this chunk SHOULD NOT be present.
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
... | Frame Height Minus One |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Frame Duration | Reserved |D|
| Frame Duration | Reserved |B|D|
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Frame Data |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
@ -444,10 +448,24 @@ Frame Duration: 24 bits (_uint24_)
In particular, frame duration of 0 is useful when one wants to update multiple
areas of the canvas at once during the animation.
Reserved: 7 bits
Reserved: 6 bits
: SHOULD be 0.
Blending method (B): 1 bit
: Indicates how transparent pixels of _the current frame_ are to be blended with
corresponding pixels of the previous canvas:
* `0`: Use alpha blending. After disposing of the previous frame, render the
current frame on the canvas using [alpha-blending](#alpha-blending). If the
current frame does not have an alpha channel, assume alpha value of 255,
effectively replacing the rectangle.
* `1`: Do not blend. After disposing of the previous frame, render the
current frame on the canvas by overwriting the rectangle covered by the
current frame.
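For illustration only (this sketch is not part of the spec text): blending method `0` corresponds to ordinary "source over destination" compositing of non-premultiplied 8-bit RGBA, roughly:

#include <stdint.h>

typedef struct { uint8_t r, g, b, a; } Pixel;

// Simplified integer sketch of "alpha blending" (B == 0); rounding and
// color-space details are glossed over compared to the normative text.
static Pixel BlendPixel(Pixel frame, Pixel canvas) {
  const int src_a = frame.a;
  const int dst_a = canvas.a * (255 - src_a) / 255;
  const int out_a = src_a + dst_a;
  Pixel out = { 0, 0, 0, 0 };
  if (out_a > 0) {
    out.r = (uint8_t)((frame.r * src_a + canvas.r * dst_a) / out_a);
    out.g = (uint8_t)((frame.g * src_a + canvas.g * dst_a) / out_a);
    out.b = (uint8_t)((frame.b * src_a + canvas.b * dst_a) / out_a);
    out.a = (uint8_t)out_a;
  }
  return out;
}
// With B == 1 ("do not blend") the frame pixel simply overwrites the
// canvas pixel inside the frame rectangle.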
Disposal method (D): 1 bit
: Indicates how _the current frame_ is to be treated after it has been displayed
@ -459,10 +477,6 @@ Disposal method (D): 1 bit
by the _current frame_ with background color specified in the
[ANIM chunk](#anim_chunk).
After disposing the current frame, render the next frame on the canvas using
[alpha-blending](#alpha-blending). If the next frame does not have an alpha
channel, assume alpha value of 255, effectively replacing the rectangle.
**Notes**:
* The frame disposal only applies to the _frame rectangle_, that is, the


@ -1,6 +1,9 @@
AM_CPPFLAGS = -I$(top_srcdir)/src
bin_PROGRAMS = dwebp cwebp
if BUILD_VWEBP
bin_PROGRAMS += vwebp
endif
if WANT_MUX
bin_PROGRAMS += webpmux
endif
@ -18,12 +21,6 @@ dwebp_CPPFLAGS = $(AM_CPPFLAGS) $(USE_EXPERIMENTAL_CODE)
dwebp_CPPFLAGS += $(JPEG_INCLUDES) $(PNG_INCLUDES)
dwebp_LDADD = libexampleutil.la $(PNG_LIBS) $(JPEG_LIBS)
if BUILD_LIBWEBPDECODER
dwebp_LDADD += ../src/libwebpdecoder.la
else
dwebp_LDADD += ../src/libwebp.la
endif
cwebp_SOURCES = cwebp.c metadata.c metadata.h stopwatch.h
cwebp_SOURCES += jpegdec.c jpegdec.h
cwebp_SOURCES += pngdec.c pngdec.h
@ -33,7 +30,7 @@ cwebp_CPPFLAGS = $(AM_CPPFLAGS) $(USE_EXPERIMENTAL_CODE)
cwebp_CPPFLAGS += $(JPEG_INCLUDES) $(PNG_INCLUDES) $(TIFF_INCLUDES)
cwebp_LDADD = ../src/libwebp.la $(JPEG_LIBS) $(PNG_LIBS) $(TIFF_LIBS)
gif2webp_SOURCES = gif2webp.c
gif2webp_SOURCES = gif2webp.c gif2webp_util.c
gif2webp_CPPFLAGS = $(AM_CPPFLAGS) $(USE_EXPERIMENTAL_CODE) $(GIF_INCLUDES)
gif2webp_LDADD = libexampleutil.la ../src/mux/libwebpmux.la ../src/libwebp.la
gif2webp_LDADD += $(GIF_LIBS)
@ -41,3 +38,15 @@ gif2webp_LDADD += $(GIF_LIBS)
webpmux_SOURCES = webpmux.c
webpmux_CPPFLAGS = $(AM_CPPFLAGS) $(USE_EXPERIMENTAL_CODE)
webpmux_LDADD = libexampleutil.la ../src/mux/libwebpmux.la ../src/libwebp.la
vwebp_SOURCES = vwebp.c
vwebp_CPPFLAGS = $(AM_CPPFLAGS) $(USE_EXPERIMENTAL_CODE) $(GL_INCLUDES)
vwebp_LDADD = libexampleutil.la ../src/demux/libwebpdemux.la $(GL_LIBS)
if BUILD_LIBWEBPDECODER
dwebp_LDADD += ../src/libwebpdecoder.la
vwebp_LDADD += ../src/libwebpdecoder.la
else
dwebp_LDADD += ../src/libwebp.la
vwebp_LDADD += ../src/libwebp.la
endif


@ -1,8 +1,10 @@
// Copyright 2011 Google Inc. All Rights Reserved.
//
// This code is licensed under the same terms as WebM:
// Software License Agreement: http://www.webmproject.org/license/software/
// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// simple command line calling the WebPEncode function.
@ -29,13 +31,13 @@
#include "./wicdec.h"
#ifndef WEBP_DLL
#if defined(__cplusplus) || defined(c_plusplus)
#ifdef __cplusplus
extern "C" {
#endif
extern void* VP8GetCPUInfo; // opaque forward declaration.
#if defined(__cplusplus) || defined(c_plusplus)
#ifdef __cplusplus
} // extern "C"
#endif
#endif // WEBP_DLL
@ -109,15 +111,15 @@ typedef enum {
static InputFileFormat GetImageType(FILE* in_file) {
InputFileFormat format = UNSUPPORTED;
unsigned int magic;
unsigned char buf[4];
uint32_t magic;
uint8_t buf[4];
if ((fread(&buf[0], 4, 1, in_file) != 1) ||
(fseek(in_file, 0, SEEK_SET) != 0)) {
return format;
}
magic = (buf[0] << 24) | (buf[1] << 16) | (buf[2] << 8) | buf[3];
magic = ((uint32_t)buf[0] << 24) | (buf[1] << 16) | (buf[2] << 8) | buf[3];
if (magic == 0x89504E47U) {
format = PNG_;
} else if (magic >= 0xFFD8FF00U && magic <= 0xFFD8FFFFU) {
@ -492,11 +494,14 @@ static int WriteWebPWithMetadata(FILE* const out,
if (has_vp8x) { // update the existing VP8X flags
webp[kChunkHeaderSize] |= (uint8_t)(flags & 0xff);
ok = ok && (fwrite(webp, kVP8XChunkSize, 1, out) == 1);
webp += kVP8XChunkSize;
webp_size -= kVP8XChunkSize;
} else {
const int is_lossless = !memcmp(webp, "VP8L", kTagSize);
// The alpha flag is forced with lossless images.
if (is_lossless) flags |= kAlphaFlag;
if (is_lossless) {
// Presence of alpha is stored in the 29th bit of VP8L data.
if (webp[kChunkHeaderSize + 3] & (1 << 5)) flags |= kAlphaFlag;
}
ok = ok && (fwrite(kVP8XHeader, kChunkHeaderSize, 1, out) == 1);
ok = ok && WriteLE32(out, flags);
ok = ok && WriteLE24(out, picture->width - 1);
@ -594,6 +599,10 @@ static void HelpLong(void) {
printf(" -alpha_filter <string> . predictive filtering for alpha plane.\n");
printf(" One of: none, fast (default) or best.\n");
printf(" -alpha_cleanup ......... Clean RGB values in transparent area.\n");
printf(" -blend_alpha <hex> ..... Blend colors against background color\n"
" expressed as RGB values written in\n"
" hexadecimal, e.g. 0xc0e0d0 for red=0xc0\n"
" green=0xe0 and blue=0xd0.\n");
printf(" -noalpha ............... discard any transparency information.\n");
printf(" -lossless .............. Encode image losslessly.\n");
printf(" -hint <string> ......... Specify image characteristics hint.\n");
@ -656,6 +665,8 @@ int main(int argc, const char *argv[]) {
int short_output = 0;
int quiet = 0;
int keep_alpha = 1;
int blend_alpha = 0;
uint32_t background_color = 0xffffffu;
int crop = 0, crop_x = 0, crop_y = 0, crop_w = 0, crop_h = 0;
int resize_w = 0, resize_h = 0;
int show_progress = 0;
@ -720,6 +731,10 @@ int main(int argc, const char *argv[]) {
config.alpha_compression = strtol(argv[++c], NULL, 0);
} else if (!strcmp(argv[c], "-alpha_cleanup")) {
keep_alpha = keep_alpha ? 2 : 0;
} else if (!strcmp(argv[c], "-blend_alpha") && c < argc - 1) {
blend_alpha = 1;
background_color = strtol(argv[++c], NULL, 16); // <- parses '0x' prefix
background_color = background_color & 0x00ffffffu;
} else if (!strcmp(argv[c], "-alpha_filter") && c < argc - 1) {
++c;
if (!strcmp(argv[c], "none")) {
@ -736,7 +751,6 @@ int main(int argc, const char *argv[]) {
keep_alpha = 0;
} else if (!strcmp(argv[c], "-lossless")) {
config.lossless = 1;
picture.use_argb = 1;
} else if (!strcmp(argv[c], "-hint") && c < argc - 1) {
++c;
if (!strcmp(argv[c], "photo")) {
@ -882,6 +896,9 @@ int main(int argc, const char *argv[]) {
#endif
} else if (!strcmp(argv[c], "-v")) {
verbose = 1;
} else if (!strcmp(argv[c], "--")) {
if (c < argc - 1) in_file = argv[++c];
break;
} else if (argv[c][0] == '-') {
fprintf(stderr, "Error! Unknown option '%s'\n", argv[c]);
HelpLong();
@ -916,7 +933,7 @@ int main(int argc, const char *argv[]) {
// Read the input
if (verbose) {
StopwatchReadAndReset(&stop_watch);
StopwatchReset(&stop_watch);
}
if (!ReadPicture(in_file, &picture, keep_alpha,
(keep_metadata == 0) ? NULL : &metadata)) {
@ -924,6 +941,11 @@ int main(int argc, const char *argv[]) {
goto Error;
}
picture.progress_hook = (show_progress && !quiet) ? ProgressReport : NULL;
if (blend_alpha) {
WebPBlendAlpha(&picture, background_color);
}
if (keep_alpha == 2) {
WebPCleanupTransparentArea(&picture);
}
@ -965,7 +987,7 @@ int main(int argc, const char *argv[]) {
// Compress
if (verbose) {
StopwatchReadAndReset(&stop_watch);
StopwatchReset(&stop_watch);
}
if (crop != 0) {
// We use self-cropping using a view.
@ -1006,11 +1028,32 @@ int main(int argc, const char *argv[]) {
}
}
if (keep_metadata != 0 && out != NULL) {
if (!WriteWebPWithMetadata(out, &picture, &memory_writer,
&metadata, keep_metadata, &metadata_written)) {
fprintf(stderr, "Error writing WebP file with metadata!\n");
goto Error;
if (keep_metadata != 0) {
if (out != NULL) {
if (!WriteWebPWithMetadata(out, &picture, &memory_writer,
&metadata, keep_metadata, &metadata_written)) {
fprintf(stderr, "Error writing WebP file with metadata!\n");
goto Error;
}
} else { // output is disabled, just display the metadata stats.
const struct {
const MetadataPayload* const payload;
int flag;
} *iter, info[] = {
{ &metadata.exif, METADATA_EXIF },
{ &metadata.iccp, METADATA_ICC },
{ &metadata.xmp, METADATA_XMP },
{ NULL, 0 }
};
uint32_t unused1 = 0;
uint64_t unused2 = 0;
for (iter = info; iter->payload != NULL; ++iter) {
if (UpdateFlagsAndSize(iter->payload, !!(keep_metadata & iter->flag),
0, &unused1, &unused2)) {
metadata_written |= iter->flag;
}
}
}
}
@ -1027,8 +1070,22 @@ int main(int argc, const char *argv[]) {
if (!quiet && !short_output && print_distortion >= 0) { // print distortion
static const char* distortion_names[] = { "PSNR", "SSIM", "LSIM" };
float values[5];
WebPPictureDistortion(&picture, &original_picture,
print_distortion, values);
// Comparison is performed in YUVA colorspace.
if (original_picture.use_argb &&
!WebPPictureARGBToYUVA(&original_picture, WEBP_YUV420A)) {
fprintf(stderr, "Error while converting original picture to YUVA.\n");
goto Error;
}
if (picture.use_argb &&
!WebPPictureARGBToYUVA(&picture, WEBP_YUV420A)) {
fprintf(stderr, "Error while converting compressed picture to YUVA.\n");
goto Error;
}
if (!WebPPictureDistortion(&picture, &original_picture,
print_distortion, values)) {
fprintf(stderr, "Error while computing the distortion.\n");
goto Error;
}
fprintf(stderr, "%s: Y:%.2f U:%.2f V:%.2f A:%.2f Total:%.2f\n",
distortion_names[print_distortion],
values[0], values[1], values[2], values[3], values[4]);


@ -1,13 +1,13 @@
// Copyright 2010 Google Inc. All Rights Reserved.
//
// This code is licensed under the same terms as WebM:
// Software License Agreement: http://www.webmproject.org/license/software/
// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// Command-line tool for decoding a WebP image
//
// Compile with: gcc -o dwebp dwebp.c -lwebpdecode
// Command-line tool for decoding a WebP image.
//
// Author: Skal (pascal.massimino@gmail.com)
@ -32,24 +32,30 @@
#define COBJMACROS
#define _WIN32_IE 0x500 // Workaround bug in shlwapi.h when compiling C++
// code with COBJMACROS.
#include <ole2.h> // CreateStreamOnHGlobal()
#include <shlwapi.h>
#include <windows.h>
#include <wincodec.h>
#endif
#if defined(_WIN32)
#include <fcntl.h> // for _O_BINARY
#include <io.h> // for _setmode()
#endif
#include "webp/decode.h"
#include "./example_util.h"
#include "./stopwatch.h"
static int verbose = 0;
#ifndef WEBP_DLL
#if defined(__cplusplus) || defined(c_plusplus)
#ifdef __cplusplus
extern "C" {
#endif
extern void* VP8GetCPUInfo; // opaque forward declaration.
#if defined(__cplusplus) || defined(c_plusplus)
#ifdef __cplusplus
} // extern "C"
#endif
#endif // WEBP_DLL
@ -62,6 +68,8 @@ typedef enum {
PAM,
PPM,
PGM,
BMP,
TIFF,
YUV,
ALPHA_PLANE_ONLY // this is for experimenting only
} OutputFileFormat;
@ -82,9 +90,15 @@ typedef enum {
#define MAKE_REFGUID(x) &(x)
#endif
static HRESULT CreateOutputStream(const char* out_file_name, IStream** stream) {
static HRESULT CreateOutputStream(const char* out_file_name,
int write_to_mem, IStream** stream) {
HRESULT hr = S_OK;
IFS(SHCreateStreamOnFileA(out_file_name, STGM_WRITE | STGM_CREATE, stream));
if (write_to_mem) {
// Output to a memory buffer. This is freed when 'stream' is released.
IFS(CreateStreamOnHGlobal(NULL, TRUE, stream));
} else {
IFS(SHCreateStreamOnFileA(out_file_name, STGM_WRITE | STGM_CREATE, stream));
}
if (FAILED(hr)) {
fprintf(stderr, "Error opening output file %s (%08lx)\n",
out_file_name, hr);
@ -92,8 +106,9 @@ static HRESULT CreateOutputStream(const char* out_file_name, IStream** stream) {
return hr;
}
static HRESULT WriteUsingWIC(const char* out_file_name, REFGUID container_guid,
unsigned char* rgb, int stride,
static HRESULT WriteUsingWIC(const char* out_file_name, int use_stdout,
REFGUID container_guid,
uint8_t* rgb, int stride,
uint32_t width, uint32_t height, int has_alpha) {
HRESULT hr = S_OK;
IWICImagingFactory* factory = NULL;
@ -114,7 +129,7 @@ static HRESULT WriteUsingWIC(const char* out_file_name, REFGUID container_guid,
"Windows XP SP3 or newer?). PNG support not available. "
"Use -ppm or -pgm for available PPM and PGM formats.\n");
}
IFS(CreateOutputStream(out_file_name, &stream));
IFS(CreateOutputStream(out_file_name, use_stdout, &stream));
IFS(IWICImagingFactory_CreateEncoder(factory, container_guid, NULL,
&encoder));
IFS(IWICBitmapEncoder_Initialize(encoder, stream,
@ -128,6 +143,28 @@ static HRESULT WriteUsingWIC(const char* out_file_name, REFGUID container_guid,
IFS(IWICBitmapFrameEncode_Commit(frame));
IFS(IWICBitmapEncoder_Commit(encoder));
if (SUCCEEDED(hr) && use_stdout) {
HGLOBAL image;
IFS(GetHGlobalFromStream(stream, &image));
if (SUCCEEDED(hr)) {
HANDLE std_output = GetStdHandle(STD_OUTPUT_HANDLE);
DWORD mode;
const BOOL update_mode = GetConsoleMode(std_output, &mode);
const void* const image_mem = GlobalLock(image);
DWORD bytes_written = 0;
// Clear output processing if necessary, then output the image.
if (update_mode) SetConsoleMode(std_output, 0);
if (!WriteFile(std_output, image_mem, (DWORD)GlobalSize(image),
&bytes_written, NULL) ||
bytes_written != GlobalSize(image)) {
hr = E_FAIL;
}
if (update_mode) SetConsoleMode(std_output, mode);
GlobalUnlock(image);
}
}
if (frame != NULL) IUnknown_Release(frame);
if (encoder != NULL) IUnknown_Release(encoder);
if (factory != NULL) IUnknown_Release(factory);
@ -135,21 +172,21 @@ static HRESULT WriteUsingWIC(const char* out_file_name, REFGUID container_guid,
return hr;
}
static int WritePNG(const char* out_file_name,
static int WritePNG(const char* out_file_name, int use_stdout,
const WebPDecBuffer* const buffer) {
const uint32_t width = buffer->width;
const uint32_t height = buffer->height;
unsigned char* const rgb = buffer->u.RGBA.rgba;
uint8_t* const rgb = buffer->u.RGBA.rgba;
const int stride = buffer->u.RGBA.stride;
const int has_alpha = (buffer->colorspace == MODE_BGRA);
return SUCCEEDED(WriteUsingWIC(out_file_name,
return SUCCEEDED(WriteUsingWIC(out_file_name, use_stdout,
MAKE_REFGUID(GUID_ContainerFormatPng),
rgb, stride, width, height, has_alpha));
}
#elif defined(WEBP_HAVE_PNG) // !HAVE_WINCODEC_H
static void PNGAPI error_function(png_structp png, png_const_charp dummy) {
static void PNGAPI PNGErrorFunction(png_structp png, png_const_charp dummy) {
(void)dummy; // remove variable-unused warning
longjmp(png_jmpbuf(png), 1);
}
@ -157,7 +194,7 @@ static void PNGAPI error_function(png_structp png, png_const_charp dummy) {
static int WritePNG(FILE* out_file, const WebPDecBuffer* const buffer) {
const uint32_t width = buffer->width;
const uint32_t height = buffer->height;
unsigned char* const rgb = buffer->u.RGBA.rgba;
uint8_t* const rgb = buffer->u.RGBA.rgba;
const int stride = buffer->u.RGBA.stride;
const int has_alpha = (buffer->colorspace == MODE_RGBA);
png_structp png;
@ -165,7 +202,7 @@ static int WritePNG(FILE* out_file, const WebPDecBuffer* const buffer) {
png_uint_32 y;
png = png_create_write_struct(PNG_LIBPNG_VER_STRING,
NULL, error_function, NULL);
NULL, PNGErrorFunction, NULL);
if (png == NULL) {
return 0;
}
@ -206,7 +243,7 @@ static int WritePNG(FILE* out_file, const WebPDecBuffer* const buffer) {
static int WritePPM(FILE* fout, const WebPDecBuffer* const buffer, int alpha) {
const uint32_t width = buffer->width;
const uint32_t height = buffer->height;
const unsigned char* const rgb = buffer->u.RGBA.rgba;
const uint8_t* const rgb = buffer->u.RGBA.rgba;
const int stride = buffer->u.RGBA.stride;
const size_t bytes_per_px = alpha ? 4 : 3;
uint32_t y;
@ -225,10 +262,150 @@ static int WritePPM(FILE* fout, const WebPDecBuffer* const buffer, int alpha) {
return 1;
}
static void PutLE16(uint8_t* const dst, uint32_t value) {
dst[0] = (value >> 0) & 0xff;
dst[1] = (value >> 8) & 0xff;
}
static void PutLE32(uint8_t* const dst, uint32_t value) {
PutLE16(dst + 0, (value >> 0) & 0xffff);
PutLE16(dst + 2, (value >> 16) & 0xffff);
}
#define BMP_HEADER_SIZE 54
static int WriteBMP(FILE* fout, const WebPDecBuffer* const buffer) {
const int has_alpha = (buffer->colorspace != MODE_BGR);
const uint32_t width = buffer->width;
const uint32_t height = buffer->height;
const uint8_t* const rgba = buffer->u.RGBA.rgba;
const int stride = buffer->u.RGBA.stride;
const uint32_t bytes_per_px = has_alpha ? 4 : 3;
uint32_t y;
const uint32_t line_size = bytes_per_px * width;
const uint32_t bmp_stride = (line_size + 3) & ~3; // pad to 4
const uint32_t total_size = bmp_stride * height + BMP_HEADER_SIZE;
uint8_t bmp_header[BMP_HEADER_SIZE] = { 0 };
// bitmap file header
PutLE16(bmp_header + 0, 0x4d42); // signature 'BM'
PutLE32(bmp_header + 2, total_size); // size including header
PutLE32(bmp_header + 6, 0); // reserved
PutLE32(bmp_header + 10, BMP_HEADER_SIZE); // offset to pixel array
// bitmap info header
PutLE32(bmp_header + 14, 40); // DIB header size
PutLE32(bmp_header + 18, width); // dimensions
PutLE32(bmp_header + 22, -(int)height); // vertical flip!
PutLE16(bmp_header + 26, 1); // number of planes
PutLE16(bmp_header + 28, bytes_per_px * 8); // bits per pixel
PutLE32(bmp_header + 30, 0); // no compression (BI_RGB)
PutLE32(bmp_header + 34, 0); // image size (dummy)
PutLE32(bmp_header + 38, 2400); // x pixels/meter
PutLE32(bmp_header + 42, 2400); // y pixels/meter
PutLE32(bmp_header + 46, 0); // number of palette colors
PutLE32(bmp_header + 50, 0); // important color count
// TODO(skal): color profile
// write header
if (fwrite(bmp_header, sizeof(bmp_header), 1, fout) != 1) {
return 0;
}
// write pixel array
for (y = 0; y < height; ++y) {
if (fwrite(rgba + y * stride, line_size, 1, fout) != 1) {
return 0;
}
// write padding zeroes
if (bmp_stride != line_size) {
const uint8_t zeroes[3] = { 0 };
if (fwrite(zeroes, bmp_stride - line_size, 1, fout) != 1) {
return 0;
}
}
}
return 1;
}
#undef BMP_HEADER_SIZE
#define NUM_IFD_ENTRIES 15
#define EXTRA_DATA_SIZE 16
// 10b for signature/header + n * 12b entries + 4b for IFD terminator:
#define EXTRA_DATA_OFFSET (10 + 12 * NUM_IFD_ENTRIES + 4)
#define TIFF_HEADER_SIZE (EXTRA_DATA_OFFSET + EXTRA_DATA_SIZE)
static int WriteTIFF(FILE* fout, const WebPDecBuffer* const buffer) {
const int has_alpha = (buffer->colorspace != MODE_RGB);
const uint32_t width = buffer->width;
const uint32_t height = buffer->height;
const uint8_t* const rgba = buffer->u.RGBA.rgba;
const int stride = buffer->u.RGBA.stride;
const uint8_t bytes_per_px = has_alpha ? 4 : 3;
// For non-alpha case, we omit tag 0x152 (ExtraSamples).
const uint8_t num_ifd_entries = has_alpha ? NUM_IFD_ENTRIES
: NUM_IFD_ENTRIES - 1;
uint8_t tiff_header[TIFF_HEADER_SIZE] = {
0x49, 0x49, 0x2a, 0x00, // little endian signature
8, 0, 0, 0, // offset to the unique IFD that follows
// IFD (offset = 8). Entries must be written in increasing tag order.
num_ifd_entries, 0, // Number of entries in the IFD (12 bytes each).
0x00, 0x01, 3, 0, 1, 0, 0, 0, 0, 0, 0, 0, // 10: Width (TBD)
0x01, 0x01, 3, 0, 1, 0, 0, 0, 0, 0, 0, 0, // 22: Height (TBD)
0x02, 0x01, 3, 0, bytes_per_px, 0, 0, 0, // 34: BitsPerSample: 8888
EXTRA_DATA_OFFSET + 0, 0, 0, 0,
0x03, 0x01, 3, 0, 1, 0, 0, 0, 1, 0, 0, 0, // 46: Compression: none
0x06, 0x01, 3, 0, 1, 0, 0, 0, 2, 0, 0, 0, // 58: Photometric: RGB
0x11, 0x01, 4, 0, 1, 0, 0, 0, // 70: Strips offset:
TIFF_HEADER_SIZE, 0, 0, 0, // data follows header
0x12, 0x01, 3, 0, 1, 0, 0, 0, 1, 0, 0, 0, // 82: Orientation: topleft
0x15, 0x01, 3, 0, 1, 0, 0, 0, // 94: SamplesPerPixels
bytes_per_px, 0, 0, 0,
0x16, 0x01, 3, 0, 1, 0, 0, 0, 0, 0, 0, 0, // 106: Rows per strip (TBD)
0x17, 0x01, 4, 0, 1, 0, 0, 0, 0, 0, 0, 0, // 118: StripByteCount (TBD)
0x1a, 0x01, 5, 0, 1, 0, 0, 0, // 130: X-resolution
EXTRA_DATA_OFFSET + 8, 0, 0, 0,
0x1b, 0x01, 5, 0, 1, 0, 0, 0, // 142: Y-resolution
EXTRA_DATA_OFFSET + 8, 0, 0, 0,
0x1c, 0x01, 3, 0, 1, 0, 0, 0, 1, 0, 0, 0, // 154: PlanarConfiguration
0x28, 0x01, 3, 0, 1, 0, 0, 0, 2, 0, 0, 0, // 166: ResolutionUnit (inch)
0x52, 0x01, 3, 0, 1, 0, 0, 0, 1, 0, 0, 0, // 178: ExtraSamples: rgbA
0, 0, 0, 0, // 190: IFD terminator
// EXTRA_DATA_OFFSET:
8, 0, 8, 0, 8, 0, 8, 0, // BitsPerSample
72, 0, 0, 0, 1, 0, 0, 0 // 72 pixels/inch, for X/Y-resolution
};
uint32_t y;
// Fill placeholders in IFD:
PutLE32(tiff_header + 10 + 8, width);
PutLE32(tiff_header + 22 + 8, height);
PutLE32(tiff_header + 106 + 8, height);
PutLE32(tiff_header + 118 + 8, width * bytes_per_px * height);
if (!has_alpha) PutLE32(tiff_header + 178, 0); // IFD terminator
// write header
if (fwrite(tiff_header, sizeof(tiff_header), 1, fout) != 1) {
return 0;
}
// write pixel values
for (y = 0; y < height; ++y) {
if (fwrite(rgba + y * stride, bytes_per_px, width, fout) != width) {
return 0;
}
}
return 1;
}
#undef TIFF_HEADER_SIZE
#undef EXTRA_DATA_OFFSET
#undef EXTRA_DATA_SIZE
#undef NUM_IFD_ENTRIES
static int WriteAlphaPlane(FILE* fout, const WebPDecBuffer* const buffer) {
const uint32_t width = buffer->width;
const uint32_t height = buffer->height;
const unsigned char* const a = buffer->u.YUVA.a;
const uint8_t* const a = buffer->u.YUVA.a;
const int a_stride = buffer->u.YUVA.a_stride;
uint32_t y;
assert(a != NULL);
@ -289,30 +466,40 @@ static int WritePGMOrYUV(FILE* fout, const WebPDecBuffer* const buffer,
return ok;
}
static void SaveOutput(const WebPDecBuffer* const buffer,
OutputFileFormat format, const char* const out_file) {
static int SaveOutput(const WebPDecBuffer* const buffer,
OutputFileFormat format, const char* const out_file) {
FILE* fout = NULL;
int needs_open_file = 1;
const int use_stdout = !strcmp(out_file, "-");
int ok = 1;
Stopwatch stop_watch;
if (verbose)
StopwatchReadAndReset(&stop_watch);
if (verbose) {
StopwatchReset(&stop_watch);
}
#ifdef HAVE_WINCODEC_H
needs_open_file = (format != PNG);
#endif
#if defined(_WIN32)
if (use_stdout && _setmode(_fileno(stdout), _O_BINARY) == -1) {
fprintf(stderr, "Failed to reopen stdout in O_BINARY mode.\n");
return -1;
}
#endif
if (needs_open_file) {
fout = fopen(out_file, "wb");
if (!fout) {
fout = use_stdout ? stdout : fopen(out_file, "wb");
if (fout == NULL) {
fprintf(stderr, "Error opening output file %s\n", out_file);
return;
return 0;
}
}
if (format == PNG) {
#ifdef HAVE_WINCODEC_H
ok &= WritePNG(out_file, buffer);
ok &= WritePNG(out_file, use_stdout, buffer);
#else
ok &= WritePNG(fout, buffer);
#endif
@ -320,23 +507,36 @@ static void SaveOutput(const WebPDecBuffer* const buffer,
ok &= WritePPM(fout, buffer, 1);
} else if (format == PPM) {
ok &= WritePPM(fout, buffer, 0);
} else if (format == BMP) {
ok &= WriteBMP(fout, buffer);
} else if (format == TIFF) {
ok &= WriteTIFF(fout, buffer);
} else if (format == PGM || format == YUV) {
ok &= WritePGMOrYUV(fout, buffer, format);
} else if (format == ALPHA_PLANE_ONLY) {
ok &= WriteAlphaPlane(fout, buffer);
}
if (fout) {
if (fout != NULL && fout != stdout) {
fclose(fout);
}
if (ok) {
printf("Saved file %s\n", out_file);
if (use_stdout) {
fprintf(stderr, "Saved to stdout\n");
} else {
fprintf(stderr, "Saved file %s\n", out_file);
}
if (verbose) {
const double write_time = StopwatchReadAndReset(&stop_watch);
printf("Time to write output: %.3fs\n", write_time);
fprintf(stderr, "Time to write output: %.3fs\n", write_time);
}
} else {
fprintf(stderr, "Error writing file %s !!\n", out_file);
if (use_stdout) {
fprintf(stderr, "Error writing to stdout !!\n");
} else {
fprintf(stderr, "Error writing file %s !!\n", out_file);
}
}
return ok;
}
static void Help(void) {
@ -345,18 +545,23 @@ static void Help(void) {
"Use following options to convert into alternate image formats:\n"
" -pam ......... save the raw RGBA samples as a color PAM\n"
" -ppm ......... save the raw RGB samples as a color PPM\n"
" -bmp ......... save as uncompressed BMP format\n"
" -tiff ........ save as uncompressed TIFF format\n"
" -pgm ......... save the raw YUV samples as a grayscale PGM\n"
" file with IMC4 layout.\n"
" -yuv ......... save the raw YUV samples in flat layout.\n"
" file with IMC4 layout\n"
" -yuv ......... save the raw YUV samples in flat layout\n"
"\n"
" Other options are:\n"
" -version .... print version number and exit.\n"
" -nofancy ..... don't use the fancy YUV420 upscaler.\n"
" -nofilter .... disable in-loop filtering.\n"
" -nodither .... disable dithering.\n"
" -dither <d> .. dithering strength (in 0..100)\n"
" -mt .......... use multi-threading\n"
" -crop <x> <y> <w> <h> ... crop output with the given rectangle\n"
" -scale <w> <h> .......... scale the output (*after* any cropping)\n"
" -alpha ....... only save the alpha plane.\n"
" -incremental . use incremental decoding (useful for tests)\n"
" -h ....... this help message.\n"
" -v ....... verbose (e.g. print encoding/decoding times)\n"
#ifndef WEBP_DLL
@ -370,7 +575,12 @@ static const char* const kStatusMessages[] = {
"UNSUPPORTED_FEATURE", "SUSPENDED", "USER_ABORT", "NOT_ENOUGH_DATA"
};
static const char* const kFormatType[] = {
"unspecified", "lossy", "lossless"
};
int main(int argc, const char *argv[]) {
int ok = 0;
const char *in_file = NULL;
const char *out_file = NULL;
@ -378,6 +588,7 @@ int main(int argc, const char *argv[]) {
WebPDecBuffer* const output_buffer = &config.output;
WebPBitstreamFeatures* const bitstream = &config.input;
OutputFileFormat format = PNG;
int incremental = 0;
int c;
if (!WebPInitDecoderConfig(&config)) {
@ -401,6 +612,10 @@ int main(int argc, const char *argv[]) {
format = PAM;
} else if (!strcmp(argv[c], "-ppm")) {
format = PPM;
} else if (!strcmp(argv[c], "-bmp")) {
format = BMP;
} else if (!strcmp(argv[c], "-tiff")) {
format = TIFF;
} else if (!strcmp(argv[c], "-version")) {
const int version = WebPGetDecoderVersion();
printf("%d.%d.%d\n",
@ -412,6 +627,10 @@ int main(int argc, const char *argv[]) {
format = YUV;
} else if (!strcmp(argv[c], "-mt")) {
config.options.use_threads = 1;
} else if (!strcmp(argv[c], "-nodither")) {
config.options.dithering_strength = 0;
} else if (!strcmp(argv[c], "-dither") && c < argc - 1) {
config.options.dithering_strength = strtol(argv[++c], NULL, 0);
} else if (!strcmp(argv[c], "-crop") && c < argc - 4) {
config.options.use_cropping = 1;
config.options.crop_left = strtol(argv[++c], NULL, 0);
@ -428,6 +647,11 @@ int main(int argc, const char *argv[]) {
} else if (!strcmp(argv[c], "-noasm")) {
VP8GetCPUInfo = NULL;
#endif
} else if (!strcmp(argv[c], "-incremental")) {
incremental = 1;
} else if (!strcmp(argv[c], "--")) {
if (c < argc - 1) in_file = argv[++c];
break;
} else if (argv[c][0] == '-') {
fprintf(stderr, "Unknown option '%s'\n", argv[c]);
Help();
@ -446,14 +670,14 @@ int main(int argc, const char *argv[]) {
{
Stopwatch stop_watch;
VP8StatusCode status = VP8_STATUS_OK;
int ok;
size_t data_size = 0;
const uint8_t* data = NULL;
if (!ExUtilReadFile(in_file, &data, &data_size)) return -1;
if (verbose)
StopwatchReadAndReset(&stop_watch);
if (verbose) {
StopwatchReset(&stop_watch);
}
status = WebPGetFeatures(data, data_size, bitstream);
if (status != VP8_STATUS_OK) {
@ -481,6 +705,13 @@ int main(int argc, const char *argv[]) {
case PPM:
output_buffer->colorspace = MODE_RGB; // drops alpha for PPM
break;
case BMP:
output_buffer->colorspace = bitstream->has_alpha ? MODE_BGRA : MODE_BGR;
break;
case TIFF: // note: force pre-multiplied alpha
output_buffer->colorspace =
bitstream->has_alpha ? MODE_rgbA : MODE_RGB;
break;
case PGM:
case YUV:
output_buffer->colorspace = bitstream->has_alpha ? MODE_YUVA : MODE_YUV;
@ -492,11 +723,25 @@ int main(int argc, const char *argv[]) {
free((void*)data);
return -1;
}
status = WebPDecode(data, data_size, &config);
// Decoding call.
if (!incremental) {
status = WebPDecode(data, data_size, &config);
} else {
WebPIDecoder* const idec = WebPIDecode(data, data_size, &config);
if (idec == NULL) {
fprintf(stderr, "Failed during WebPINewDecoder().\n");
status = VP8_STATUS_OUT_OF_MEMORY;
goto end;
} else {
status = WebPIUpdate(idec, data, data_size);
WebPIDelete(idec);
}
}
if (verbose) {
const double decode_time = StopwatchReadAndReset(&stop_watch);
printf("Time to decode picture: %.3fs\n", decode_time);
fprintf(stderr, "Time to decode picture: %.3fs\n", decode_time);
}
end:
free((void*)data);
@ -504,24 +749,29 @@ int main(int argc, const char *argv[]) {
if (!ok) {
fprintf(stderr, "Decoding of %s failed.\n", in_file);
fprintf(stderr, "Status: %d (%s)\n", status, kStatusMessages[status]);
return -1;
goto Exit;
}
}
if (out_file) {
printf("Decoded %s. Dimensions: %d x %d%s. Now saving...\n", in_file,
output_buffer->width, output_buffer->height,
bitstream->has_alpha ? " (with alpha)" : "");
SaveOutput(output_buffer, format, out_file);
if (out_file != NULL) {
fprintf(stderr, "Decoded %s. Dimensions: %d x %d %s. Format: %s. "
"Now saving...\n",
in_file, output_buffer->width, output_buffer->height,
bitstream->has_alpha ? " (with alpha)" : "",
kFormatType[bitstream->format]);
ok = SaveOutput(output_buffer, format, out_file);
} else {
printf("File %s can be decoded (dimensions: %d x %d)%s.\n",
in_file, output_buffer->width, output_buffer->height,
bitstream->has_alpha ? " (with alpha)" : "");
printf("Nothing written; use -o flag to save the result as e.g. PNG.\n");
fprintf(stderr, "File %s can be decoded "
"(dimensions: %d x %d %s. Format: %s).\n",
in_file, output_buffer->width, output_buffer->height,
bitstream->has_alpha ? " (with alpha)" : "",
kFormatType[bitstream->format]);
fprintf(stderr, "Nothing written; "
"use -o flag to save the result as e.g. PNG.\n");
}
Exit:
WebPFreeDecBuffer(output_buffer);
return 0;
return ok ? 0 : -1;
}
//------------------------------------------------------------------------------


@ -1,8 +1,10 @@
// Copyright 2012 Google Inc. All Rights Reserved.
//
// This code is licensed under the same terms as WebM:
// Software License Agreement: http://www.webmproject.org/license/software/
// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// Utility functions used by the example programs.
@ -12,10 +14,6 @@
#include <stdio.h>
#include <stdlib.h>
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
// -----------------------------------------------------------------------------
// File I/O
@ -72,6 +70,3 @@ int ExUtilWriteFile(const char* const file_name,
return ok;
}
#if defined(__cplusplus) || defined(c_plusplus)
} // extern "C"
#endif


@ -1,8 +1,10 @@
// Copyright 2012 Google Inc. All Rights Reserved.
//
// This code is licensed under the same terms as WebM:
// Software License Agreement: http://www.webmproject.org/license/software/
// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// Utility functions used by the example programs.
@ -13,7 +15,7 @@
#include "webp/types.h"
#if defined(__cplusplus) || defined(c_plusplus)
#ifdef __cplusplus
extern "C" {
#endif
@ -27,7 +29,7 @@ int ExUtilReadFile(const char* const file_name,
int ExUtilWriteFile(const char* const file_name,
const uint8_t* data, size_t data_size);
#if defined(__cplusplus) || defined(c_plusplus)
#ifdef __cplusplus
} // extern "C"
#endif


@ -1,54 +1,86 @@
// Copyright 2012 Google Inc. All Rights Reserved.
//
// This code is licensed under the same terms as WebM:
// Software License Agreement: http://www.webmproject.org/license/software/
// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// simple tool to convert animated GIFs to WebP
//
// Getting the prerequisites:
// Debian-like linux:
// sudo apt-get install libgif-dev
// MacPorts
// sudo port install giflib
//
// Compiling:
// gcc -o gif2webp gif2webp.c -O3 -lwebpmux -lwebp -lgif -lpthread -lm
//
// Authors: Skal (pascal.massimino@gmail.com)
// Urvang (urvang@google.com)
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#ifdef WEBP_HAVE_GIF
#include <gif_lib.h>
#include "webp/encode.h"
#include "webp/mux.h"
#include "./example_util.h"
#include "./gif2webp_util.h"
#define GIF_TRANSPARENT_MASK 0x01
#define GIF_DISPOSE_MASK 0x07
#define GIF_DISPOSE_SHIFT 2
#define TRANSPARENT_COLOR 0x00ffffff
#define WHITE_COLOR 0xffffffff
#define MAX_CACHE_SIZE 30
//------------------------------------------------------------------------------
static int transparent_index = -1; // No transparency by default.
static int transparent_index = -1; // Index of transparent color in the map.
static void ClearPicture(WebPPicture* const picture, uint32_t color) {
int x, y;
for (y = 0; y < picture->height; ++y) {
uint32_t* const dst = picture->argb + y * picture->argb_stride;
for (x = 0; x < picture->width; ++x) dst[x] = color;
static void SanitizeKeyFrameIntervals(size_t* const kmin_ptr,
size_t* const kmax_ptr) {
size_t kmin = *kmin_ptr;
size_t kmax = *kmax_ptr;
int print_warning = 1;
if (kmin == 0) { // Disable keyframe insertion.
kmax = ~0;
kmin = kmax - 1;
print_warning = 0;
}
if (kmax == 0) {
kmax = ~0;
print_warning = 0;
}
if (kmin >= kmax) {
kmin = kmax - 1;
if (print_warning) {
fprintf(stderr,
"WARNING: Setting kmin = %d, so that kmin < kmax.\n", (int)kmin);
}
} else if (kmin < (kmax / 2 + 1)) {
// This ensures that cache.keyframe + kmin >= kmax is always true. So, we
// can flush all the frames in the count_since_key_frame == kmax case.
kmin = (kmax / 2 + 1);
if (print_warning) {
fprintf(stderr,
"WARNING: Setting kmin = %d, so that kmin >= kmax / 2 + 1.\n",
(int)kmin);
}
}
// Limit the max number of frames that are allocated.
if (kmax - kmin > MAX_CACHE_SIZE) {
kmin = kmax - MAX_CACHE_SIZE;
if (print_warning) {
fprintf(stderr,
"WARNING: Setting kmin = %d, so that kmax - kmin <= 30.\n",
(int)kmin);
}
}
*kmin_ptr = kmin;
*kmax_ptr = kmax;
}
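To make the clamping rules above concrete, here is a minimal illustration (assuming the static function above is visible in the same translation unit; the results follow directly from the code):

size_t kmin, kmax;
kmin = 9;  kmax = 17;   // the lossless defaults chosen in main() below
SanitizeKeyFrameIntervals(&kmin, &kmax);
// Unchanged: 9 >= 17 / 2 + 1 and 17 - 9 <= MAX_CACHE_SIZE.
kmin = 2;  kmax = 10;
SanitizeKeyFrameIntervals(&kmin, &kmax);
// kmin is raised to 6 (= kmax / 2 + 1), with a warning.
kmin = 0;  kmax = 10;
SanitizeKeyFrameIntervals(&kmin, &kmax);
// kmin == 0 disables keyframe insertion: kmax becomes ~0 and kmin = kmax - 1.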
static void Remap(const uint8_t* const src, const GifFileType* const gif,
@ -62,30 +94,34 @@ static void Remap(const uint8_t* const src, const GifFileType* const gif,
for (i = 0; i < len; ++i) {
const GifColorType c = colors[src[i]];
dst[i] = (src[i] == transparent_index) ? TRANSPARENT_COLOR
dst[i] = (src[i] == transparent_index) ? WEBP_UTIL_TRANSPARENT_COLOR
: c.Blue | (c.Green << 8) | (c.Red << 16) | (0xff << 24);
}
}
static int ReadSubImage(GifFileType* gif, WebPPicture* pic, WebPPicture* view) {
// Read the GIF image frame.
static int ReadFrame(GifFileType* const gif, WebPFrameRect* const gif_rect,
WebPPicture* const webp_frame) {
WebPPicture sub_image;
const GifImageDesc image_desc = gif->Image;
const int offset_x = image_desc.Left;
const int offset_y = image_desc.Top;
const int sub_w = image_desc.Width;
const int sub_h = image_desc.Height;
uint32_t* dst = NULL;
uint8_t* tmp = NULL;
int ok = 0;
WebPFrameRect rect = {
image_desc.Left, image_desc.Top, image_desc.Width, image_desc.Height
};
*gif_rect = rect;
// Use a view for the sub-picture:
if (!WebPPictureView(pic, offset_x, offset_y, sub_w, sub_h, view)) {
if (!WebPPictureView(webp_frame, rect.x_offset, rect.y_offset,
rect.width, rect.height, &sub_image)) {
fprintf(stderr, "Sub-image %dx%d at position %d,%d is invalid!\n",
sub_w, sub_h, offset_x, offset_y);
goto End;
rect.width, rect.height, rect.x_offset, rect.y_offset);
return 0;
}
dst = view->argb;
dst = sub_image.argb;
tmp = (uint8_t*)malloc(sub_w * sizeof(*tmp));
tmp = (uint8_t*)malloc(rect.width * sizeof(*tmp));
if (tmp == NULL) goto End;
if (image_desc.Interlace) { // Interlaced image.
@ -95,32 +131,32 @@ static int ReadSubImage(GifFileType* gif, WebPPicture* pic, WebPPicture* view) {
int pass;
for (pass = 0; pass < 4; ++pass) {
int y;
for (y = interlace_offsets[pass]; y < sub_h; y += interlace_jumps[pass]) {
if (DGifGetLine(gif, tmp, sub_w) == GIF_ERROR) goto End;
Remap(tmp, gif, dst + y * view->argb_stride, sub_w);
for (y = interlace_offsets[pass]; y < rect.height;
y += interlace_jumps[pass]) {
if (DGifGetLine(gif, tmp, rect.width) == GIF_ERROR) goto End;
Remap(tmp, gif, dst + y * sub_image.argb_stride, rect.width);
}
}
} else { // Non-interlaced image.
int y;
for (y = 0; y < sub_h; ++y) {
if (DGifGetLine(gif, tmp, sub_w) == GIF_ERROR) goto End;
Remap(tmp, gif, dst + y * view->argb_stride, sub_w);
for (y = 0; y < rect.height; ++y) {
if (DGifGetLine(gif, tmp, rect.width) == GIF_ERROR) goto End;
Remap(tmp, gif, dst + y * sub_image.argb_stride, rect.width);
}
}
// re-align the view with even offset (and adjust dimensions if needed).
WebPPictureView(pic, offset_x & ~1, offset_y & ~1,
sub_w + (offset_x & 1), sub_h + (offset_y & 1), view);
ok = 1;
End:
if (!ok) webp_frame->error_code = sub_image.error_code;
WebPPictureFree(&sub_image);
free(tmp);
return ok;
}
static int GetBackgroundColor(const ColorMapObject* const color_map,
GifWord bgcolor_idx, uint32_t* const bgcolor) {
int bgcolor_idx, uint32_t* const bgcolor) {
if (transparent_index != -1 && bgcolor_idx == transparent_index) {
*bgcolor = TRANSPARENT_COLOR; // Special case.
*bgcolor = WEBP_UTIL_TRANSPARENT_COLOR; // Special case.
return 1;
} else if (color_map == NULL || color_map->Colors == NULL
|| bgcolor_idx >= color_map->ColorCount) {
@ -141,20 +177,20 @@ static void DisplayGifError(const GifFileType* const gif, int gif_error) {
#if defined(GIFLIB_MAJOR) && defined(GIFLIB_MINOR) && \
((GIFLIB_MAJOR == 4 && GIFLIB_MINOR >= 2) || GIFLIB_MAJOR > 4)
#if GIFLIB_MAJOR >= 5
// Static string actually, hence the const char* cast.
const char* error_str = (const char*)GifErrorString(
(gif == NULL) ? gif_error : gif->Error);
// Static string actually, hence the const char* cast.
const char* error_str = (const char*)GifErrorString(
(gif == NULL) ? gif_error : gif->Error);
#else
const char* error_str = (const char*)GifErrorString();
(void)gif;
const char* error_str = (const char*)GifErrorString();
(void)gif;
#endif
if (error_str == NULL) error_str = "Unknown error";
fprintf(stderr, "GIFLib Error %d: %s\n", gif_error, error_str);
if (error_str == NULL) error_str = "Unknown error";
fprintf(stderr, "GIFLib Error %d: %s\n", gif_error, error_str);
#else
(void)gif;
fprintf(stderr, "GIFLib Error %d: ", gif_error);
PrintGifError();
fprintf(stderr, "\n");
(void)gif;
fprintf(stderr, "GIFLib Error %d: ", gif_error);
PrintGifError();
fprintf(stderr, "\n");
#endif
}
@ -168,6 +204,12 @@ static const char* ErrorString(WebPMuxError err) {
return kErrorMessages[-err];
}
enum {
METADATA_ICC = (1 << 0),
METADATA_XMP = (1 << 1),
METADATA_ALL = METADATA_ICC | METADATA_XMP
};
//------------------------------------------------------------------------------
static void Help(void) {
@ -176,9 +218,19 @@ static void Help(void) {
printf("options:\n");
printf(" -h / -help ............ this help\n");
printf(" -lossy ................. Encode image using lossy compression.\n");
printf(" -mixed ................. For each frame in the image, pick lossy\n"
" or lossless compression heuristically.\n");
printf(" -q <float> ............. quality factor (0:small..100:big)\n");
printf(" -m <int> ............... compression method (0=fast, 6=slowest)\n");
printf(" -kmin <int> ............ Min distance between key frames\n");
printf(" -kmax <int> ............ Max distance between key frames\n");
printf(" -f <int> ............... filter strength (0=off..100)\n");
printf(" -metadata <string> ..... comma separated list of metadata to\n");
printf(" ");
printf("copy from the input to the output if present.\n");
printf(" "
"Valid values: all, none, icc, xmp (default)\n");
printf(" -mt .................... use multi-threading if available\n");
printf("\n");
printf(" -version ............... print version number and exit.\n");
printf(" -v ..................... verbose.\n");
@ -196,29 +248,39 @@ int main(int argc, const char *argv[]) {
const char *in_file = NULL, *out_file = NULL;
FILE* out = NULL;
GifFileType* gif = NULL;
WebPPicture picture;
WebPPicture view;
WebPMemoryWriter memory;
WebPMuxFrameInfo frame;
WebPConfig config;
WebPPicture frame;
WebPMuxFrameInfo info;
WebPMuxAnimParams anim = { WHITE_COLOR, 0 };
WebPFrameCache* cache = NULL;
int is_first_frame = 1;
int is_first_frame = 1; // Whether we are processing the first frame.
int done;
int c;
int quiet = 0;
WebPConfig config;
WebPMux* mux = NULL;
WebPData webp_data = { NULL, 0 };
int keep_metadata = METADATA_XMP; // ICC not output by default.
int stored_icc = 0; // Whether we have already stored an ICC profile.
int stored_xmp = 0;
memset(&frame, 0, sizeof(frame));
frame.id = WEBP_CHUNK_ANMF;
frame.dispose_method = WEBP_MUX_DISPOSE_BACKGROUND;
int default_kmin = 1; // Whether to use default kmin value.
int default_kmax = 1;
size_t kmin = 0;
size_t kmax = 0;
int allow_mixed = 0; // If true, each frame can be lossy or lossless.
if (!WebPConfigInit(&config) || !WebPPictureInit(&picture)) {
memset(&info, 0, sizeof(info));
info.id = WEBP_CHUNK_ANMF;
info.dispose_method = WEBP_MUX_DISPOSE_BACKGROUND;
info.blend_method = WEBP_MUX_BLEND;
if (!WebPConfigInit(&config) || !WebPPictureInit(&frame)) {
fprintf(stderr, "Error! Version mismatch!\n");
return -1;
}
config.lossless = 1; // Use lossless compression by default.
config.image_hint = WEBP_HINT_GRAPH; // always low-color
if (argc == 1) {
Help();
@ -233,12 +295,62 @@ int main(int argc, const char *argv[]) {
out_file = argv[++c];
} else if (!strcmp(argv[c], "-lossy")) {
config.lossless = 0;
} else if (!strcmp(argv[c], "-mixed")) {
allow_mixed = 1;
config.lossless = 0;
} else if (!strcmp(argv[c], "-q") && c < argc - 1) {
config.quality = (float)strtod(argv[++c], NULL);
} else if (!strcmp(argv[c], "-m") && c < argc - 1) {
config.method = strtol(argv[++c], NULL, 0);
} else if (!strcmp(argv[c], "-kmax") && c < argc - 1) {
kmax = strtoul(argv[++c], NULL, 0);
default_kmax = 0;
} else if (!strcmp(argv[c], "-kmin") && c < argc - 1) {
kmin = strtoul(argv[++c], NULL, 0);
default_kmin = 0;
} else if (!strcmp(argv[c], "-f") && c < argc - 1) {
config.filter_strength = strtol(argv[++c], NULL, 0);
} else if (!strcmp(argv[c], "-metadata") && c < argc - 1) {
static const struct {
const char* option;
int flag;
} kTokens[] = {
{ "all", METADATA_ALL },
{ "none", 0 },
{ "icc", METADATA_ICC },
{ "xmp", METADATA_XMP },
};
const size_t kNumTokens = sizeof(kTokens) / sizeof(*kTokens);
const char* start = argv[++c];
const char* const end = start + strlen(start);
keep_metadata = 0;
while (start < end) {
size_t i;
const char* token = strchr(start, ',');
if (token == NULL) token = end;
for (i = 0; i < kNumTokens; ++i) {
if ((size_t)(token - start) == strlen(kTokens[i].option) &&
!strncmp(start, kTokens[i].option, strlen(kTokens[i].option))) {
if (kTokens[i].flag != 0) {
keep_metadata |= kTokens[i].flag;
} else {
keep_metadata = 0;
}
break;
}
}
if (i == kNumTokens) {
fprintf(stderr, "Error! Unknown metadata type '%.*s'\n",
(int)(token - start), start);
Help();
return -1;
}
start = token + 1;
}
} else if (!strcmp(argv[c], "-mt")) {
++config.thread_level;
} else if (!strcmp(argv[c], "-version")) {
const int enc_version = WebPGetEncoderVersion();
const int mux_version = WebPGetMuxVersion();
@ -251,6 +363,9 @@ int main(int argc, const char *argv[]) {
quiet = 1;
} else if (!strcmp(argv[c], "-v")) {
verbose = 1;
} else if (!strcmp(argv[c], "--")) {
if (c < argc - 1) in_file = argv[++c];
break;
} else if (argv[c][0] == '-') {
fprintf(stderr, "Error! Unknown option '%s'\n", argv[c]);
Help();
@ -259,6 +374,16 @@ int main(int argc, const char *argv[]) {
in_file = argv[c];
}
}
// Appropriate default kmin, kmax values for lossy and lossless.
if (default_kmin) {
kmin = config.lossless ? 9 : 3;
}
if (default_kmax) {
kmax = config.lossless ? 17 : 5;
}
SanitizeKeyFrameIntervals(&kmin, &kmax);
if (!WebPValidateConfig(&config)) {
fprintf(stderr, "Error! Invalid configuration.\n");
goto End;
@ -279,13 +404,15 @@ int main(int argc, const char *argv[]) {
#endif
if (gif == NULL) goto End;
// Allocate picture buffer
picture.width = gif->SWidth;
picture.height = gif->SHeight;
picture.use_argb = 1;
picture.writer = WebPMemoryWrite;
picture.custom_ptr = &memory;
if (!WebPPictureAlloc(&picture)) goto End;
// Allocate current buffer
frame.width = gif->SWidth;
frame.height = gif->SHeight;
frame.use_argb = 1;
if (!WebPPictureAlloc(&frame)) goto End;
// Initialize cache
cache = WebPFrameCacheNew(frame.width, frame.height, kmin, kmax, allow_mixed);
if (cache == NULL) goto End;
mux = WebPMuxNew();
if (mux == NULL) {
@ -301,51 +428,25 @@ int main(int argc, const char *argv[]) {
switch (type) {
case IMAGE_DESC_RECORD_TYPE: {
if (frame.dispose_method == WEBP_MUX_DISPOSE_BACKGROUND) {
ClearPicture(&picture, anim.bgcolor);
}
WebPFrameRect gif_rect;
if (!DGifGetImageDesc(gif)) goto End;
if (!ReadSubImage(gif, &picture, &view)) goto End;
WebPMemoryWriterInit(&memory);
if (!config.lossless) {
// We need to call BGRA variant because of the way we do Remap().
// TODO(later): This works for little-endian only due to uint32_t to
// uint8_t conversion. Make it work for big-endian too.
WebPPictureImportBGRA(&view, (uint8_t*)view.argb,
view.argb_stride * sizeof(*view.argb));
view.use_argb = 0;
} else {
view.use_argb = 1;
}
if (!WebPEncode(&config, &view)) {
fprintf(stderr, "Error! Cannot encode picture as WebP\n");
fprintf(stderr, "Error code: %d\n", view.error_code);
if (!ReadFrame(gif, &gif_rect, &frame)) {
goto End;
}
// Now we have all the info about the frame, as a Graphic Control
// Extension Block always appears before the Image Descriptor Block.
// So add the frame to mux.
frame.x_offset = gif->Image.Left & ~1;
frame.y_offset = gif->Image.Top & ~1;
frame.bitstream.bytes = memory.mem;
frame.bitstream.size = memory.size;
err = WebPMuxPushFrame(mux, &frame, 1);
if (!WebPFrameCacheAddFrame(cache, &config, &gif_rect, &frame, &info)) {
fprintf(stderr, "Error! Cannot encode frame as WebP\n");
fprintf(stderr, "Error code: %d\n", frame.error_code);
}
err = WebPFrameCacheFlush(cache, verbose, mux);
if (err != WEBP_MUX_OK) {
fprintf(stderr, "ERROR (%s): Could not add animation frame.\n",
ErrorString(err));
goto End;
}
if (verbose) {
printf("Added frame %dx%d (offset:%d,%d duration:%d) ",
view.width, view.height, frame.x_offset, frame.y_offset,
frame.duration);
printf("dispose:%d transparent index:%d\n",
frame.dispose_method, transparent_index);
}
WebPDataClear(&frame.bitstream);
is_first_frame = 0;
break;
}
case EXTENSION_RECORD_TYPE: {
@ -363,14 +464,18 @@ int main(int argc, const char *argv[]) {
const int dispose = (flags >> GIF_DISPOSE_SHIFT) & GIF_DISPOSE_MASK;
const int delay = data[2] | (data[3] << 8); // In 10 ms units.
if (data[0] != 4) goto End;
frame.duration = delay * 10; // Duration is in 1 ms units for WebP.
info.duration = delay * 10; // Duration is in 1 ms units for WebP.
if (dispose == 3) {
fprintf(stderr, "WARNING: GIF_DISPOSE_RESTORE not supported.");
static int warning_printed = 0;
if (!warning_printed) {
fprintf(stderr, "WARNING: GIF_DISPOSE_RESTORE unsupported.\n");
warning_printed = 1;
}
// failsafe. TODO(urvang): emulate the correct behaviour by
// recoding the whole frame.
frame.dispose_method = WEBP_MUX_DISPOSE_BACKGROUND;
info.dispose_method = WEBP_MUX_DISPOSE_BACKGROUND;
} else {
frame.dispose_method =
info.dispose_method =
(dispose == 2) ? WEBP_MUX_DISPOSE_BACKGROUND
: WEBP_MUX_DISPOSE_NONE;
}
@ -379,10 +484,9 @@ int main(int argc, const char *argv[]) {
if (!GetBackgroundColor(gif->SColorMap, gif->SBackGroundColor,
&anim.bgcolor)) {
fprintf(stderr, "GIF decode warning: invalid background color "
"index. Assuming white background.\n");
"index. Assuming white background.\n");
}
ClearPicture(&picture, anim.bgcolor);
is_first_frame = 0;
WebPUtilClearPic(&frame, NULL);
}
break;
}
@ -398,24 +502,73 @@ int main(int argc, const char *argv[]) {
if (data[0] != 3 && data[1] != 1) break; // wrong size/marker
anim.loop_count = data[2] | (data[3] << 8);
if (verbose) printf("Loop count: %d\n", anim.loop_count);
} else if (!memcmp(data + 1, "XMP dataXMP", 11)) {
// Read XMP metadata.
WebPData xmp;
if (DGifGetExtensionNext(gif, &data) == GIF_ERROR) goto End;
if (data == NULL) goto End;
xmp.bytes = (uint8_t*)data;
xmp.size = data[0] + 1;
WebPMuxSetChunk(mux, "XMP ", &xmp, 1);
if (verbose) printf("XMP size: %d\n", (int)xmp.size);
} else if (!memcmp(data + 1, "ICCRGBG1012", 11)) {
// Read ICC profile.
WebPData icc;
if (DGifGetExtensionNext(gif, &data) == GIF_ERROR) goto End;
if (data == NULL) goto End;
icc.bytes = (uint8_t*)data;
icc.size = data[0] + 1;
WebPMuxSetChunk(mux, "ICCP", &icc, 1);
if (verbose) printf("ICC size: %d\n", (int)icc.size);
} else { // An extension containing metadata.
// We only store the first encountered chunk of each type, and
// only if requested by the user.
const int is_xmp = (keep_metadata & METADATA_XMP) &&
!stored_xmp &&
!memcmp(data + 1, "XMP DataXMP", 11);
const int is_icc = (keep_metadata & METADATA_ICC) &&
!stored_icc &&
!memcmp(data + 1, "ICCRGBG1012", 11);
if (is_xmp || is_icc) {
const char* const fourccs[2] = { "XMP " , "ICCP" };
const char* const features[2] = { "XMP" , "ICC" };
WebPData metadata = { NULL, 0 };
// Construct metadata from sub-blocks.
// Usual case (including ICC profile): In each sub-block, the
// first byte specifies its size in bytes (0 to 255) and the
// rest of the bytes contain the data.
// Special case for XMP data: In each sub-block, the first byte
// is also part of the XMP payload. XMP in GIF also carries a trailing
// 257-byte padding block. See the XMP specification for details.
while (1) {
WebPData prev_metadata = metadata;
WebPData subblock;
if (DGifGetExtensionNext(gif, &data) == GIF_ERROR) {
WebPDataClear(&metadata);
goto End;
}
if (data == NULL) break; // Finished.
subblock.size = is_xmp ? data[0] + 1 : data[0];
assert(subblock.size > 0);
subblock.bytes = is_xmp ? data : data + 1;
metadata.bytes =
(uint8_t*)realloc((void*)metadata.bytes,
prev_metadata.size + subblock.size);
if (metadata.bytes == NULL) {
WebPDataClear(&prev_metadata);
goto End;
}
metadata.size += subblock.size;
memcpy((void*)(metadata.bytes + prev_metadata.size),
subblock.bytes, subblock.size);
}
if (is_xmp) {
// XMP padding data is 0x01, 0xff, 0xfe ... 0x01, 0x00.
const size_t xmp_padding_size = 257;
if (metadata.size > xmp_padding_size) {
metadata.size -= xmp_padding_size;
}
}
// Add metadata chunk.
err = WebPMuxSetChunk(mux, fourccs[is_icc], &metadata, 1);
if (verbose) {
printf("%s size: %d\n", features[is_icc], (int)metadata.size);
}
WebPDataClear(&metadata);
if (err != WEBP_MUX_OK) {
fprintf(stderr, "ERROR (%s): Could not set %s chunk.\n",
ErrorString(err), features[is_icc]);
goto End;
}
if (is_icc) {
stored_icc = 1;
} else if (is_xmp) {
stored_xmp = 1;
}
}
}
break;
}
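A quick size check of the concatenation above: an ICC profile split into GIF sub-blocks of 255, 255 and 10 data bytes yields a 255 + 255 + 10 = 520-byte ICCP chunk, since the leading length byte of each sub-block is dropped. For XMP the length byte is kept as part of the payload (256 + 256 + 11 = 523 bytes in the same case), and the trailing 257-byte padding sequence (0x01, 0xff, 0xfe, ..., 0x01, 0x00) is then trimmed before the chunk is stored.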
@ -423,9 +576,9 @@ int main(int argc, const char *argv[]) {
break; // skip
}
}
do {
while (data != NULL) {
if (DGifGetExtensionNext(gif, &data) == GIF_ERROR) goto End;
} while (data != NULL);
}
break;
}
case TERMINATE_RECORD_TYPE: {
@ -441,6 +594,14 @@ int main(int argc, const char *argv[]) {
}
} while (!done);
// Flush any pending frames.
err = WebPFrameCacheFlushAll(cache, verbose, mux);
if (err != WEBP_MUX_OK) {
fprintf(stderr, "ERROR (%s): Could not add animation frame.\n",
ErrorString(err));
goto End;
}
// Finish muxing
err = WebPMuxSetAnimationParams(mux, &anim);
if (err != WEBP_MUX_OK) {
@ -475,7 +636,8 @@ int main(int argc, const char *argv[]) {
End:
WebPDataClear(&webp_data);
WebPMuxDelete(mux);
WebPPictureFree(&picture);
WebPPictureFree(&frame);
WebPFrameCacheDelete(cache);
if (out != NULL && out_file != NULL) fclose(out);
if (gif_error != GIF_OK) {
@ -488,4 +650,14 @@ int main(int argc, const char *argv[]) {
return !ok;
}
#else // !WEBP_HAVE_GIF
int main(int argc, const char *argv[]) {
fprintf(stderr, "GIF support not enabled in %s.\n", argv[0]);
(void)argc;
return 0;
}
#endif
//------------------------------------------------------------------------------

examples/gif2webp_util.c Normal file

@ -0,0 +1,667 @@
// Copyright 2013 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// Helper structs and methods for gif2webp tool.
//
#include <assert.h>
#include <stdio.h>
#include "webp/encode.h"
#include "./gif2webp_util.h"
#define DELTA_INFINITY (1ULL << 32)
#define KEYFRAME_NONE -1
//------------------------------------------------------------------------------
// Helper utilities.
static void ClearRectangle(WebPPicture* const picture,
int left, int top, int width, int height) {
int j;
for (j = top; j < top + height; ++j) {
uint32_t* const dst = picture->argb + j * picture->argb_stride;
int i;
for (i = left; i < left + width; ++i) {
dst[i] = WEBP_UTIL_TRANSPARENT_COLOR;
}
}
}
void WebPUtilClearPic(WebPPicture* const picture,
const WebPFrameRect* const rect) {
if (rect != NULL) {
ClearRectangle(picture, rect->x_offset, rect->y_offset,
rect->width, rect->height);
} else {
ClearRectangle(picture, 0, 0, picture->width, picture->height);
}
}
// TODO: Also used in picture.c. Move to a common location?
// Copy width x height pixels from 'src' to 'dst' honoring the strides.
static void CopyPlane(const uint8_t* src, int src_stride,
uint8_t* dst, int dst_stride, int width, int height) {
while (height-- > 0) {
memcpy(dst, src, width);
src += src_stride;
dst += dst_stride;
}
}
// Copy pixels from 'src' to 'dst' honoring strides. 'src' and 'dst' are assumed
// to be already allocated.
static void CopyPixels(const WebPPicture* const src, WebPPicture* const dst) {
assert(src->width == dst->width && src->height == dst->height);
CopyPlane((uint8_t*)src->argb, 4 * src->argb_stride, (uint8_t*)dst->argb,
4 * dst->argb_stride, 4 * src->width, src->height);
}
// Given 'src' picture and its frame rectangle 'rect', blend it into 'dst'.
static void BlendPixels(const WebPPicture* const src,
const WebPFrameRect* const rect,
WebPPicture* const dst) {
int j;
assert(src->width == dst->width && src->height == dst->height);
for (j = rect->y_offset; j < rect->y_offset + rect->height; ++j) {
int i;
for (i = rect->x_offset; i < rect->x_offset + rect->width; ++i) {
const uint32_t src_pixel = src->argb[j * src->argb_stride + i];
const int src_alpha = src_pixel >> 24;
if (src_alpha != 0) {
dst->argb[j * dst->argb_stride + i] = src_pixel;
}
}
}
}
// Replace transparent pixels within 'dst_rect' of 'dst' by those in the 'src'.
static void ReduceTransparency(const WebPPicture* const src,
const WebPFrameRect* const rect,
WebPPicture* const dst) {
int i, j;
assert(src != NULL && dst != NULL && rect != NULL);
assert(src->width == dst->width && src->height == dst->height);
for (j = rect->y_offset; j < rect->y_offset + rect->height; ++j) {
for (i = rect->x_offset; i < rect->x_offset + rect->width; ++i) {
const uint32_t src_pixel = src->argb[j * src->argb_stride + i];
const int src_alpha = src_pixel >> 24;
const uint32_t dst_pixel = dst->argb[j * dst->argb_stride + i];
const int dst_alpha = dst_pixel >> 24;
if (dst_alpha == 0 && src_alpha == 0xff) {
dst->argb[j * dst->argb_stride + i] = src_pixel;
}
}
}
}
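The substitution above is visually lossless for frames that are blended onto the previous canvas: a pixel that is transparent in the frame shows the previous canvas anyway, so giving it the canvas's opaque color does not change the rendered result, while it removes hard alpha edges that hurt lossy compression.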
// Replace similar blocks of pixels by a 'see-through' transparent block
// with uniform average color.
static void FlattenSimilarBlocks(const WebPPicture* const src,
const WebPFrameRect* const rect,
WebPPicture* const dst) {
int i, j;
const int block_size = 8;
const int y_start = (rect->y_offset + block_size) & ~(block_size - 1);
const int y_end = (rect->y_offset + rect->height) & ~(block_size - 1);
const int x_start = (rect->x_offset + block_size) & ~(block_size - 1);
const int x_end = (rect->x_offset + rect->width) & ~(block_size - 1);
assert(src != NULL && dst != NULL && rect != NULL);
assert(src->width == dst->width && src->height == dst->height);
assert((block_size & (block_size - 1)) == 0); // must be a power of 2
// Iterate over each block and count similar pixels.
for (j = y_start; j < y_end; j += block_size) {
for (i = x_start; i < x_end; i += block_size) {
int cnt = 0;
int avg_r = 0, avg_g = 0, avg_b = 0;
int x, y;
const uint32_t* const psrc = src->argb + j * src->argb_stride + i;
uint32_t* const pdst = dst->argb + j * dst->argb_stride + i;
for (y = 0; y < block_size; ++y) {
for (x = 0; x < block_size; ++x) {
const uint32_t src_pixel = psrc[x + y * src->argb_stride];
const int alpha = src_pixel >> 24;
if (alpha == 0xff &&
src_pixel == pdst[x + y * dst->argb_stride]) {
++cnt;
avg_r += (src_pixel >> 16) & 0xff;
avg_g += (src_pixel >> 8) & 0xff;
avg_b += (src_pixel >> 0) & 0xff;
}
}
}
// If we have a fully similar block, we replace it with an
// average transparent block. This compresses better in lossy mode.
if (cnt == block_size * block_size) {
const uint32_t color = (0x00 << 24) |
((avg_r / cnt) << 16) |
((avg_g / cnt) << 8) |
((avg_b / cnt) << 0);
for (y = 0; y < block_size; ++y) {
for (x = 0; x < block_size; ++x) {
pdst[x + y * dst->argb_stride] = color;
}
}
}
}
}
}
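As a concrete case: if every pixel of an aligned 8x8 block is opaque and identical to the corresponding pixel of the previous canvas (say 0xff336699), the block is rewritten as sixty-four copies of 0x00336699, i.e. the same average color with alpha cleared. The lossy encoder represents this much more cheaply, and since the block is blended over the previous canvas, the displayed pixels are unchanged.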
//------------------------------------------------------------------------------
// Key frame related utilities.
// Returns true if 'curr' frame with frame rectangle 'curr_rect' is a key frame,
// that is, it can be decoded independently of 'prev' canvas.
static int IsKeyFrame(const WebPPicture* const curr,
const WebPFrameRect* const curr_rect,
const WebPPicture* const prev) {
int i, j;
int is_key_frame = 1;
// If previous canvas (with previous frame disposed) is all transparent,
// current frame is a key frame.
for (i = 0; i < prev->width; ++i) {
for (j = 0; j < prev->height; ++j) {
const uint32_t prev_alpha = (prev->argb[j * prev->argb_stride + i]) >> 24;
if (prev_alpha != 0) {
is_key_frame = 0;
break;
}
}
if (!is_key_frame) break;
}
if (is_key_frame) return 1;
// If current frame covers the whole canvas and does not contain any
// transparent pixels that depend on previous canvas, then current frame is
// a key frame.
if (curr_rect->width == curr->width && curr_rect->height == curr->height) {
assert(curr_rect->x_offset == 0 && curr_rect->y_offset == 0);
is_key_frame = 1;
for (j = 0; j < prev->height; ++j) {
for (i = 0; i < prev->width; ++i) {
const uint32_t prev_alpha =
(prev->argb[j * prev->argb_stride + i]) >> 24;
const uint32_t curr_alpha =
(curr->argb[j * curr->argb_stride + i]) >> 24;
if (curr_alpha != 0xff && prev_alpha != 0) {
is_key_frame = 0;
break;
}
}
if (!is_key_frame) break;
}
if (is_key_frame) return 1;
}
return 0;
}
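For instance, a frame that covers the full canvas and is fully opaque is always a key frame by the second test, regardless of what the previous canvas contained; conversely, a small frame rectangle painted over a non-empty canvas fails both tests and is reported as a dependent sub-frame.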
// Given 'prev' frame and current frame rectangle 'rect', convert 'curr' frame
// to a key frame.
static void ConvertToKeyFrame(const WebPPicture* const prev,
WebPFrameRect* const rect,
WebPPicture* const curr) {
int j;
assert(curr->width == prev->width && curr->height == prev->height);
// Replace transparent pixels of current canvas with those from previous
// canvas (with previous frame disposed).
for (j = 0; j < curr->height; ++j) {
int i;
for (i = 0; i < curr->width; ++i) {
uint32_t* const curr_pixel = curr->argb + j * curr->argb_stride + i;
const int curr_alpha = *curr_pixel >> 24;
if (curr_alpha == 0) {
*curr_pixel = prev->argb[j * prev->argb_stride + i];
}
}
}
// Frame rectangle now covers the whole canvas.
rect->x_offset = 0;
rect->y_offset = 0;
rect->width = curr->width;
rect->height = curr->height;
}
//------------------------------------------------------------------------------
// Encoded frame.
// Used to store two candidates of encoded data for an animation frame. One of
// the two will be chosen later.
typedef struct {
WebPMuxFrameInfo sub_frame; // Encoded frame rectangle.
WebPMuxFrameInfo key_frame; // Encoded frame if it was converted to keyframe.
} EncodedFrame;
// Release the data contained by 'encoded_frame'.
static void FrameRelease(EncodedFrame* const encoded_frame) {
if (encoded_frame != NULL) {
WebPDataClear(&encoded_frame->sub_frame.bitstream);
WebPDataClear(&encoded_frame->key_frame.bitstream);
memset(encoded_frame, 0, sizeof(*encoded_frame));
}
}
//------------------------------------------------------------------------------
// Frame cache.
// Used to store encoded frames that haven't been output yet.
struct WebPFrameCache {
EncodedFrame* encoded_frames; // Array of encoded frames.
size_t size; // Number of allocated data elements.
size_t start; // Start index.
size_t count; // Number of valid data elements.
int flush_count; // If >0, flush_count frames starting from
// 'start' are ready to be added to mux.
int64_t best_delta; // min(canvas size - frame size) over the frames.
// Can be negative in certain cases due to
// transparent pixels in a frame.
int keyframe; // Index of selected keyframe relative to 'start'.
size_t kmin; // Min distance between key frames.
size_t kmax; // Max distance between key frames.
size_t count_since_key_frame; // Frames seen since the last key frame.
int allow_mixed; // If true, each frame can be lossy or lossless.
WebPPicture prev_canvas; // Previous canvas (properly disposed).
WebPPicture curr_canvas; // Current canvas (temporary buffer).
int is_first_frame; // True if no frames have been added to the cache
// since WebPFrameCacheNew().
};
// Reset the counters in the cache struct. Doesn't touch 'cache->encoded_frames'
// and 'cache->size'.
static void CacheReset(WebPFrameCache* const cache) {
cache->start = 0;
cache->count = 0;
cache->flush_count = 0;
cache->best_delta = DELTA_INFINITY;
cache->keyframe = KEYFRAME_NONE;
}
WebPFrameCache* WebPFrameCacheNew(int width, int height,
size_t kmin, size_t kmax, int allow_mixed) {
WebPFrameCache* cache = (WebPFrameCache*)malloc(sizeof(*cache));
if (cache == NULL) return NULL;
CacheReset(cache);
// sanity init, so we can call WebPFrameCacheDelete():
cache->encoded_frames = NULL;
cache->is_first_frame = 1;
// Picture buffers.
if (!WebPPictureInit(&cache->prev_canvas) ||
!WebPPictureInit(&cache->curr_canvas)) {
return NULL;
}
cache->prev_canvas.width = width;
cache->prev_canvas.height = height;
cache->prev_canvas.use_argb = 1;
if (!WebPPictureAlloc(&cache->prev_canvas) ||
!WebPPictureCopy(&cache->prev_canvas, &cache->curr_canvas)) {
goto Err;
}
WebPUtilClearPic(&cache->prev_canvas, NULL);
// Cache data.
cache->allow_mixed = allow_mixed;
cache->kmin = kmin;
cache->kmax = kmax;
cache->count_since_key_frame = 0;
assert(kmax > kmin);
cache->size = kmax - kmin;
cache->encoded_frames =
(EncodedFrame*)calloc(cache->size, sizeof(*cache->encoded_frames));
if (cache->encoded_frames == NULL) goto Err;
return cache; // All OK.
Err:
WebPFrameCacheDelete(cache);
return NULL;
}
void WebPFrameCacheDelete(WebPFrameCache* const cache) {
if (cache != NULL) {
if (cache->encoded_frames != NULL) {
size_t i;
for (i = 0; i < cache->size; ++i) {
FrameRelease(&cache->encoded_frames[i]);
}
free(cache->encoded_frames);
}
WebPPictureFree(&cache->prev_canvas);
WebPPictureFree(&cache->curr_canvas);
free(cache);
}
}
static int EncodeFrame(const WebPConfig* const config, WebPPicture* const pic,
WebPMemoryWriter* const memory) {
pic->use_argb = 1;
pic->writer = WebPMemoryWrite;
pic->custom_ptr = memory;
if (!WebPEncode(config, pic)) {
return 0;
}
return 1;
}
static void GetEncodedData(const WebPMemoryWriter* const memory,
WebPData* const encoded_data) {
encoded_data->bytes = memory->mem;
encoded_data->size = memory->size;
}
#define MIN_COLORS_LOSSY 31 // Don't try lossy below this threshold.
#define MAX_COLORS_LOSSLESS 194 // Don't try lossless above this threshold.
#define MAX_COLOR_COUNT 256 // Power of 2 greater than MAX_COLORS_LOSSLESS.
#define HASH_SIZE (MAX_COLOR_COUNT * 4)
#define HASH_RIGHT_SHIFT 22 // 32 - log2(HASH_SIZE).
// TODO(urvang): Also used in enc/vp8l.c. Move to utils.
// If the number of colors in the 'pic' is at least MAX_COLOR_COUNT, return
// MAX_COLOR_COUNT. Otherwise, return the exact number of colors in the 'pic'.
static int GetColorCount(const WebPPicture* const pic) {
int x, y;
int num_colors = 0;
uint8_t in_use[HASH_SIZE] = { 0 };
uint32_t colors[HASH_SIZE];
static const uint32_t kHashMul = 0x1e35a7bd;
const uint32_t* argb = pic->argb;
const int width = pic->width;
const int height = pic->height;
uint32_t last_pix = ~argb[0]; // so we're sure that last_pix != argb[0]
for (y = 0; y < height; ++y) {
for (x = 0; x < width; ++x) {
int key;
if (argb[x] == last_pix) {
continue;
}
last_pix = argb[x];
key = (kHashMul * last_pix) >> HASH_RIGHT_SHIFT;
while (1) {
if (!in_use[key]) {
colors[key] = last_pix;
in_use[key] = 1;
++num_colors;
if (num_colors >= MAX_COLOR_COUNT) {
return MAX_COLOR_COUNT; // Exact count not needed.
}
break;
} else if (colors[key] == last_pix) {
break; // The color is already there.
} else {
// Some other color sits here, so do linear conflict resolution.
++key;
key &= (HASH_SIZE - 1); // Key mask.
}
}
}
argb += pic->argb_stride;
}
return num_colors;
}
#undef MAX_COLOR_COUNT
#undef HASH_SIZE
#undef HASH_RIGHT_SHIFT
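The hash parameters above are consistent with each other: HASH_SIZE is 256 * 4 = 1024 entries, and HASH_RIGHT_SHIFT is 22 because 32 - log2(1024) = 22, so the top ten bits of the multiplied pixel value select the initial bucket; collisions fall back to the linear probing shown in the loop.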
static int SetFrame(const WebPConfig* const config, int allow_mixed,
int is_key_frame, const WebPPicture* const prev_canvas,
WebPPicture* const frame, const WebPFrameRect* const rect,
const WebPMuxFrameInfo* const info,
WebPPicture* const sub_frame, EncodedFrame* encoded_frame) {
int try_lossless;
int try_lossy;
int try_both;
WebPMemoryWriter mem1, mem2;
WebPData* encoded_data;
WebPMuxFrameInfo* const dst =
is_key_frame ? &encoded_frame->key_frame : &encoded_frame->sub_frame;
*dst = *info;
encoded_data = &dst->bitstream;
WebPMemoryWriterInit(&mem1);
WebPMemoryWriterInit(&mem2);
if (!allow_mixed) {
try_lossless = config->lossless;
try_lossy = !try_lossless;
} else { // Use a heuristic for trying lossless and/or lossy compression.
const int num_colors = GetColorCount(sub_frame);
try_lossless = (num_colors < MAX_COLORS_LOSSLESS);
try_lossy = (num_colors >= MIN_COLORS_LOSSY);
}
try_both = try_lossless && try_lossy;
if (try_lossless) {
WebPConfig config_ll = *config;
config_ll.lossless = 1;
if (!EncodeFrame(&config_ll, sub_frame, &mem1)) {
goto Err;
}
}
if (try_lossy) {
WebPConfig config_lossy = *config;
config_lossy.lossless = 0;
if (!is_key_frame) {
// For lossy compression of a frame, it's better to replace transparent
// pixels of 'curr' with actual RGB values, whenever possible.
ReduceTransparency(prev_canvas, rect, frame);
// TODO(later): Investigate if this helps lossless compression as well.
FlattenSimilarBlocks(prev_canvas, rect, frame);
}
if (!EncodeFrame(&config_lossy, sub_frame, &mem2)) {
goto Err;
}
}
if (try_both) { // Pick the encoding with smallest size.
// TODO(later): Perhaps a rough SSIM/PSNR produced by the encoder should
// also be a criterion, in addition to sizes.
if (mem1.size <= mem2.size) {
free(mem2.mem);
GetEncodedData(&mem1, encoded_data);
} else {
free(mem1.mem);
GetEncodedData(&mem2, encoded_data);
}
} else {
GetEncodedData(try_lossless ? &mem1 : &mem2, encoded_data);
}
return 1;
Err:
free(mem1.mem);
free(mem2.mem);
return 0;
}
#undef MIN_COLORS_LOSSY
#undef MAX_COLORS_LOSSLESS
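With mixed mode enabled, the color count drives the choice above: a frame with 20 distinct colors is tried as lossless only (20 < MIN_COLORS_LOSSY), one with 100 colors is encoded both ways and the smaller bitstream wins, and one with 220 colors (or the MAX_COLOR_COUNT cap of 256) is tried as lossy only.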
// Returns cached frame at given 'position' index.
static EncodedFrame* CacheGetFrame(const WebPFrameCache* const cache,
size_t position) {
assert(cache->start + position < cache->size);
return &cache->encoded_frames[cache->start + position];
}
// Calculate the penalty incurred if we encode given frame as a key frame
// instead of a sub-frame.
static int64_t KeyFramePenalty(const EncodedFrame* const encoded_frame) {
return ((int64_t)encoded_frame->key_frame.bitstream.size -
encoded_frame->sub_frame.bitstream.size);
}
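The penalty is simply the byte cost of promoting a frame: if the key-frame encoding of a frame is 14000 bytes and its sub-frame encoding is 9000 bytes, the penalty is 5000. Within the current kmin..kmax window, the frame with the smallest (possibly negative) penalty is the one selected as the forced key frame in WebPFrameCacheAddFrame() below.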
static void DisposeFrame(WebPMuxAnimDispose dispose_method,
const WebPFrameRect* const gif_rect,
WebPPicture* const frame, WebPPicture* const canvas) {
if (dispose_method == WEBP_MUX_DISPOSE_BACKGROUND) {
WebPUtilClearPic(frame, NULL);
WebPUtilClearPic(canvas, gif_rect);
}
}
int WebPFrameCacheAddFrame(WebPFrameCache* const cache,
const WebPConfig* const config,
const WebPFrameRect* const orig_rect,
WebPPicture* const frame,
WebPMuxFrameInfo* const info) {
int ok = 0;
WebPFrameRect rect = *orig_rect;
WebPPicture sub_image; // View extracted from 'frame' with rectangle 'rect'.
WebPPicture* const prev_canvas = &cache->prev_canvas;
const size_t position = cache->count;
const int allow_mixed = cache->allow_mixed;
EncodedFrame* const encoded_frame = CacheGetFrame(cache, position);
assert(position < cache->size);
// Snap to even offsets (and adjust dimensions if needed).
rect.width += (rect.x_offset & 1);
rect.height += (rect.y_offset & 1);
rect.x_offset &= ~1;
rect.y_offset &= ~1;
if (!WebPPictureView(frame, rect.x_offset, rect.y_offset,
rect.width, rect.height, &sub_image)) {
return 0;
}
info->x_offset = rect.x_offset;
info->y_offset = rect.y_offset;
++cache->count;
if (cache->is_first_frame || IsKeyFrame(frame, &rect, prev_canvas)) {
// Add this as a key frame.
if (!SetFrame(config, allow_mixed, 1, NULL, NULL, NULL, info, &sub_image,
encoded_frame)) {
goto End;
}
cache->keyframe = position;
cache->flush_count = cache->count;
cache->count_since_key_frame = 0;
// Update prev_canvas by simply copying from 'curr'.
CopyPixels(frame, prev_canvas);
} else {
++cache->count_since_key_frame;
if (cache->count_since_key_frame <= cache->kmin) {
// Add this as a frame rectangle.
if (!SetFrame(config, allow_mixed, 0, prev_canvas, frame, &rect, info,
&sub_image, encoded_frame)) {
goto End;
}
cache->flush_count = cache->count;
// Update prev_canvas by blending 'curr' into it.
BlendPixels(frame, orig_rect, prev_canvas);
} else {
WebPPicture full_image;
WebPMuxFrameInfo full_image_info;
int frame_added;
int64_t curr_delta;
// Add frame rectangle to cache.
if (!SetFrame(config, allow_mixed, 0, prev_canvas, frame, &rect, info,
&sub_image, encoded_frame)) {
goto End;
}
// Convert to a key frame.
CopyPixels(frame, &cache->curr_canvas);
ConvertToKeyFrame(prev_canvas, &rect, &cache->curr_canvas);
if (!WebPPictureView(&cache->curr_canvas, rect.x_offset, rect.y_offset,
rect.width, rect.height, &full_image)) {
goto End;
}
full_image_info = *info;
full_image_info.x_offset = rect.x_offset;
full_image_info.y_offset = rect.y_offset;
// Add key frame to cache, too.
frame_added = SetFrame(config, allow_mixed, 1, NULL, NULL, NULL,
&full_image_info, &full_image, encoded_frame);
WebPPictureFree(&full_image);
if (!frame_added) goto End;
// Analyze size difference of the two variants.
curr_delta = KeyFramePenalty(encoded_frame);
if (curr_delta <= cache->best_delta) { // Pick this as keyframe.
cache->keyframe = position;
cache->best_delta = curr_delta;
cache->flush_count = cache->count - 1; // We can flush previous frames.
}
if (cache->count_since_key_frame == cache->kmax) {
cache->flush_count = cache->count;
cache->count_since_key_frame = 0;
}
// Update prev_canvas by simply copying from 'curr_canvas'.
CopyPixels(&cache->curr_canvas, prev_canvas);
}
}
DisposeFrame(info->dispose_method, orig_rect, frame, prev_canvas);
cache->is_first_frame = 0;
ok = 1;
End:
WebPPictureFree(&sub_image);
if (!ok) {
FrameRelease(encoded_frame);
--cache->count; // Roll back the count, as the frame addition failed.
}
return ok;
}
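Note the even-offset snapping at the top of this function: a GIF frame at offset (3, 5) of size 10 x 7 is widened to an 11 x 8 rectangle at offset (2, 4), because the WebP container stores ANMF frame offsets in units of two pixels.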
WebPMuxError WebPFrameCacheFlush(WebPFrameCache* const cache, int verbose,
WebPMux* const mux) {
while (cache->flush_count > 0) {
WebPMuxFrameInfo* info;
WebPMuxError err;
EncodedFrame* const curr = CacheGetFrame(cache, 0);
// Pick frame or full canvas.
if (cache->keyframe == 0) {
info = &curr->key_frame;
info->blend_method = WEBP_MUX_NO_BLEND;
cache->keyframe = KEYFRAME_NONE;
cache->best_delta = DELTA_INFINITY;
} else {
info = &curr->sub_frame;
info->blend_method = WEBP_MUX_BLEND;
}
// Add to mux.
err = WebPMuxPushFrame(mux, info, 1);
if (err != WEBP_MUX_OK) return err;
if (verbose) {
printf("Added frame. offset:%d,%d duration:%d dispose:%d blend:%d\n",
info->x_offset, info->y_offset, info->duration,
info->dispose_method, info->blend_method);
}
FrameRelease(curr);
++cache->start;
--cache->flush_count;
--cache->count;
if (cache->keyframe != KEYFRAME_NONE) --cache->keyframe;
}
if (cache->count == 0) CacheReset(cache);
return WEBP_MUX_OK;
}
WebPMuxError WebPFrameCacheFlushAll(WebPFrameCache* const cache, int verbose,
WebPMux* const mux) {
cache->flush_count = cache->count; // Force flushing of all frames.
return WebPFrameCacheFlush(cache, verbose, mux);
}
//------------------------------------------------------------------------------

examples/gif2webp_util.h Normal file

@ -0,0 +1,80 @@
// Copyright 2013 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// Helper structs and methods for gif2webp tool.
//
// Author: Urvang (urvang@google.com)
#ifndef WEBP_EXAMPLES_GIF2WEBP_UTIL_H_
#define WEBP_EXAMPLES_GIF2WEBP_UTIL_H_
#include <stdlib.h>
#include "webp/mux.h"
#ifdef __cplusplus
extern "C" {
#endif
//------------------------------------------------------------------------------
// Helper utilities.
#define WEBP_UTIL_TRANSPARENT_COLOR 0x00ffffff
struct WebPPicture;
typedef struct {
int x_offset, y_offset, width, height;
} WebPFrameRect;
// Clear pixels in 'picture' within given 'rect' to transparent color.
void WebPUtilClearPic(struct WebPPicture* const picture,
const WebPFrameRect* const rect);
//------------------------------------------------------------------------------
// Frame cache.
typedef struct WebPFrameCache WebPFrameCache;
// Given the minimum distance between key frames 'kmin' and maximum distance
// between key frames 'kmax', returns an appropriately allocated cache object.
// If 'allow_mixed' is true, the subsequent calls to WebPFrameCacheAddFrame()
// will heuristically pick lossy or lossless compression for each frame.
// Use WebPFrameCacheDelete() to deallocate the 'cache'.
WebPFrameCache* WebPFrameCacheNew(int width, int height,
size_t kmin, size_t kmax, int allow_mixed);
// Release all the frame data from 'cache' and free 'cache'.
void WebPFrameCacheDelete(WebPFrameCache* const cache);
// Given an image described by 'frame', 'info' and 'orig_rect', optimize it for
// WebP, encode it and add it to 'cache'.
// This takes care of frame disposal too, according to 'info->dispose_method'.
int WebPFrameCacheAddFrame(WebPFrameCache* const cache,
const WebPConfig* const config,
const WebPFrameRect* const orig_rect,
WebPPicture* const frame,
WebPMuxFrameInfo* const info);
// Flush the *ready* frames from cache and add them to 'mux'. If 'verbose' is
// true, prints the information about these frames.
WebPMuxError WebPFrameCacheFlush(WebPFrameCache* const cache, int verbose,
WebPMux* const mux);
// Similar to 'WebPFrameCacheFlush()', but flushes *all* the frames.
WebPMuxError WebPFrameCacheFlushAll(WebPFrameCache* const cache, int verbose,
WebPMux* const mux);
//------------------------------------------------------------------------------
#ifdef __cplusplus
} // extern "C"
#endif
#endif // WEBP_EXAMPLES_GIF2WEBP_UTIL_H_
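The header above is the full frame-cache API. Below is a hedged sketch of the intended call sequence, mirroring how gif2webp.c drives it; ReadNextGifFrame() is a hypothetical placeholder for the GIF-decoding side and error handling is abbreviated.

#include <string.h>
#include "webp/encode.h"
#include "webp/mux.h"
#include "./gif2webp_util.h"

// Hypothetical: decodes the next GIF frame into 'frame', filling 'rect' and
// 'info'; returns 0 when no frames remain.
static int ReadNextGifFrame(WebPPicture* const frame, WebPFrameRect* const rect,
                            WebPMuxFrameInfo* const info);

static int EncodeAnimation(WebPMux* const mux, int width, int height) {
  WebPConfig config;
  WebPPicture frame;
  WebPMuxFrameInfo info;
  WebPFrameRect rect;
  WebPFrameCache* cache = NULL;
  int ok = 0;
  if (!WebPConfigInit(&config) || !WebPPictureInit(&frame)) return 0;
  frame.width = width;
  frame.height = height;
  frame.use_argb = 1;               // the cache works on ARGB canvases
  if (!WebPPictureAlloc(&frame)) return 0;
  memset(&info, 0, sizeof(info));
  info.id = WEBP_CHUNK_ANMF;
  info.dispose_method = WEBP_MUX_DISPOSE_BACKGROUND;
  info.blend_method = WEBP_MUX_BLEND;
  cache = WebPFrameCacheNew(width, height, 9, 17, 0);  // lossless defaults
  if (cache == NULL) goto End;
  while (ReadNextGifFrame(&frame, &rect, &info)) {
    if (!WebPFrameCacheAddFrame(cache, &config, &rect, &frame, &info)) goto End;
    if (WebPFrameCacheFlush(cache, 0 /*verbose*/, mux) != WEBP_MUX_OK) goto End;
  }
  if (WebPFrameCacheFlushAll(cache, 0, mux) != WEBP_MUX_OK) goto End;
  ok = 1;
 End:
  WebPFrameCacheDelete(cache);
  WebPPictureFree(&frame);
  return ok;
}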


@ -1,8 +1,10 @@
// Copyright 2012 Google Inc. All Rights Reserved.
//
// This code is licensed under the same terms as WebM:
// Software License Agreement: http://www.webmproject.org/license/software/
// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// JPEG decode.
@ -230,7 +232,6 @@ int ReadJPEG(FILE* in_file, WebPPicture* const pic, Metadata* const metadata) {
jpeg_read_header(&dinfo, TRUE);
dinfo.out_color_space = JCS_RGB;
dinfo.dct_method = JDCT_IFAST;
dinfo.do_fancy_upsampling = TRUE;
jpeg_start_decompress(&dinfo);


@ -1,8 +1,10 @@
// Copyright 2012 Google Inc. All Rights Reserved.
//
// This code is licensed under the same terms as WebM:
// Software License Agreement: http://www.webmproject.org/license/software/
// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// JPEG decode.
@ -13,7 +15,7 @@
#include <stdio.h>
#include "webp/types.h"
#if defined(__cplusplus) || defined(c_plusplus)
#ifdef __cplusplus
extern "C" {
#endif
@ -26,7 +28,7 @@ struct WebPPicture;
int ReadJPEG(FILE* in_file, struct WebPPicture* const pic,
struct Metadata* const metadata);
#if defined(__cplusplus) || defined(c_plusplus)
#ifdef __cplusplus
} // extern "C"
#endif


@ -1,8 +1,10 @@
// Copyright 2012 Google Inc. All Rights Reserved.
//
// This code is licensed under the same terms as WebM:
// Software License Agreement: http://www.webmproject.org/license/software/
// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// Metadata types and functions.


@ -1,8 +1,10 @@
// Copyright 2012 Google Inc. All Rights Reserved.
//
// This code is licensed under the same terms as WebM:
// Software License Agreement: http://www.webmproject.org/license/software/
// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// Metadata types and functions.
@ -13,7 +15,7 @@
#include "webp/types.h"
#if defined(__cplusplus) || defined(c_plusplus)
#ifdef __cplusplus
extern "C" {
#endif
@ -38,7 +40,7 @@ void MetadataFree(Metadata* const metadata);
int MetadataCopy(const char* metadata, size_t metadata_len,
MetadataPayload* const payload);
#if defined(__cplusplus) || defined(c_plusplus)
#ifdef __cplusplus
} // extern "C"
#endif


@ -1,8 +1,10 @@
// Copyright 2012 Google Inc. All Rights Reserved.
//
// This code is licensed under the same terms as WebM:
// Software License Agreement: http://www.webmproject.org/license/software/
// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// PNG decode.
@ -19,12 +21,13 @@
#include <png.h>
#include <setjmp.h> // note: this must be included *after* png.h
#include <stdlib.h>
#include <string.h>
#include "webp/encode.h"
#include "./metadata.h"
static void PNGAPI error_function(png_structp png, png_const_charp dummy) {
(void)dummy; // remove variable-unused warning
static void PNGAPI error_function(png_structp png, png_const_charp error) {
if (error != NULL) fprintf(stderr, "libpng error: %s\n", error);
longjmp(png_jmpbuf(png), 1);
}
@ -108,6 +111,8 @@ static const struct {
// See also: ExifTool on CPAN.
{ "Raw profile type exif", ProcessRawProfile, METADATA_OFFSET(exif) },
{ "Raw profile type xmp", ProcessRawProfile, METADATA_OFFSET(xmp) },
// Exiftool puts exif data in APP1 chunk, too.
{ "Raw profile type APP1", ProcessRawProfile, METADATA_OFFSET(exif) },
// XMP Specification Part 3, Section 3 #PNG
{ "XML:com.adobe.xmp", MetadataCopy, METADATA_OFFSET(xmp) },
{ NULL, NULL, 0 },
@ -207,7 +212,6 @@ int ReadPNG(FILE* in_file, WebPPicture* const pic, int keep_alpha,
Error:
MetadataFree(metadata);
png_destroy_read_struct(&png, &info, &end_info);
free(rgb);
goto End;
}
@ -267,15 +271,16 @@ int ReadPNG(FILE* in_file, WebPPicture* const pic, int keep_alpha,
pic->width = width;
pic->height = height;
pic->use_argb = 1;
ok = has_alpha ? WebPPictureImportRGBA(pic, rgb, stride)
: WebPPictureImportRGB(pic, rgb, stride);
free(rgb);
if (!ok) {
goto Error;
}
End:
free(rgb);
return ok;
}
#else // !WEBP_HAVE_PNG


@ -1,8 +1,10 @@
// Copyright 2012 Google Inc. All Rights Reserved.
//
// This code is licensed under the same terms as WebM:
// Software License Agreement: http://www.webmproject.org/license/software/
// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// PNG decode.
@ -12,7 +14,7 @@
#include <stdio.h>
#if defined(__cplusplus) || defined(c_plusplus)
#ifdef __cplusplus
extern "C" {
#endif
@ -26,7 +28,7 @@ struct WebPPicture;
int ReadPNG(FILE* in_file, struct WebPPicture* const pic, int keep_alpha,
struct Metadata* const metadata);
#if defined(__cplusplus) || defined(c_plusplus)
#ifdef __cplusplus
} // extern "C"
#endif


@ -1,8 +1,10 @@
// Copyright 2011 Google Inc. All Rights Reserved.
//
// This code is licensed under the same terms as WebM:
// Software License Agreement: http://www.webmproject.org/license/software/
// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// Helper functions to measure elapsed time.
@ -17,6 +19,10 @@
typedef LARGE_INTEGER Stopwatch;
static WEBP_INLINE void StopwatchReset(Stopwatch* watch) {
QueryPerformanceCounter(watch);
}
static WEBP_INLINE double StopwatchReadAndReset(Stopwatch* watch) {
const LARGE_INTEGER old_value = *watch;
LARGE_INTEGER freq;
@ -35,6 +41,10 @@ static WEBP_INLINE double StopwatchReadAndReset(Stopwatch* watch) {
typedef struct timeval Stopwatch;
static WEBP_INLINE void StopwatchReset(Stopwatch* watch) {
gettimeofday(watch, NULL);
}
static WEBP_INLINE double StopwatchReadAndReset(Stopwatch* watch) {
const struct timeval old_value = *watch;
gettimeofday(watch, NULL);
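A minimal usage sketch of this helper, matching how dwebp times its decode (DoWork() is a hypothetical workload):

Stopwatch watch;
StopwatchReset(&watch);
DoWork();   // hypothetical
printf("Elapsed: %.3fs\n", StopwatchReadAndReset(&watch));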


@ -1,8 +1,10 @@
// Copyright 2012 Google Inc. All Rights Reserved.
//
// This code is licensed under the same terms as WebM:
// Software License Agreement: http://www.webmproject.org/license/software/
// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// TIFF decode.
@ -98,6 +100,7 @@ int ReadTIFF(const char* const filename,
#ifdef __BIG_ENDIAN__
TIFFSwabArrayOfLong(raster, width * height);
#endif
pic->use_argb = 1;
ok = keep_alpha
? WebPPictureImportRGBA(pic, (const uint8_t*)raster, stride)
: WebPPictureImportRGBX(pic, (const uint8_t*)raster, stride);


@ -1,8 +1,10 @@
// Copyright 2012 Google Inc. All Rights Reserved.
//
// This code is licensed under the same terms as WebM:
// Software License Agreement: http://www.webmproject.org/license/software/
// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// TIFF decode.
@ -10,7 +12,7 @@
#ifndef WEBP_EXAMPLES_TIFFDEC_H_
#define WEBP_EXAMPLES_TIFFDEC_H_
#if defined(__cplusplus) || defined(c_plusplus)
#ifdef __cplusplus
extern "C" {
#endif
@ -25,7 +27,7 @@ int ReadTIFF(const char* const filename,
struct WebPPicture* const pic, int keep_alpha,
struct Metadata* const metadata);
#if defined(__cplusplus) || defined(c_plusplus)
#ifdef __cplusplus
} // extern "C"
#endif


@ -1,25 +1,26 @@
// Copyright 2011 Google Inc. All Rights Reserved.
//
// This code is licensed under the same terms as WebM:
// Software License Agreement: http://www.webmproject.org/license/software/
// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// Simple WebP file viewer.
//
// Compiling on linux:
// sudo apt-get install freeglut3-dev mesa-common-dev
// gcc -o vwebp vwebp.c -O3 -lwebp -lwebpmux -lglut -lGL -lpthread -lm
// Compiling on Mac + XCode:
// gcc -o vwebp vwebp.c -lwebp -lwebpmux -framework GLUT -framework OpenGL
// Simple OpenGL-based WebP file viewer.
//
// Author: Skal (pascal.massimino@gmail.com)
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifdef __APPLE__
#if defined(WEBP_HAVE_GL)
#if defined(HAVE_GLUT_GLUT_H)
#include <GLUT/glut.h>
#else
#include <GL/glut.h>
@ -58,15 +59,11 @@ static struct {
const char* file_name;
WebPData data;
WebPDecoderConfig* config;
WebPDecoderConfig config;
const WebPDecBuffer* pic;
WebPDemuxer* dmux;
WebPIterator frameiter;
struct {
int width, height;
int x_offset, y_offset;
enum WebPMuxAnimDispose dispose_method;
} prev_frame;
WebPIterator curr_frame;
WebPIterator prev_frame;
WebPChunkIterator iccp;
} kParams;
@ -78,7 +75,8 @@ static void ClearPreviousPic(void) {
static void ClearParams(void) {
ClearPreviousPic();
WebPDataClear(&kParams.data);
WebPDemuxReleaseIterator(&kParams.frameiter);
WebPDemuxReleaseIterator(&kParams.curr_frame);
WebPDemuxReleaseIterator(&kParams.prev_frame);
WebPDemuxReleaseChunkIterator(&kParams.iccp);
WebPDemuxDelete(kParams.dmux);
kParams.dmux = NULL;
@ -148,25 +146,25 @@ static int ApplyColorProfile(const WebPData* const profile,
//------------------------------------------------------------------------------
// File decoding
static int Decode(void) { // Fills kParams.frameiter
const WebPIterator* const iter = &kParams.frameiter;
WebPDecoderConfig* const config = kParams.config;
static int Decode(void) { // Fills kParams.curr_frame
const WebPIterator* const curr = &kParams.curr_frame;
WebPDecoderConfig* const config = &kParams.config;
WebPDecBuffer* const output_buffer = &config->output;
int ok = 0;
ClearPreviousPic();
output_buffer->colorspace = MODE_RGBA;
ok = (WebPDecode(iter->fragment.bytes, iter->fragment.size,
ok = (WebPDecode(curr->fragment.bytes, curr->fragment.size,
config) == VP8_STATUS_OK);
if (!ok) {
fprintf(stderr, "Decoding of frame #%d failed!\n", iter->frame_num);
fprintf(stderr, "Decoding of frame #%d failed!\n", curr->frame_num);
} else {
kParams.pic = output_buffer;
if (kParams.use_color_profile) {
ok = ApplyColorProfile(&kParams.iccp.chunk, output_buffer);
if (!ok) {
fprintf(stderr, "Applying color profile to frame #%d failed!\n",
iter->frame_num);
curr->frame_num);
}
}
}
@ -177,10 +175,10 @@ static void decode_callback(int what) {
if (what == 0 && !kParams.done) {
int duration = 0;
if (kParams.dmux != NULL) {
WebPIterator* const iter = &kParams.frameiter;
if (!WebPDemuxNextFrame(iter)) {
WebPDemuxReleaseIterator(iter);
if (WebPDemuxGetFrame(kParams.dmux, 1, iter)) {
WebPIterator* const curr = &kParams.curr_frame;
if (!WebPDemuxNextFrame(curr)) {
WebPDemuxReleaseIterator(curr);
if (WebPDemuxGetFrame(kParams.dmux, 1, curr)) {
--kParams.loop_count;
kParams.done = (kParams.loop_count == 0);
} else {
@ -189,7 +187,7 @@ static void decode_callback(int what) {
return;
}
}
duration = iter->duration;
duration = curr->duration;
}
if (!Decode()) {
kParams.decoding_error = 1;
@ -282,40 +280,45 @@ static void DrawCheckerBoard(void) {
static void HandleDisplay(void) {
const WebPDecBuffer* const pic = kParams.pic;
const WebPIterator* const iter = &kParams.frameiter;
const WebPIterator* const curr = &kParams.curr_frame;
WebPIterator* const prev = &kParams.prev_frame;
GLfloat xoff, yoff;
if (pic == NULL) return;
glPushMatrix();
glPixelZoom(1, -1);
xoff = (GLfloat)(2. * iter->x_offset / kParams.canvas_width);
yoff = (GLfloat)(2. * iter->y_offset / kParams.canvas_height);
xoff = (GLfloat)(2. * curr->x_offset / kParams.canvas_width);
yoff = (GLfloat)(2. * curr->y_offset / kParams.canvas_height);
glRasterPos2f(-1.f + xoff, 1.f - yoff);
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
glPixelStorei(GL_UNPACK_ROW_LENGTH, pic->u.RGBA.stride / 4);
if (kParams.prev_frame.dispose_method == WEBP_MUX_DISPOSE_BACKGROUND) {
if (prev->dispose_method == WEBP_MUX_DISPOSE_BACKGROUND ||
curr->blend_method == WEBP_MUX_NO_BLEND) {
// TODO(later): these offsets and those above should factor in window size.
// they will be incorrect if the window is resized.
// glScissor() takes window coordinates (0,0 at bottom left).
const int window_x = kParams.prev_frame.x_offset;
const int window_y = kParams.canvas_height -
kParams.prev_frame.y_offset -
kParams.prev_frame.height;
int window_x, window_y;
if (prev->dispose_method == WEBP_MUX_DISPOSE_BACKGROUND) {
// Clear the previous frame rectangle.
window_x = prev->x_offset;
window_y = kParams.canvas_height - prev->y_offset - prev->height;
} else { // curr->blend_method == WEBP_MUX_NO_BLEND.
// We simulate no-blending behavior by first clearing the current frame
// rectangle (to a checker-board) and then alpha-blending against it.
window_x = curr->x_offset;
window_y = kParams.canvas_height - curr->y_offset - curr->height;
}
glEnable(GL_SCISSOR_TEST);
// Only updated the requested area, not the whole canvas.
glScissor(window_x, window_y,
kParams.prev_frame.width, kParams.prev_frame.height);
// Only update the requested area, not the whole canvas.
glScissor(window_x, window_y, prev->width, prev->height);
glClear(GL_COLOR_BUFFER_BIT); // use clear color
DrawCheckerBoard();
glDisable(GL_SCISSOR_TEST);
}
kParams.prev_frame.width = iter->width;
kParams.prev_frame.height = iter->height;
kParams.prev_frame.x_offset = iter->x_offset;
kParams.prev_frame.y_offset = iter->y_offset;
kParams.prev_frame.dispose_method = iter->dispose_method;
*prev = *curr;
glDrawPixels(pic->width, pic->height,
GL_RGBA, GL_UNSIGNED_BYTE,
@ -331,9 +334,9 @@ static void HandleDisplay(void) {
glColor4f(0.90f, 0.0f, 0.90f, 1.0f);
glRasterPos2f(-0.95f, 0.80f);
PrintString(tmp);
if (iter->x_offset != 0 || iter->y_offset != 0) {
if (curr->x_offset != 0 || curr->y_offset != 0) {
snprintf(tmp, sizeof(tmp), " (offset:%d,%d)",
iter->x_offset, iter->y_offset);
curr->x_offset, curr->y_offset);
glRasterPos2f(-0.95f, 0.70f);
PrintString(tmp);
}
@ -373,6 +376,7 @@ static void Help(void) {
" -noicc ....... don't use the icc profile if present.\n"
" -nofancy ..... don't use the fancy YUV420 upscaler.\n"
" -nofilter .... disable in-loop filtering.\n"
" -dither <int> dithering strength (0..100). Default=50.\n"
" -mt .......... use multi-threading.\n"
" -info ........ print info.\n"
" -h ....... this help message.\n"
@ -385,14 +389,16 @@ static void Help(void) {
}
int main(int argc, char *argv[]) {
WebPDecoderConfig config;
int c;
WebPDecoderConfig* const config = &kParams.config;
WebPIterator* const curr = &kParams.curr_frame;
WebPIterator* const prev = &kParams.prev_frame;
if (!WebPInitDecoderConfig(&config)) {
if (!WebPInitDecoderConfig(config)) {
fprintf(stderr, "Library version mismatch!\n");
return -1;
}
kParams.config = &config;
config->options.dithering_strength = 50;
kParams.use_color_profile = 1;
for (c = 1; c < argc; ++c) {
@ -402,9 +408,11 @@ int main(int argc, char *argv[]) {
} else if (!strcmp(argv[c], "-noicc")) {
kParams.use_color_profile = 0;
} else if (!strcmp(argv[c], "-nofancy")) {
config.options.no_fancy_upsampling = 1;
config->options.no_fancy_upsampling = 1;
} else if (!strcmp(argv[c], "-nofilter")) {
config.options.bypass_filtering = 1;
config->options.bypass_filtering = 1;
} else if (!strcmp(argv[c], "-dither") && c + 1 < argc) {
config->options.dithering_strength = strtol(argv[++c], NULL, 0);
} else if (!strcmp(argv[c], "-info")) {
kParams.print_info = 1;
} else if (!strcmp(argv[c], "-version")) {
@ -416,7 +424,10 @@ int main(int argc, char *argv[]) {
(dmux_version >> 8) & 0xff, dmux_version & 0xff);
return 0;
} else if (!strcmp(argv[c], "-mt")) {
config.options.use_threads = 1;
config->options.use_threads = 1;
} else if (!strcmp(argv[c], "--")) {
if (c < argc - 1) kParams.file_name = argv[++c];
break;
} else if (argv[c][0] == '-') {
printf("Unknown option '%s'\n", argv[c]);
Help();
@ -437,6 +448,11 @@ int main(int argc, char *argv[]) {
goto Error;
}
if (!WebPGetInfo(kParams.data.bytes, kParams.data.size, NULL, NULL)) {
fprintf(stderr, "Input file doesn't appear to be WebP format.\n");
goto Error;
}
kParams.dmux = WebPDemux(&kParams.data);
if (kParams.dmux == NULL) {
fprintf(stderr, "Could not create demuxing object!\n");
@ -453,10 +469,10 @@ int main(int argc, char *argv[]) {
printf("Canvas: %d x %d\n", kParams.canvas_width, kParams.canvas_height);
}
kParams.prev_frame.width = kParams.canvas_width;
kParams.prev_frame.height = kParams.canvas_height;
kParams.prev_frame.x_offset = kParams.prev_frame.y_offset = 0;
kParams.prev_frame.dispose_method = WEBP_MUX_DISPOSE_BACKGROUND;
prev->width = kParams.canvas_width;
prev->height = kParams.canvas_height;
prev->x_offset = prev->y_offset = 0;
prev->dispose_method = WEBP_MUX_DISPOSE_BACKGROUND;
memset(&kParams.iccp, 0, sizeof(kParams.iccp));
kParams.has_color_profile =
@ -472,20 +488,20 @@ int main(int argc, char *argv[]) {
#endif
}
if (!WebPDemuxGetFrame(kParams.dmux, 1, &kParams.frameiter)) goto Error;
if (!WebPDemuxGetFrame(kParams.dmux, 1, curr)) goto Error;
kParams.has_animation = (kParams.frameiter.num_frames > 1);
kParams.has_animation = (curr->num_frames > 1);
kParams.loop_count = (int)WebPDemuxGetI(kParams.dmux, WEBP_FF_LOOP_COUNT);
kParams.bg_color = WebPDemuxGetI(kParams.dmux, WEBP_FF_BACKGROUND_COLOR);
printf("VP8X: Found %d images in file (loop count = %d)\n",
kParams.frameiter.num_frames, kParams.loop_count);
curr->num_frames, kParams.loop_count);
// Decode first frame
if (!Decode()) goto Error;
// Position iterator to last frame. Next call to HandleDisplay will wrap over.
// We take this into account by bumping up loop_count.
WebPDemuxGetFrame(kParams.dmux, 0, &kParams.frameiter);
WebPDemuxGetFrame(kParams.dmux, 0, curr);
if (kParams.loop_count) ++kParams.loop_count;
// Start display (and timer)
@ -507,4 +523,14 @@ int main(int argc, char *argv[]) {
return -1;
}
#else // !WEBP_HAVE_GL
int main(int argc, const char *argv[]) {
fprintf(stderr, "OpenGL support not enabled in %s.\n", argv[0]);
(void)argc;
return 0;
}
#endif
//------------------------------------------------------------------------------

View File

@ -1,16 +1,15 @@
// Copyright 2011 Google Inc. All Rights Reserved.
//
// This code is licensed under the same terms as WebM:
// Software License Agreement: http://www.webmproject.org/license/software/
// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// Simple command-line to create a WebP container file and to extract or strip
// relevant data from the container file.
//
// Compile with: gcc -o webpmux webpmux.c -lwebpmux -lwebp
//
//
// Authors: Vikas (vikaas.arora@gmail.com),
// Urvang (urvang@google.com)
@ -54,6 +53,7 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "webp/decode.h"
#include "webp/mux.h"
#include "./example_util.h"
@ -146,12 +146,6 @@ static const char* ErrorString(WebPMuxError err) {
return err; \
}
#define RETURN_IF_ERROR2(ERR_MSG, FORMAT_STR) \
if (err != WEBP_MUX_OK) { \
fprintf(stderr, ERR_MSG, FORMAT_STR); \
return err; \
}
#define RETURN_IF_ERROR3(ERR_MSG, FORMAT_STR1, FORMAT_STR2) \
if (err != WEBP_MUX_OK) { \
fprintf(stderr, ERR_MSG, FORMAT_STR1, FORMAT_STR2); \
@ -173,16 +167,21 @@ static const char* ErrorString(WebPMuxError err) {
} while (0)
#define ERROR_GOTO3(ERR_MSG, FORMAT_STR1, FORMAT_STR2, LABEL) \
do { \
fprintf(stderr, ERR_MSG, FORMAT_STR1, FORMAT_STR2); \
ok = 0; \
goto LABEL; \
} while (0)
do { \
fprintf(stderr, ERR_MSG, FORMAT_STR1, FORMAT_STR2); \
ok = 0; \
goto LABEL; \
} while (0)
static WebPMuxError DisplayInfo(const WebPMux* mux) {
int width, height;
uint32_t flag;
WebPMuxError err = WebPMuxGetFeatures(mux, &flag);
WebPMuxError err = WebPMuxGetCanvasSize(mux, &width, &height);
assert(err == WEBP_MUX_OK); // As WebPMuxCreate() was successful earlier.
printf("Canvas size: %d x %d\n", width, height);
err = WebPMuxGetFeatures(mux, &flag);
#ifndef WEBP_EXPERIMENTAL_FEATURES
if (flag & FRAGMENTS_FLAG) err = WEBP_MUX_INVALID_ARGUMENT;
#endif
@ -212,28 +211,44 @@ static WebPMuxError DisplayInfo(const WebPMux* mux) {
if (is_anim) {
WebPMuxAnimParams params;
err = WebPMuxGetAnimationParams(mux, &params);
RETURN_IF_ERROR("Failed to retrieve animation parameters\n");
assert(err == WEBP_MUX_OK);
printf("Background color : 0x%.8X Loop Count : %d\n",
params.bgcolor, params.loop_count);
}
err = WebPMuxNumChunks(mux, id, &nFrames);
RETURN_IF_ERROR2("Failed to retrieve number of %ss\n", type_str);
assert(err == WEBP_MUX_OK);
printf("Number of %ss: %d\n", type_str, nFrames);
if (nFrames > 0) {
int i;
printf("No.: x_offset y_offset ");
if (is_anim) printf("duration dispose ");
printf("No.: width height alpha x_offset y_offset ");
if (is_anim) printf("duration dispose blend ");
printf("image_size\n");
for (i = 1; i <= nFrames; i++) {
WebPMuxFrameInfo frame;
err = WebPMuxGetFrame(mux, i, &frame);
RETURN_IF_ERROR3("Failed to retrieve %s#%d\n", type_str, i);
printf("%3d: %8d %8d ", i, frame.x_offset, frame.y_offset);
if (is_anim) printf("%8d %7d ", frame.duration, frame.dispose_method);
printf("%10d\n", (int)frame.bitstream.size);
if (err == WEBP_MUX_OK) {
WebPBitstreamFeatures features;
const VP8StatusCode status = WebPGetFeatures(
frame.bitstream.bytes, frame.bitstream.size, &features);
assert(status == VP8_STATUS_OK); // Checked by WebPMuxCreate().
(void)status;
printf("%3d: %5d %5d %5s %8d %8d ", i, features.width,
features.height, features.has_alpha ? "yes" : "no",
frame.x_offset, frame.y_offset);
if (is_anim) {
const char* const dispose =
(frame.dispose_method == WEBP_MUX_DISPOSE_NONE) ? "none"
: "background";
const char* const blend =
(frame.blend_method == WEBP_MUX_BLEND) ? "yes" : "no";
printf("%8d %10s %5s ", frame.duration, dispose, blend);
}
printf("%10d\n", (int)frame.bitstream.size);
}
WebPDataClear(&frame.bitstream);
RETURN_IF_ERROR3("Failed to retrieve %s#%d\n", type_str, i);
}
}
}
@ -241,29 +256,32 @@ static WebPMuxError DisplayInfo(const WebPMux* mux) {
if (flag & ICCP_FLAG) {
WebPData icc_profile;
err = WebPMuxGetChunk(mux, "ICCP", &icc_profile);
RETURN_IF_ERROR("Failed to retrieve the ICC profile\n");
assert(err == WEBP_MUX_OK);
printf("Size of the ICC profile data: %d\n", (int)icc_profile.size);
}
if (flag & EXIF_FLAG) {
WebPData exif;
err = WebPMuxGetChunk(mux, "EXIF", &exif);
RETURN_IF_ERROR("Failed to retrieve the EXIF metadata\n");
assert(err == WEBP_MUX_OK);
printf("Size of the EXIF metadata: %d\n", (int)exif.size);
}
if (flag & XMP_FLAG) {
WebPData xmp;
err = WebPMuxGetChunk(mux, "XMP ", &xmp);
RETURN_IF_ERROR("Failed to retrieve the XMP metadata\n");
assert(err == WEBP_MUX_OK);
printf("Size of the XMP metadata: %d\n", (int)xmp.size);
}
if ((flag & ALPHA_FLAG) && !(flag & (ANIMATION_FLAG | FRAGMENTS_FLAG))) {
WebPMuxFrameInfo image;
err = WebPMuxGetFrame(mux, 1, &image);
if (err == WEBP_MUX_OK) {
printf("Size of the image (with alpha): %d\n", (int)image.bitstream.size);
}
WebPDataClear(&image.bitstream);
RETURN_IF_ERROR("Failed to retrieve the image\n");
printf("Size of the image (with alpha): %d\n", (int)image.bitstream.size);
}
return WEBP_MUX_OK;
@ -324,11 +342,13 @@ static void PrintHelp(void) {
printf("\n");
printf("FRAME_OPTIONS(i):\n");
printf(" Create animation.\n");
printf(" file_i +di+xi+yi+mi\n");
printf(" file_i +di+[xi+yi[+mi[bi]]]\n");
printf(" where: 'file_i' is the i'th animation frame (WebP format),\n");
printf(" 'di' is the pause duration before next frame.\n");
printf(" 'xi','yi' specify the image offset for this frame.\n");
printf(" 'mi' is the dispose method for this frame (0 or 1).\n");
printf(" 'bi' is the blending method for this frame (+b or -b)."
"\n");
printf("\n");
printf("LOOP_COUNT:\n");
@ -405,22 +425,33 @@ static int WriteWebP(WebPMux* const mux, const char* filename) {
static int ParseFrameArgs(const char* args, WebPMuxFrameInfo* const info) {
int dispose_method, dummy;
const int num_args = sscanf(args, "+%d+%d+%d+%d+%d",
&info->duration, &info->x_offset, &info->y_offset,
&dispose_method, &dummy);
char plus_minus, blend_method;
const int num_args = sscanf(args, "+%d+%d+%d+%d%c%c+%d", &info->duration,
&info->x_offset, &info->y_offset, &dispose_method,
&plus_minus, &blend_method, &dummy);
switch (num_args) {
case 1:
info->x_offset = info->y_offset = 0; // fall through
case 3:
dispose_method = 0; // fall through
case 4:
plus_minus = '+';
blend_method = 'b'; // fall through
case 6:
break;
case 2:
case 5:
default:
return 0;
}
// Note: The sanity of the following conversion is checked by
// WebPMuxSetAnimationParams().
// WebPMuxPushFrame().
info->dispose_method = (WebPMuxAnimDispose)dispose_method;
if (blend_method != 'b') return 0;
if (plus_minus != '-' && plus_minus != '+') return 0;
info->blend_method =
(plus_minus == '+') ? WEBP_MUX_BLEND : WEBP_MUX_NO_BLEND;
return 1;
}
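// Worked example (illustrative, not part of the patch): with the sscanf
// format above, typical frame-argument strings parse as follows.
//   "+100"           -> duration=100, x=y=0,  dispose=0 (NONE),       blend=BLEND
//   "+100+50+50"     -> duration=100, x=y=50, dispose=0 (NONE),       blend=BLEND
//   "+100+50+50+1"   -> duration=100, x=y=50, dispose=1 (BACKGROUND), blend=BLEND
//   "+100+50+50+1-b" -> duration=100, x=y=50, dispose=1 (BACKGROUND), blend=NO_BLEND
// Any other shape (only two or five numeric fields, a separator other than
// '+'/'-', or a trailing character other than 'b') makes the parser return 0.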
@ -646,6 +677,17 @@ static int ParseCommandLine(int argc, const char* argv[],
(version >> 16) & 0xff, (version >> 8) & 0xff, version & 0xff);
DeleteConfig(config);
exit(0);
} else if (!strcmp(argv[i], "--")) {
if (i < argc - 1) {
++i;
if (config->input_ == NULL) {
config->input_ = argv[i];
} else {
ERROR_GOTO2("ERROR at '%s': Multiple input files specified.\n",
argv[i], ErrParse);
}
}
break;
} else {
ERROR_GOTO2("ERROR: Unknown option: '%s'.\n", argv[i], ErrParse);
}

View File

@ -1,8 +1,10 @@
// Copyright 2013 Google Inc. All Rights Reserved.
//
// This code is licensed under the same terms as WebM:
// Software License Agreement: http://www.webmproject.org/license/software/
// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// Windows Imaging Component (WIC) decode.
@ -126,7 +128,7 @@ static HRESULT ExtractICCP(IWICImagingFactory* const factory,
&size));
if (SUCCEEDED(hr) && size != iccp->size) {
fprintf(stderr, "Warning! ICC profile size (%u) != expected (%u)\n",
size, iccp->size);
size, (uint32_t)iccp->size);
iccp->size = size;
}
break;
@ -306,6 +308,7 @@ int ReadPictureWithWIC(const char* const filename,
int ok;
pic->width = width;
pic->height = height;
pic->use_argb = 1;
ok = importer->import(pic, rgb, stride);
if (!ok) hr = E_FAIL;
}

View File

@ -1,8 +1,10 @@
// Copyright 2013 Google Inc. All Rights Reserved.
//
// This code is licensed under the same terms as WebM:
// Software License Agreement: http://www.webmproject.org/license/software/
// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// Windows Imaging Component (WIC) decode.
@ -10,7 +12,7 @@
#ifndef WEBP_EXAMPLES_WICDEC_H_
#define WEBP_EXAMPLES_WICDEC_H_
#if defined(__cplusplus) || defined(c_plusplus)
#ifdef __cplusplus
extern "C" {
#endif
@ -25,7 +27,7 @@ int ReadPictureWithWIC(const char* const filename,
struct WebPPicture* const pic, int keep_alpha,
struct Metadata* const metadata);
#if defined(__cplusplus) || defined(c_plusplus)
#ifdef __cplusplus
} // extern "C"
#endif

View File

@ -15,6 +15,9 @@ set -e
declare -r SDK=$(xcodebuild -showsdks \
| grep iphoneos | sort | tail -n 1 | awk '{print substr($NF, 9)}'
)
# Extract Xcode version.
declare -r XCODE=$(xcodebuild -version | grep Xcode | cut -d " " -f2)
declare -r OLDPATH=${PATH}
# Add iPhoneOS-V6 to the list of platforms below if you need armv6 support.
@ -63,12 +66,24 @@ for PLATFORM in ${PLATFORMS}; do
ROOTDIR="${BUILDDIR}/${PLATFORM}-${SDK}-${ARCH}"
mkdir -p "${ROOTDIR}"
export DEVROOT="${PLATFORMSROOT}/${PLATFORM}.platform/Developer"
export SDKROOT="${DEVROOT}/SDKs/${PLATFORM}${SDK}.sdk"
SDKROOT="${PLATFORMSROOT}/${PLATFORM}.platform/Developer/SDKs/${PLATFORM}${SDK}.sdk/"
CFLAGS="-arch ${ARCH} -pipe -isysroot ${SDKROOT}"
LDFLAGS="-arch ${ARCH} -pipe -isysroot ${SDKROOT}"
export CFLAGS="-arch ${ARCH} -pipe -isysroot ${SDKROOT}"
if [[ -z "${XCODE}" ]]; then
echo "XCODE not available"
exit 1
elif [[ ${SDK} < 5.0.0 ]]; then
DEVROOT="${PLATFORMSROOT}/${PLATFORM}.platform/Developer/"
else
DEVROOT="${DEVELOPER}/Toolchains/XcodeDefault.xctoolchain"
CFLAGS+=" -miphoneos-version-min=5.0"
LDFLAGS+=" -miphoneos-version-min=5.0"
fi
export CFLAGS
export LDFLAGS
export CXXFLAGS=${CFLAGS}
export LDFLAGS="-arch ${ARCH} -pipe -isysroot ${SDKROOT}"
export PATH="${DEVROOT}/usr/bin:${OLDPATH}"
${SRCDIR}/configure --host=${ARCH}-apple-darwin --prefix=${ROOTDIR} \

View File

@ -24,6 +24,7 @@ ifeq ($(strip $(shell uname)), Darwin)
# cf., src/enc/yuv.[hc]
# Failure observed with: gcc 4.2.1 and 4.0.1.
EXTRA_FLAGS += -fno-common
EXTRA_FLAGS += -DHAVE_GLUT_GLUT_H
EXTRA_FLAGS += -I/opt/local/include
EXTRA_LIBS += -L/opt/local/lib
GL_LIBS = -framework GLUT -framework OpenGL
@ -140,17 +141,22 @@ EX_FORMAT_DEC_OBJS = \
EX_UTIL_OBJS = \
examples/example_util.o \
GIF2WEBP_UTIL_OBJS = \
examples/gif2webp_util.o \
MUX_OBJS = \
src/mux/muxedit.o \
src/mux/muxinternal.o \
src/mux/muxread.o \
UTILS_DEC_OBJS = \
src/utils/alpha_processing.o \
src/utils/bit_reader.o \
src/utils/color_cache.o \
src/utils/filters.o \
src/utils/huffman.o \
src/utils/quant_levels_dec.o \
src/utils/random.o \
src/utils/rescaler.o \
src/utils/thread.o \
src/utils/utils.o \
@ -175,6 +181,7 @@ HDRS_INSTALLED = \
src/webp/types.h \
HDRS = \
src/dec/alphai.h \
src/dec/decode_vp8.h \
src/dec/vp8i.h \
src/dec/vp8li.h \
@ -184,6 +191,7 @@ HDRS = \
src/dsp/yuv.h \
src/enc/cost.h \
src/enc/vp8enci.h \
src/utils/alpha_processing.h \
src/utils/bit_reader.h \
src/utils/bit_writer.h \
src/utils/color_cache.h \
@ -192,6 +200,7 @@ HDRS = \
src/utils/huffman_encode.h \
src/utils/quant_levels.h \
src/utils/quant_levels_dec.h \
src/utils/random.h \
src/utils/rescaler.h \
src/utils/thread.h \
src/webp/format_constants.h \
@ -205,6 +214,7 @@ OUTPUT = $(OUT_LIBS) $(OUT_EXAMPLES)
ifeq ($(MAKECMDGOALS),clean)
OUTPUT += $(EXTRA_EXAMPLES)
OUTPUT += src/demux/libwebpdemux.a src/mux/libwebpmux.a
OUTPUT += examples/libgif2webp_util.a
endif
ex: $(OUT_EXAMPLES)
@ -216,6 +226,7 @@ $(EX_FORMAT_DEC_OBJS): %.o: %.h
$(CC) $(CFLAGS) $(CPPFLAGS) -c $< -o $@
examples/libexample_util.a: $(EX_UTIL_OBJS)
examples/libgif2webp_util.a: $(GIF2WEBP_UTIL_OBJS)
src/libwebpdecoder.a: $(LIBWEBPDECODER_OBJS)
src/libwebp.a: $(LIBWEBP_OBJS)
src/mux/libwebpmux.a: $(LIBWEBPMUX_OBJS)
@ -234,11 +245,14 @@ examples/cwebp: src/libwebp.a
examples/cwebp: EXTRA_LIBS += $(CWEBP_LIBS)
examples/dwebp: examples/libexample_util.a src/libwebpdecoder.a
examples/dwebp: EXTRA_LIBS += $(DWEBP_LIBS)
examples/gif2webp: examples/libexample_util.a src/mux/libwebpmux.a src/libwebp.a
examples/gif2webp: examples/libexample_util.a examples/libgif2webp_util.a
examples/gif2webp: src/mux/libwebpmux.a src/libwebp.a
examples/gif2webp: EXTRA_LIBS += $(GIF_LIBS)
examples/gif2webp: EXTRA_FLAGS += -DWEBP_HAVE_GIF
examples/vwebp: examples/libexample_util.a src/demux/libwebpdemux.a
examples/vwebp: src/libwebp.a
examples/vwebp: EXTRA_LIBS += $(GL_LIBS)
examples/vwebp: EXTRA_FLAGS += -DWEBP_HAVE_GL
examples/webpmux: examples/libexample_util.a src/mux/libwebpmux.a
examples/webpmux: src/libwebpdecoder.a
@ -249,8 +263,8 @@ dist: DESTDIR := dist
dist: OUT_EXAMPLES += $(EXTRA_EXAMPLES)
dist: all
$(INSTALL) -m755 -d $(DESTDIR)/include/webp \
$(DESTDIR)/doc $(DESTDIR)/lib
$(INSTALL) -m755 -s $(OUT_EXAMPLES) $(DESTDIR)
$(DESTDIR)/bin $(DESTDIR)/doc $(DESTDIR)/lib
$(INSTALL) -m755 -s $(OUT_EXAMPLES) $(DESTDIR)/bin
$(INSTALL) -m644 $(HDRS_INSTALLED) $(DESTDIR)/include/webp
$(INSTALL) -m644 src/libwebp.a $(DESTDIR)/lib
$(INSTALL) -m644 src/demux/libwebpdemux.a $(DESTDIR)/lib

View File

@ -1,5 +1,5 @@
.\" Hey, EMACS: -*- nroff -*-
.TH CWEBP 1 "March 13, 2013"
.TH CWEBP 1 "December 12, 2013"
.SH NAME
cwebp \- compress an image file to a WebP file
.SH SYNOPSIS
@ -73,7 +73,7 @@ trade off between encoding speed and the compressed file size and quality.
Possible values range from 0 to 6. Default value is 4.
When higher values are used, the encoder will spend more time inspecting
additional encoding possibilities and decide on the quality gain.
Lower value can result is faster processing time at the expense of
Lower value can result in faster processing time at the expense of
larger file size and lower compression quality.
.TP
.B \-jpeg_like
@ -153,6 +153,11 @@ close as possible to this target.
Set a maximum number of passes to use during the dichotomy used by
options \fB\-size\fP or \fB\-psnr\fP. Maximum value is 10.
.TP
.BI \-resize " width height
Resize the source to a rectangle with size \fBwidth\fP x \fBheight\fP.
If either (but not both) of the \fBwidth\fP or \fBheight\fP parameters is 0,
the value will be calculated preserving the aspect-ratio.
.TP
.BI \-crop " x_position y_position width height
Crop the source to a rectangle with top-left corner at coordinates
(\fBx_position\fP, \fBy_position\fP) and size \fBwidth\fP x \fBheight\fP.
@ -168,8 +173,9 @@ Output additional ASCII-map of encoding information. Possible map values
range from 1 to 6. This is only meant to help debugging.
.TP
.BI \-pre " int
Specify a pre-processing filter. This option is a placeholder
and has currently no effect.
Specify some pre-processing steps. Using a value of '2' will trigger
quality-dependent pseudo-random dithering during RGBA->YUVA conversion
(lossy compression only).
.TP
.BI \-alpha_filter " string
Specify the predictive filtering method for the alpha plane. One of 'none',
@ -187,6 +193,11 @@ no compression, 1 uses WebP lossless format for compression. The default is 1.
Modify unseen RGB values under fully transparent area, to help compressibility.
The default is off.
.TP
.BI \-blend_alpha " int
This option blends the alpha channel (if present) with the source using the
background color specified in hexadecimal as 0xrrggbb. The alpha channel is
afterward reset to the opaque value 255.
.TP
.B \-noalpha
Using this option will discard the alpha channel.
.TP
@ -244,6 +255,8 @@ cwebp \-q 50 -lossless picture.png \-o picture_lossless.webp
cwebp \-q 70 picture_with_alpha.png \-o picture_with_alpha.webp
.br
cwebp \-sns 70 \-f 50 \-size 60000 picture.png \-o picture.webp
.br
cwebp \-o picture.webp \-\- \-\-\-picture.png
.SH AUTHORS
\fBcwebp\fP was written by the WebP team.
@ -255,7 +268,7 @@ for the Debian project (and may be used by others).
.SH SEE ALSO
.BR dwebp (1),
.BR gif2webp (1).
.BR gif2webp (1)
.br
Please refer to http://developers.google.com/speed/webp/ for additional
information.

View File

@ -1,5 +1,5 @@
.\" Hey, EMACS: -*- nroff -*-
.TH DWEBP 1 "February 01, 2013"
.TH DWEBP 1 "December 12, 2013"
.SH NAME
dwebp \- decompress a WebP file to an image file
.SH SYNOPSIS
@ -23,6 +23,13 @@ Print the version number (as major.minor.revision) and exit.
.TP
.BI \-o " string
Specify the name of the output file (as PNG format by default).
Using "-" as output name will direct output to 'stdout'.
.TP
.B \-bmp
Change the output format to uncompressed BMP.
.TP
.B \-tiff
Change the output format to uncompressed TIFF.
.TP
.B \-pam
Change the output format to PAM (retains alpha).
@ -32,7 +39,7 @@ Change the output format to PPM (discards alpha).
.TP
.B \-pgm
Change the output format to PGM. The output consists of luma/chroma
samples instead of RGB, using the ICM4 layout. This option is mainly
samples instead of RGB, using the IMC4 layout. This option is mainly
for verification and debugging purposes.
.TP
.B \-yuv
@ -48,7 +55,15 @@ edges (especially the red ones), but should be faster.
.B \-nofilter
Don't use the in-loop filtering process even if it is required by
the bitstream. This may produce visible blocks on the non-compliant output,
but will make the decoding faster.
but it will make the decoding faster.
.TP
.BI \-dither " strength
Specify a dithering \fBstrength\fP between 0 and 100. Dithering is a
post-processing effect applied to chroma components in lossy compression.
It helps by smoothing gradients and avoiding banding artifacts.
.TP
.B \-nodither
Disable all dithering (default).
.TP
.B \-mt
Use multi-threading for decoding, if possible.
@ -84,6 +99,8 @@ http://www.webmproject.org/code/contribute/submitting-patches/
dwebp picture.webp \-o output.png
.br
dwebp picture.webp \-ppm \-o output.ppm
.br
dwebp \-o output.ppm \-\- \-\-\-picture.webp
.SH AUTHORS
\fBdwebp\fP was written by the WebP team.
@ -95,8 +112,8 @@ for the Debian project (and may be used by others).
.SH SEE ALSO
.BR cwebp (1),
.BR webpmux (1),
.BR gif2webp (1).
.BR gif2webp (1),
.BR webpmux (1)
.br
Please refer to http://developers.google.com/speed/webp/ for additional
information.

View File

@ -1,5 +1,5 @@
.\" Hey, EMACS: -*- nroff -*-
.TH GIF2WEBP 1 "February 01, 2013"
.TH GIF2WEBP 1 "December 17, 2013"
.SH NAME
gif2webp \- Convert a GIF image to WebP
.SH SYNOPSIS
@ -28,6 +28,10 @@ Print the version number (as major.minor.revision) and exit.
.B \-lossy
Encode the image using lossy compression.
.TP
.B \-mixed
Mixed compression mode: optimize compression of the image by picking either
lossy or lossless compression for each frame heuristically.
.TP
.BI \-q " float
Specify the compression factor for RGB channels between 0 and 100. The default
is 75.
@ -49,6 +53,43 @@ additional encoding possibilities and decide on the quality gain.
Lower value can result in faster processing time at the expense of
larger file size and lower compression quality.
.TP
.BI \-kmin " int
.TP
.BI \-kmax " int
Specify the minimum and maximum distance between consecutive key frames
(independently decodable frames) in the output animation. The tool will insert
some key frames into the output animation as needed so that this criterion is
satisfied.
.br
A 'kmin' value of 0 will turn off insertion of key frames.
Typical values are in the range 3 to 30. Default values are kmin = 9,
kmax = 17 for lossless compression and kmin = 3, kmax = 5 for lossy compression.
.br
These two options are relevant only for animated images with a large number of
frames (>50).
.br
When lower values are used, more frames will be converted to key frames. This
may lead to a smaller number of frames required to decode a given frame on
average, thereby improving decoding performance, but it may also lead to
slightly bigger file sizes.
Higher values may lead to worse decoding performance, but smaller file sizes.
.br
Some restrictions:
.br
(i) kmin < kmax,
.br
(ii) kmin >= kmax / 2 + 1 and
.br
(iii) kmax - kmin <= 30.
.br
If any of these restrictions are not met, they will be enforced automatically.
.TP
.BI \-metadata " string
A comma separated list of metadata to copy from the input to the output if
present.
Valid values: \fBall\fP, \fBnone\fP, \fBicc\fP, \fBxmp\fP.
The default is \fBxmp\fP.
.TP
.BI \-f " int
For lossy encoding only (specified by the \-lossy option). Specify the strength
of the deblocking filter, between 0 (no filtering) and 100 (maximum filtering).
@ -57,6 +98,10 @@ strength of the filtering process applied after decoding the picture. The higher
the value the smoother the picture will appear. Typical values are usually in
the range of 20 to 50.
.TP
.B \-mt
Use multi-threading for encoding, if possible. This option is only effective
when using lossy compression.
.TP
.B \-v
Print extra information.
.TP
@ -78,6 +123,8 @@ gif2webp \-q 70 picture.gif \-o picture.webp
gif2webp \-lossy \-m 3 picture.gif \-o picture_lossy.webp
.br
gif2webp \-lossy \-f 50 picture.gif \-o picture.webp
.br
gif2webp \-q 70 \-o picture.webp \-\- \-\-\-picture.gif
.SH AUTHORS
\fBgif2webp\fP was written by the WebP team.
@ -88,9 +135,9 @@ This manual page was written by Urvang Joshi <urvang@google.com>, for the
Debian project (and may be used by others).
.SH SEE ALSO
.BR dwebp (1),
.BR cwebp (1),
.BR webpmux (1).
.BR dwebp (1),
.BR webpmux (1)
.br
Please refer to http://developers.google.com/speed/webp/ for additional
information.

View File

@ -1,5 +1,5 @@
.\" Hey, EMACS: -*- nroff -*-
.TH WEBPMUX 1 "March 16, 2013"
.TH WEBPMUX 1 "December 17, 2013"
.SH NAME
webpmux \- command line tool to create WebP Mux/container file.
.SH SYNOPSIS
@ -92,12 +92,14 @@ Strip XMP metadata.
.SS FRAME_OPTIONS (\-frame)
.TP
.I file_i +di[+xi+yi[+mi]]
.I file_i +di[+xi+yi[+mi[bi]]]
Where: 'file_i' is the i'th frame (WebP format), 'xi','yi' specify the image
offset for this frame, 'di' is the pause duration before next frame and 'mi' is
the dispose method for this frame (0 for NONE or 1 for BACKGROUND).
'mi' can be omitted and will default to 0 (NONE).
Additionally, if 'mi' is ommitted then'xi' and 'yi' can be omitted and will
offset for this frame, 'di' is the pause duration before next frame, 'mi' is
the dispose method for this frame (0 for NONE or 1 for BACKGROUND) and 'bi' is
the blending method for this frame (+b for BLEND or -b for NO_BLEND).
Argument 'bi' can be omitted and will default to +b (BLEND).
Also, 'mi' can be omitted if 'bi' is omitted and will default to 0 (NONE).
Finally, if 'mi' and 'bi' are omitted then 'xi' and 'yi' can be omitted and will
default to +0+0.
.TP
.BI \-loop " n
@ -149,13 +151,22 @@ webpmux \-get exif exif_container.webp \-o image_metadata.exif
.br
webpmux \-strip exif exif_container.webp \-o without_exif.webp
.br
webpmux \-frame anim_1.webp +100 \-frame anim_2.webp +100+50+50 \-loop 10
webpmux \-frame anim_1.webp +100 \-frame anim_2.webp +100+50+50
.br
.RS 8
\-bgcolor 255,255,255,255 \-o anim_container.webp
\-frame anim_2.webp +100+50+50+1+b \-loop 10 \-bgcolor 255,255,255,255
.br
.RS 8
\-o anim_container.webp
.RE
.br
webpmux \-get frame 2 anim_container.webp \-o frame_2.webp
.br
webpmux \-set icc image_profile.icc \-o icc_container.webp \-\- \-\-\-in.webp
.br
webpmux \-get icc \-o image_profile.icc \-\- \-\-\-icc_container.webp
.br
webpmux \-strip icc \-o without_icc.webp \-\- \-\-\-icc_container.webp
.SH AUTHORS
\fBwebpmux\fP is written by the WebP team.
@ -166,9 +177,9 @@ This manual page was written by Vikas Arora <vikaas.arora@gmail.com>,
for the Debian project (and may be used by others).
.SH SEE ALSO
.BR dwebp (1),
.BR cwebp (1),
.BR gif2webp (1).
.BR dwebp (1),
.BR gif2webp (1)
.br
Please refer to http://developers.google.com/speed/webp/ for additional
information.

View File

@ -36,7 +36,7 @@ libwebp_la_LIBADD += utils/libwebputils.la
# other than the ones listed on the command line, i.e., after linking, it will
# not have unresolved symbols. Some platforms (Windows among them) require all
# symbols in shared libraries to be resolved at library creation.
libwebp_la_LDFLAGS = -no-undefined -version-info 4:2:0
libwebp_la_LDFLAGS = -no-undefined -version-info 5:0:0
libwebpincludedir = $(includedir)/webp
pkgconfig_DATA = libwebp.pc
@ -48,7 +48,7 @@ if BUILD_LIBWEBPDECODER
libwebpdecoder_la_LIBADD += dsp/libwebpdspdecode.la
libwebpdecoder_la_LIBADD += utils/libwebputilsdecode.la
libwebpdecoder_la_LDFLAGS = -no-undefined -version-info 0:0:0
libwebpdecoder_la_LDFLAGS = -no-undefined -version-info 1:0:0
pkgconfig_DATA += libwebpdecoder.pc
endif

View File

@ -3,6 +3,7 @@ noinst_LTLIBRARIES = libwebpdecode.la
libwebpdecode_la_SOURCES =
libwebpdecode_la_SOURCES += alpha.c
libwebpdecode_la_SOURCES += alphai.h
libwebpdecode_la_SOURCES += buffer.c
libwebpdecode_la_SOURCES += decode_vp8.h
libwebpdecode_la_SOURCES += frame.c

View File

@ -1,8 +1,10 @@
// Copyright 2011 Google Inc. All Rights Reserved.
//
// This code is licensed under the same terms as WebM:
// Software License Agreement: http://www.webmproject.org/license/software/
// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// Alpha-plane decompression.
@ -10,120 +12,150 @@
// Author: Skal (pascal.massimino@gmail.com)
#include <stdlib.h>
#include "./alphai.h"
#include "./vp8i.h"
#include "./vp8li.h"
#include "../utils/filters.h"
#include "../utils/quant_levels_dec.h"
#include "../webp/format_constants.h"
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
//------------------------------------------------------------------------------
// ALPHDecoder object.
// TODO(skal): move to dsp/ ?
static void CopyPlane(const uint8_t* src, int src_stride,
uint8_t* dst, int dst_stride, int width, int height) {
while (height-- > 0) {
memcpy(dst, src, width);
src += src_stride;
dst += dst_stride;
ALPHDecoder* ALPHNew(void) {
ALPHDecoder* const dec = (ALPHDecoder*)calloc(1, sizeof(*dec));
return dec;
}
void ALPHDelete(ALPHDecoder* const dec) {
if (dec != NULL) {
VP8LDelete(dec->vp8l_dec_);
dec->vp8l_dec_ = NULL;
free(dec);
}
}
//------------------------------------------------------------------------------
// Decodes the compressed data 'data' of size 'data_size' into the 'output'.
// The 'output' buffer should be pre-allocated and must be of the same
// dimension 'height'x'stride', as that of the image.
//
// Returns 1 on successfully decoding the compressed alpha and
// 0 if either:
// error in bit-stream header (invalid compression mode or filter), or
// error returned by appropriate compression method.
// Decoding.
static int DecodeAlpha(const uint8_t* data, size_t data_size,
int width, int height, int stride, uint8_t* output) {
uint8_t* decoded_data = NULL;
const size_t decoded_size = height * width;
WEBP_FILTER_TYPE filter;
int pre_processing;
int rsrv;
// Initialize alpha decoding by parsing the alpha header and decoding the image
// header for alpha data stored using lossless compression.
// Returns false in case of error in alpha header (data too short, invalid
// compression method or filter, error in lossless header data etc).
static int ALPHInit(ALPHDecoder* const dec, const uint8_t* data,
size_t data_size, int width, int height, uint8_t* output) {
int ok = 0;
int method;
const uint8_t* const alpha_data = data + ALPHA_HEADER_LEN;
const size_t alpha_data_size = data_size - ALPHA_HEADER_LEN;
int rsrv;
assert(width > 0 && height > 0 && stride >= width);
assert(width > 0 && height > 0);
assert(data != NULL && output != NULL);
dec->width_ = width;
dec->height_ = height;
if (data_size <= ALPHA_HEADER_LEN) {
return 0;
}
method = (data[0] >> 0) & 0x03;
filter = (data[0] >> 2) & 0x03;
pre_processing = (data[0] >> 4) & 0x03;
dec->method_ = (data[0] >> 0) & 0x03;
dec->filter_ = (data[0] >> 2) & 0x03;
dec->pre_processing_ = (data[0] >> 4) & 0x03;
rsrv = (data[0] >> 6) & 0x03;
if (method < ALPHA_NO_COMPRESSION ||
method > ALPHA_LOSSLESS_COMPRESSION ||
filter >= WEBP_FILTER_LAST ||
pre_processing > ALPHA_PREPROCESSED_LEVELS ||
if (dec->method_ < ALPHA_NO_COMPRESSION ||
dec->method_ > ALPHA_LOSSLESS_COMPRESSION ||
dec->filter_ >= WEBP_FILTER_LAST ||
dec->pre_processing_ > ALPHA_PREPROCESSED_LEVELS ||
rsrv != 0) {
return 0;
}
if (method == ALPHA_NO_COMPRESSION) {
ok = (data_size >= decoded_size);
decoded_data = (uint8_t*)data + ALPHA_HEADER_LEN;
if (dec->method_ == ALPHA_NO_COMPRESSION) {
const size_t alpha_decoded_size = dec->width_ * dec->height_;
ok = (alpha_data_size >= alpha_decoded_size);
} else {
decoded_data = (uint8_t*)malloc(decoded_size);
if (decoded_data == NULL) return 0;
ok = VP8LDecodeAlphaImageStream(width, height,
data + ALPHA_HEADER_LEN,
data_size - ALPHA_HEADER_LEN,
decoded_data);
}
if (ok) {
WebPUnfilterFunc unfilter_func = WebPUnfilters[filter];
if (unfilter_func != NULL) {
// TODO(vikas): Implement on-the-fly decoding & filter mechanism to decode
// and apply filter per image-row.
unfilter_func(width, height, width, decoded_data);
}
// Construct raw_data (height x stride) from alpha data (height x width).
CopyPlane(decoded_data, width, output, stride, width, height);
if (pre_processing == ALPHA_PREPROCESSED_LEVELS) {
ok = DequantizeLevels(decoded_data, width, height);
}
}
if (method != ALPHA_NO_COMPRESSION) {
free(decoded_data);
assert(dec->method_ == ALPHA_LOSSLESS_COMPRESSION);
ok = VP8LDecodeAlphaHeader(dec, alpha_data, alpha_data_size, output);
}
return ok;
}
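// Worked example (illustrative, not part of the library): the one-byte ALPH
// header parsed above packs four 2-bit fields. Assuming the usual constants
// from webp/format_constants.h, a header byte of 0x11 (binary 00 01 00 01)
// yields:
//   method_         = (0x11 >> 0) & 0x03  -> 1  (ALPHA_LOSSLESS_COMPRESSION)
//   filter_         = (0x11 >> 2) & 0x03  -> 0  (no filtering)
//   pre_processing_ = (0x11 >> 4) & 0x03  -> 1  (ALPHA_PREPROCESSED_LEVELS)
//   rsrv            = (0x11 >> 6) & 0x03  -> 0  (must be 0 or ALPHInit fails)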
// Decodes, unfilters and dequantizes *at least* 'num_rows' rows of alpha
// starting from row number 'row'. It assumes that rows up to (row - 1) have
// already been decoded.
// Returns false in case of bitstream error.
static int ALPHDecode(VP8Decoder* const dec, int row, int num_rows) {
ALPHDecoder* const alph_dec = dec->alph_dec_;
const int width = alph_dec->width_;
const int height = alph_dec->height_;
WebPUnfilterFunc unfilter_func = WebPUnfilters[alph_dec->filter_];
uint8_t* const output = dec->alpha_plane_;
if (alph_dec->method_ == ALPHA_NO_COMPRESSION) {
const size_t offset = row * width;
const size_t num_pixels = num_rows * width;
assert(dec->alpha_data_size_ >= ALPHA_HEADER_LEN + offset + num_pixels);
memcpy(dec->alpha_plane_ + offset,
dec->alpha_data_ + ALPHA_HEADER_LEN + offset, num_pixels);
} else { // alph_dec->method_ == ALPHA_LOSSLESS_COMPRESSION
assert(alph_dec->vp8l_dec_ != NULL);
if (!VP8LDecodeAlphaImageStream(alph_dec, row + num_rows)) {
return 0;
}
}
if (unfilter_func != NULL) {
unfilter_func(width, height, width, row, num_rows, output);
}
if (alph_dec->pre_processing_ == ALPHA_PREPROCESSED_LEVELS) {
if (!DequantizeLevels(output, width, height, row, num_rows)) {
return 0;
}
}
if (row + num_rows == dec->pic_hdr_.height_) {
dec->is_alpha_decoded_ = 1;
}
return 1;
}
//------------------------------------------------------------------------------
// Main entry point.
const uint8_t* VP8DecompressAlphaRows(VP8Decoder* const dec,
int row, int num_rows) {
const int stride = dec->pic_hdr_.width_;
const int width = dec->pic_hdr_.width_;
const int height = dec->pic_hdr_.height_;
if (row < 0 || num_rows < 0 || row + num_rows > dec->pic_hdr_.height_) {
if (row < 0 || num_rows <= 0 || row + num_rows > height) {
return NULL; // sanity check.
}
if (row == 0) {
// Decode everything during the first call.
if (!DecodeAlpha(dec->alpha_data_, (size_t)dec->alpha_data_size_,
dec->pic_hdr_.width_, dec->pic_hdr_.height_, stride,
dec->alpha_plane_)) {
return NULL; // Error.
// Initialize decoding.
assert(dec->alpha_plane_ != NULL);
dec->alph_dec_ = ALPHNew();
if (dec->alph_dec_ == NULL) return NULL;
if (!ALPHInit(dec->alph_dec_, dec->alpha_data_, dec->alpha_data_size_,
width, height, dec->alpha_plane_)) {
ALPHDelete(dec->alph_dec_);
dec->alph_dec_ = NULL;
return NULL;
}
}
if (!dec->is_alpha_decoded_) {
int ok = 0;
assert(dec->alph_dec_ != NULL);
ok = ALPHDecode(dec, row, num_rows);
if (!ok || dec->is_alpha_decoded_) {
ALPHDelete(dec->alph_dec_);
dec->alph_dec_ = NULL;
}
if (!ok) return NULL; // Error.
}
// Return a pointer to the current decoded row.
return dec->alpha_plane_ + row * stride;
return dec->alpha_plane_ + row * width;
}
#if defined(__cplusplus) || defined(c_plusplus)
} // extern "C"
#endif

src/dec/alphai.h Normal file
View File

@ -0,0 +1,55 @@
// Copyright 2013 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// Alpha decoder: internal header.
//
// Author: Urvang (urvang@google.com)
#ifndef WEBP_DEC_ALPHAI_H_
#define WEBP_DEC_ALPHAI_H_
#include "./webpi.h"
#include "../utils/filters.h"
#ifdef __cplusplus
extern "C" {
#endif
struct VP8LDecoder; // Defined in dec/vp8li.h.
typedef struct ALPHDecoder ALPHDecoder;
struct ALPHDecoder {
int width_;
int height_;
int method_;
WEBP_FILTER_TYPE filter_;
int pre_processing_;
struct VP8LDecoder* vp8l_dec_;
VP8Io io_;
int use_8b_decode; // Although alpha channel requires only 1 byte per
// pixel, sometimes VP8LDecoder may need to allocate
// 4 bytes per pixel internally during decode.
};
//------------------------------------------------------------------------------
// internal functions. Not public.
// Allocates a new alpha decoder instance.
ALPHDecoder* ALPHNew(void);
// Clears and deallocates an alpha decoder instance.
void ALPHDelete(ALPHDecoder* const dec);
//------------------------------------------------------------------------------
#ifdef __cplusplus
} // extern "C"
#endif
#endif /* WEBP_DEC_ALPHAI_H_ */
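// Sketch (illustrative, not part of this header): how dec/alpha.c, shown
// above, drives an ALPHDecoder over the lifetime of one picture. ALPHInit()
// and ALPHDecode() are internal helpers of alpha.c, not exported here.
//   ALPHDecoder* const alph = ALPHNew();            // allocate
//   ALPHInit(alph, alpha_data, alpha_data_size,     // parse ALPH header and,
//            width, height, alpha_plane);           // if lossless, VP8L header
//   ALPHDecode(dec, row, num_rows);                 // repeat per row band
//   ALPHDelete(alph);                               // frees vp8l_dec_ too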

View File

@ -1,8 +1,10 @@
// Copyright 2011 Google Inc. All Rights Reserved.
//
// This code is licensed under the same terms as WebM:
// Software License Agreement: http://www.webmproject.org/license/software/
// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// Everything about WebPDecBuffer
@ -15,10 +17,6 @@
#include "./webpi.h"
#include "../utils/utils.h"
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
//------------------------------------------------------------------------------
// WebPDecBuffer
@ -210,6 +208,3 @@ void WebPGrabDecBuffer(WebPDecBuffer* const src, WebPDecBuffer* const dst) {
//------------------------------------------------------------------------------
#if defined(__cplusplus) || defined(c_plusplus)
} // extern "C"
#endif

View File

@ -1,8 +1,10 @@
// Copyright 2010 Google Inc. All Rights Reserved.
//
// This code is licensed under the same terms as WebM:
// Software License Agreement: http://www.webmproject.org/license/software/
// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// Low-level API for VP8 decoder
@ -14,7 +16,7 @@
#include "../webp/decode.h"
#if defined(__cplusplus) || defined(c_plusplus)
#ifdef __cplusplus
extern "C" {
#endif
@ -130,7 +132,8 @@ static WEBP_INLINE int VP8InitIo(VP8Io* const io) {
return VP8InitIoInternal(io, WEBP_DECODER_ABI_VERSION);
}
// Start decoding a new picture. Returns true if ok.
// Decode the VP8 frame header. Returns true if ok.
// Note: 'io->data' must be pointing to the start of the VP8 frame header.
int VP8GetHeaders(VP8Decoder* const dec, VP8Io* const io);
// Decode a picture. Will call VP8GetHeaders() if it wasn't done already.
@ -175,7 +178,7 @@ WEBP_EXTERN(int) VP8LGetInfo(
const uint8_t* data, size_t data_size, // data available so far
int* const width, int* const height, int* const has_alpha);
#if defined(__cplusplus) || defined(c_plusplus)
#ifdef __cplusplus
} // extern "C"
#endif

View File

@ -1,8 +1,10 @@
// Copyright 2010 Google Inc. All Rights Reserved.
//
// This code is licensed under the same terms as WebM:
// Software License Agreement: http://www.webmproject.org/license/software/
// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// Frame-reconstruction function. Memory allocation.
@ -13,12 +15,11 @@
#include "./vp8i.h"
#include "../utils/utils.h"
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
#define ALIGN_MASK (32 - 1)
static void ReconstructRow(const VP8Decoder* const dec,
const VP8ThreadContext* ctx); // TODO(skal): remove
//------------------------------------------------------------------------------
// Filtering
@ -29,25 +30,18 @@ extern "C" {
// U/V, so it's 8 samples total (because of the 2x upsampling).
static const uint8_t kFilterExtraRows[3] = { 0, 2, 8 };
static WEBP_INLINE int hev_thresh_from_level(int level, int keyframe) {
if (keyframe) {
return (level >= 40) ? 2 : (level >= 15) ? 1 : 0;
} else {
return (level >= 40) ? 3 : (level >= 20) ? 2 : (level >= 15) ? 1 : 0;
}
}
static void DoFilter(const VP8Decoder* const dec, int mb_x, int mb_y) {
const VP8ThreadContext* const ctx = &dec->thread_ctx_;
const int cache_id = ctx->id_;
const int y_bps = dec->cache_y_stride_;
VP8FInfo* const f_info = ctx->f_info_ + mb_x;
uint8_t* const y_dst = dec->cache_y_ + ctx->id_ * 16 * y_bps + mb_x * 16;
const int level = f_info->f_level_;
const VP8FInfo* const f_info = ctx->f_info_ + mb_x;
uint8_t* const y_dst = dec->cache_y_ + cache_id * 16 * y_bps + mb_x * 16;
const int ilevel = f_info->f_ilevel_;
const int limit = 2 * level + ilevel;
if (level == 0) {
const int limit = f_info->f_limit_;
if (limit == 0) {
return;
}
assert(limit >= 3);
if (dec->filter_type_ == 1) { // simple
if (mb_x > 0) {
VP8SimpleHFilter16(y_dst, y_bps, limit + 4);
@ -63,10 +57,9 @@ static void DoFilter(const VP8Decoder* const dec, int mb_x, int mb_y) {
}
} else { // complex
const int uv_bps = dec->cache_uv_stride_;
uint8_t* const u_dst = dec->cache_u_ + ctx->id_ * 8 * uv_bps + mb_x * 8;
uint8_t* const v_dst = dec->cache_v_ + ctx->id_ * 8 * uv_bps + mb_x * 8;
const int hev_thresh =
hev_thresh_from_level(level, dec->frm_hdr_.key_frame_);
uint8_t* const u_dst = dec->cache_u_ + cache_id * 8 * uv_bps + mb_x * 8;
uint8_t* const v_dst = dec->cache_v_ + cache_id * 8 * uv_bps + mb_x * 8;
const int hev_thresh = f_info->hev_thresh_;
if (mb_x > 0) {
VP8HFilter16(y_dst, y_bps, limit + 4, ilevel, hev_thresh);
VP8HFilter8(u_dst, v_dst, uv_bps, limit + 4, ilevel, hev_thresh);
@ -126,25 +119,107 @@ static void PrecomputeFilterStrengths(VP8Decoder* const dec) {
}
}
level = (level < 0) ? 0 : (level > 63) ? 63 : level;
info->f_level_ = level;
if (hdr->sharpness_ > 0) {
if (hdr->sharpness_ > 4) {
level >>= 2;
} else {
level >>= 1;
}
if (level > 9 - hdr->sharpness_) {
level = 9 - hdr->sharpness_;
if (level > 0) {
int ilevel = level;
if (hdr->sharpness_ > 0) {
if (hdr->sharpness_ > 4) {
ilevel >>= 2;
} else {
ilevel >>= 1;
}
if (ilevel > 9 - hdr->sharpness_) {
ilevel = 9 - hdr->sharpness_;
}
}
if (ilevel < 1) ilevel = 1;
info->f_ilevel_ = ilevel;
info->f_limit_ = 2 * level + ilevel;
info->hev_thresh_ = (level >= 40) ? 2 : (level >= 15) ? 1 : 0;
} else {
info->f_limit_ = 0; // no filtering
}
info->f_ilevel_ = (level < 1) ? 1 : level;
info->f_inner_ = 0;
info->f_inner_ = i4x4;
}
}
}
}
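// Worked example (illustrative): with the precomputation above, a filter
// level of 20 gives:
//   sharpness_ == 0: ilevel = 20, f_limit_ = 2*20 + 20 = 60, hev_thresh_ = 1
//   sharpness_ == 5: ilevel = 20 >> 2 = 5, capped at 9 - 5 = 4,
//                    f_limit_ = 2*20 + 4 = 44, hev_thresh_ = 1
// A level of 0 simply leaves f_limit_ == 0, which DoFilter() now uses as the
// "skip filtering" signal instead of re-checking the level.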
//------------------------------------------------------------------------------
// Dithering
#define DITHER_AMP_TAB_SIZE 12
static const int kQuantToDitherAmp[DITHER_AMP_TAB_SIZE] = {
// roughly, it's dqm->uv_mat_[1]
8, 7, 6, 4, 4, 2, 2, 2, 1, 1, 1, 1
};
void VP8InitDithering(const WebPDecoderOptions* const options,
VP8Decoder* const dec) {
assert(dec != NULL);
if (options != NULL) {
const int d = options->dithering_strength;
const int max_amp = (1 << VP8_RANDOM_DITHER_FIX) - 1;
const int f = (d < 0) ? 0 : (d > 100) ? max_amp : (d * max_amp / 100);
if (f > 0) {
int s;
int all_amp = 0;
for (s = 0; s < NUM_MB_SEGMENTS; ++s) {
VP8QuantMatrix* const dqm = &dec->dqm_[s];
if (dqm->uv_quant_ < DITHER_AMP_TAB_SIZE) {
// TODO(skal): should we specially dither more for uv_quant_ < 0?
const int idx = (dqm->uv_quant_ < 0) ? 0 : dqm->uv_quant_;
dqm->dither_ = (f * kQuantToDitherAmp[idx]) >> 3;
}
all_amp |= dqm->dither_;
}
if (all_amp != 0) {
VP8InitRandom(&dec->dithering_rg_, 1.0f);
dec->dither_ = 1;
}
}
}
}
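// Worked example (illustrative, assuming VP8_RANDOM_DITHER_FIX == 8, so
// max_amp == 255): for options->dithering_strength d,
//   d = 0   -> f = 0    (all amplitudes stay 0: dithering disabled)
//   d = 50  -> f = 127  (per the Dither8x8 comment below, final pixel deltas
//                        end up roughly in [-2, 2])
//   d = 100 -> f = 255  (deltas roughly in [-4, 4]); values above 100 clamp.
// Each segment's amplitude is then (f * kQuantToDitherAmp[uv_quant_]) >> 3,
// and dec->dither_ is set only if at least one segment ends up non-zero.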
// minimal amp that will provide a non-zero dithering effect
#define MIN_DITHER_AMP 4
#define DITHER_DESCALE 4
#define DITHER_DESCALE_ROUNDER (1 << (DITHER_DESCALE - 1))
#define DITHER_AMP_BITS 8
#define DITHER_AMP_CENTER (1 << DITHER_AMP_BITS)
static void Dither8x8(VP8Random* const rg, uint8_t* dst, int bps, int amp) {
int i, j;
for (j = 0; j < 8; ++j) {
for (i = 0; i < 8; ++i) {
// TODO: could be made faster with SSE2
const int bits =
VP8RandomBits2(rg, DITHER_AMP_BITS + 1, amp) - DITHER_AMP_CENTER;
// Convert to range: [-2,2] for dither=50, [-4,4] for dither=100
const int delta = (bits + DITHER_DESCALE_ROUNDER) >> DITHER_DESCALE;
const int v = (int)dst[i] + delta;
dst[i] = (v < 0) ? 0 : (v > 255) ? 255u : (uint8_t)v;
}
dst += bps;
}
}
static void DitherRow(VP8Decoder* const dec) {
int mb_x;
assert(dec->dither_);
for (mb_x = dec->tl_mb_x_; mb_x < dec->br_mb_x_; ++mb_x) {
const VP8ThreadContext* const ctx = &dec->thread_ctx_;
const VP8MBData* const data = ctx->mb_data_ + mb_x;
const int cache_id = ctx->id_;
const int uv_bps = dec->cache_uv_stride_;
if (data->dither_ >= MIN_DITHER_AMP) {
uint8_t* const u_dst = dec->cache_u_ + cache_id * 8 * uv_bps + mb_x * 8;
uint8_t* const v_dst = dec->cache_v_ + cache_id * 8 * uv_bps + mb_x * 8;
Dither8x8(&dec->dithering_rg_, u_dst, uv_bps, data->dither_);
Dither8x8(&dec->dithering_rg_, v_dst, uv_bps, data->dither_);
}
}
}
//------------------------------------------------------------------------------
// This function is called after a row of macroblocks is finished decoding.
// It also takes into account the following restrictions:
@ -162,25 +237,35 @@ static void PrecomputeFilterStrengths(VP8Decoder* const dec) {
static int FinishRow(VP8Decoder* const dec, VP8Io* const io) {
int ok = 1;
const VP8ThreadContext* const ctx = &dec->thread_ctx_;
const int cache_id = ctx->id_;
const int extra_y_rows = kFilterExtraRows[dec->filter_type_];
const int ysize = extra_y_rows * dec->cache_y_stride_;
const int uvsize = (extra_y_rows / 2) * dec->cache_uv_stride_;
const int y_offset = ctx->id_ * 16 * dec->cache_y_stride_;
const int uv_offset = ctx->id_ * 8 * dec->cache_uv_stride_;
const int y_offset = cache_id * 16 * dec->cache_y_stride_;
const int uv_offset = cache_id * 8 * dec->cache_uv_stride_;
uint8_t* const ydst = dec->cache_y_ - ysize + y_offset;
uint8_t* const udst = dec->cache_u_ - uvsize + uv_offset;
uint8_t* const vdst = dec->cache_v_ - uvsize + uv_offset;
const int first_row = (ctx->mb_y_ == 0);
const int last_row = (ctx->mb_y_ >= dec->br_mb_y_ - 1);
int y_start = MACROBLOCK_VPOS(ctx->mb_y_);
int y_end = MACROBLOCK_VPOS(ctx->mb_y_ + 1);
const int mb_y = ctx->mb_y_;
const int is_first_row = (mb_y == 0);
const int is_last_row = (mb_y >= dec->br_mb_y_ - 1);
if (dec->mt_method_ == 2) {
ReconstructRow(dec, ctx);
}
if (ctx->filter_row_) {
FilterRow(dec);
}
if (io->put) {
if (!first_row) {
if (dec->dither_) {
DitherRow(dec);
}
if (io->put != NULL) {
int y_start = MACROBLOCK_VPOS(mb_y);
int y_end = MACROBLOCK_VPOS(mb_y + 1);
if (!is_first_row) {
y_start -= extra_y_rows;
io->y = ydst;
io->u = udst;
@ -191,7 +276,7 @@ static int FinishRow(VP8Decoder* const dec, VP8Io* const io) {
io->v = dec->cache_v_ + uv_offset;
}
if (!last_row) {
if (!is_last_row) {
y_end -= extra_y_rows;
}
if (y_end > io->crop_bottom) {
@ -199,11 +284,8 @@ static int FinishRow(VP8Decoder* const dec, VP8Io* const io) {
}
io->a = NULL;
if (dec->alpha_data_ != NULL && y_start < y_end) {
// TODO(skal): several things to correct here:
// * testing presence of alpha with dec->alpha_data_ is not a good idea
// * we're actually decompressing the full plane only once. It should be
// more obvious from signature.
// * we could free alpha_data_ right after this call, but we don't own.
// TODO(skal): testing presence of alpha with dec->alpha_data_ is not a
// good idea.
io->a = VP8DecompressAlphaRows(dec, y_start, y_end - y_start);
if (io->a == NULL) {
return VP8SetError(dec, VP8_STATUS_BITSTREAM_ERROR,
@ -235,8 +317,8 @@ static int FinishRow(VP8Decoder* const dec, VP8Io* const io) {
}
}
// rotate top samples if needed
if (ctx->id_ + 1 == dec->num_caches_) {
if (!last_row) {
if (cache_id + 1 == dec->num_caches_) {
if (!is_last_row) {
memcpy(dec->cache_y_ - ysize, ydst + 16 * dec->cache_y_stride_, ysize);
memcpy(dec->cache_u_ - uvsize, udst + 8 * dec->cache_uv_stride_, uvsize);
memcpy(dec->cache_v_ - uvsize, vdst + 8 * dec->cache_uv_stride_, uvsize);
@ -253,10 +335,14 @@ static int FinishRow(VP8Decoder* const dec, VP8Io* const io) {
int VP8ProcessRow(VP8Decoder* const dec, VP8Io* const io) {
int ok = 1;
VP8ThreadContext* const ctx = &dec->thread_ctx_;
if (!dec->use_threads_) {
const int filter_row =
(dec->filter_type_ > 0) &&
(dec->mb_y_ >= dec->tl_mb_y_) && (dec->mb_y_ <= dec->br_mb_y_);
if (dec->mt_method_ == 0) {
// ctx->id_ and ctx->f_info_ are already set
ctx->mb_y_ = dec->mb_y_;
ctx->filter_row_ = dec->filter_row_;
ctx->filter_row_ = filter_row;
ReconstructRow(dec, ctx);
ok = FinishRow(dec, io);
} else {
WebPWorker* const worker = &dec->worker_;
@ -267,13 +353,21 @@ int VP8ProcessRow(VP8Decoder* const dec, VP8Io* const io) {
ctx->io_ = *io;
ctx->id_ = dec->cache_id_;
ctx->mb_y_ = dec->mb_y_;
ctx->filter_row_ = dec->filter_row_;
if (ctx->filter_row_) { // just swap filter info
ctx->filter_row_ = filter_row;
if (dec->mt_method_ == 2) { // swap macroblock data
VP8MBData* const tmp = ctx->mb_data_;
ctx->mb_data_ = dec->mb_data_;
dec->mb_data_ = tmp;
} else {
// perform reconstruction directly in main thread
ReconstructRow(dec, ctx);
}
if (filter_row) { // swap filter info
VP8FInfo* const tmp = ctx->f_info_;
ctx->f_info_ = dec->f_info_;
dec->f_info_ = tmp;
}
WebPWorkerLaunch(worker);
WebPWorkerLaunch(worker); // (reconstruct)+filter in parallel
if (++dec->cache_id_ == dec->num_caches_) {
dec->cache_id_ = 0;
}
@ -287,8 +381,8 @@ int VP8ProcessRow(VP8Decoder* const dec, VP8Io* const io) {
VP8StatusCode VP8EnterCritical(VP8Decoder* const dec, VP8Io* const io) {
// Call setup() first. This may trigger additional decoding features on 'io'.
// Note: Afterward, we must call teardown() not matter what.
if (io->setup && !io->setup(io)) {
// Note: Afterward, we must call teardown() no matter what.
if (io->setup != NULL && !io->setup(io)) {
VP8SetError(dec, VP8_STATUS_USER_ABORT, "Frame setup failed");
return dec->status_;
}
@ -301,7 +395,7 @@ VP8StatusCode VP8EnterCritical(VP8Decoder* const dec, VP8Io* const io) {
// Define the area where we can skip in-loop filtering, in case of cropping.
//
// 'Simple' filter reads two luma samples outside of the macroblock and
// 'Simple' filter reads two luma samples outside of the macroblock
// and filters one. It doesn't filter the chroma samples. Hence, we can
// avoid doing the in-loop filtering before crop_top/crop_left position.
// For the 'Complex' filter, 3 samples are read and up to 3 are filtered.
@ -342,11 +436,11 @@ VP8StatusCode VP8EnterCritical(VP8Decoder* const dec, VP8Io* const io) {
int VP8ExitCritical(VP8Decoder* const dec, VP8Io* const io) {
int ok = 1;
if (dec->use_threads_) {
if (dec->mt_method_ > 0) {
ok = WebPWorkerSync(&dec->worker_);
}
if (io->teardown) {
if (io->teardown != NULL) {
io->teardown(io);
}
return ok;
@ -382,7 +476,7 @@ int VP8ExitCritical(VP8Decoder* const dec, VP8Io* const io) {
// Initialize multi/single-thread worker
static int InitThreadContext(VP8Decoder* const dec) {
dec->cache_id_ = 0;
if (dec->use_threads_) {
if (dec->mt_method_ > 0) {
WebPWorker* const worker = &dec->worker_;
if (!WebPWorkerReset(worker)) {
return VP8SetError(dec, VP8_STATUS_OUT_OF_MEMORY,
@ -399,6 +493,28 @@ static int InitThreadContext(VP8Decoder* const dec) {
return 1;
}
int VP8GetThreadMethod(const WebPDecoderOptions* const options,
const WebPHeaderStructure* const headers,
int width, int height) {
if (options == NULL || options->use_threads == 0) {
return 0;
}
(void)headers;
(void)width;
(void)height;
assert(headers == NULL || !headers->is_lossless);  // 'headers' is NULL in the incremental path
#if defined(WEBP_USE_THREAD)
if (width < MIN_WIDTH_FOR_THREADS) return 0;
// TODO(skal): tune the heuristic further
#if 0
if (height < 2 * width) return 2;
#endif
return 2;
#else // !WEBP_USE_THREAD
return 0;
#endif
}
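Aside (not part of the original change): once threading is requested, the heuristic above reduces to a simple width gate. A standalone restatement, assuming WEBP_USE_THREAD is defined and MIN_WIDTH_FOR_THREADS keeps the value 512 declared in vp8i.h further down in this compare view:
// Illustrative sketch only; mirrors the lossy path of VP8GetThreadMethod.
static int SketchThreadMethod(int use_threads, int width) {
  if (!use_threads) return 0;  // caller did not ask for threads
  if (width < 512) return 0;   // narrow image: a worker is not worth it
  return 2;                    // method 2: parse | reconstruct+filter split
}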
#undef MT_CACHE_LINES
#undef ST_CACHE_LINES
@ -410,14 +526,15 @@ static int AllocateMemory(VP8Decoder* const dec) {
const int mb_w = dec->mb_w_;
// Note: we use 'size_t' when there's no overflow risk, uint64_t otherwise.
const size_t intra_pred_mode_size = 4 * mb_w * sizeof(uint8_t);
const size_t top_size = (16 + 8 + 8) * mb_w;
const size_t top_size = sizeof(VP8TopSamples) * mb_w;
const size_t mb_info_size = (mb_w + 1) * sizeof(VP8MB);
const size_t f_info_size =
(dec->filter_type_ > 0) ?
mb_w * (dec->use_threads_ ? 2 : 1) * sizeof(VP8FInfo)
mb_w * (dec->mt_method_ > 0 ? 2 : 1) * sizeof(VP8FInfo)
: 0;
const size_t yuv_size = YUV_SIZE * sizeof(*dec->yuv_b_);
const size_t coeffs_size = 384 * sizeof(*dec->coeffs_);
const size_t mb_data_size =
(dec->mt_method_ == 2 ? 2 : 1) * mb_w * sizeof(*dec->mb_data_);
const size_t cache_height = (16 * num_caches
+ kFilterExtraRows[dec->filter_type_]) * 3 / 2;
const size_t cache_size = top_size * cache_height;
@ -426,7 +543,7 @@ static int AllocateMemory(VP8Decoder* const dec) {
(uint64_t)dec->pic_hdr_.width_ * dec->pic_hdr_.height_ : 0ULL;
const uint64_t needed = (uint64_t)intra_pred_mode_size
+ top_size + mb_info_size + f_info_size
+ yuv_size + coeffs_size
+ yuv_size + mb_data_size
+ cache_size + alpha_size + ALIGN_MASK;
uint8_t* mem;
@ -447,12 +564,8 @@ static int AllocateMemory(VP8Decoder* const dec) {
dec->intra_t_ = (uint8_t*)mem;
mem += intra_pred_mode_size;
dec->y_t_ = (uint8_t*)mem;
mem += 16 * mb_w;
dec->u_t_ = (uint8_t*)mem;
mem += 8 * mb_w;
dec->v_t_ = (uint8_t*)mem;
mem += 8 * mb_w;
dec->yuv_t_ = (VP8TopSamples*)mem;
mem += top_size;
dec->mb_info_ = ((VP8MB*)mem) + 1;
mem += mb_info_size;
@ -461,7 +574,7 @@ static int AllocateMemory(VP8Decoder* const dec) {
mem += f_info_size;
dec->thread_ctx_.id_ = 0;
dec->thread_ctx_.f_info_ = dec->f_info_;
if (dec->use_threads_) {
if (dec->mt_method_ > 0) {
// secondary cache line. The deblocking process needs to make use of the
// filtering strength from the previous macroblock row, while the new ones
// are being decoded in parallel. We'll just swap the pointers.
@ -473,8 +586,12 @@ static int AllocateMemory(VP8Decoder* const dec) {
dec->yuv_b_ = (uint8_t*)mem;
mem += yuv_size;
dec->coeffs_ = (int16_t*)mem;
mem += coeffs_size;
dec->mb_data_ = (VP8MBData*)mem;
dec->thread_ctx_.mb_data_ = (VP8MBData*)mem;
if (dec->mt_method_ == 2) {
dec->thread_ctx_.mb_data_ += mb_w;
}
mem += mb_data_size;
dec->cache_y_stride_ = 16 * mb_w;
dec->cache_uv_stride_ = 8 * mb_w;
@ -496,8 +613,9 @@ static int AllocateMemory(VP8Decoder* const dec) {
mem += alpha_size;
assert(mem <= (uint8_t*)dec->mem_ + dec->mem_size_);
// note: left-info is initialized once for all.
// note: left/top-info is initialized once for all.
memset(dec->mb_info_ - 1, 0, mb_info_size);
VP8InitScanline(dec); // initialize left too.
// initialize top
memset(dec->intra_t_, B_DC_PRED, intra_pred_mode_size);
@ -534,159 +652,167 @@ static const int kScan[16] = {
0 + 12 * BPS, 4 + 12 * BPS, 8 + 12 * BPS, 12 + 12 * BPS
};
static WEBP_INLINE int CheckMode(VP8Decoder* const dec, int mode) {
static int CheckMode(int mb_x, int mb_y, int mode) {
if (mode == B_DC_PRED) {
if (dec->mb_x_ == 0) {
return (dec->mb_y_ == 0) ? B_DC_PRED_NOTOPLEFT : B_DC_PRED_NOLEFT;
if (mb_x == 0) {
return (mb_y == 0) ? B_DC_PRED_NOTOPLEFT : B_DC_PRED_NOLEFT;
} else {
return (dec->mb_y_ == 0) ? B_DC_PRED_NOTOP : B_DC_PRED;
return (mb_y == 0) ? B_DC_PRED_NOTOP : B_DC_PRED;
}
}
return mode;
}
static WEBP_INLINE void Copy32b(uint8_t* dst, uint8_t* src) {
*(uint32_t*)dst = *(uint32_t*)src;
static void Copy32b(uint8_t* dst, uint8_t* src) {
memcpy(dst, src, 4);
}
void VP8ReconstructBlock(VP8Decoder* const dec) {
static WEBP_INLINE void DoTransform(uint32_t bits, const int16_t* const src,
uint8_t* const dst) {
switch (bits >> 30) {
case 3:
VP8Transform(src, dst, 0);
break;
case 2:
VP8TransformAC3(src, dst);
break;
case 1:
VP8TransformDC(src, dst);
break;
default:
break;
}
}
static void DoUVTransform(uint32_t bits, const int16_t* const src,
uint8_t* const dst) {
if (bits & 0xff) { // any non-zero coeff at all?
if (bits & 0xaa) { // any non-zero AC coefficient?
VP8TransformUV(src, dst); // note we don't use the AC3 variant for U/V
} else {
VP8TransformDCUV(src, dst);
}
}
}
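Note on the masks used by these two helpers (inferred from the packing done in ParseResiduals later in this compare view; a reading of the code, not normative documentation): each 4x4 block carries a 2-bit code, and the four codes of one chroma plane sit in the low byte handed to DoUVTransform. The odd-position bits (mask 0xaa) are set only for codes 2 and 3, i.e. when the block has at least one AC coefficient:
// Sketch of the mask logic, assuming the 2-bit-per-block layout above.
static int UVPlaneHasCoeffs(uint32_t bits) { return (bits & 0xff) != 0; }
static int UVPlaneHasAC(uint32_t bits) { return (bits & 0xaa) != 0; }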
static void ReconstructRow(const VP8Decoder* const dec,
const VP8ThreadContext* ctx) {
int j;
int mb_x;
const int mb_y = ctx->mb_y_;
const int cache_id = ctx->id_;
uint8_t* const y_dst = dec->yuv_b_ + Y_OFF;
uint8_t* const u_dst = dec->yuv_b_ + U_OFF;
uint8_t* const v_dst = dec->yuv_b_ + V_OFF;
for (mb_x = 0; mb_x < dec->mb_w_; ++mb_x) {
const VP8MBData* const block = ctx->mb_data_ + mb_x;
// Rotate in the left samples from the previously decoded block. We move four
// pixels at a time for alignment reasons, and because of the in-loop filter.
if (dec->mb_x_ > 0) {
for (j = -1; j < 16; ++j) {
Copy32b(&y_dst[j * BPS - 4], &y_dst[j * BPS + 12]);
}
for (j = -1; j < 8; ++j) {
Copy32b(&u_dst[j * BPS - 4], &u_dst[j * BPS + 4]);
Copy32b(&v_dst[j * BPS - 4], &v_dst[j * BPS + 4]);
}
} else {
for (j = 0; j < 16; ++j) {
y_dst[j * BPS - 1] = 129;
}
for (j = 0; j < 8; ++j) {
u_dst[j * BPS - 1] = 129;
v_dst[j * BPS - 1] = 129;
}
// Init top-left sample on left column too
if (dec->mb_y_ > 0) {
y_dst[-1 - BPS] = u_dst[-1 - BPS] = v_dst[-1 - BPS] = 129;
}
}
{
// bring top samples into the cache
uint8_t* const top_y = dec->y_t_ + dec->mb_x_ * 16;
uint8_t* const top_u = dec->u_t_ + dec->mb_x_ * 8;
uint8_t* const top_v = dec->v_t_ + dec->mb_x_ * 8;
const int16_t* coeffs = dec->coeffs_;
int n;
if (dec->mb_y_ > 0) {
memcpy(y_dst - BPS, top_y, 16);
memcpy(u_dst - BPS, top_u, 8);
memcpy(v_dst - BPS, top_v, 8);
} else if (dec->mb_x_ == 0) {
// we only need to do this init once at block (0,0).
// Afterward, it remains valid for the whole topmost row.
memset(y_dst - BPS - 1, 127, 16 + 4 + 1);
memset(u_dst - BPS - 1, 127, 8 + 1);
memset(v_dst - BPS - 1, 127, 8 + 1);
}
// predict and add residuals
if (dec->is_i4x4_) { // 4x4
uint32_t* const top_right = (uint32_t*)(y_dst - BPS + 16);
if (dec->mb_y_ > 0) {
if (dec->mb_x_ >= dec->mb_w_ - 1) { // on rightmost border
top_right[0] = top_y[15] * 0x01010101u;
} else {
memcpy(top_right, top_y + 16, sizeof(*top_right));
}
// Rotate in the left samples from the previously decoded block. We move four
// pixels at a time for alignment reasons, and because of the in-loop filter.
if (mb_x > 0) {
for (j = -1; j < 16; ++j) {
Copy32b(&y_dst[j * BPS - 4], &y_dst[j * BPS + 12]);
}
// replicate the top-right pixels below
top_right[BPS] = top_right[2 * BPS] = top_right[3 * BPS] = top_right[0];
// predict and add residues for all 4x4 blocks in turn.
for (n = 0; n < 16; n++) {
uint8_t* const dst = y_dst + kScan[n];
VP8PredLuma4[dec->imodes_[n]](dst);
if (dec->non_zero_ac_ & (1 << n)) {
VP8Transform(coeffs + n * 16, dst, 0);
} else if (dec->non_zero_ & (1 << n)) { // only DC is present
VP8TransformDC(coeffs + n * 16, dst);
}
for (j = -1; j < 8; ++j) {
Copy32b(&u_dst[j * BPS - 4], &u_dst[j * BPS + 4]);
Copy32b(&v_dst[j * BPS - 4], &v_dst[j * BPS + 4]);
}
} else { // 16x16
const int pred_func = CheckMode(dec, dec->imodes_[0]);
VP8PredLuma16[pred_func](y_dst);
if (dec->non_zero_) {
for (n = 0; n < 16; n++) {
uint8_t* const dst = y_dst + kScan[n];
if (dec->non_zero_ac_ & (1 << n)) {
VP8Transform(coeffs + n * 16, dst, 0);
} else if (dec->non_zero_ & (1 << n)) { // only DC is present
VP8TransformDC(coeffs + n * 16, dst);
}
}
} else {
for (j = 0; j < 16; ++j) {
y_dst[j * BPS - 1] = 129;
}
for (j = 0; j < 8; ++j) {
u_dst[j * BPS - 1] = 129;
v_dst[j * BPS - 1] = 129;
}
// Init top-left sample on left column too
if (mb_y > 0) {
y_dst[-1 - BPS] = u_dst[-1 - BPS] = v_dst[-1 - BPS] = 129;
}
}
{
// Chroma
const int pred_func = CheckMode(dec, dec->uvmode_);
VP8PredChroma8[pred_func](u_dst);
VP8PredChroma8[pred_func](v_dst);
// bring top samples into the cache
VP8TopSamples* const top_yuv = dec->yuv_t_ + mb_x;
const int16_t* const coeffs = block->coeffs_;
uint32_t bits = block->non_zero_y_;
int n;
if (dec->non_zero_ & 0x0f0000) { // chroma-U
const int16_t* const u_coeffs = dec->coeffs_ + 16 * 16;
if (dec->non_zero_ac_ & 0x0f0000) {
VP8TransformUV(u_coeffs, u_dst);
} else {
VP8TransformDCUV(u_coeffs, u_dst);
if (mb_y > 0) {
memcpy(y_dst - BPS, top_yuv[0].y, 16);
memcpy(u_dst - BPS, top_yuv[0].u, 8);
memcpy(v_dst - BPS, top_yuv[0].v, 8);
} else if (mb_x == 0) {
// we only need to do this init once at block (0,0).
// Afterward, it remains valid for the whole topmost row.
memset(y_dst - BPS - 1, 127, 16 + 4 + 1);
memset(u_dst - BPS - 1, 127, 8 + 1);
memset(v_dst - BPS - 1, 127, 8 + 1);
}
// predict and add residuals
if (block->is_i4x4_) { // 4x4
uint32_t* const top_right = (uint32_t*)(y_dst - BPS + 16);
if (mb_y > 0) {
if (mb_x >= dec->mb_w_ - 1) { // on rightmost border
memset(top_right, top_yuv[0].y[15], sizeof(*top_right));
} else {
memcpy(top_right, top_yuv[1].y, sizeof(*top_right));
}
}
// replicate the top-right pixels below
top_right[BPS] = top_right[2 * BPS] = top_right[3 * BPS] = top_right[0];
// predict and add residuals for all 4x4 blocks in turn.
for (n = 0; n < 16; ++n, bits <<= 2) {
uint8_t* const dst = y_dst + kScan[n];
VP8PredLuma4[block->imodes_[n]](dst);
DoTransform(bits, coeffs + n * 16, dst);
}
} else { // 16x16
const int pred_func = CheckMode(mb_x, mb_y,
block->imodes_[0]);
VP8PredLuma16[pred_func](y_dst);
if (bits != 0) {
for (n = 0; n < 16; ++n, bits <<= 2) {
DoTransform(bits, coeffs + n * 16, y_dst + kScan[n]);
}
}
}
if (dec->non_zero_ & 0xf00000) { // chroma-V
const int16_t* const v_coeffs = dec->coeffs_ + 20 * 16;
if (dec->non_zero_ac_ & 0xf00000) {
VP8TransformUV(v_coeffs, v_dst);
} else {
VP8TransformDCUV(v_coeffs, v_dst);
}
{
// Chroma
const uint32_t bits_uv = block->non_zero_uv_;
const int pred_func = CheckMode(mb_x, mb_y, block->uvmode_);
VP8PredChroma8[pred_func](u_dst);
VP8PredChroma8[pred_func](v_dst);
DoUVTransform(bits_uv >> 0, coeffs + 16 * 16, u_dst);
DoUVTransform(bits_uv >> 8, coeffs + 20 * 16, v_dst);
}
// stash away top samples for next block
if (dec->mb_y_ < dec->mb_h_ - 1) {
memcpy(top_y, y_dst + 15 * BPS, 16);
memcpy(top_u, u_dst + 7 * BPS, 8);
memcpy(top_v, v_dst + 7 * BPS, 8);
if (mb_y < dec->mb_h_ - 1) {
memcpy(top_yuv[0].y, y_dst + 15 * BPS, 16);
memcpy(top_yuv[0].u, u_dst + 7 * BPS, 8);
memcpy(top_yuv[0].v, v_dst + 7 * BPS, 8);
}
}
}
// Transfer reconstructed samples from yuv_b_ cache to final destination.
{
const int y_offset = dec->cache_id_ * 16 * dec->cache_y_stride_;
const int uv_offset = dec->cache_id_ * 8 * dec->cache_uv_stride_;
uint8_t* const y_out = dec->cache_y_ + dec->mb_x_ * 16 + y_offset;
uint8_t* const u_out = dec->cache_u_ + dec->mb_x_ * 8 + uv_offset;
uint8_t* const v_out = dec->cache_v_ + dec->mb_x_ * 8 + uv_offset;
for (j = 0; j < 16; ++j) {
memcpy(y_out + j * dec->cache_y_stride_, y_dst + j * BPS, 16);
}
for (j = 0; j < 8; ++j) {
memcpy(u_out + j * dec->cache_uv_stride_, u_dst + j * BPS, 8);
memcpy(v_out + j * dec->cache_uv_stride_, v_dst + j * BPS, 8);
// Transfer reconstructed samples from yuv_b_ cache to final destination.
{
const int y_offset = cache_id * 16 * dec->cache_y_stride_;
const int uv_offset = cache_id * 8 * dec->cache_uv_stride_;
uint8_t* const y_out = dec->cache_y_ + mb_x * 16 + y_offset;
uint8_t* const u_out = dec->cache_u_ + mb_x * 8 + uv_offset;
uint8_t* const v_out = dec->cache_v_ + mb_x * 8 + uv_offset;
for (j = 0; j < 16; ++j) {
memcpy(y_out + j * dec->cache_y_stride_, y_dst + j * BPS, 16);
}
for (j = 0; j < 8; ++j) {
memcpy(u_out + j * dec->cache_uv_stride_, u_dst + j * BPS, 8);
memcpy(v_out + j * dec->cache_uv_stride_, v_dst + j * BPS, 8);
}
}
}
}
//------------------------------------------------------------------------------
#if defined(__cplusplus) || defined(c_plusplus)
} // extern "C"
#endif

View File

@ -1,8 +1,10 @@
// Copyright 2011 Google Inc. All Rights Reserved.
//
// This code is licensed under the same terms as WebM:
// Software License Agreement: http://www.webmproject.org/license/software/
// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// Incremental decoding
@ -13,14 +15,11 @@
#include <string.h>
#include <stdlib.h>
#include "./alphai.h"
#include "./webpi.h"
#include "./vp8i.h"
#include "../utils/utils.h"
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
// In append mode, buffer allocations increase as multiples of this value.
// Needs to be a power of 2.
#define CHUNK_SIZE 4096
@ -29,11 +28,13 @@ extern "C" {
//------------------------------------------------------------------------------
// Data structures for memory and states
// Decoding states. State normally flows like HEADER->PARTS0->DATA->DONE.
// Decoding states. State normally flows as:
// WEBP_HEADER->VP8_HEADER->VP8_PARTS0->VP8_DATA->DONE for a lossy image, and
// WEBP_HEADER->VP8L_HEADER->VP8L_DATA->DONE for a lossless image.
// If there is any error the decoder goes into state ERROR.
typedef enum {
STATE_PRE_VP8, // All data before that of the first VP8 chunk.
STATE_VP8_FRAME_HEADER, // For VP8 Frame header (within VP8 chunk).
STATE_WEBP_HEADER, // All the data before that of the VP8/VP8L chunk.
STATE_VP8_HEADER, // The VP8 Frame header (within the VP8 chunk).
STATE_VP8_PARTS0,
STATE_VP8_DATA,
STATE_VP8L_HEADER,
@ -97,6 +98,23 @@ static WEBP_INLINE size_t MemDataSize(const MemBuffer* mem) {
return (mem->end_ - mem->start_);
}
// Check if we need to preserve the compressed alpha data, as it may not have
// been decoded yet.
static int NeedCompressedAlpha(const WebPIDecoder* const idec) {
if (idec->state_ == STATE_WEBP_HEADER) {
// We haven't parsed the headers yet, so we don't know whether the image is
// lossy or lossless. This also means that we haven't parsed the ALPH chunk.
return 0;
}
if (idec->is_lossless_) {
return 0; // ALPH chunk is not present for lossless images.
} else {
const VP8Decoder* const dec = (VP8Decoder*)idec->dec_;
assert(dec != NULL); // Must be true as idec->state_ != STATE_WEBP_HEADER.
return (dec->alpha_data_ != NULL) && !dec->is_alpha_decoded_;
}
}
static void DoRemap(WebPIDecoder* const idec, ptrdiff_t offset) {
MemBuffer* const mem = &idec->mem_;
const uint8_t* const new_base = mem->buf_ + mem->start_;
@ -122,6 +140,22 @@ static void DoRemap(WebPIDecoder* const idec, ptrdiff_t offset) {
}
assert(last_part >= 0);
dec->parts_[last_part].buf_end_ = mem->buf_ + mem->end_;
if (NeedCompressedAlpha(idec)) {
ALPHDecoder* const alph_dec = dec->alph_dec_;
dec->alpha_data_ += offset;
if (alph_dec != NULL) {
if (alph_dec->method_ == ALPHA_LOSSLESS_COMPRESSION) {
VP8LDecoder* const alph_vp8l_dec = alph_dec->vp8l_dec_;
assert(alph_vp8l_dec != NULL);
assert(dec->alpha_data_size_ >= ALPHA_HEADER_LEN);
VP8LBitReaderSetBuffer(&alph_vp8l_dec->br_,
dec->alpha_data_ + ALPHA_HEADER_LEN,
dec->alpha_data_size_ - ALPHA_HEADER_LEN);
} else { // alph_dec->method_ == ALPHA_NO_COMPRESSION
// Nothing special to do in this case.
}
}
}
} else { // Resize lossless bitreader
VP8LDecoder* const dec = (VP8LDecoder*)idec->dec_;
VP8LBitReaderSetBuffer(&dec->br_, new_base, MemDataSize(mem));
@ -133,8 +167,12 @@ static void DoRemap(WebPIDecoder* const idec, ptrdiff_t offset) {
// size if required and also updates VP8BitReader's if new memory is allocated.
static int AppendToMemBuffer(WebPIDecoder* const idec,
const uint8_t* const data, size_t data_size) {
VP8Decoder* const dec = (VP8Decoder*)idec->dec_;
MemBuffer* const mem = &idec->mem_;
const uint8_t* const old_base = mem->buf_ + mem->start_;
const int need_compressed_alpha = NeedCompressedAlpha(idec);
const uint8_t* const old_start = mem->buf_ + mem->start_;
const uint8_t* const old_base =
need_compressed_alpha ? dec->alpha_data_ : old_start;
assert(mem->mode_ == MEM_MODE_APPEND);
if (data_size > MAX_CHUNK_PAYLOAD) {
// security safeguard: trying to allocate more than what the format
@ -143,7 +181,8 @@ static int AppendToMemBuffer(WebPIDecoder* const idec,
}
if (mem->end_ + data_size > mem->buf_size_) { // Need some free memory
const size_t current_size = MemDataSize(mem);
const size_t new_mem_start = old_start - old_base;
const size_t current_size = MemDataSize(mem) + new_mem_start;
const uint64_t new_size = (uint64_t)current_size + data_size;
const uint64_t extra_size = (new_size + CHUNK_SIZE - 1) & ~(CHUNK_SIZE - 1);
uint8_t* const new_buf =
@ -153,7 +192,7 @@ static int AppendToMemBuffer(WebPIDecoder* const idec,
free(mem->buf_);
mem->buf_ = new_buf;
mem->buf_size_ = (size_t)extra_size;
mem->start_ = 0;
mem->start_ = new_mem_start;
mem->end_ = current_size;
}
@ -161,14 +200,15 @@ static int AppendToMemBuffer(WebPIDecoder* const idec,
mem->end_ += data_size;
assert(mem->end_ <= mem->buf_size_);
DoRemap(idec, mem->buf_ + mem->start_ - old_base);
DoRemap(idec, mem->buf_ + mem->start_ - old_start);
return 1;
}
static int RemapMemBuffer(WebPIDecoder* const idec,
const uint8_t* const data, size_t data_size) {
MemBuffer* const mem = &idec->mem_;
const uint8_t* const old_base = mem->buf_ + mem->start_;
const uint8_t* const old_buf = mem->buf_;
const uint8_t* const old_start = old_buf + mem->start_;
assert(mem->mode_ == MEM_MODE_MAP);
if (data_size < mem->buf_size_) return 0; // can't remap to a shorter buffer!
@ -176,7 +216,7 @@ static int RemapMemBuffer(WebPIDecoder* const idec,
mem->buf_ = (uint8_t*)data;
mem->end_ = mem->buf_size_ = data_size;
DoRemap(idec, mem->buf_ + mem->start_ - old_base);
DoRemap(idec, mem->buf_ + mem->start_ - old_start);
return 1;
}
@ -242,7 +282,7 @@ static void RestoreContext(const MBContext* context, VP8Decoder* const dec,
static VP8StatusCode IDecError(WebPIDecoder* const idec, VP8StatusCode error) {
if (idec->state_ == STATE_VP8_DATA) {
VP8Io* const io = &idec->io_;
if (io->teardown) {
if (io->teardown != NULL) {
io->teardown(io);
}
}
@ -285,15 +325,9 @@ static VP8StatusCode DecodeWebPHeaders(WebPIDecoder* const idec) {
return VP8_STATUS_OUT_OF_MEMORY;
}
idec->dec_ = dec;
#ifdef WEBP_USE_THREAD
dec->use_threads_ = (idec->params_.options != NULL) &&
(idec->params_.options->use_threads > 0);
#else
dec->use_threads_ = 0;
#endif
dec->alpha_data_ = headers.alpha_data;
dec->alpha_data_size_ = headers.alpha_data_size;
ChangeState(idec, STATE_VP8_FRAME_HEADER, headers.offset);
ChangeState(idec, STATE_VP8_HEADER, headers.offset);
} else {
VP8LDecoder* const dec = VP8LNew();
if (dec == NULL) {
@ -308,13 +342,14 @@ static VP8StatusCode DecodeWebPHeaders(WebPIDecoder* const idec) {
static VP8StatusCode DecodeVP8FrameHeader(WebPIDecoder* const idec) {
const uint8_t* data = idec->mem_.buf_ + idec->mem_.start_;
const size_t curr_size = MemDataSize(&idec->mem_);
int width, height;
uint32_t bits;
if (curr_size < VP8_FRAME_HEADER_SIZE) {
// Not enough data bytes to extract VP8 Frame Header.
return VP8_STATUS_SUSPENDED;
}
if (!VP8GetInfo(data, curr_size, idec->chunk_size_, NULL, NULL)) {
if (!VP8GetInfo(data, curr_size, idec->chunk_size_, &width, &height)) {
return IDecError(idec, VP8_STATUS_BITSTREAM_ERROR);
}
@ -381,7 +416,10 @@ static VP8StatusCode DecodePartition0(WebPIDecoder* const idec) {
if (dec->status_ != VP8_STATUS_OK) {
return IDecError(idec, dec->status_);
}
// This change must be done before calling VP8InitFrame()
dec->mt_method_ = VP8GetThreadMethod(params->options, NULL,
io->width, io->height);
VP8InitDithering(params->options, dec);
if (!CopyParts0Data(idec)) {
return IDecError(idec, VP8_STATUS_OUT_OF_MEMORY);
}
@ -407,16 +445,11 @@ static VP8StatusCode DecodeRemaining(WebPIDecoder* const idec) {
VP8Io* const io = &idec->io_;
assert(dec->ready_);
for (; dec->mb_y_ < dec->mb_h_; ++dec->mb_y_) {
VP8BitReader* token_br = &dec->parts_[dec->mb_y_ & (dec->num_parts_ - 1)];
if (dec->mb_x_ == 0) {
VP8InitScanline(dec);
}
for (; dec->mb_x_ < dec->mb_w_; dec->mb_x_++) {
for (; dec->mb_x_ < dec->mb_w_; ++dec->mb_x_) {
MBContext context;
SaveContext(dec, token_br, &context);
if (!VP8DecodeMB(dec, token_br)) {
RestoreContext(&context, dec, token_br);
// We shouldn't fail when MAX_MB data was available
@ -425,19 +458,18 @@ static VP8StatusCode DecodeRemaining(WebPIDecoder* const idec) {
}
return VP8_STATUS_SUSPENDED;
}
// Reconstruct and emit samples.
VP8ReconstructBlock(dec);
// Release buffer only if there is only one partition
if (dec->num_parts_ == 1) {
idec->mem_.start_ = token_br->buf_ - idec->mem_.buf_;
assert(idec->mem_.start_ <= idec->mem_.end_);
}
}
VP8InitScanline(dec); // Prepare for next scanline
// Reconstruct, filter and emit the row.
if (!VP8ProcessRow(dec, io)) {
return IDecError(idec, VP8_STATUS_USER_ABORT);
}
dec->mb_x_ = 0;
}
// Synchronize the thread and check for errors.
if (!VP8ExitCritical(dec, io)) {
@ -449,7 +481,8 @@ static VP8StatusCode DecodeRemaining(WebPIDecoder* const idec) {
return VP8_STATUS_OK;
}
static int ErrorStatusLossless(WebPIDecoder* const idec, VP8StatusCode status) {
static VP8StatusCode ErrorStatusLossless(WebPIDecoder* const idec,
VP8StatusCode status) {
if (status == VP8_STATUS_SUSPENDED || status == VP8_STATUS_NOT_ENOUGH_DATA) {
return VP8_STATUS_SUSPENDED;
}
@ -506,14 +539,14 @@ static VP8StatusCode DecodeVP8LData(WebPIDecoder* const idec) {
static VP8StatusCode IDecode(WebPIDecoder* idec) {
VP8StatusCode status = VP8_STATUS_SUSPENDED;
if (idec->state_ == STATE_PRE_VP8) {
if (idec->state_ == STATE_WEBP_HEADER) {
status = DecodeWebPHeaders(idec);
} else {
if (idec->dec_ == NULL) {
return VP8_STATUS_SUSPENDED; // can't continue if we have no decoder.
}
}
if (idec->state_ == STATE_VP8_FRAME_HEADER) {
if (idec->state_ == STATE_VP8_HEADER) {
status = DecodeVP8FrameHeader(idec);
}
if (idec->state_ == STATE_VP8_PARTS0) {
@ -540,7 +573,7 @@ WebPIDecoder* WebPINewDecoder(WebPDecBuffer* output_buffer) {
return NULL;
}
idec->state_ = STATE_PRE_VP8;
idec->state_ = STATE_WEBP_HEADER;
idec->chunk_size_ = 0;
InitMemBuffer(&idec->mem_);
@ -548,7 +581,8 @@ WebPIDecoder* WebPINewDecoder(WebPDecBuffer* output_buffer) {
VP8InitIo(&idec->io_);
WebPResetDecParams(&idec->params_);
idec->params_.output = output_buffer ? output_buffer : &idec->output_;
idec->params_.output = (output_buffer != NULL) ? output_buffer
: &idec->output_;
WebPInitCustomIo(&idec->params_, &idec->io_); // Plug the I/O functions.
return idec;
@ -580,9 +614,13 @@ void WebPIDelete(WebPIDecoder* idec) {
if (idec == NULL) return;
if (idec->dec_ != NULL) {
if (!idec->is_lossless_) {
VP8Delete(idec->dec_);
if (idec->state_ == STATE_VP8_DATA) {
// Synchronize the thread, clean-up and check for errors.
VP8ExitCritical((VP8Decoder*)idec->dec_, &idec->io_);
}
VP8Delete((VP8Decoder*)idec->dec_);
} else {
VP8LDelete(idec->dec_);
VP8LDelete((VP8LDecoder*)idec->dec_);
}
}
ClearMemBuffer(&idec->mem_);
@ -797,7 +835,7 @@ int WebPISetIOHooks(WebPIDecoder* const idec,
VP8IoSetupHook setup,
VP8IoTeardownHook teardown,
void* user_data) {
if (idec == NULL || idec->state_ > STATE_PRE_VP8) {
if (idec == NULL || idec->state_ > STATE_WEBP_HEADER) {
return 0;
}
@ -809,6 +847,3 @@ int WebPISetIOHooks(WebPIDecoder* const idec,
return 1;
}
#if defined(__cplusplus) || defined(c_plusplus)
} // extern "C"
#endif

View File

@ -1,8 +1,10 @@
// Copyright 2011 Google Inc. All Rights Reserved.
//
// This code is licensed under the same terms as WebM:
// Software License Agreement: http://www.webmproject.org/license/software/
// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// functions for sample output.
@ -16,10 +18,6 @@
#include "../dsp/dsp.h"
#include "../dsp/yuv.h"
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
//------------------------------------------------------------------------------
// Main YUV<->RGB conversion functions
@ -117,7 +115,7 @@ static int EmitFancyRGB(const VP8Io* const io, WebPDecParams* const p) {
if (y == 0) {
// First line is special cased. We mirror the u/v samples at boundary.
upsample(NULL, cur_y, cur_u, cur_v, cur_u, cur_v, NULL, dst, mb_w);
upsample(cur_y, NULL, cur_u, cur_v, cur_u, cur_v, dst, NULL, mb_w);
} else {
// We can finish the left-over line from previous call.
upsample(p->tmp_y, cur_y, top_u, top_v, cur_u, cur_v,
@ -601,7 +599,7 @@ static int CustomPut(const VP8Io* io) {
return 0;
}
num_lines_out = p->emit(io, p);
if (p->emit_alpha) {
if (p->emit_alpha != NULL) {
p->emit_alpha(io, p);
}
p->last_y += num_lines_out;
@ -628,6 +626,3 @@ void WebPInitCustomIo(WebPDecParams* const params, VP8Io* const io) {
//------------------------------------------------------------------------------
#if defined(__cplusplus) || defined(c_plusplus)
} // extern "C"
#endif

View File

@ -1,8 +1,10 @@
// Copyright 2011 Google Inc. All Rights Reserved.
//
// This code is licensed under the same terms as WebM:
// Software License Agreement: http://www.webmproject.org/license/software/
// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// Enhancement layer (for YUV444/422)
@ -14,10 +16,6 @@
#include "./vp8i.h"
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
//------------------------------------------------------------------------------
int VP8DecodeLayer(VP8Decoder* const dec) {
@ -30,6 +28,3 @@ int VP8DecodeLayer(VP8Decoder* const dec) {
return 1;
}
#if defined(__cplusplus) || defined(c_plusplus)
} // extern "C"
#endif

View File

@ -1,8 +1,10 @@
// Copyright 2010 Google Inc. All Rights Reserved.
//
// This code is licensed under the same terms as WebM:
// Software License Agreement: http://www.webmproject.org/license/software/
// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// Quantizer initialization
@ -11,10 +13,6 @@
#include "./vp8i.h"
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
static WEBP_INLINE int clip(int v, int M) {
return v < 0 ? 0 : v > M ? M : v;
}
@ -102,12 +100,11 @@ void VP8ParseQuant(VP8Decoder* const dec) {
m->uv_mat_[0] = kDcTable[clip(q + dquv_dc, 117)];
m->uv_mat_[1] = kAcTable[clip(q + dquv_ac, 127)];
m->uv_quant_ = q + dquv_ac; // for dithering strength evaluation
}
}
}
//------------------------------------------------------------------------------
#if defined(__cplusplus) || defined(c_plusplus)
} // extern "C"
#endif

View File

@ -1,8 +1,10 @@
// Copyright 2010 Google Inc. All Rights Reserved.
//
// This code is licensed under the same terms as WebM:
// Software License Agreement: http://www.webmproject.org/license/software/
// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// Coding trees and probas
@ -13,10 +15,6 @@
#define USE_GENERIC_TREE
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
#ifdef USE_GENERIC_TREE
static const int8_t kYModesIntra4[18] = {
-B_DC_PRED, 1,
@ -31,61 +29,12 @@ static const int8_t kYModesIntra4[18] = {
};
#endif
#ifndef ONLY_KEYFRAME_CODE
// inter prediction modes
enum {
LEFT4 = 0, ABOVE4 = 1, ZERO4 = 2, NEW4 = 3,
NEARESTMV, NEARMV, ZEROMV, NEWMV, SPLITMV };
static const int8_t kYModesInter[8] = {
-DC_PRED, 1,
2, 3,
-V_PRED, -H_PRED,
-TM_PRED, -B_PRED
};
static const int8_t kMBSplit[6] = {
-3, 1,
-2, 2,
-0, -1
};
static const int8_t kMVRef[8] = {
-ZEROMV, 1,
-NEARESTMV, 2,
-NEARMV, 3,
-NEWMV, -SPLITMV
};
static const int8_t kMVRef4[6] = {
-LEFT4, 1,
-ABOVE4, 2,
-ZERO4, -NEW4
};
#endif
//------------------------------------------------------------------------------
// Default probabilities
// Inter
#ifndef ONLY_KEYFRAME_CODE
static const uint8_t kYModeProbaInter0[4] = { 112, 86, 140, 37 };
static const uint8_t kUVModeProbaInter0[3] = { 162, 101, 204 };
static const uint8_t kMVProba0[2][NUM_MV_PROBAS] = {
{ 162, 128, 225, 146, 172, 147, 214, 39,
156, 128, 129, 132, 75, 145, 178, 206,
239, 254, 254 },
{ 164, 128, 204, 170, 119, 235, 140, 230,
228, 128, 130, 130, 74, 148, 180, 203,
236, 254, 254 }
};
#endif
// Paragraph 13.5
static const uint8_t
CoeffsProba0[NUM_TYPES][NUM_BANDS][NUM_CTX][NUM_PROBAS] = {
// generated using vp8_default_coef_probs() in entropy.c:129
{ { { 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128 },
{ 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128 },
{ 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128 }
@ -326,28 +275,25 @@ static const uint8_t kBModesProba[NUM_BMODES][NUM_BMODES][NUM_BMODES - 1] = {
void VP8ResetProba(VP8Proba* const proba) {
memset(proba->segments_, 255u, sizeof(proba->segments_));
memcpy(proba->coeffs_, CoeffsProba0, sizeof(CoeffsProba0));
#ifndef ONLY_KEYFRAME_CODE
memcpy(proba->mv_, kMVProba0, sizeof(kMVProba0));
memcpy(proba->ymode_, kYModeProbaInter0, sizeof(kYModeProbaInter0));
memcpy(proba->uvmode_, kUVModeProbaInter0, sizeof(kUVModeProbaInter0));
#endif
// proba->bands_[][] is initialized later
}
void VP8ParseIntraMode(VP8BitReader* const br, VP8Decoder* const dec) {
void VP8ParseIntraMode(VP8BitReader* const br, VP8Decoder* const dec) {
uint8_t* const top = dec->intra_t_ + 4 * dec->mb_x_;
uint8_t* const left = dec->intra_l_;
// Hardcoded 16x16 intra-mode decision tree.
dec->is_i4x4_ = !VP8GetBit(br, 145); // decide for B_PRED first
if (!dec->is_i4x4_) {
VP8MBData* const block = dec->mb_data_ + dec->mb_x_;
block->is_i4x4_ = !VP8GetBit(br, 145); // decide for B_PRED first
if (!block->is_i4x4_) {
// Hardcoded 16x16 intra-mode decision tree.
const int ymode =
VP8GetBit(br, 156) ? (VP8GetBit(br, 128) ? TM_PRED : H_PRED)
: (VP8GetBit(br, 163) ? V_PRED : DC_PRED);
dec->imodes_[0] = ymode;
memset(top, ymode, 4 * sizeof(top[0]));
memset(left, ymode, 4 * sizeof(left[0]));
block->imodes_[0] = ymode;
memset(top, ymode, 4 * sizeof(*top));
memset(left, ymode, 4 * sizeof(*left));
} else {
uint8_t* modes = dec->imodes_;
uint8_t* modes = block->imodes_;
int y;
for (y = 0; y < 4; ++y) {
int ymode = left[y];
@ -356,10 +302,10 @@ void VP8ParseIntraMode(VP8BitReader* const br, VP8Decoder* const dec) {
const uint8_t* const prob = kBModesProba[top[x]][ymode];
#ifdef USE_GENERIC_TREE
// Generic tree-parsing
int i = 0;
do {
int i = kYModesIntra4[VP8GetBit(br, prob[0])];
while (i > 0) {
i = kYModesIntra4[2 * i + VP8GetBit(br, prob[i])];
} while (i > 0);
}
ymode = -i;
#else
// Hardcoded tree parsing
@ -374,15 +320,16 @@ void VP8ParseIntraMode(VP8BitReader* const br, VP8Decoder* const dec) {
(!VP8GetBit(br, prob[8]) ? B_HD_PRED : B_HU_PRED)));
#endif // USE_GENERIC_TREE
top[x] = ymode;
*modes++ = ymode;
}
memcpy(modes, top, 4 * sizeof(*top));
modes += 4;
left[y] = ymode;
}
}
// Hardcoded UVMode decision tree
dec->uvmode_ = !VP8GetBit(br, 142) ? DC_PRED
: !VP8GetBit(br, 114) ? V_PRED
: VP8GetBit(br, 183) ? TM_PRED : H_PRED;
block->uvmode_ = !VP8GetBit(br, 142) ? DC_PRED
: !VP8GetBit(br, 114) ? V_PRED
: VP8GetBit(br, 183) ? TM_PRED : H_PRED;
}
//------------------------------------------------------------------------------
@ -524,17 +471,6 @@ static const uint8_t
}
};
#ifndef ONLY_KEYFRAME_CODE
static const uint8_t MVUpdateProba[2][NUM_MV_PROBAS] = {
{ 237, 246, 253, 253, 254, 254, 254, 254,
254, 254, 254, 254, 254, 254, 250, 250,
252, 254, 254 },
{ 231, 243, 245, 253, 254, 254, 254, 254,
254, 254, 254, 254, 254, 254, 251, 251,
254, 254, 254 }
};
#endif
// Paragraph 9.9
void VP8ParseProba(VP8BitReader* const br, VP8Decoder* const dec) {
VP8Proba* const proba = &dec->proba_;
@ -543,9 +479,9 @@ void VP8ParseProba(VP8BitReader* const br, VP8Decoder* const dec) {
for (b = 0; b < NUM_BANDS; ++b) {
for (c = 0; c < NUM_CTX; ++c) {
for (p = 0; p < NUM_PROBAS; ++p) {
if (VP8GetBit(br, CoeffsUpdateProba[t][b][c][p])) {
proba->coeffs_[t][b][c][p] = VP8GetValue(br, 8);
}
const int v = VP8GetBit(br, CoeffsUpdateProba[t][b][c][p]) ?
VP8GetValue(br, 8) : CoeffsProba0[t][b][c][p];
proba->bands_[t][b].probas_[c][p] = v;
}
}
}
@ -554,36 +490,5 @@ void VP8ParseProba(VP8BitReader* const br, VP8Decoder* const dec) {
if (dec->use_skip_proba_) {
dec->skip_p_ = VP8GetValue(br, 8);
}
#ifndef ONLY_KEYFRAME_CODE
if (!dec->frm_hdr_.key_frame_) {
int i;
dec->intra_p_ = VP8GetValue(br, 8);
dec->last_p_ = VP8GetValue(br, 8);
dec->golden_p_ = VP8GetValue(br, 8);
if (VP8Get(br)) { // update y-mode
for (i = 0; i < 4; ++i) {
proba->ymode_[i] = VP8GetValue(br, 8);
}
}
if (VP8Get(br)) { // update uv-mode
for (i = 0; i < 3; ++i) {
proba->uvmode_[i] = VP8GetValue(br, 8);
}
}
// update MV
for (i = 0; i < 2; ++i) {
int k;
for (k = 0; k < NUM_MV_PROBAS; ++k) {
if (VP8GetBit(br, MVUpdateProba[i][k])) {
const int v = VP8GetValue(br, 7);
proba->mv_[i][k] = v ? v << 1 : 1;
}
}
}
}
#endif
}
#if defined(__cplusplus) || defined(c_plusplus)
} // extern "C"
#endif

View File

@ -1,8 +1,10 @@
// Copyright 2010 Google Inc. All Rights Reserved.
//
// This code is licensed under the same terms as WebM:
// Software License Agreement: http://www.webmproject.org/license/software/
// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// main entry for the decoder
@ -11,15 +13,12 @@
#include <stdlib.h>
#include "./alphai.h"
#include "./vp8i.h"
#include "./vp8li.h"
#include "./webpi.h"
#include "../utils/bit_reader.h"
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
//------------------------------------------------------------------------------
int WebPGetDecoderVersion(void) {
@ -121,6 +120,9 @@ int VP8GetInfo(const uint8_t* data, size_t data_size, size_t chunk_size,
if (((bits >> 5)) >= chunk_size) { // partition_length
return 0; // inconsistent size information.
}
if (w == 0 || h == 0) {
return 0; // A zero width or height is not supported.
}
if (width) {
*width = w;
@ -247,7 +249,6 @@ int VP8GetHeaders(VP8Decoder* const dec, VP8Io* const io) {
VP8PictureHeader* pic_hdr;
VP8BitReader* br;
VP8StatusCode status;
WebPHeaderStructure headers;
if (dec == NULL) {
return 0;
@ -257,33 +258,8 @@ int VP8GetHeaders(VP8Decoder* const dec, VP8Io* const io) {
return VP8SetError(dec, VP8_STATUS_INVALID_PARAM,
"null VP8Io passed to VP8GetHeaders()");
}
// Process Pre-VP8 chunks.
headers.data = io->data;
headers.data_size = io->data_size;
status = WebPParseHeaders(&headers);
if (status != VP8_STATUS_OK) {
return VP8SetError(dec, status, "Incorrect/incomplete header.");
}
if (headers.is_lossless) {
return VP8SetError(dec, VP8_STATUS_BITSTREAM_ERROR,
"Unexpected lossless format encountered.");
}
if (dec->alpha_data_ == NULL) {
assert(dec->alpha_data_size_ == 0);
// We have NOT set alpha data yet. Set it now.
// (This is to ensure that dec->alpha_data_ is NOT reset to NULL if
// WebPParseHeaders() is called more than once, as in incremental decoding
// case.)
dec->alpha_data_ = headers.alpha_data;
dec->alpha_data_size_ = headers.alpha_data_size;
}
// Process the VP8 frame header.
buf = headers.data + headers.offset;
buf_size = headers.data_size - headers.offset;
assert(headers.data_size >= headers.offset); // WebPParseHeaders' guarantee
buf = io->data;
buf_size = io->data_size;
if (buf_size < 4) {
return VP8SetError(dec, VP8_STATUS_NOT_ENOUGH_DATA,
"Truncated header.");
@ -379,38 +355,11 @@ int VP8GetHeaders(VP8Decoder* const dec, VP8Io* const io) {
// Frame buffer marking
if (!frm_hdr->key_frame_) {
// Paragraph 9.7
#ifndef ONLY_KEYFRAME_CODE
dec->buffer_flags_ = VP8Get(br) << 0; // update golden
dec->buffer_flags_ |= VP8Get(br) << 1; // update alt ref
if (!(dec->buffer_flags_ & 1)) {
dec->buffer_flags_ |= VP8GetValue(br, 2) << 2;
}
if (!(dec->buffer_flags_ & 2)) {
dec->buffer_flags_ |= VP8GetValue(br, 2) << 4;
}
dec->buffer_flags_ |= VP8Get(br) << 6; // sign bias golden
dec->buffer_flags_ |= VP8Get(br) << 7; // sign bias alt ref
#else
return VP8SetError(dec, VP8_STATUS_UNSUPPORTED_FEATURE,
"Not a key frame.");
#endif
} else {
dec->buffer_flags_ = 0x003 | 0x100;
}
// Paragraph 9.8
#ifndef ONLY_KEYFRAME_CODE
dec->update_proba_ = VP8Get(br);
if (!dec->update_proba_) { // save for later restore
dec->proba_saved_ = dec->proba_;
}
dec->buffer_flags_ &= 1 << 8;
dec->buffer_flags_ |=
(frm_hdr->key_frame_ || VP8Get(br)) << 8; // refresh last frame
#else
VP8Get(br); // just ignore the value of update_proba_
#endif
VP8Get(br); // ignore the value of update_proba_
VP8ParseProba(br, dec);
@ -459,9 +408,6 @@ static const uint8_t kZigzag[16] = {
0, 1, 4, 8, 5, 2, 3, 6, 9, 12, 13, 10, 7, 11, 14, 15
};
typedef const uint8_t (*ProbaArray)[NUM_CTX][NUM_PROBAS]; // for const-casting
typedef const uint8_t (*ProbaCtxArray)[NUM_PROBAS];
// See section 13-2: http://tools.ietf.org/html/rfc6386#section-13.2
static int GetLargeValue(VP8BitReader* const br, const uint8_t* const p) {
int v;
@ -495,19 +441,20 @@ static int GetLargeValue(VP8BitReader* const br, const uint8_t* const p) {
}
// Returns the position of the last non-zero coeff plus one
// (and 0 if there's no coeff at all)
static int GetCoeffs(VP8BitReader* const br, ProbaArray prob,
static int GetCoeffs(VP8BitReader* const br, const VP8BandProbas* const prob,
int ctx, const quant_t dq, int n, int16_t* out) {
// n is either 0 or 1 here. kBands[n] is not necessary for extracting '*p'.
const uint8_t* p = prob[n][ctx];
if (!VP8GetBit(br, p[0])) { // first EOB is more a 'CBP' bit.
return 0;
}
const uint8_t* p = prob[n].probas_[ctx];
for (; n < 16; ++n) {
const ProbaCtxArray p_ctx = prob[kBands[n + 1]];
if (!VP8GetBit(br, p[1])) {
p = p_ctx[0];
} else { // non zero coeff
if (!VP8GetBit(br, p[0])) {
return n; // previous coeff was last non-zero coeff
}
while (!VP8GetBit(br, p[1])) { // sequence of zero coeffs
p = prob[kBands[++n]].probas_[0];
if (n == 16) return 16;
}
{ // non zero coeff
const VP8ProbaArray* const p_ctx = &prob[kBands[n + 1]].probas_[0];
int v;
if (!VP8GetBit(br, p[2])) {
v = 1;
@ -517,115 +464,107 @@ static int GetCoeffs(VP8BitReader* const br, ProbaArray prob,
p = p_ctx[2];
}
out[kZigzag[n]] = VP8GetSigned(br, v) * dq[n > 0];
if (n < 15 && !VP8GetBit(br, p[0])) { // EOB
return n + 1;
}
}
}
return 16;
}
// Alias-safe way of converting 4bytes to 32bits.
typedef union {
uint8_t i8[4];
uint32_t i32;
} PackedNz;
static WEBP_INLINE uint32_t NzCodeBits(uint32_t nz_coeffs, int nz, int dc_nz) {
nz_coeffs <<= 2;
nz_coeffs |= (nz > 3) ? 3 : (nz > 1) ? 2 : dc_nz;
return nz_coeffs;
}
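Worked example (added for clarity, not present in the original change): packing one row of four luma blocks through NzCodeBits above. With last-non-zero positions nz = 5, 2, 0, 0 and dc_nz = 1, 1, 1, 0, the codes come out as 3, 2, 1, 0, with the most recently processed block in the low two bits (assert from <assert.h> is only used to spell out the expected values):
// Illustrative only; relies on NzCodeBits defined just above.
static void NzCodeBitsExample(void) {
  uint32_t nz = 0;
  nz = NzCodeBits(nz, 5, 1); assert(nz == 0x03); // code 3: more than 3 coeffs
  nz = NzCodeBits(nz, 2, 1); assert(nz == 0x0e); // code 2: some AC present
  nz = NzCodeBits(nz, 0, 1); assert(nz == 0x39); // code 1: DC only
  nz = NzCodeBits(nz, 0, 0); assert(nz == 0xe4); // code 0: empty block
}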
// Table to unpack four bits into four bytes
static const PackedNz kUnpackTab[16] = {
{{0, 0, 0, 0}}, {{1, 0, 0, 0}}, {{0, 1, 0, 0}}, {{1, 1, 0, 0}},
{{0, 0, 1, 0}}, {{1, 0, 1, 0}}, {{0, 1, 1, 0}}, {{1, 1, 1, 0}},
{{0, 0, 0, 1}}, {{1, 0, 0, 1}}, {{0, 1, 0, 1}}, {{1, 1, 0, 1}},
{{0, 0, 1, 1}}, {{1, 0, 1, 1}}, {{0, 1, 1, 1}}, {{1, 1, 1, 1}} };
// Macro to pack four LSB of four bytes into four bits.
#if defined(__PPC__) || defined(_M_PPC) || defined(_ARCH_PPC) || \
defined(__BIG_ENDIAN__)
#define PACK_CST 0x08040201U
#else
#define PACK_CST 0x01020408U
#endif
#define PACK(X, S) ((((X).i32 * PACK_CST) & 0xff000000) >> (S))
static void ParseResiduals(VP8Decoder* const dec,
VP8MB* const mb, VP8BitReader* const token_br) {
int out_t_nz, out_l_nz, first;
ProbaArray ac_prob;
const VP8QuantMatrix* q = &dec->dqm_[dec->segment_];
int16_t* dst = dec->coeffs_;
static int ParseResiduals(VP8Decoder* const dec,
VP8MB* const mb, VP8BitReader* const token_br) {
VP8BandProbas (* const bands)[NUM_BANDS] = dec->proba_.bands_;
const VP8BandProbas* ac_proba;
const VP8QuantMatrix* const q = &dec->dqm_[dec->segment_];
VP8MBData* const block = dec->mb_data_ + dec->mb_x_;
int16_t* dst = block->coeffs_;
VP8MB* const left_mb = dec->mb_info_ - 1;
PackedNz nz_ac, nz_dc;
PackedNz tnz, lnz;
uint32_t non_zero_ac = 0;
uint32_t non_zero_dc = 0;
uint8_t tnz, lnz;
uint32_t non_zero_y = 0;
uint32_t non_zero_uv = 0;
int x, y, ch;
uint32_t out_t_nz, out_l_nz;
int first;
nz_dc.i32 = nz_ac.i32 = 0;
memset(dst, 0, 384 * sizeof(*dst));
if (!dec->is_i4x4_) { // parse DC
if (!block->is_i4x4_) { // parse DC
int16_t dc[16] = { 0 };
const int ctx = mb->dc_nz_ + left_mb->dc_nz_;
mb->dc_nz_ = left_mb->dc_nz_ =
(GetCoeffs(token_br, (ProbaArray)dec->proba_.coeffs_[1],
ctx, q->y2_mat_, 0, dc) > 0);
const int ctx = mb->nz_dc_ + left_mb->nz_dc_;
const int nz = GetCoeffs(token_br, bands[1], ctx, q->y2_mat_, 0, dc);
mb->nz_dc_ = left_mb->nz_dc_ = (nz > 0);
if (nz > 1) { // more than just the DC -> perform the full transform
VP8TransformWHT(dc, dst);
} else { // only DC is non-zero -> inlined simplified transform
int i;
const int dc0 = (dc[0] + 3) >> 3;
for (i = 0; i < 16 * 16; i += 16) dst[i] = dc0;
}
first = 1;
ac_prob = (ProbaArray)dec->proba_.coeffs_[0];
VP8TransformWHT(dc, dst);
ac_proba = bands[0];
} else {
first = 0;
ac_prob = (ProbaArray)dec->proba_.coeffs_[3];
ac_proba = bands[3];
}
tnz = kUnpackTab[mb->nz_ & 0xf];
lnz = kUnpackTab[left_mb->nz_ & 0xf];
tnz = mb->nz_ & 0x0f;
lnz = left_mb->nz_ & 0x0f;
for (y = 0; y < 4; ++y) {
int l = lnz.i8[y];
int l = lnz & 1;
uint32_t nz_coeffs = 0;
for (x = 0; x < 4; ++x) {
const int ctx = l + tnz.i8[x];
const int nz = GetCoeffs(token_br, ac_prob, ctx,
q->y1_mat_, first, dst);
tnz.i8[x] = l = (nz > 0);
nz_dc.i8[x] = (dst[0] != 0);
nz_ac.i8[x] = (nz > 1);
const int ctx = l + (tnz & 1);
const int nz = GetCoeffs(token_br, ac_proba, ctx, q->y1_mat_, first, dst);
l = (nz > first);
tnz = (tnz >> 1) | (l << 7);
nz_coeffs = NzCodeBits(nz_coeffs, nz, dst[0] != 0);
dst += 16;
}
lnz.i8[y] = l;
non_zero_dc |= PACK(nz_dc, 24 - y * 4);
non_zero_ac |= PACK(nz_ac, 24 - y * 4);
tnz >>= 4;
lnz = (lnz >> 1) | (l << 7);
non_zero_y = (non_zero_y << 8) | nz_coeffs;
}
out_t_nz = PACK(tnz, 24);
out_l_nz = PACK(lnz, 24);
out_t_nz = tnz;
out_l_nz = lnz >> 4;
tnz = kUnpackTab[mb->nz_ >> 4];
lnz = kUnpackTab[left_mb->nz_ >> 4];
for (ch = 0; ch < 4; ch += 2) {
uint32_t nz_coeffs = 0;
tnz = mb->nz_ >> (4 + ch);
lnz = left_mb->nz_ >> (4 + ch);
for (y = 0; y < 2; ++y) {
int l = lnz.i8[ch + y];
int l = lnz & 1;
for (x = 0; x < 2; ++x) {
const int ctx = l + tnz.i8[ch + x];
const int nz =
GetCoeffs(token_br, (ProbaArray)dec->proba_.coeffs_[2],
ctx, q->uv_mat_, 0, dst);
tnz.i8[ch + x] = l = (nz > 0);
nz_dc.i8[y * 2 + x] = (dst[0] != 0);
nz_ac.i8[y * 2 + x] = (nz > 1);
const int ctx = l + (tnz & 1);
const int nz = GetCoeffs(token_br, bands[2], ctx, q->uv_mat_, 0, dst);
l = (nz > 0);
tnz = (tnz >> 1) | (l << 3);
nz_coeffs = NzCodeBits(nz_coeffs, nz, dst[0] != 0);
dst += 16;
}
lnz.i8[ch + y] = l;
tnz >>= 2;
lnz = (lnz >> 1) | (l << 5);
}
non_zero_dc |= PACK(nz_dc, 8 - ch * 2);
non_zero_ac |= PACK(nz_ac, 8 - ch * 2);
// Note: we don't really need the per-4x4 details for U/V blocks.
non_zero_uv |= nz_coeffs << (4 * ch);
out_t_nz |= (tnz << 4) << ch;
out_l_nz |= (lnz & 0xf0) << ch;
}
out_t_nz |= PACK(tnz, 20);
out_l_nz |= PACK(lnz, 20);
mb->nz_ = out_t_nz;
left_mb->nz_ = out_l_nz;
dec->non_zero_ac_ = non_zero_ac;
dec->non_zero_ = non_zero_ac | non_zero_dc;
mb->skip_ = !dec->non_zero_;
block->non_zero_y_ = non_zero_y;
block->non_zero_uv_ = non_zero_uv;
// We look at the mode-code of each block and check if some blocks have less
// than three non-zero coeffs (code < 2). This is to avoid dithering flat and
// empty blocks.
block->dither_ = (non_zero_uv & 0xaaaa) ? 0 : q->dither_;
return !(non_zero_y | non_zero_uv); // will be used for further optimization
}
#undef PACK
//------------------------------------------------------------------------------
// Main loop
@ -633,7 +572,9 @@ static void ParseResiduals(VP8Decoder* const dec,
int VP8DecodeMB(VP8Decoder* const dec, VP8BitReader* const token_br) {
VP8BitReader* const br = &dec->br_;
VP8MB* const left = dec->mb_info_ - 1;
VP8MB* const info = dec->mb_info_ + dec->mb_x_;
VP8MB* const mb = dec->mb_info_ + dec->mb_x_;
VP8MBData* const block = dec->mb_data_ + dec->mb_x_;
int skip;
// Note: we don't save segment map (yet), as we don't expect
// to decode more than 1 keyframe.
@ -643,71 +584,64 @@ int VP8DecodeMB(VP8Decoder* const dec, VP8BitReader* const token_br) {
VP8GetBit(br, dec->proba_.segments_[1]) :
2 + VP8GetBit(br, dec->proba_.segments_[2]);
}
info->skip_ = dec->use_skip_proba_ ? VP8GetBit(br, dec->skip_p_) : 0;
skip = dec->use_skip_proba_ ? VP8GetBit(br, dec->skip_p_) : 0;
VP8ParseIntraMode(br, dec);
if (br->eof_) {
return 0;
}
if (!info->skip_) {
ParseResiduals(dec, info, token_br);
if (!skip) {
skip = ParseResiduals(dec, mb, token_br);
} else {
left->nz_ = info->nz_ = 0;
if (!dec->is_i4x4_) {
left->dc_nz_ = info->dc_nz_ = 0;
left->nz_ = mb->nz_ = 0;
if (!block->is_i4x4_) {
left->nz_dc_ = mb->nz_dc_ = 0;
}
dec->non_zero_ = 0;
dec->non_zero_ac_ = 0;
block->non_zero_y_ = 0;
block->non_zero_uv_ = 0;
}
if (dec->filter_type_ > 0) { // store filter info
VP8FInfo* const finfo = dec->f_info_ + dec->mb_x_;
*finfo = dec->fstrengths_[dec->segment_][dec->is_i4x4_];
finfo->f_inner_ = (!info->skip_ || dec->is_i4x4_);
*finfo = dec->fstrengths_[dec->segment_][block->is_i4x4_];
finfo->f_inner_ |= !skip;
}
return (!token_br->eof_);
return !token_br->eof_;
}
void VP8InitScanline(VP8Decoder* const dec) {
VP8MB* const left = dec->mb_info_ - 1;
left->nz_ = 0;
left->dc_nz_ = 0;
left->nz_dc_ = 0;
memset(dec->intra_l_, B_DC_PRED, sizeof(dec->intra_l_));
dec->filter_row_ =
(dec->filter_type_ > 0) &&
(dec->mb_y_ >= dec->tl_mb_y_) && (dec->mb_y_ <= dec->br_mb_y_);
dec->mb_x_ = 0;
}
static int ParseFrame(VP8Decoder* const dec, VP8Io* io) {
for (dec->mb_y_ = 0; dec->mb_y_ < dec->br_mb_y_; ++dec->mb_y_) {
// Parse bitstream for this row.
VP8BitReader* const token_br =
&dec->parts_[dec->mb_y_ & (dec->num_parts_ - 1)];
VP8InitScanline(dec);
for (dec->mb_x_ = 0; dec->mb_x_ < dec->mb_w_; dec->mb_x_++) {
for (; dec->mb_x_ < dec->mb_w_; ++dec->mb_x_) {
if (!VP8DecodeMB(dec, token_br)) {
return VP8SetError(dec, VP8_STATUS_NOT_ENOUGH_DATA,
"Premature end-of-file encountered.");
}
// Reconstruct and emit samples.
VP8ReconstructBlock(dec);
}
VP8InitScanline(dec); // Prepare for next scanline
// Reconstruct, filter and emit the row.
if (!VP8ProcessRow(dec, io)) {
return VP8SetError(dec, VP8_STATUS_USER_ABORT, "Output aborted.");
}
}
if (dec->use_threads_ && !WebPWorkerSync(&dec->worker_)) {
return 0;
if (dec->mt_method_ > 0) {
if (!WebPWorkerSync(&dec->worker_)) return 0;
}
// Finish
#ifndef ONLY_KEYFRAME_CODE
if (!dec->update_proba_) {
dec->proba_ = dec->proba_saved_;
}
#endif
#ifdef WEBP_EXPERIMENTAL_FEATURES
if (dec->layer_data_size_ > 0) {
if (!VP8DecodeLayer(dec)) {
@ -763,12 +697,12 @@ void VP8Clear(VP8Decoder* const dec) {
if (dec == NULL) {
return;
}
if (dec->use_threads_) {
if (dec->mt_method_ > 0) {
WebPWorkerEnd(&dec->worker_);
}
if (dec->mem_) {
free(dec->mem_);
}
ALPHDelete(dec->alph_dec_);
dec->alph_dec_ = NULL;
free(dec->mem_);
dec->mem_ = NULL;
dec->mem_size_ = 0;
memset(&dec->br_, 0, sizeof(dec->br_));
@ -777,6 +711,3 @@ void VP8Clear(VP8Decoder* const dec) {
//------------------------------------------------------------------------------
#if defined(__cplusplus) || defined(c_plusplus)
} // extern "C"
#endif

View File

@ -1,8 +1,10 @@
// Copyright 2010 Google Inc. All Rights Reserved.
//
// This code is licensed under the same terms as WebM:
// Software License Agreement: http://www.webmproject.org/license/software/
// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// VP8 decoder: internal header.
@ -15,10 +17,11 @@
#include <string.h> // for memcpy()
#include "./vp8li.h"
#include "../utils/bit_reader.h"
#include "../utils/random.h"
#include "../utils/thread.h"
#include "../dsp/dsp.h"
#if defined(__cplusplus) || defined(c_plusplus)
#ifdef __cplusplus
extern "C" {
#endif
@ -27,11 +30,9 @@ extern "C" {
// version numbers
#define DEC_MAJ_VERSION 0
#define DEC_MIN_VERSION 3
#define DEC_MIN_VERSION 4
#define DEC_REV_VERSION 0
#define ONLY_KEYFRAME_CODE // to remove any code related to P-Frames
// intra prediction modes
enum { B_DC_PRED = 0, // 4x4 modes
B_TM_PRED,
@ -98,6 +99,9 @@ enum { MB_FEATURE_TREE_PROBS = 3,
#define U_OFF (Y_OFF + BPS * 16 + BPS)
#define V_OFF (U_OFF + 16)
// minimal width under which lossy multi-threading is always disabled
#define MIN_WIDTH_FOR_THREADS 512
//------------------------------------------------------------------------------
// Headers
@ -126,15 +130,19 @@ typedef struct {
int8_t filter_strength_[NUM_MB_SEGMENTS]; // filter strength for segments
} VP8SegmentHeader;
// probas associated to one of the contexts
typedef uint8_t VP8ProbaArray[NUM_PROBAS];
typedef struct { // all the probas associated to one band
VP8ProbaArray probas_[NUM_CTX];
} VP8BandProbas;
// Struct collecting all frame-persistent probabilities.
typedef struct {
uint8_t segments_[MB_FEATURE_TREE_PROBS];
// Type: 0:Intra16-AC 1:Intra16-DC 2:Chroma 3:Intra4
uint8_t coeffs_[NUM_TYPES][NUM_BANDS][NUM_CTX][NUM_PROBAS];
#ifndef ONLY_KEYFRAME_CODE
uint8_t ymode_[4], uvmode_[3];
uint8_t mv_[2][NUM_MV_PROBAS];
#endif
VP8BandProbas bands_[NUM_TYPES][NUM_BANDS];
} VP8Proba;
// Filter parameters
@ -151,32 +159,59 @@ typedef struct {
// Information about the macroblocks.
typedef struct { // filter specs
unsigned int f_level_:6; // filter strength: 0..63
unsigned int f_ilevel_:6; // inner limit: 1..63
unsigned int f_inner_:1; // do inner filtering?
uint8_t f_limit_; // filter limit in [3..189], or 0 if no filtering
uint8_t f_ilevel_; // inner limit in [1..63]
uint8_t f_inner_; // do inner filtering?
uint8_t hev_thresh_; // high edge variance threshold in [0..2]
} VP8FInfo;
typedef struct { // used for syntax-parsing
unsigned int nz_:24; // non-zero AC/DC coeffs (24bit)
unsigned int dc_nz_:1; // non-zero DC coeffs
unsigned int skip_:1; // block type
typedef struct { // Top/Left Contexts used for syntax-parsing
uint8_t nz_; // non-zero AC/DC coeffs (4bit for luma + 4bit for chroma)
uint8_t nz_dc_; // non-zero DC coeff (1bit)
} VP8MB;
// Dequantization matrices
typedef int quant_t[2]; // [DC / AC]. Can be 'uint16_t[2]' too (~slower).
typedef struct {
quant_t y1_mat_, y2_mat_, uv_mat_;
int uv_quant_; // U/V quantizer value
int dither_; // dithering amplitude (0 = off, max=255)
} VP8QuantMatrix;
// Data needed to reconstruct a macroblock
typedef struct {
int16_t coeffs_[384]; // 384 coeffs = (16+4+4) * 4*4
uint8_t is_i4x4_; // true if intra4x4
uint8_t imodes_[16]; // one 16x16 mode (#0) or sixteen 4x4 modes
uint8_t uvmode_; // chroma prediction mode
// bit-wise info about the content of each sub-4x4 block (in decoding order).
// Each of the 4x4 blocks for y/u/v is associated with a 2b code according to:
// code=0 -> no coefficient
// code=1 -> only DC
// code=2 -> first three coefficients are non-zero
// code=3 -> more than three coefficients are non-zero
// This allows calling specialized transform functions.
uint32_t non_zero_y_;
uint32_t non_zero_uv_;
uint8_t dither_; // local dithering strength (deduced from non_zero_*)
} VP8MBData;
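For orientation (a sketch derived from how ReconstructRow and ParseResiduals use these fields elsewhere in this diff; not an accessor that exists in the library): luma block #0, in decoding order, occupies the two most-significant bits of non_zero_y_, so the code of block n can be read as:
// Illustrative only; the decoder itself walks the word with 'bits <<= 2'
// and dispatches on 'bits >> 30' in DoTransform().
static int LumaBlockCode(uint32_t non_zero_y, int n) {  // n in [0..15]
  return (int)((non_zero_y >> (30 - 2 * n)) & 3);
}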
// Persistent information needed by the parallel processing
typedef struct {
int id_; // cache row to process (in [0..2])
int mb_y_; // macroblock position of the row
int filter_row_; // true if row-filtering is needed
VP8FInfo* f_info_; // filter strengths
VP8Io io_; // copy of the VP8Io to pass to put()
int id_; // cache row to process (in [0..2])
int mb_y_; // macroblock position of the row
int filter_row_; // true if row-filtering is needed
VP8FInfo* f_info_; // filter strengths (swapped with dec->f_info_)
VP8MBData* mb_data_; // reconstruction data (swapped with dec->mb_data_)
VP8Io io_; // copy of the VP8Io to pass to put()
} VP8ThreadContext;
// Saved top samples, per macroblock. Fits into a cache-line.
typedef struct {
uint8_t y[16], u[8], v[8];
} VP8TopSamples;
//------------------------------------------------------------------------------
// VP8Decoder: the main opaque structure handed over to user
@ -196,7 +231,8 @@ struct VP8Decoder {
// Worker
WebPWorker worker_;
int use_threads_; // use multi-thread
int mt_method_; // multi-thread method: 0=off, 1=[parse+recon][filter]
// 2=[parse][recon+filter]
int cache_id_; // current cache row
int num_caches_; // number of cached rows of 16 pixels (1, 2 or 3)
VP8ThreadContext thread_ctx_; // Thread context
@ -213,12 +249,9 @@ struct VP8Decoder {
// per-partition boolean decoders.
VP8BitReader parts_[MAX_NUM_PARTITIONS];
// buffer refresh flags
// bit 0: refresh Gold, bit 1: refresh Alt
// bit 2-3: copy to Gold, bit 4-5: copy to Alt
// bit 6: Gold sign bias, bit 7: Alt sign bias
// bit 8: refresh last frame
uint32_t buffer_flags_;
// Dithering strength, deduced from decoding options
int dither_; // whether to use dithering or not
VP8Random dithering_rg_; // random generator for dithering
// dequantization (one set of DC/AC dequant factor per segment)
VP8QuantMatrix dqm_[NUM_MB_SEGMENTS];
@ -227,24 +260,19 @@ struct VP8Decoder {
VP8Proba proba_;
int use_skip_proba_;
uint8_t skip_p_;
#ifndef ONLY_KEYFRAME_CODE
uint8_t intra_p_, last_p_, golden_p_;
VP8Proba proba_saved_;
int update_proba_;
#endif
// Boundary data cache and persistent buffers.
uint8_t* intra_t_; // top intra modes values: 4 * mb_w_
uint8_t intra_l_[4]; // left intra modes values
uint8_t* y_t_; // top luma samples: 16 * mb_w_
uint8_t* u_t_, *v_t_; // top u/v samples: 8 * mb_w_ each
uint8_t* intra_t_; // top intra modes values: 4 * mb_w_
uint8_t intra_l_[4]; // left intra modes values
VP8MB* mb_info_; // contextual macroblock info (mb_w_ + 1)
VP8FInfo* f_info_; // filter strength info
uint8_t* yuv_b_; // main block for Y/U/V (size = YUV_SIZE)
int16_t* coeffs_; // 384 coeffs = (16+8+8) * 4*4
uint8_t segment_; // segment of the currently parsed block
VP8TopSamples* yuv_t_; // top y/u/v samples
uint8_t* cache_y_; // macroblock row for storing unfiltered samples
VP8MB* mb_info_; // contextual macroblock info (mb_w_ + 1)
VP8FInfo* f_info_; // filter strength info
uint8_t* yuv_b_; // main block for Y/U/V (size = YUV_SIZE)
uint8_t* cache_y_; // macroblock row for storing unfiltered samples
uint8_t* cache_u_;
uint8_t* cache_v_;
int cache_y_stride_;
@ -256,28 +284,20 @@ struct VP8Decoder {
// Per macroblock non-persistent infos.
int mb_x_, mb_y_; // current position, in macroblock units
uint8_t is_i4x4_; // true if intra4x4
uint8_t imodes_[16]; // one 16x16 mode (#0) or sixteen 4x4 modes
uint8_t uvmode_; // chroma prediction mode
uint8_t segment_; // block's segment
// bit-wise info about the content of each sub-4x4 blocks: there are 16 bits
// for luma (bits #0->#15), then 4 bits for chroma-u (#16->#19) and 4 bits for
// chroma-v (#20->#23), each corresponding to one 4x4 block in decoding order.
// If the bit is set, the 4x4 block contains some non-zero coefficients.
uint32_t non_zero_;
uint32_t non_zero_ac_;
VP8MBData* mb_data_; // parsed reconstruction data
// Filtering side-info
int filter_type_; // 0=off, 1=simple, 2=complex
int filter_row_; // per-row flag
VP8FInfo fstrengths_[NUM_MB_SEGMENTS][2]; // precalculated per-segment/type
// extensions
const uint8_t* alpha_data_; // compressed alpha data (if present)
// Alpha
struct ALPHDecoder* alph_dec_; // alpha-plane decoder object
const uint8_t* alpha_data_; // compressed alpha data (if present)
size_t alpha_data_size_;
int is_alpha_decoded_; // true if alpha_data_ is decoded in alpha_plane_
uint8_t* alpha_plane_; // output. Persistent, contains the whole data.
// extensions
int layer_colorspace_;
const uint8_t* layer_data_; // compressed layer data (if present)
size_t layer_data_size_;
@ -300,8 +320,6 @@ void VP8ParseQuant(VP8Decoder* const dec);
// in frame.c
int VP8InitFrame(VP8Decoder* const dec, VP8Io* io);
// Predict a block and add residual
void VP8ReconstructBlock(VP8Decoder* const dec);
// Call io->setup() and finish setting up scan parameters.
// After this call returns, one must always call VP8ExitCritical() with the
// same parameters. Both functions should be used in pair. Returns VP8_STATUS_OK
@ -310,7 +328,15 @@ VP8StatusCode VP8EnterCritical(VP8Decoder* const dec, VP8Io* const io);
// Must always be called in pair with VP8EnterCritical().
// Returns false in case of error.
int VP8ExitCritical(VP8Decoder* const dec, VP8Io* const io);
// Process the last decoded row (filtering + output)
// Return the multi-threading method to use (0=off), depending
// on options and bitstream size. Only for lossy decoding.
int VP8GetThreadMethod(const WebPDecoderOptions* const options,
const WebPHeaderStructure* const headers,
int width, int height);
// Initialize dithering post-process if needed.
void VP8InitDithering(const WebPDecoderOptions* const options,
VP8Decoder* const dec);
// Process the last decoded row (filtering + output).
int VP8ProcessRow(VP8Decoder* const dec, VP8Io* const io);
// To be called at the start of a new scanline, to initialize predictors.
void VP8InitScanline(VP8Decoder* const dec);
@ -326,7 +352,7 @@ int VP8DecodeLayer(VP8Decoder* const dec);
//------------------------------------------------------------------------------
#if defined(__cplusplus) || defined(c_plusplus)
#ifdef __cplusplus
} // extern "C"
#endif

View File

@ -1,8 +1,10 @@
// Copyright 2012 Google Inc. All Rights Reserved.
//
// This code is licensed under the same terms as WebM:
// Software License Agreement: http://www.webmproject.org/license/software/
// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// main entry for the decoder
@ -12,16 +14,14 @@
#include <stdio.h>
#include <stdlib.h>
#include "./alphai.h"
#include "./vp8li.h"
#include "../dsp/lossless.h"
#include "../dsp/yuv.h"
#include "../utils/alpha_processing.h"
#include "../utils/huffman.h"
#include "../utils/utils.h"
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
#define NUM_ARGB_CACHE_ROWS 16
static const int kCodeLengthLiterals = 16;
@ -57,7 +57,7 @@ static const uint8_t kCodeLengthCodeOrder[NUM_CODE_LENGTH_CODES] = {
};
#define CODE_TO_PLANE_CODES 120
static const uint8_t code_to_plane_lut[CODE_TO_PLANE_CODES] = {
static const uint8_t kCodeToPlane[CODE_TO_PLANE_CODES] = {
0x18, 0x07, 0x17, 0x19, 0x28, 0x06, 0x27, 0x29, 0x16, 0x1a,
0x26, 0x2a, 0x38, 0x05, 0x37, 0x39, 0x15, 0x1b, 0x36, 0x3a,
0x25, 0x2b, 0x48, 0x04, 0x47, 0x49, 0x14, 0x1c, 0x35, 0x3b,
@ -80,20 +80,19 @@ static int DecodeImageStream(int xsize, int ysize,
//------------------------------------------------------------------------------
int VP8LCheckSignature(const uint8_t* const data, size_t size) {
return (size >= 1) && (data[0] == VP8L_MAGIC_BYTE);
return (size >= VP8L_FRAME_HEADER_SIZE &&
data[0] == VP8L_MAGIC_BYTE &&
(data[4] >> 5) == 0); // version
}
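// Worked example (illustrative): the 5-byte prefix 0x2f 0xff 0xff 0x3f 0x00
// passes this check -- data[0] is VP8L_MAGIC_BYTE (0x2f) and the version
// field (the top three bits of data[4]) is zero; any non-zero version bits
// are rejected.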
static int ReadImageInfo(VP8LBitReader* const br,
int* const width, int* const height,
int* const has_alpha) {
const uint8_t signature = VP8LReadBits(br, 8);
if (!VP8LCheckSignature(&signature, 1)) {
return 0;
}
if (VP8LReadBits(br, 8) != VP8L_MAGIC_BYTE) return 0;
*width = VP8LReadBits(br, VP8L_IMAGE_SIZE_BITS) + 1;
*height = VP8LReadBits(br, VP8L_IMAGE_SIZE_BITS) + 1;
*has_alpha = VP8LReadBits(br, 1);
VP8LReadBits(br, VP8L_VERSION_BITS); // Read/ignore the version number.
if (VP8LReadBits(br, VP8L_VERSION_BITS) != 0) return 0;
return 1;
}
@ -101,6 +100,8 @@ int VP8LGetInfo(const uint8_t* data, size_t data_size,
int* const width, int* const height, int* const has_alpha) {
if (data == NULL || data_size < VP8L_FRAME_HEADER_SIZE) {
return 0; // not enough data
} else if (!VP8LCheckSignature(data, data_size)) {
return 0; // bad signature
} else {
int w, h, a;
VP8LBitReader br;
@ -138,11 +139,11 @@ static WEBP_INLINE int PlaneCodeToDistance(int xsize, int plane_code) {
if (plane_code > CODE_TO_PLANE_CODES) {
return plane_code - CODE_TO_PLANE_CODES;
} else {
const int dist_code = code_to_plane_lut[plane_code - 1];
const int dist_code = kCodeToPlane[plane_code - 1];
const int yoffset = dist_code >> 4;
const int xoffset = 8 - (dist_code & 0xf);
const int dist = yoffset * xsize + xoffset;
return (dist >= 1) ? dist : 1;
return (dist >= 1) ? dist : 1; // dist<1 can happen if xsize is very small
}
}
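// Worked example (illustrative): plane_code 1 maps to kCodeToPlane[0] = 0x18,
// i.e. yoffset = 1 and xoffset = 8 - 8 = 0, so the distance is exactly one
// row (xsize pixels). Plane codes above CODE_TO_PLANE_CODES fall through to a
// plain linear distance of (plane_code - CODE_TO_PLANE_CODES).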
@ -153,15 +154,27 @@ static WEBP_INLINE int PlaneCodeToDistance(int xsize, int plane_code) {
static WEBP_INLINE int ReadSymbol(const HuffmanTree* tree,
VP8LBitReader* const br) {
const HuffmanTreeNode* node = tree->root_;
int num_bits = 0;
uint32_t bits = VP8LPrefetchBits(br);
int bitpos = br->bit_pos_;
// Check if we find the bit combination from the Huffman lookup table.
const int lut_ix = bits & (HUFF_LUT - 1);
const int lut_bits = tree->lut_bits_[lut_ix];
if (lut_bits <= HUFF_LUT_BITS) {
VP8LSetBitPos(br, bitpos + lut_bits);
return tree->lut_symbol_[lut_ix];
}
node += tree->lut_jump_[lut_ix];
bitpos += HUFF_LUT_BITS;
bits >>= HUFF_LUT_BITS;
// Decode the value from a binary tree.
assert(node != NULL);
while (!HuffmanTreeNodeIsLeaf(node)) {
do {
node = HuffmanTreeNextNode(node, bits & 1);
bits >>= 1;
++num_bits;
}
VP8LDiscardBits(br, num_bits);
++bitpos;
} while (HuffmanTreeNodeIsNotLeaf(node));
VP8LSetBitPos(br, bitpos);
return node->symbol_;
}
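// A minimal sketch of the idea behind the lookup-table fast path above, using
// hypothetical names: codes no longer than the table width are resolved in a
// single read; longer codes fall back to a bit-by-bit tree walk (not shown).
#include <stdint.h>
#define EXAMPLE_LUT_BITS 7
typedef struct { uint8_t length; int16_t symbol; } ExampleLutEntry;
// 'bits' holds at least EXAMPLE_LUT_BITS upcoming bits, least-significant
// first. Returns the decoded symbol, or -1 when the code is longer than the
// table covers and the slow path must take over.
static int ExampleLutDecode(const ExampleLutEntry* const lut, uint32_t bits,
                            int* const bits_used) {
  const ExampleLutEntry e = lut[bits & ((1u << EXAMPLE_LUT_BITS) - 1)];
  if (e.length == 0 || e.length > EXAMPLE_LUT_BITS) return -1;
  *bits_used = e.length;
  return e.symbol;
}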
@ -402,12 +415,13 @@ static int AllocateAndInitRescaler(VP8LDecoder* const dec, VP8Io* const io) {
// We have a special "export" function since we need to convert from BGRA
static int Export(WebPRescaler* const rescaler, WEBP_CSP_MODE colorspace,
int rgba_stride, uint8_t* const rgba) {
const uint32_t* const src = (const uint32_t*)rescaler->dst;
uint32_t* const src = (uint32_t*)rescaler->dst;
const int dst_width = rescaler->dst_width;
int num_lines_out = 0;
while (WebPRescalerHasPendingOutput(rescaler)) {
uint8_t* const dst = rgba + num_lines_out * rgba_stride;
WebPRescalerExportRow(rescaler);
WebPMultARGBRow(src, dst_width, 1);
VP8LConvertFromBGRA(src, dst_width, colorspace, dst);
++num_lines_out;
}
@ -415,18 +429,22 @@ static int Export(WebPRescaler* const rescaler, WEBP_CSP_MODE colorspace,
}
// Emit scaled rows.
static int EmitRescaledRows(const VP8LDecoder* const dec,
const uint32_t* const data, int in_stride, int mb_h,
uint8_t* const out, int out_stride) {
static int EmitRescaledRowsRGBA(const VP8LDecoder* const dec,
uint8_t* in, int in_stride, int mb_h,
uint8_t* const out, int out_stride) {
const WEBP_CSP_MODE colorspace = dec->output_->colorspace;
const uint8_t* const in = (const uint8_t*)data;
int num_lines_in = 0;
int num_lines_out = 0;
while (num_lines_in < mb_h) {
const uint8_t* const row_in = in + num_lines_in * in_stride;
uint8_t* const row_in = in + num_lines_in * in_stride;
uint8_t* const row_out = out + num_lines_out * out_stride;
num_lines_in += WebPRescalerImport(dec->rescaler, mb_h - num_lines_in,
row_in, in_stride);
const int lines_left = mb_h - num_lines_in;
const int needed_lines = WebPRescaleNeededLines(dec->rescaler, lines_left);
assert(needed_lines > 0 && needed_lines <= lines_left);
WebPMultARGBRows(row_in, in_stride,
dec->rescaler->src_width, needed_lines, 0);
WebPRescalerImport(dec->rescaler, lines_left, row_in, in_stride);
num_lines_in += needed_lines;
num_lines_out += Export(dec->rescaler, colorspace, out_stride, row_out);
}
return num_lines_out;
@ -434,11 +452,10 @@ static int EmitRescaledRows(const VP8LDecoder* const dec,
// Emit rows without any scaling.
static int EmitRows(WEBP_CSP_MODE colorspace,
const uint32_t* const data, int in_stride,
const uint8_t* row_in, int in_stride,
int mb_w, int mb_h,
uint8_t* const out, int out_stride) {
int lines = mb_h;
const uint8_t* row_in = (const uint8_t*)data;
uint8_t* row_out = out;
while (lines-- > 0) {
VP8LConvertFromBGRA((const uint32_t*)row_in, mb_w, colorspace, row_out);
@ -460,7 +477,8 @@ static void ConvertToYUVA(const uint32_t* const src, int width, int y_pos,
uint8_t* const y = buf->y + y_pos * buf->y_stride;
for (i = 0; i < width; ++i) {
const uint32_t p = src[i];
y[i] = VP8RGBToY((p >> 16) & 0xff, (p >> 8) & 0xff, (p >> 0) & 0xff);
y[i] = VP8RGBToY((p >> 16) & 0xff, (p >> 8) & 0xff, (p >> 0) & 0xff,
YUV_HALF);
}
}
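// A minimal illustrative sketch of the kind of fixed-point RGB->Y conversion
// performed here, using BT.601-style weights; it does not reproduce the exact
// coefficients or rounding of VP8RGBToY.
#include <stdint.h>
static uint8_t ExampleRGBToY(int r, int g, int b) {
  // 16-bit fixed-point weights (~0.257*r + 0.504*g + 0.098*b + 16); the
  // result stays in [16, 235] for 8-bit inputs, so no clipping is needed.
  const int luma = 16839 * r + 33059 * g + 6420 * b;
  return (uint8_t)((luma + (1 << 15) + (16 << 16)) >> 16);
}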
@ -479,11 +497,11 @@ static void ConvertToYUVA(const uint32_t* const src, int width, int y_pos,
const int g = ((v0 >> 7) & 0x1fe) + ((v1 >> 7) & 0x1fe);
const int b = ((v0 << 1) & 0x1fe) + ((v1 << 1) & 0x1fe);
if (!(y_pos & 1)) { // even lines: store values
u[i] = VP8RGBToU(r, g, b);
v[i] = VP8RGBToV(r, g, b);
u[i] = VP8RGBToU(r, g, b, YUV_HALF << 2);
v[i] = VP8RGBToV(r, g, b, YUV_HALF << 2);
} else { // odd lines: average with previous values
const int tmp_u = VP8RGBToU(r, g, b);
const int tmp_v = VP8RGBToV(r, g, b);
const int tmp_u = VP8RGBToU(r, g, b, YUV_HALF << 2);
const int tmp_v = VP8RGBToV(r, g, b, YUV_HALF << 2);
// Approximated average-of-four, but the difference is acceptable.
u[i] = (u[i] + tmp_u + 1) >> 1;
v[i] = (v[i] + tmp_v + 1) >> 1;
@ -495,11 +513,11 @@ static void ConvertToYUVA(const uint32_t* const src, int width, int y_pos,
const int g = (v0 >> 6) & 0x3fc;
const int b = (v0 << 2) & 0x3fc;
if (!(y_pos & 1)) { // even lines
u[i] = VP8RGBToU(r, g, b);
v[i] = VP8RGBToV(r, g, b);
u[i] = VP8RGBToU(r, g, b, YUV_HALF << 2);
v[i] = VP8RGBToV(r, g, b, YUV_HALF << 2);
} else { // odd lines (note: we could just skip this)
const int tmp_u = VP8RGBToU(r, g, b);
const int tmp_v = VP8RGBToV(r, g, b);
const int tmp_u = VP8RGBToU(r, g, b, YUV_HALF << 2);
const int tmp_v = VP8RGBToV(r, g, b, YUV_HALF << 2);
u[i] = (u[i] + tmp_u + 1) >> 1;
v[i] = (v[i] + tmp_v + 1) >> 1;
}
@ -515,11 +533,12 @@ static void ConvertToYUVA(const uint32_t* const src, int width, int y_pos,
static int ExportYUVA(const VP8LDecoder* const dec, int y_pos) {
WebPRescaler* const rescaler = dec->rescaler;
const uint32_t* const src = (const uint32_t*)rescaler->dst;
uint32_t* const src = (uint32_t*)rescaler->dst;
const int dst_width = rescaler->dst_width;
int num_lines_out = 0;
while (WebPRescalerHasPendingOutput(rescaler)) {
WebPRescalerExportRow(rescaler);
WebPMultARGBRow(src, dst_width, 1);
ConvertToYUVA(src, dst_width, y_pos, dec->output_);
++y_pos;
++num_lines_out;
@ -528,28 +547,28 @@ static int ExportYUVA(const VP8LDecoder* const dec, int y_pos) {
}
static int EmitRescaledRowsYUVA(const VP8LDecoder* const dec,
const uint32_t* const data,
int in_stride, int mb_h) {
const uint8_t* const in = (const uint8_t*)data;
uint8_t* in, int in_stride, int mb_h) {
int num_lines_in = 0;
int y_pos = dec->last_out_row_;
while (num_lines_in < mb_h) {
const uint8_t* const row_in = in + num_lines_in * in_stride;
num_lines_in += WebPRescalerImport(dec->rescaler, mb_h - num_lines_in,
row_in, in_stride);
const int lines_left = mb_h - num_lines_in;
const int needed_lines = WebPRescaleNeededLines(dec->rescaler, lines_left);
WebPMultARGBRows(in, in_stride, dec->rescaler->src_width, needed_lines, 0);
WebPRescalerImport(dec->rescaler, lines_left, in, in_stride);
num_lines_in += needed_lines;
in += needed_lines * in_stride;
y_pos += ExportYUVA(dec, y_pos);
}
return y_pos;
}
static int EmitRowsYUVA(const VP8LDecoder* const dec,
const uint32_t* const data, int in_stride,
const uint8_t* in, int in_stride,
int mb_w, int num_rows) {
int y_pos = dec->last_out_row_;
const uint8_t* row_in = (const uint8_t*)data;
while (num_rows-- > 0) {
ConvertToYUVA((const uint32_t*)row_in, mb_w, y_pos, dec->output_);
row_in += in_stride;
ConvertToYUVA((const uint32_t*)in, mb_w, y_pos, dec->output_);
in += in_stride;
++y_pos;
}
return y_pos;
@ -560,11 +579,11 @@ static int EmitRowsYUVA(const VP8LDecoder* const dec,
// Sets io->mb_y, io->mb_h & io->mb_w according to start row, end row and
// crop options. Also updates the input data pointer, so that it points to the
// start of the cropped window.
// Note that 'pixel_stride' is in units of 'uint32_t' (and not 'bytes).
// start of the cropped window. Note that pixels are in ARGB format even if
// 'in_data' is uint8_t*.
// Returns true if the crop window is not empty.
static int SetCropWindow(VP8Io* const io, int y_start, int y_end,
const uint32_t** const in_data, int pixel_stride) {
uint8_t** const in_data, int pixel_stride) {
assert(y_start < y_end);
assert(io->crop_left < io->crop_right);
if (y_end > io->crop_bottom) {
@ -573,11 +592,11 @@ static int SetCropWindow(VP8Io* const io, int y_start, int y_end,
if (y_start < io->crop_top) {
const int delta = io->crop_top - y_start;
y_start = io->crop_top;
*in_data += pixel_stride * delta;
*in_data += delta * pixel_stride;
}
if (y_start >= y_end) return 0; // Crop window is empty.
*in_data += io->crop_left;
*in_data += io->crop_left * sizeof(uint32_t);
io->mb_y = y_start - io->crop_top;
io->mb_w = io->crop_right - io->crop_left;
@ -625,10 +644,24 @@ static void ApplyInverseTransforms(VP8LDecoder* const dec, int num_rows,
}
}
// Special method for paletted alpha data.
static void ApplyInverseTransformsAlpha(VP8LDecoder* const dec, int num_rows,
const uint8_t* const rows) {
const int start_row = dec->last_row_;
const int end_row = start_row + num_rows;
const uint8_t* rows_in = rows;
uint8_t* rows_out = (uint8_t*)dec->io_->opaque + dec->io_->width * start_row;
VP8LTransform* const transform = &dec->transforms_[0];
assert(dec->next_transform_ == 1);
assert(transform->type_ == COLOR_INDEXING_TRANSFORM);
VP8LColorIndexInverseTransformAlpha(transform, start_row, end_row, rows_in,
rows_out);
}
// Processes (transforms, scales & color-converts) the rows decoded after the
// last call.
static void ProcessRows(VP8LDecoder* const dec, int row) {
const uint32_t* const rows = dec->argb_ + dec->width_ * dec->last_row_;
const uint32_t* const rows = dec->pixels_ + dec->width_ * dec->last_row_;
const int num_rows = row - dec->last_row_;
if (num_rows <= 0) return; // Nothing to be done.
@ -637,18 +670,18 @@ static void ProcessRows(VP8LDecoder* const dec, int row) {
// Emit output.
{
VP8Io* const io = dec->io_;
const uint32_t* rows_data = dec->argb_cache_;
if (!SetCropWindow(io, dec->last_row_, row, &rows_data, io->width)) {
uint8_t* rows_data = (uint8_t*)dec->argb_cache_;
const int in_stride = io->width * sizeof(uint32_t); // in unit of RGBA
if (!SetCropWindow(io, dec->last_row_, row, &rows_data, in_stride)) {
// Nothing to output (this time).
} else {
const WebPDecBuffer* const output = dec->output_;
const int in_stride = io->width * sizeof(*rows_data);
if (output->colorspace < MODE_YUV) { // convert to RGBA
const WebPRGBABuffer* const buf = &output->u.RGBA;
uint8_t* const rgba = buf->rgba + dec->last_out_row_ * buf->stride;
const int num_rows_out = io->use_scaling ?
EmitRescaledRows(dec, rows_data, in_stride, io->mb_h,
rgba, buf->stride) :
EmitRescaledRowsRGBA(dec, rows_data, in_stride, io->mb_h,
rgba, buf->stride) :
EmitRows(output->colorspace, rows_data, in_stride,
io->mb_w, io->mb_h, rgba, buf->stride);
// Update 'last_out_row_'.
@ -667,50 +700,163 @@ static void ProcessRows(VP8LDecoder* const dec, int row) {
assert(dec->last_row_ <= dec->height_);
}
static int DecodeImageData(VP8LDecoder* const dec,
uint32_t* const data, int width, int height,
ProcessRowsFunc process_func) {
// Row-processing for the special case when alpha data contains only one
// transform (color indexing), and trivial non-green literals.
static int Is8bOptimizable(const VP8LMetadata* const hdr) {
int i;
if (hdr->color_cache_size_ > 0) return 0;
// When the Huffman tree contains only one symbol, we can skip the
// call to ReadSymbol() for red/blue/alpha channels.
for (i = 0; i < hdr->num_htree_groups_; ++i) {
const HuffmanTree* const htrees = hdr->htree_groups_[i].htrees_;
if (htrees[RED].num_nodes_ > 1) return 0;
if (htrees[BLUE].num_nodes_ > 1) return 0;
if (htrees[ALPHA].num_nodes_ > 1) return 0;
}
return 1;
}
static void ExtractPalettedAlphaRows(VP8LDecoder* const dec, int row) {
const int num_rows = row - dec->last_row_;
const uint8_t* const in =
(uint8_t*)dec->pixels_ + dec->width_ * dec->last_row_;
if (num_rows > 0) {
ApplyInverseTransformsAlpha(dec, num_rows, in);
}
dec->last_row_ = dec->last_out_row_ = row;
}
static int DecodeAlphaData(VP8LDecoder* const dec, uint8_t* const data,
int width, int height, int last_row) {
int ok = 1;
int col = 0, row = 0;
int row = dec->last_pixel_ / width;
int col = dec->last_pixel_ % width;
VP8LBitReader* const br = &dec->br_;
VP8LMetadata* const hdr = &dec->hdr_;
HTreeGroup* htree_group = hdr->htree_groups_;
uint32_t* src = data;
uint32_t* last_cached = data;
uint32_t* const src_end = data + width * height;
const HTreeGroup* htree_group = GetHtreeGroupForPos(hdr, col, row);
int pos = dec->last_pixel_; // current position
const int end = width * height; // End of data
const int last = width * last_row; // Last pixel to decode
const int len_code_limit = NUM_LITERAL_CODES + NUM_LENGTH_CODES;
const int color_cache_limit = len_code_limit + hdr->color_cache_size_;
VP8LColorCache* const color_cache =
(hdr->color_cache_size_ > 0) ? &hdr->color_cache_ : NULL;
const int mask = hdr->huffman_mask_;
assert(htree_group != NULL);
assert(last_row <= height);
assert(Is8bOptimizable(hdr));
while (!br->eos_ && src < src_end) {
while (!br->eos_ && pos < last) {
int code;
// Only update when changing tile. Note we could use the following test:
// if "((((prev_col ^ col) | prev_row ^ row)) > mask)" -> tile changed
// but that's actually slower and requires storing the previous col/row
// Only update when changing tile.
if ((col & mask) == 0) {
htree_group = GetHtreeGroupForPos(hdr, col, row);
}
VP8LFillBitWindow(br);
code = ReadSymbol(&htree_group->htrees_[GREEN], br);
if (code < NUM_LITERAL_CODES) { // Literal.
if (code < NUM_LITERAL_CODES) { // Literal
data[pos] = code;
++pos;
++col;
if (col >= width) {
col = 0;
++row;
if (row % NUM_ARGB_CACHE_ROWS == 0) {
ExtractPalettedAlphaRows(dec, row);
}
}
} else if (code < len_code_limit) { // Backward reference
int dist_code, dist;
const int length_sym = code - NUM_LITERAL_CODES;
const int length = GetCopyLength(length_sym, br);
const int dist_symbol = ReadSymbol(&htree_group->htrees_[DIST], br);
VP8LFillBitWindow(br);
dist_code = GetCopyDistance(dist_symbol, br);
dist = PlaneCodeToDistance(width, dist_code);
if (pos >= dist && end - pos >= length) {
int i;
for (i = 0; i < length; ++i) data[pos + i] = data[pos + i - dist];
} else {
ok = 0;
goto End;
}
pos += length;
col += length;
while (col >= width) {
col -= width;
++row;
if (row % NUM_ARGB_CACHE_ROWS == 0) {
ExtractPalettedAlphaRows(dec, row);
}
}
if (pos < last && (col & mask)) {
htree_group = GetHtreeGroupForPos(hdr, col, row);
}
} else { // Not reached
ok = 0;
goto End;
}
ok = !br->error_;
if (!ok) goto End;
}
// Process the remaining rows corresponding to the last row-block.
ExtractPalettedAlphaRows(dec, row);
End:
if (br->error_ || !ok || (br->eos_ && pos < end)) {
ok = 0;
dec->status_ = br->eos_ ? VP8_STATUS_SUSPENDED
: VP8_STATUS_BITSTREAM_ERROR;
} else {
dec->last_pixel_ = (int)pos;
if (pos == end) dec->state_ = READ_DATA;
}
return ok;
}
static int DecodeImageData(VP8LDecoder* const dec, uint32_t* const data,
int width, int height, int last_row,
ProcessRowsFunc process_func) {
int ok = 1;
int row = dec->last_pixel_ / width;
int col = dec->last_pixel_ % width;
VP8LBitReader* const br = &dec->br_;
VP8LMetadata* const hdr = &dec->hdr_;
HTreeGroup* htree_group = GetHtreeGroupForPos(hdr, col, row);
uint32_t* src = data + dec->last_pixel_;
uint32_t* last_cached = src;
uint32_t* const src_end = data + width * height; // End of data
uint32_t* const src_last = data + width * last_row; // Last pixel to decode
const int len_code_limit = NUM_LITERAL_CODES + NUM_LENGTH_CODES;
const int color_cache_limit = len_code_limit + hdr->color_cache_size_;
VP8LColorCache* const color_cache =
(hdr->color_cache_size_ > 0) ? &hdr->color_cache_ : NULL;
const int mask = hdr->huffman_mask_;
assert(htree_group != NULL);
assert(src_last <= src_end);
while (!br->eos_ && src < src_last) {
int code;
// Only update when changing tile. Note we could use this test:
// if "((((prev_col ^ col) | prev_row ^ row)) > mask)" -> tile changed
// but that's actually slower and needs storing the previous col/row.
if ((col & mask) == 0) {
htree_group = GetHtreeGroupForPos(hdr, col, row);
}
VP8LFillBitWindow(br);
code = ReadSymbol(&htree_group->htrees_[GREEN], br);
if (code < NUM_LITERAL_CODES) { // Literal
int red, green, blue, alpha;
red = ReadSymbol(&htree_group->htrees_[RED], br);
green = code;
VP8LFillBitWindow(br);
blue = ReadSymbol(&htree_group->htrees_[BLUE], br);
alpha = ReadSymbol(&htree_group->htrees_[ALPHA], br);
*src = (alpha << 24) + (red << 16) + (green << 8) + blue;
AdvanceByOne:
*src = (alpha << 24) | (red << 16) | (green << 8) | blue;
AdvanceByOne:
++src;
++col;
if (col >= width) {
col = 0;
++row;
if ((process_func != NULL) && (row % NUM_ARGB_CACHE_ROWS == 0)) {
if ((row % NUM_ARGB_CACHE_ROWS == 0) && (process_func != NULL)) {
process_func(dec, row);
}
if (color_cache != NULL) {
@ -719,7 +865,7 @@ static int DecodeImageData(VP8LDecoder* const dec,
}
}
}
} else if (code < len_code_limit) { // Backward reference
} else if (code < len_code_limit) { // Backward reference
int dist_code, dist;
const int length_sym = code - NUM_LITERAL_CODES;
const int length = GetCopyLength(length_sym, br);
@ -727,11 +873,10 @@ static int DecodeImageData(VP8LDecoder* const dec,
VP8LFillBitWindow(br);
dist_code = GetCopyDistance(dist_symbol, br);
dist = PlaneCodeToDistance(width, dist_code);
if (src - data < dist || src_end - src < length) {
if (src - data < (ptrdiff_t)dist || src_end - src < (ptrdiff_t)length) {
ok = 0;
goto End;
}
{
} else {
int i;
for (i = 0; i < length; ++i) src[i] = src[i - dist];
src += length;
@ -740,19 +885,19 @@ static int DecodeImageData(VP8LDecoder* const dec,
while (col >= width) {
col -= width;
++row;
if ((process_func != NULL) && (row % NUM_ARGB_CACHE_ROWS == 0)) {
if ((row % NUM_ARGB_CACHE_ROWS == 0) && (process_func != NULL)) {
process_func(dec, row);
}
}
if (src < src_end) {
htree_group = GetHtreeGroupForPos(hdr, col, row);
if (src < src_last) {
if (col & mask) htree_group = GetHtreeGroupForPos(hdr, col, row);
if (color_cache != NULL) {
while (last_cached < src) {
VP8LColorCacheInsert(color_cache, *last_cached++);
}
}
}
} else if (code < color_cache_limit) { // Color cache.
} else if (code < color_cache_limit) { // Color cache
const int key = code - len_code_limit;
assert(color_cache != NULL);
while (last_cached < src) {
@ -760,7 +905,7 @@ static int DecodeImageData(VP8LDecoder* const dec,
}
*src = VP8LColorCacheLookup(color_cache, key);
goto AdvanceByOne;
} else { // Not reached.
} else { // Not reached
ok = 0;
goto End;
}
@ -773,12 +918,12 @@ static int DecodeImageData(VP8LDecoder* const dec,
End:
if (br->error_ || !ok || (br->eos_ && src < src_end)) {
ok = 0;
dec->status_ = (!br->eos_) ?
VP8_STATUS_BITSTREAM_ERROR : VP8_STATUS_SUSPENDED;
} else if (src == src_end) {
dec->state_ = READ_DATA;
dec->status_ = br->eos_ ? VP8_STATUS_SUSPENDED
: VP8_STATUS_BITSTREAM_ERROR;
} else {
dec->last_pixel_ = (int)(src - data);
if (src == src_end) dec->state_ = READ_DATA;
}
return ok;
}
@ -895,6 +1040,9 @@ VP8LDecoder* VP8LNew(void) {
dec->status_ = VP8_STATUS_OK;
dec->action_ = READ_DIM;
dec->state_ = READ_DIM;
VP8LDspInit(); // Init critical function pointers.
return dec;
}
@ -903,8 +1051,8 @@ void VP8LClear(VP8LDecoder* const dec) {
if (dec == NULL) return;
ClearMetadata(&dec->hdr_);
free(dec->argb_);
dec->argb_ = NULL;
free(dec->pixels_);
dec->pixels_ = NULL;
for (i = 0; i < dec->next_transform_; ++i) {
ClearTransform(&dec->transforms_[i]);
}
@ -1000,7 +1148,8 @@ static int DecodeImageStream(int xsize, int ysize,
}
// Use the Huffman trees to decode the LZ77 encoded data.
ok = DecodeImageData(dec, data, transform_xsize, transform_ysize, NULL);
ok = DecodeImageData(dec, data, transform_xsize, transform_ysize,
transform_ysize, NULL);
ok = ok && !br->error_;
End:
@ -1022,41 +1171,52 @@ static int DecodeImageStream(int xsize, int ysize,
assert(data == NULL);
assert(is_level0);
}
dec->last_pixel_ = 0; // Reset for future DECODE_DATA_FUNC() calls.
if (!is_level0) ClearMetadata(hdr); // Clean up temporary data behind.
}
return ok;
}
//------------------------------------------------------------------------------
// Allocate dec->argb_ and dec->argb_cache_ using dec->width_ and dec->height_
static int AllocateARGBBuffers(VP8LDecoder* const dec, int final_width) {
// Allocate internal buffers dec->pixels_ and dec->argb_cache_.
static int AllocateInternalBuffers32b(VP8LDecoder* const dec, int final_width) {
const uint64_t num_pixels = (uint64_t)dec->width_ * dec->height_;
// Scratch buffer corresponding to top-prediction row for transforming the
// first row in the row-blocks.
const uint64_t cache_top_pixels = final_width;
// Scratch buffer for temporary BGRA storage.
// first row in the row-blocks. Not needed for paletted alpha.
const uint64_t cache_top_pixels = (uint16_t)final_width;
// Scratch buffer for temporary BGRA storage. Not needed for paletted alpha.
const uint64_t cache_pixels = (uint64_t)final_width * NUM_ARGB_CACHE_ROWS;
const uint64_t total_num_pixels =
num_pixels + cache_top_pixels + cache_pixels;
assert(dec->width_ <= final_width);
dec->argb_ = (uint32_t*)WebPSafeMalloc(total_num_pixels, sizeof(*dec->argb_));
if (dec->argb_ == NULL) {
dec->pixels_ = (uint32_t*)WebPSafeMalloc(total_num_pixels, sizeof(uint32_t));
if (dec->pixels_ == NULL) {
dec->argb_cache_ = NULL; // for sanity check
dec->status_ = VP8_STATUS_OUT_OF_MEMORY;
return 0;
}
dec->argb_cache_ = dec->argb_ + num_pixels + cache_top_pixels;
dec->argb_cache_ = dec->pixels_ + num_pixels + cache_top_pixels;
return 1;
}
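// Worked example (illustrative): for a 100x80 image decoded at a final_width
// of 200, this allocates 100*80 + 200 + 200*NUM_ARGB_CACHE_ROWS(16) = 11,400
// uint32_t values (about 45 KiB), covering the pixels, the top-prediction
// scratch row and the BGRA cache rows.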
static int AllocateInternalBuffers8b(VP8LDecoder* const dec) {
const uint64_t total_num_pixels = (uint64_t)dec->width_ * dec->height_;
dec->argb_cache_ = NULL; // for sanity check
dec->pixels_ = (uint32_t*)WebPSafeMalloc(total_num_pixels, sizeof(uint8_t));
if (dec->pixels_ == NULL) {
dec->status_ = VP8_STATUS_OUT_OF_MEMORY;
return 0;
}
return 1;
}
//------------------------------------------------------------------------------
// Special row-processing that only stores the alpha data.
// Special row-processing that only stores the alpha data.
static void ExtractAlphaRows(VP8LDecoder* const dec, int row) {
const int num_rows = row - dec->last_row_;
const uint32_t* const in = dec->argb_ + dec->width_ * dec->last_row_;
const uint32_t* const in = dec->pixels_ + dec->width_ * dec->last_row_;
if (num_rows <= 0) return; // Nothing to be done.
ApplyInverseTransforms(dec, num_rows, in);
@ -1070,44 +1230,76 @@ static void ExtractAlphaRows(VP8LDecoder* const dec, int row) {
int i;
for (i = 0; i < cache_pixs; ++i) dst[i] = (src[i] >> 8) & 0xff;
}
dec->last_row_ = dec->last_out_row_ = row;
}
int VP8LDecodeAlphaImageStream(int width, int height, const uint8_t* const data,
size_t data_size, uint8_t* const output) {
VP8Io io;
int VP8LDecodeAlphaHeader(ALPHDecoder* const alph_dec,
const uint8_t* const data, size_t data_size,
uint8_t* const output) {
int ok = 0;
VP8LDecoder* const dec = VP8LNew();
if (dec == NULL) return 0;
VP8LDecoder* dec;
VP8Io* io;
assert(alph_dec != NULL);
alph_dec->vp8l_dec_ = VP8LNew();
if (alph_dec->vp8l_dec_ == NULL) return 0;
dec = alph_dec->vp8l_dec_;
dec->width_ = width;
dec->height_ = height;
dec->io_ = &io;
dec->width_ = alph_dec->width_;
dec->height_ = alph_dec->height_;
dec->io_ = &alph_dec->io_;
io = dec->io_;
VP8InitIo(&io);
WebPInitCustomIo(NULL, &io); // Just a sanity Init. io won't be used.
io.opaque = output;
io.width = width;
io.height = height;
VP8InitIo(io);
WebPInitCustomIo(NULL, io); // Just a sanity Init. io won't be used.
io->opaque = output;
io->width = alph_dec->width_;
io->height = alph_dec->height_;
dec->status_ = VP8_STATUS_OK;
VP8LInitBitReader(&dec->br_, data, data_size);
dec->action_ = READ_HDR;
if (!DecodeImageStream(width, height, 1, dec, NULL)) goto Err;
if (!DecodeImageStream(alph_dec->width_, alph_dec->height_, 1, dec, NULL)) {
goto Err;
}
// Allocate output (note that dec->width_ may have changed here).
if (!AllocateARGBBuffers(dec, width)) goto Err;
// Special case: if the alpha data uses only the color indexing transform and
// no color cache (a frequent case), we use the DecodeAlphaData() method,
// which only needs an allocation of 1 byte per pixel (alpha channel).
if (dec->next_transform_ == 1 &&
dec->transforms_[0].type_ == COLOR_INDEXING_TRANSFORM &&
Is8bOptimizable(&dec->hdr_)) {
alph_dec->use_8b_decode = 1;
ok = AllocateInternalBuffers8b(dec);
} else {
// Allocate internal buffers (note that dec->width_ may have changed here).
alph_dec->use_8b_decode = 0;
ok = AllocateInternalBuffers32b(dec, alph_dec->width_);
}
if (!ok) goto Err;
// Decode (with special row processing).
dec->action_ = READ_DATA;
ok = DecodeImageData(dec, dec->argb_, dec->width_, dec->height_,
ExtractAlphaRows);
return 1;
Err:
VP8LDelete(dec);
return ok;
VP8LDelete(alph_dec->vp8l_dec_);
alph_dec->vp8l_dec_ = NULL;
return 0;
}
int VP8LDecodeAlphaImageStream(ALPHDecoder* const alph_dec, int last_row) {
VP8LDecoder* const dec = alph_dec->vp8l_dec_;
assert(dec != NULL);
assert(dec->action_ == READ_DATA);
assert(last_row <= dec->height_);
// Decode (with special row processing).
return alph_dec->use_8b_decode ?
DecodeAlphaData(dec, (uint8_t*)dec->pixels_, dec->width_, dec->height_,
last_row) :
DecodeImageData(dec, dec->pixels_, dec->width_, dec->height_,
last_row, ExtractAlphaRows);
}
//------------------------------------------------------------------------------
@ -1162,14 +1354,14 @@ int VP8LDecodeImage(VP8LDecoder* const dec) {
goto Err;
}
if (!AllocateARGBBuffers(dec, io->width)) goto Err;
if (!AllocateInternalBuffers32b(dec, io->width)) goto Err;
if (io->use_scaling && !AllocateAndInitRescaler(dec, io)) goto Err;
// Decode.
dec->action_ = READ_DATA;
if (!DecodeImageData(dec, dec->argb_, dec->width_, dec->height_,
ProcessRows)) {
if (!DecodeImageData(dec, dec->pixels_, dec->width_, dec->height_,
dec->height_, ProcessRows)) {
goto Err;
}
@ -1186,6 +1378,3 @@ int VP8LDecodeImage(VP8LDecoder* const dec) {
//------------------------------------------------------------------------------
#if defined(__cplusplus) || defined(c_plusplus)
} // extern "C"
#endif

View File

@ -1,8 +1,10 @@
// Copyright 2012 Google Inc. All Rights Reserved.
//
// This code is licensed under the same terms as WebM:
// Software License Agreement: http://www.webmproject.org/license/software/
// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// Lossless decoder: internal header.
@ -20,7 +22,7 @@
#include "../utils/huffman.h"
#include "../webp/format_constants.h"
#if defined(__cplusplus) || defined(c_plusplus)
#ifdef __cplusplus
extern "C" {
#endif
@ -55,7 +57,8 @@ typedef struct {
HTreeGroup *htree_groups_;
} VP8LMetadata;
typedef struct {
typedef struct VP8LDecoder VP8LDecoder;
struct VP8LDecoder {
VP8StatusCode status_;
VP8LDecodeState action_;
VP8LDecodeState state_;
@ -63,7 +66,8 @@ typedef struct {
const WebPDecBuffer *output_; // shortcut to io->opaque->output
uint32_t *argb_; // Internal data: always in BGRA color mode.
uint32_t *pixels_; // Internal data: either uint8_t* for alpha
// or uint32_t* for BGRA.
uint32_t *argb_cache_; // Scratch buffer for temporary BGRA storage.
VP8LBitReader br_;
@ -71,6 +75,9 @@ typedef struct {
int width_;
int height_;
int last_row_; // last input row decoded so far.
int last_pixel_; // last pixel decoded so far. However, it may
// not be transformed, scaled and
// color-converted yet.
int last_out_row_; // last row output so far.
VP8LMetadata hdr_;
@ -82,18 +89,27 @@ typedef struct {
uint8_t *rescaler_memory; // Working memory for rescaling work.
WebPRescaler *rescaler; // Common rescaler for all channels.
} VP8LDecoder;
};
//------------------------------------------------------------------------------
// internal functions. Not public.
struct ALPHDecoder; // Defined in dec/alphai.h.
// in vp8l.c
// Decodes a raw image stream (without header) and store the alpha data
// into *output, which must be of size width x height. Returns false in case
// of error.
int VP8LDecodeAlphaImageStream(int width, int height, const uint8_t* const data,
size_t data_size, uint8_t* const output);
// Decodes image header for alpha data stored using lossless compression.
// Returns false in case of error.
int VP8LDecodeAlphaHeader(struct ALPHDecoder* const alph_dec,
const uint8_t* const data, size_t data_size,
uint8_t* const output);
// Decodes *at least* 'last_row' rows of alpha. If some of the initial rows are
// already decoded in previous call(s), it will resume decoding from where it
// was paused.
// Returns false in case of bitstream error.
int VP8LDecodeAlphaImageStream(struct ALPHDecoder* const alph_dec,
int last_row);
// Allocates and initialize a new lossless decoder instance.
VP8LDecoder* VP8LNew(void);
@ -114,7 +130,7 @@ void VP8LDelete(VP8LDecoder* const dec);
//------------------------------------------------------------------------------
#if defined(__cplusplus) || defined(c_plusplus)
#ifdef __cplusplus
} // extern "C"
#endif

View File

@ -1,8 +1,10 @@
// Copyright 2010 Google Inc. All Rights Reserved.
//
// This code is licensed under the same terms as WebM:
// Software License Agreement: http://www.webmproject.org/license/software/
// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// Main decoding functions for WEBP images.
@ -16,10 +18,6 @@
#include "./webpi.h"
#include "../webp/mux_types.h" // ALPHA_FLAG
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
//------------------------------------------------------------------------------
// RIFF layout is:
// Offset tag
@ -192,6 +190,15 @@ static VP8StatusCode ParseOptionalChunks(const uint8_t** const data,
return VP8_STATUS_BITSTREAM_ERROR; // Not a valid chunk size.
}
// Start of a (possibly incomplete) VP8/VP8L chunk implies that we have
// parsed all the optional chunks.
// Note: This check must occur before the check 'buf_size < disk_chunk_size'
// below to allow incomplete VP8/VP8L chunks.
if (!memcmp(buf, "VP8 ", TAG_SIZE) ||
!memcmp(buf, "VP8L", TAG_SIZE)) {
return VP8_STATUS_OK;
}
if (buf_size < disk_chunk_size) { // Insufficient data.
return VP8_STATUS_NOT_ENOUGH_DATA;
}
@ -199,9 +206,6 @@ static VP8StatusCode ParseOptionalChunks(const uint8_t** const data,
if (!memcmp(buf, "ALPH", TAG_SIZE)) { // A valid ALPH header.
*alpha_data = buf + CHUNK_HEADER_SIZE;
*alpha_size = chunk_size;
} else if (!memcmp(buf, "VP8 ", TAG_SIZE) ||
!memcmp(buf, "VP8L", TAG_SIZE)) { // A valid VP8/VP8L header.
return VP8_STATUS_OK; // Found.
}
// We have a full and valid chunk; skip it.
@ -277,9 +281,17 @@ static VP8StatusCode ParseHeadersInternal(const uint8_t* data,
int* const height,
int* const has_alpha,
int* const has_animation,
int* const format,
WebPHeaderStructure* const headers) {
int canvas_width = 0;
int canvas_height = 0;
int image_width = 0;
int image_height = 0;
int found_riff = 0;
int found_vp8x = 0;
int animation_present = 0;
int fragments_present = 0;
VP8StatusCode status;
WebPHeaderStructure hdrs;
@ -300,23 +312,35 @@ static VP8StatusCode ParseHeadersInternal(const uint8_t* data,
// Skip over VP8X.
{
uint32_t flags = 0;
status = ParseVP8X(&data, &data_size, &found_vp8x, width, height, &flags);
status = ParseVP8X(&data, &data_size, &found_vp8x,
&canvas_width, &canvas_height, &flags);
if (status != VP8_STATUS_OK) {
return status; // Wrong VP8X / insufficient data.
}
animation_present = !!(flags & ANIMATION_FLAG);
fragments_present = !!(flags & FRAGMENTS_FLAG);
if (!found_riff && found_vp8x) {
// Note: This restriction may be removed in the future, if it becomes
// necessary to send VP8X chunk to the decoder.
return VP8_STATUS_BITSTREAM_ERROR;
}
if (has_alpha != NULL) *has_alpha = !!(flags & ALPHA_FLAG);
if (has_animation != NULL) *has_animation = !!(flags & ANIMATION_FLAG);
if (found_vp8x && headers == NULL) {
return VP8_STATUS_OK; // Return features from VP8X header.
if (has_animation != NULL) *has_animation = animation_present;
if (format != NULL) *format = 0; // default = undefined
image_width = canvas_width;
image_height = canvas_height;
if (found_vp8x && (animation_present || fragments_present) &&
headers == NULL) {
status = VP8_STATUS_OK;
goto ReturnWidthHeight; // Just return features from VP8X header.
}
}
if (data_size < TAG_SIZE) return VP8_STATUS_NOT_ENOUGH_DATA;
if (data_size < TAG_SIZE) {
status = VP8_STATUS_NOT_ENOUGH_DATA;
goto ReturnWidthHeight;
}
// Skip over optional chunks if data started with "RIFF + VP8X" or "ALPH".
if ((found_riff && found_vp8x) ||
@ -324,7 +348,7 @@ static VP8StatusCode ParseHeadersInternal(const uint8_t* data,
status = ParseOptionalChunks(&data, &data_size, hdrs.riff_size,
&hdrs.alpha_data, &hdrs.alpha_data_size);
if (status != VP8_STATUS_OK) {
return status; // Found an invalid chunk size / insufficient data.
goto ReturnWidthHeight; // Invalid chunk size / insufficient data.
}
}
@ -332,35 +356,41 @@ static VP8StatusCode ParseHeadersInternal(const uint8_t* data,
status = ParseVP8Header(&data, &data_size, hdrs.riff_size,
&hdrs.compressed_size, &hdrs.is_lossless);
if (status != VP8_STATUS_OK) {
return status; // Wrong VP8/VP8L chunk-header / insufficient data.
goto ReturnWidthHeight; // Wrong VP8/VP8L chunk-header / insufficient data.
}
if (hdrs.compressed_size > MAX_CHUNK_PAYLOAD) {
return VP8_STATUS_BITSTREAM_ERROR;
}
if (format != NULL && !(animation_present || fragments_present)) {
*format = hdrs.is_lossless ? 2 : 1;
}
if (!hdrs.is_lossless) {
if (data_size < VP8_FRAME_HEADER_SIZE) {
return VP8_STATUS_NOT_ENOUGH_DATA;
status = VP8_STATUS_NOT_ENOUGH_DATA;
goto ReturnWidthHeight;
}
// Validates raw VP8 data.
if (!VP8GetInfo(data, data_size,
(uint32_t)hdrs.compressed_size, width, height)) {
if (!VP8GetInfo(data, data_size, (uint32_t)hdrs.compressed_size,
&image_width, &image_height)) {
return VP8_STATUS_BITSTREAM_ERROR;
}
} else {
if (data_size < VP8L_FRAME_HEADER_SIZE) {
return VP8_STATUS_NOT_ENOUGH_DATA;
status = VP8_STATUS_NOT_ENOUGH_DATA;
goto ReturnWidthHeight;
}
// Validates raw VP8L data.
if (!VP8LGetInfo(data, data_size, width, height, has_alpha)) {
if (!VP8LGetInfo(data, data_size, &image_width, &image_height, has_alpha)) {
return VP8_STATUS_BITSTREAM_ERROR;
}
}
if (has_alpha != NULL) {
// If the data did not contain a VP8X/VP8L chunk the only definitive way
// to set this is by looking for alpha data (from an ALPH chunk).
*has_alpha |= (hdrs.alpha_data != NULL);
// Validates image size coherency.
if (found_vp8x) {
if (canvas_width != image_width || canvas_height != image_height) {
return VP8_STATUS_BITSTREAM_ERROR;
}
}
if (headers != NULL) {
*headers = hdrs;
@ -368,7 +398,20 @@ static VP8StatusCode ParseHeadersInternal(const uint8_t* data,
assert((uint64_t)(data - headers->data) < MAX_CHUNK_PAYLOAD);
assert(headers->offset == headers->data_size - data_size);
}
return VP8_STATUS_OK; // Return features from VP8 header.
ReturnWidthHeight:
if (status == VP8_STATUS_OK ||
(status == VP8_STATUS_NOT_ENOUGH_DATA && found_vp8x && headers == NULL)) {
if (has_alpha != NULL) {
// If the data did not contain a VP8X/VP8L chunk the only definitive way
// to set this is by looking for alpha data (from an ALPH chunk).
*has_alpha |= (hdrs.alpha_data != NULL);
}
if (width != NULL) *width = image_width;
if (height != NULL) *height = image_height;
return VP8_STATUS_OK;
} else {
return status;
}
}
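// A minimal usage sketch of the public entry point that routes through this
// parser (WebPGetFeatures from <webp/decode.h>); the buffer names are
// hypothetical. With a truncated buffer the call can return
// VP8_STATUS_NOT_ENOUGH_DATA instead of filling in the features.
#include <stddef.h>
#include <webp/decode.h>
static int ExampleHasKnownDimensions(const uint8_t* const data,
                                     size_t data_size) {
  WebPBitstreamFeatures features;
  if (WebPGetFeatures(data, data_size, &features) != VP8_STATUS_OK) {
    return 0;  // e.g. not enough data yet, or not a WebP bitstream
  }
  return features.width > 0 && features.height > 0;  // dimensions now known
}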
VP8StatusCode WebPParseHeaders(WebPHeaderStructure* const headers) {
@ -377,7 +420,8 @@ VP8StatusCode WebPParseHeaders(WebPHeaderStructure* const headers) {
assert(headers != NULL);
// fill out headers, ignore width/height/has_alpha.
status = ParseHeadersInternal(headers->data, headers->data_size,
NULL, NULL, NULL, &has_animation, headers);
NULL, NULL, NULL, &has_animation,
NULL, headers);
if (status == VP8_STATUS_OK || status == VP8_STATUS_NOT_ENOUGH_DATA) {
// TODO(jzern): full support of animation frames will require API additions.
if (has_animation) {
@ -391,7 +435,7 @@ VP8StatusCode WebPParseHeaders(WebPHeaderStructure* const headers) {
// WebPDecParams
void WebPResetDecParams(WebPDecParams* const params) {
if (params) {
if (params != NULL) {
memset(params, 0, sizeof(*params));
}
}
@ -424,11 +468,6 @@ static VP8StatusCode DecodeInto(const uint8_t* const data, size_t data_size,
if (dec == NULL) {
return VP8_STATUS_OUT_OF_MEMORY;
}
#ifdef WEBP_USE_THREAD
dec->use_threads_ = params->options && (params->options->use_threads > 0);
#else
dec->use_threads_ = 0;
#endif
dec->alpha_data_ = headers.alpha_data;
dec->alpha_data_size_ = headers.alpha_data_size;
@ -440,6 +479,10 @@ static VP8StatusCode DecodeInto(const uint8_t* const data, size_t data_size,
status = WebPAllocateDecBuffer(io.width, io.height, params->options,
params->output);
if (status == VP8_STATUS_OK) { // Decode
// This change must be done before calling VP8Decode()
dec->mt_method_ = VP8GetThreadMethod(params->options, &headers,
io.width, io.height);
VP8InitDithering(params->options, dec);
if (!VP8Decode(dec, &io)) {
status = dec->status_;
}
@ -626,7 +669,6 @@ uint8_t* WebPDecodeYUV(const uint8_t* data, size_t data_size,
static void DefaultFeatures(WebPBitstreamFeatures* const features) {
assert(features != NULL);
memset(features, 0, sizeof(*features));
features->bitstream_version = 0;
}
static VP8StatusCode GetFeatures(const uint8_t* const data, size_t data_size,
@ -640,7 +682,7 @@ static VP8StatusCode GetFeatures(const uint8_t* const data, size_t data_size,
return ParseHeadersInternal(data, data_size,
&features->width, &features->height,
&features->has_alpha, &features->has_animation,
NULL);
&features->format, NULL);
}
//------------------------------------------------------------------------------
@ -778,6 +820,3 @@ int WebPIoInitFromOptions(const WebPDecoderOptions* const options,
//------------------------------------------------------------------------------
#if defined(__cplusplus) || defined(c_plusplus)
} // extern "C"
#endif

View File

@ -1,8 +1,10 @@
// Copyright 2011 Google Inc. All Rights Reserved.
//
// This code is licensed under the same terms as WebM:
// Software License Agreement: http://www.webmproject.org/license/software/
// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// Internal header: WebP decoding parameters and custom IO on buffer
@ -12,7 +14,7 @@
#ifndef WEBP_DEC_WEBPI_H_
#define WEBP_DEC_WEBPI_H_
#if defined(__cplusplus) || defined(c_plusplus)
#ifdef __cplusplus
extern "C" {
#endif
@ -107,7 +109,7 @@ void WebPGrabDecBuffer(WebPDecBuffer* const src, WebPDecBuffer* const dst);
//------------------------------------------------------------------------------
#if defined(__cplusplus) || defined(c_plusplus)
#ifdef __cplusplus
} // extern "C"
#endif

View File

@ -10,6 +10,6 @@ libwebpdemuxinclude_HEADERS += ../webp/mux_types.h
libwebpdemuxinclude_HEADERS += ../webp/types.h
libwebpdemux_la_LIBADD = ../libwebp.la
libwebpdemux_la_LDFLAGS = -no-undefined -version-info 0:0:0
libwebpdemux_la_LDFLAGS = -no-undefined -version-info 1:0:0
libwebpdemuxincludedir = $(includedir)/webp
pkgconfig_DATA = libwebpdemux.pc

View File

@ -1,8 +1,10 @@
// Copyright 2012 Google Inc. All Rights Reserved.
//
// This code is licensed under the same terms as WebM:
// Software License Agreement: http://www.webmproject.org/license/software/
// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// WebP container demux.
@ -21,12 +23,8 @@
#include "../webp/demux.h"
#include "../webp/format_constants.h"
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
#define DMUX_MAJ_VERSION 0
#define DMUX_MIN_VERSION 1
#define DMUX_MIN_VERSION 2
#define DMUX_REV_VERSION 0
typedef struct {
@ -45,8 +43,10 @@ typedef struct {
typedef struct Frame {
int x_offset_, y_offset_;
int width_, height_;
int has_alpha_;
int duration_;
WebPMuxAnimDispose dispose_method_;
WebPMuxAnimBlend blend_method_;
int is_fragment_; // this is a frame fragment (and not a full frame).
int frame_num_; // the referent frame number for use in assembling fragments.
int complete_; // img_components_ contains a full image.
@ -71,6 +71,7 @@ struct WebPDemuxer {
Frame* frames_;
Frame** frames_tail_;
Chunk* chunks_; // non-image chunks
Chunk** chunks_tail_;
};
typedef enum {
@ -175,10 +176,9 @@ static WEBP_INLINE uint32_t ReadLE32(MemBuffer* const mem) {
// Secondary chunk parsing
static void AddChunk(WebPDemuxer* const dmux, Chunk* const chunk) {
Chunk** c = &dmux->chunks_;
while (*c != NULL) c = &(*c)->next_;
*c = chunk;
*dmux->chunks_tail_ = chunk;
chunk->next_ = NULL;
dmux->chunks_tail_ = &chunk->next_;
}
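// Design note (illustrative): AddChunk now appends in O(1) instead of walking
// the list each time. This relies on 'chunks_tail_' starting out as
// &dmux->chunks_ when the demuxer is set up (assumed here) and always being
// left pointing at the terminating next_ slot, so '*dmux->chunks_tail_ =
// chunk' links the new chunk directly to the end of the list.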
// Add a frame to the end of the list, ensuring the last frame is complete.
@ -194,18 +194,13 @@ static int AddFrame(WebPDemuxer* const dmux, Frame* const frame) {
}
// Store image bearing chunks to 'frame'.
// If 'has_vp8l_alpha' is not NULL, it will be set to true if the frame is a
// lossless image with alpha.
static ParseStatus StoreFrame(int frame_num, uint32_t min_size,
MemBuffer* const mem, Frame* const frame,
int* const has_vp8l_alpha) {
MemBuffer* const mem, Frame* const frame) {
int alpha_chunks = 0;
int image_chunks = 0;
int done = (MemDataSize(mem) < min_size);
ParseStatus status = PARSE_OK;
if (has_vp8l_alpha != NULL) *has_vp8l_alpha = 0; // Default.
if (done) return PARSE_NEED_MORE_DATA;
do {
@ -227,6 +222,7 @@ static ParseStatus StoreFrame(int frame_num, uint32_t min_size,
++alpha_chunks;
frame->img_components_[1].offset_ = chunk_start_offset;
frame->img_components_[1].size_ = chunk_size;
frame->has_alpha_ = 1;
frame->frame_num_ = frame_num;
Skip(mem, payload_available);
} else {
@ -256,7 +252,7 @@ static ParseStatus StoreFrame(int frame_num, uint32_t min_size,
frame->img_components_[0].size_ = chunk_size;
frame->width_ = features.width;
frame->height_ = features.height;
if (has_vp8l_alpha != NULL) *has_vp8l_alpha = features.has_alpha;
frame->has_alpha_ |= features.has_alpha;
frame->frame_num_ = frame_num;
frame->complete_ = (status == PARSE_OK);
Skip(mem, payload_available);
@ -301,9 +297,10 @@ static ParseStatus NewFrame(const MemBuffer* const mem,
// 'frame_chunk_size' is the previously validated, padded chunk size.
static ParseStatus ParseAnimationFrame(
WebPDemuxer* const dmux, uint32_t frame_chunk_size) {
const int has_frames = !!(dmux->feature_flags_ & ANIMATION_FLAG);
const int is_animation = !!(dmux->feature_flags_ & ANIMATION_FLAG);
const uint32_t anmf_payload_size = frame_chunk_size - ANMF_CHUNK_SIZE;
int added_frame = 0;
int bits;
MemBuffer* const mem = &dmux->mem_;
Frame* frame;
ParseStatus status =
@ -315,16 +312,19 @@ static ParseStatus ParseAnimationFrame(
frame->width_ = 1 + ReadLE24s(mem);
frame->height_ = 1 + ReadLE24s(mem);
frame->duration_ = ReadLE24s(mem);
frame->dispose_method_ = (WebPMuxAnimDispose)(ReadByte(mem) & 1);
bits = ReadByte(mem);
frame->dispose_method_ =
(bits & 1) ? WEBP_MUX_DISPOSE_BACKGROUND : WEBP_MUX_DISPOSE_NONE;
frame->blend_method_ = (bits & 2) ? WEBP_MUX_NO_BLEND : WEBP_MUX_BLEND;
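// Worked example (illustrative): the single flag byte read above packs both
// controls; e.g. bits == 0x03 selects WEBP_MUX_DISPOSE_BACKGROUND (bit 0 set)
// together with WEBP_MUX_NO_BLEND (bit 1 set), while bits == 0x00 keeps the
// canvas (DISPOSE_NONE) and alpha-blends the frame over it.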
if (frame->width_ * (uint64_t)frame->height_ >= MAX_IMAGE_AREA) {
free(frame);
return PARSE_ERROR;
}
// Store a frame only if the animation flag is set and some data for this
// frame is available.
status = StoreFrame(dmux->num_frames_ + 1, anmf_payload_size, mem, frame,
NULL);
if (status != PARSE_ERROR && has_frames && frame->frame_num_ > 0) {
status = StoreFrame(dmux->num_frames_ + 1, anmf_payload_size, mem, frame);
if (status != PARSE_ERROR && is_animation && frame->frame_num_ > 0) {
added_frame = AddFrame(dmux, frame);
if (added_frame) {
++dmux->num_frames_;
@ -343,7 +343,7 @@ static ParseStatus ParseAnimationFrame(
static ParseStatus ParseFragment(WebPDemuxer* const dmux,
uint32_t fragment_chunk_size) {
const int frame_num = 1; // All fragments belong to the 1st (and only) frame.
const int has_fragments = !!(dmux->feature_flags_ & FRAGMENTS_FLAG);
const int is_fragmented = !!(dmux->feature_flags_ & FRAGMENTS_FLAG);
const uint32_t frgm_payload_size = fragment_chunk_size - FRGM_CHUNK_SIZE;
int added_fragment = 0;
MemBuffer* const mem = &dmux->mem_;
@ -356,10 +356,10 @@ static ParseStatus ParseFragment(WebPDemuxer* const dmux,
frame->x_offset_ = 2 * ReadLE24s(mem);
frame->y_offset_ = 2 * ReadLE24s(mem);
// Store a fragment only if the fragments flag is set there is some data for
// this fragment is available.
status = StoreFrame(frame_num, frgm_payload_size, mem, frame, NULL);
if (status != PARSE_ERROR && has_fragments && frame->frame_num_ > 0) {
// Store a fragment only if the 'fragments' flag is set and there is some
// data available.
status = StoreFrame(frame_num, frgm_payload_size, mem, frame);
if (status != PARSE_ERROR && is_fragmented && frame->frame_num_ > 0) {
added_fragment = AddFrame(dmux, frame);
if (!added_fragment) {
status = PARSE_ERROR;
@ -391,20 +391,20 @@ static int StoreChunk(WebPDemuxer* const dmux,
// -----------------------------------------------------------------------------
// Primary chunk parsing
static int ReadHeader(MemBuffer* const mem) {
static ParseStatus ReadHeader(MemBuffer* const mem) {
const size_t min_size = RIFF_HEADER_SIZE + CHUNK_HEADER_SIZE;
uint32_t riff_size;
// Basic file level validation.
if (MemDataSize(mem) < min_size) return 0;
if (MemDataSize(mem) < min_size) return PARSE_NEED_MORE_DATA;
if (memcmp(GetBuffer(mem), "RIFF", CHUNK_SIZE_BYTES) ||
memcmp(GetBuffer(mem) + CHUNK_HEADER_SIZE, "WEBP", CHUNK_SIZE_BYTES)) {
return 0;
return PARSE_ERROR;
}
riff_size = GetLE32(GetBuffer(mem) + TAG_SIZE);
if (riff_size < CHUNK_HEADER_SIZE) return 0;
if (riff_size > MAX_CHUNK_PAYLOAD) return 0;
if (riff_size < CHUNK_HEADER_SIZE) return PARSE_ERROR;
if (riff_size > MAX_CHUNK_PAYLOAD) return PARSE_ERROR;
// There's no point in reading past the end of the RIFF chunk
mem->riff_end_ = riff_size + CHUNK_HEADER_SIZE;
@ -413,7 +413,7 @@ static int ReadHeader(MemBuffer* const mem) {
}
Skip(mem, RIFF_HEADER_SIZE);
return 1;
return PARSE_OK;
}
static ParseStatus ParseSingleImage(WebPDemuxer* const dmux) {
@ -421,7 +421,7 @@ static ParseStatus ParseSingleImage(WebPDemuxer* const dmux) {
MemBuffer* const mem = &dmux->mem_;
Frame* frame;
ParseStatus status;
int has_vp8l_alpha = 0; // Frame contains a lossless image with alpha.
int image_added = 0;
if (dmux->frames_ != NULL) return PARSE_ERROR;
if (SizeIsInvalid(mem, min_size)) return PARSE_ERROR;
@ -432,14 +432,14 @@ static ParseStatus ParseSingleImage(WebPDemuxer* const dmux) {
// For the single image case we allow parsing of a partial frame, but we need
// at least CHUNK_HEADER_SIZE for parsing.
status = StoreFrame(1, CHUNK_HEADER_SIZE, &dmux->mem_, frame,
&has_vp8l_alpha);
status = StoreFrame(1, CHUNK_HEADER_SIZE, &dmux->mem_, frame);
if (status != PARSE_ERROR) {
const int has_alpha = !!(dmux->feature_flags_ & ALPHA_FLAG);
// Clear any alpha when the alpha flag is missing.
if (!has_alpha && frame->img_components_[1].size_ > 0) {
frame->img_components_[1].offset_ = 0;
frame->img_components_[1].size_ = 0;
frame->has_alpha_ = 0;
}
// Use the frame width/height as the canvas values for non-vp8x files.
@ -448,47 +448,26 @@ static ParseStatus ParseSingleImage(WebPDemuxer* const dmux) {
dmux->state_ = WEBP_DEMUX_PARSED_HEADER;
dmux->canvas_width_ = frame->width_;
dmux->canvas_height_ = frame->height_;
dmux->feature_flags_ |= has_vp8l_alpha ? ALPHA_FLAG : 0;
dmux->feature_flags_ |= frame->has_alpha_ ? ALPHA_FLAG : 0;
}
if (!AddFrame(dmux, frame)) {
status = PARSE_ERROR; // last frame was left incomplete
} else {
image_added = 1;
dmux->num_frames_ = 1;
}
AddFrame(dmux, frame);
dmux->num_frames_ = 1;
} else {
free(frame);
}
if (!image_added) free(frame);
return status;
}
static ParseStatus ParseVP8X(WebPDemuxer* const dmux) {
static ParseStatus ParseVP8XChunks(WebPDemuxer* const dmux) {
const int is_animation = !!(dmux->feature_flags_ & ANIMATION_FLAG);
MemBuffer* const mem = &dmux->mem_;
int anim_chunks = 0;
uint32_t vp8x_size;
ParseStatus status = PARSE_OK;
if (MemDataSize(mem) < CHUNK_HEADER_SIZE) return PARSE_NEED_MORE_DATA;
dmux->is_ext_format_ = 1;
Skip(mem, TAG_SIZE); // VP8X
vp8x_size = ReadLE32(mem);
if (vp8x_size > MAX_CHUNK_PAYLOAD) return PARSE_ERROR;
if (vp8x_size < VP8X_CHUNK_SIZE) return PARSE_ERROR;
vp8x_size += vp8x_size & 1;
if (SizeIsInvalid(mem, vp8x_size)) return PARSE_ERROR;
if (MemDataSize(mem) < vp8x_size) return PARSE_NEED_MORE_DATA;
dmux->feature_flags_ = ReadByte(mem);
Skip(mem, 3); // Reserved.
dmux->canvas_width_ = 1 + ReadLE24s(mem);
dmux->canvas_height_ = 1 + ReadLE24s(mem);
if (dmux->canvas_width_ * (uint64_t)dmux->canvas_height_ >= MAX_IMAGE_AREA) {
return PARSE_ERROR; // image final dimension is too large
}
Skip(mem, vp8x_size - VP8X_CHUNK_SIZE); // skip any trailing data.
dmux->state_ = WEBP_DEMUX_PARSED_HEADER;
if (SizeIsInvalid(mem, CHUNK_HEADER_SIZE)) return PARSE_ERROR;
if (MemDataSize(mem) < CHUNK_HEADER_SIZE) return PARSE_NEED_MORE_DATA;
do {
int store_chunk = 1;
const size_t chunk_start_offset = mem->start_;
@ -507,7 +486,7 @@ static ParseStatus ParseVP8X(WebPDemuxer* const dmux) {
case MKFOURCC('V', 'P', '8', ' '):
case MKFOURCC('V', 'P', '8', 'L'): {
// check that this isn't an animation (all frames should be in an ANMF).
if (anim_chunks > 0) return PARSE_ERROR;
if (anim_chunks > 0 || is_animation) return PARSE_ERROR;
Rewind(mem, CHUNK_HEADER_SIZE);
status = ParseSingleImage(dmux);
@ -544,14 +523,14 @@ static ParseStatus ParseVP8X(WebPDemuxer* const dmux) {
store_chunk = !!(dmux->feature_flags_ & ICCP_FLAG);
goto Skip;
}
case MKFOURCC('X', 'M', 'P', ' '): {
store_chunk = !!(dmux->feature_flags_ & XMP_FLAG);
goto Skip;
}
case MKFOURCC('E', 'X', 'I', 'F'): {
store_chunk = !!(dmux->feature_flags_ & EXIF_FLAG);
goto Skip;
}
case MKFOURCC('X', 'M', 'P', ' '): {
store_chunk = !!(dmux->feature_flags_ & XMP_FLAG);
goto Skip;
}
Skip:
default: {
if (chunk_size_padded <= MemDataSize(mem)) {
@ -580,6 +559,37 @@ static ParseStatus ParseVP8X(WebPDemuxer* const dmux) {
return status;
}
static ParseStatus ParseVP8X(WebPDemuxer* const dmux) {
MemBuffer* const mem = &dmux->mem_;
uint32_t vp8x_size;
if (MemDataSize(mem) < CHUNK_HEADER_SIZE) return PARSE_NEED_MORE_DATA;
dmux->is_ext_format_ = 1;
Skip(mem, TAG_SIZE); // VP8X
vp8x_size = ReadLE32(mem);
if (vp8x_size > MAX_CHUNK_PAYLOAD) return PARSE_ERROR;
if (vp8x_size < VP8X_CHUNK_SIZE) return PARSE_ERROR;
vp8x_size += vp8x_size & 1;
if (SizeIsInvalid(mem, vp8x_size)) return PARSE_ERROR;
if (MemDataSize(mem) < vp8x_size) return PARSE_NEED_MORE_DATA;
dmux->feature_flags_ = ReadByte(mem);
Skip(mem, 3); // Reserved.
dmux->canvas_width_ = 1 + ReadLE24s(mem);
dmux->canvas_height_ = 1 + ReadLE24s(mem);
if (dmux->canvas_width_ * (uint64_t)dmux->canvas_height_ >= MAX_IMAGE_AREA) {
return PARSE_ERROR; // image final dimension is too large
}
Skip(mem, vp8x_size - VP8X_CHUNK_SIZE); // skip any trailing data.
dmux->state_ = WEBP_DEMUX_PARSED_HEADER;
if (SizeIsInvalid(mem, CHUNK_HEADER_SIZE)) return PARSE_ERROR;
if (MemDataSize(mem) < CHUNK_HEADER_SIZE) return PARSE_NEED_MORE_DATA;
return ParseVP8XChunks(dmux);
}
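
The fixed-size part of the VP8X chunk that the function above consumes can be summarized with a small standalone sketch (a minimal sketch assuming a complete buffer; the helper name and signature are illustrative, not libwebp API):

#include <stdint.h>
#include <string.h>

// Fixed part of a 'VP8X' chunk as consumed above: 4-byte tag, 4-byte
// little-endian chunk size, 1 feature-flags byte, 3 reserved bytes, then
// 24-bit little-endian (canvas width - 1) and (canvas height - 1).
static int ReadVP8XHeaderSketch(const uint8_t* p, size_t size,
                                int* flags, int* width, int* height) {
  if (size < 18 || memcmp(p, "VP8X", 4) != 0) return 0;
  *flags  = p[8];
  *width  = 1 + (p[12] | (p[13] << 8) | (p[14] << 16));
  *height = 1 + (p[15] | (p[16] << 8) | (p[17] << 16));
  return 1;
}
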
// -----------------------------------------------------------------------------
// Format validation
@ -594,18 +604,42 @@ static int IsValidSimpleFormat(const WebPDemuxer* const dmux) {
return 1;
}
// If 'exact' is true, check that the image resolution matches the canvas.
// If 'exact' is false, check that the x/y offsets do not exceed the canvas.
// TODO(jzern): this is insufficient in the fragmented image case if the
// expectation is that the fragments completely cover the canvas.
static int CheckFrameBounds(const Frame* const frame, int exact,
int canvas_width, int canvas_height) {
if (exact) {
if (frame->x_offset_ != 0 || frame->y_offset_ != 0) {
return 0;
}
if (frame->width_ != canvas_width || frame->height_ != canvas_height) {
return 0;
}
} else {
if (frame->x_offset_ < 0 || frame->y_offset_ < 0) return 0;
if (frame->width_ + frame->x_offset_ > canvas_width) return 0;
if (frame->height_ + frame->y_offset_ > canvas_height) return 0;
}
return 1;
}
static int IsValidExtendedFormat(const WebPDemuxer* const dmux) {
const int has_fragments = !!(dmux->feature_flags_ & FRAGMENTS_FLAG);
const int has_frames = !!(dmux->feature_flags_ & ANIMATION_FLAG);
const Frame* f;
const int is_animation = !!(dmux->feature_flags_ & ANIMATION_FLAG);
const int is_fragmented = !!(dmux->feature_flags_ & FRAGMENTS_FLAG);
const Frame* f = dmux->frames_;
if (dmux->state_ == WEBP_DEMUX_PARSING_HEADER) return 1;
if (dmux->canvas_width_ <= 0 || dmux->canvas_height_ <= 0) return 0;
if (dmux->loop_count_ < 0) return 0;
if (dmux->state_ == WEBP_DEMUX_DONE && dmux->frames_ == NULL) return 0;
#ifndef WEBP_EXPERIMENTAL_FEATURES
if (is_fragmented) return 0;
#endif
for (f = dmux->frames_; f != NULL; f = f->next_) {
while (f != NULL) {
const int cur_frame_set = f->frame_num_;
int frame_count = 0, fragment_count = 0;
@ -615,9 +649,10 @@ static int IsValidExtendedFormat(const WebPDemuxer* const dmux) {
const ChunkData* const image = f->img_components_;
const ChunkData* const alpha = f->img_components_ + 1;
if (!has_fragments && f->is_fragment_) return 0;
if (!has_frames && f->frame_num_ > 1) return 0;
if (f->x_offset_ < 0 || f->y_offset_ < 0) return 0;
if (is_fragmented && !f->is_fragment_) return 0;
if (!is_fragmented && f->is_fragment_) return 0;
if (!is_animation && f->frame_num_ > 1) return 0;
if (f->complete_) {
if (alpha->size_ == 0 && image->size_ == 0) return 0;
// Ensure alpha precedes image bitstream.
@ -639,12 +674,17 @@ static int IsValidExtendedFormat(const WebPDemuxer* const dmux) {
if (f->next_ != NULL) return 0;
}
if (f->width_ > 0 && f->height_ > 0 &&
!CheckFrameBounds(f, !(is_animation || is_fragmented),
dmux->canvas_width_, dmux->canvas_height_)) {
return 0;
}
fragment_count += f->is_fragment_;
++frame_count;
}
if (!has_fragments && frame_count > 1) return 0;
if (!is_fragmented && frame_count > 1) return 0;
if (fragment_count > 0 && frame_count != fragment_count) return 0;
if (f == NULL) break;
}
return 1;
}
@ -659,6 +699,7 @@ static void InitDemux(WebPDemuxer* const dmux, const MemBuffer* const mem) {
dmux->canvas_width_ = -1;
dmux->canvas_height_ = -1;
dmux->frames_tail_ = &dmux->frames_;
dmux->chunks_tail_ = &dmux->chunks_;
dmux->mem_ = *mem;
}
@ -670,11 +711,20 @@ WebPDemuxer* WebPDemuxInternal(const WebPData* data, int allow_partial,
MemBuffer mem;
WebPDemuxer* dmux;
if (state != NULL) *state = WEBP_DEMUX_PARSE_ERROR;
if (WEBP_ABI_IS_INCOMPATIBLE(version, WEBP_DEMUX_ABI_VERSION)) return NULL;
if (data == NULL || data->bytes == NULL || data->size == 0) return NULL;
if (!InitMemBuffer(&mem, data->bytes, data->size)) return NULL;
if (!ReadHeader(&mem)) return NULL;
status = ReadHeader(&mem);
if (status != PARSE_OK) {
if (state != NULL) {
*state = (status == PARSE_NEED_MORE_DATA) ? WEBP_DEMUX_PARSING_HEADER
: WEBP_DEMUX_PARSE_ERROR;
}
return NULL;
}
partial = (mem.buf_size_ < mem.riff_end_);
if (!allow_partial && partial) return NULL;
@ -683,16 +733,18 @@ WebPDemuxer* WebPDemuxInternal(const WebPData* data, int allow_partial,
if (dmux == NULL) return NULL;
InitDemux(dmux, &mem);
status = PARSE_ERROR;
for (parser = kMasterChunks; parser->parse != NULL; ++parser) {
if (!memcmp(parser->id, GetBuffer(&dmux->mem_), TAG_SIZE)) {
status = parser->parse(dmux);
if (status == PARSE_OK) dmux->state_ = WEBP_DEMUX_DONE;
if (status == PARSE_NEED_MORE_DATA && !partial) status = PARSE_ERROR;
if (status != PARSE_ERROR && !parser->valid(dmux)) status = PARSE_ERROR;
if (status == PARSE_ERROR) dmux->state_ = WEBP_DEMUX_PARSE_ERROR;
break;
}
}
if (state) *state = dmux->state_;
if (state != NULL) *state = dmux->state_;
if (status == PARSE_ERROR) {
WebPDemuxDelete(dmux);
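
The practical effect of ReadHeader now returning a ParseStatus is visible through the public API: a caller can tell "not enough data yet" apart from a real parse error. A hedged usage sketch (WebPDemuxPartial, WebPDemuxGetI and the WEBP_FF_* features are the existing public demux entry points; the buffer handling here is illustrative):

#include <stdio.h>
#include "webp/demux.h"

static void TryDemux(const uint8_t* bytes, size_t size) {
  WebPData data = { bytes, size };
  WebPDemuxState state;
  WebPDemuxer* const demux = WebPDemuxPartial(&data, &state);
  if (demux == NULL) {
    // With the change above, a too-short header reports PARSING_HEADER
    // instead of a plain parse error.
    printf((state == WEBP_DEMUX_PARSING_HEADER)
               ? "need more data before the header can be parsed\n"
               : "parse error\n");
    return;
  }
  printf("canvas: %u x %u\n",
         (unsigned)WebPDemuxGetI(demux, WEBP_FF_CANVAS_WIDTH),
         (unsigned)WebPDemuxGetI(demux, WEBP_FF_CANVAS_HEIGHT));
  WebPDemuxDelete(demux);
}
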
@ -809,8 +861,10 @@ static int SynthesizeFrame(const WebPDemuxer* const dmux,
iter->y_offset = fragment->y_offset_;
iter->width = fragment->width_;
iter->height = fragment->height_;
iter->has_alpha = fragment->has_alpha_;
iter->duration = fragment->duration_;
iter->dispose_method = fragment->dispose_method_;
iter->blend_method = fragment->blend_method_;
iter->complete = fragment->complete_;
iter->fragment.bytes = payload;
iter->fragment.size = payload_size;
@ -946,6 +1000,3 @@ void WebPDemuxReleaseChunkIterator(WebPChunkIterator* iter) {
(void)iter;
}
#if defined(__cplusplus) || defined(c_plusplus)
} // extern "C"
#endif

View File

@ -1,8 +1,10 @@
// Copyright 2011 Google Inc. All Rights Reserved.
//
// This code is licensed under the same terms as WebM:
// Software License Agreement: http://www.webmproject.org/license/software/
// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// CPU detection
@ -15,10 +17,6 @@
#include <cpu-features.h>
#endif
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
//------------------------------------------------------------------------------
// SSE2 detection.
//
@ -80,6 +78,3 @@ VP8CPUInfo VP8GetCPUInfo = armCPUInfo;
VP8CPUInfo VP8GetCPUInfo = NULL;
#endif
#if defined(__cplusplus) || defined(c_plusplus)
} // extern "C"
#endif

View File

@ -1,8 +1,10 @@
// Copyright 2010 Google Inc. All Rights Reserved.
//
// This code is licensed under the same terms as WebM:
// Software License Agreement: http://www.webmproject.org/license/software/
// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// Speed-critical decoding functions.
@ -12,10 +14,6 @@
#include "./dsp.h"
#include "../dec/vp8i.h"
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
//------------------------------------------------------------------------------
// run-time tables (~4k)
@ -59,6 +57,14 @@ static WEBP_INLINE uint8_t clip_8b(int v) {
#define STORE(x, y, v) \
dst[x + y * BPS] = clip_8b(dst[x + y * BPS] + ((v) >> 3))
#define STORE2(y, dc, d, c) do { \
const int DC = (dc); \
STORE(0, y, DC + (d)); \
STORE(1, y, DC + (c)); \
STORE(2, y, DC - (c)); \
STORE(3, y, DC - (d)); \
} while (0)
static const int kC1 = 20091 + (1 << 16);
static const int kC2 = 35468;
#define MUL(a, b) (((a) * (b)) >> 16)
@ -101,7 +107,21 @@ static void TransformOne(const int16_t* in, uint8_t* dst) {
dst += BPS;
}
}
// Simplified transform when only in[0], in[1] and in[4] are non-zero
static void TransformAC3(const int16_t* in, uint8_t* dst) {
const int a = in[0] + 4;
const int c4 = MUL(in[4], kC2);
const int d4 = MUL(in[4], kC1);
const int c1 = MUL(in[1], kC2);
const int d1 = MUL(in[1], kC1);
STORE2(0, a + d4, d1, c1);
STORE2(1, a + c4, d1, c1);
STORE2(2, a - c4, d1, c1);
STORE2(3, a - d4, d1, c1);
}
#undef MUL
#undef STORE2
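
As a reading aid, here is a standalone plain-C sketch of the update values the new TransformAC3 adds to the 4x4 destination block before clipping (constants and the MUL macro are duplicated locally so the snippet stands alone; the helper name is illustrative):

#include <stdint.h>

#define AC3_MUL(a, b) (((a) * (b)) >> 16)

// delta[y][x] is the (pre-clipping) value TransformAC3 adds to
// dst[x + y * BPS] when only in[0], in[1] and in[4] are non-zero.
static void AC3Deltas(const int16_t in[16], int delta[4][4]) {
  static const int kC1 = 20091 + (1 << 16);
  static const int kC2 = 35468;
  const int a  = in[0] + 4;
  const int c4 = AC3_MUL(in[4], kC2), d4 = AC3_MUL(in[4], kC1);
  const int c1 = AC3_MUL(in[1], kC2), d1 = AC3_MUL(in[1], kC1);
  const int row_dc[4] = { a + d4, a + c4, a - c4, a - d4 };  // from the STORE2 calls
  const int col[4]    = { d1, c1, -c1, -d1 };                // from the STORE2 body
  int x, y;
  for (y = 0; y < 4; ++y) {
    for (x = 0; x < 4; ++x) {
      delta[y][x] = (row_dc[y] + col[x]) >> 3;
    }
  }
}
#undef AC3_MUL
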
static void TransformTwo(const int16_t* in, uint8_t* dst, int do_two) {
TransformOne(in, dst);
@ -677,6 +697,7 @@ static void HFilter8i(uint8_t* u, uint8_t* v, int stride,
//------------------------------------------------------------------------------
VP8DecIdct2 VP8Transform;
VP8DecIdct VP8TransformAC3;
VP8DecIdct VP8TransformUV;
VP8DecIdct VP8TransformDC;
VP8DecIdct VP8TransformDCUV;
@ -704,6 +725,7 @@ void VP8DspInit(void) {
VP8TransformUV = TransformUV;
VP8TransformDC = TransformDC;
VP8TransformDCUV = TransformDCUV;
VP8TransformAC3 = TransformAC3;
VP8VFilter16 = VFilter16;
VP8HFilter16 = HFilter16;
@ -732,6 +754,3 @@ void VP8DspInit(void) {
}
}
#if defined(__cplusplus) || defined(c_plusplus)
} // extern "C"
#endif

View File

@ -1,8 +1,10 @@
// Copyright 2012 Google Inc. All Rights Reserved.
//
// This code is licensed under the same terms as WebM:
// Software License Agreement: http://www.webmproject.org/license/software/
// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// ARM NEON version of dsp functions and loop filtering.
@ -12,15 +14,11 @@
#include "./dsp.h"
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
#if defined(WEBP_USE_NEON)
#include "../dec/vp8i.h"
#define QRegs "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", \
#define QRegs "q0", "q1", "q2", "q3", \
"q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
#define FLIP_SIGN_BIT2(a, b, s) \
@ -99,9 +97,9 @@ static void SimpleVFilter16NEON(uint8_t* p, int stride, int thresh) {
"vld1.u8 {q1}, [%[p]], %[stride] \n" // p1
"vld1.u8 {q2}, [%[p]], %[stride] \n" // p0
"vld1.u8 {q3}, [%[p]], %[stride] \n" // q0
"vld1.u8 {q4}, [%[p]] \n" // q1
"vld1.u8 {q12}, [%[p]] \n" // q1
DO_FILTER2(q1, q2, q3, q4, %[thresh])
DO_FILTER2(q1, q2, q3, q12, %[thresh])
"sub %[p], %[p], %[stride], lsl #1 \n" // p -= 2 * stride
@ -120,18 +118,18 @@ static void SimpleHFilter16NEON(uint8_t* p, int stride, int thresh) {
"add r5, r4, %[stride] \n" // base2 = base1 + stride
LOAD8x4(d2, d3, d4, d5, [r4], [r5], r6)
LOAD8x4(d6, d7, d8, d9, [r4], [r5], r6)
"vswp d3, d6 \n" // p1:q1 p0:q3
"vswp d5, d8 \n" // q0:q2 q1:q4
"vswp q2, q3 \n" // p1:q1 p0:q2 q0:q3 q1:q4
LOAD8x4(d24, d25, d26, d27, [r4], [r5], r6)
"vswp d3, d24 \n" // p1:q1 p0:q3
"vswp d5, d26 \n" // q0:q2 q1:q4
"vswp q2, q12 \n" // p1:q1 p0:q2 q0:q3 q1:q4
DO_FILTER2(q1, q2, q3, q4, %[thresh])
DO_FILTER2(q1, q2, q12, q13, %[thresh])
"sub %[p], %[p], #1 \n" // p - 1
"vswp d5, d6 \n"
"vswp d5, d24 \n"
STORE8x2(d4, d5, [%[p]], %[stride])
STORE8x2(d6, d7, [%[p]], %[stride])
STORE8x2(d24, d25, [%[p]], %[stride])
: [p] "+r"(p)
: [stride] "r"(stride), [thresh] "r"(thresh)
@ -158,7 +156,7 @@ static void SimpleHFilter16iNEON(uint8_t* p, int stride, int thresh) {
//-----------------------------------------------------------------------------
// Inverse transforms (Paragraph 14.4)
static void TransformOneNEON(const int16_t *in, uint8_t *dst) {
static void TransformOne(const int16_t* in, uint8_t* dst) {
const int kBPS = BPS;
const int16_t constants[] = {20091, 17734, 0, 0};
/* kC1, kC2. Padded because vld1.16 loads 8 bytes
@ -307,13 +305,44 @@ static void TransformOneNEON(const int16_t *in, uint8_t *dst) {
);
}
static void TransformTwoNEON(const int16_t* in, uint8_t* dst, int do_two) {
TransformOneNEON(in, dst);
static void TransformTwo(const int16_t* in, uint8_t* dst, int do_two) {
TransformOne(in, dst);
if (do_two) {
TransformOneNEON(in + 16, dst + 4);
TransformOne(in + 16, dst + 4);
}
}
static void TransformDC(const int16_t* in, uint8_t* dst) {
const int DC = (in[0] + 4) >> 3;
const int kBPS = BPS;
__asm__ volatile (
"vdup.16 q1, %[DC] \n"
"vld1.32 d0[0], [%[dst]], %[kBPS] \n"
"vld1.32 d1[0], [%[dst]], %[kBPS] \n"
"vld1.32 d0[1], [%[dst]], %[kBPS] \n"
"vld1.32 d1[1], [%[dst]], %[kBPS] \n"
"sub %[dst], %[dst], %[kBPS], lsl #2 \n"
// add DC and convert to s16.
"vaddw.u8 q2, q1, d0 \n"
"vaddw.u8 q3, q1, d1 \n"
// convert back to u8 with saturation
"vqmovun.s16 d0, q2 \n"
"vqmovun.s16 d1, q3 \n"
"vst1.32 d0[0], [%[dst]], %[kBPS] \n"
"vst1.32 d1[0], [%[dst]], %[kBPS] \n"
"vst1.32 d0[1], [%[dst]], %[kBPS] \n"
"vst1.32 d1[1], [%[dst]] \n"
: [in] "+r"(in), [dst] "+r"(dst) /* modified registers */
: [kBPS] "r"(kBPS), /* constants */
[DC] "r"(DC)
: "memory", "q0", "q1", "q2", "q3" /* clobbered */
);
}
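
For readers not fluent in the inline assembly, a scalar sketch of what the NEON TransformDC above computes (bps is a parameter here only to keep the snippet self-contained; in the library BPS is a constant from dsp.h):

#include <stdint.h>

// Adds the DC term (in[0] + 4) >> 3 to every pixel of a 4x4 block,
// saturating to [0, 255] -- the same result the vaddw/vqmovun pair yields.
static void TransformDC_Scalar(const int16_t* in, uint8_t* dst, int bps) {
  const int DC = (in[0] + 4) >> 3;
  int x, y;
  for (y = 0; y < 4; ++y) {
    for (x = 0; x < 4; ++x) {
      const int v = dst[x + y * bps] + DC;
      dst[x + y * bps] = (uint8_t)((v < 0) ? 0 : (v > 255) ? 255 : v);
    }
  }
}
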
static void TransformWHT(const int16_t* in, int16_t* out) {
const int kStep = 32; // The store is only incrementing the pointer as if we
// had stored a single byte.
@ -322,39 +351,39 @@ static void TransformWHT(const int16_t* in, int16_t* out) {
// load data into q0, q1
"vld1.16 {q0, q1}, [%[in]] \n"
"vaddl.s16 q2, d0, d3 \n" // a0 = in[0] + in[12]
"vaddl.s16 q3, d1, d2 \n" // a1 = in[4] + in[8]
"vsubl.s16 q4, d1, d2 \n" // a2 = in[4] - in[8]
"vsubl.s16 q5, d0, d3 \n" // a3 = in[0] - in[12]
"vaddl.s16 q2, d0, d3 \n" // a0 = in[0] + in[12]
"vaddl.s16 q3, d1, d2 \n" // a1 = in[4] + in[8]
"vsubl.s16 q10, d1, d2 \n" // a2 = in[4] - in[8]
"vsubl.s16 q11, d0, d3 \n" // a3 = in[0] - in[12]
"vadd.s32 q0, q2, q3 \n" // tmp[0] = a0 + a1
"vsub.s32 q2, q2, q3 \n" // tmp[8] = a0 - a1
"vadd.s32 q1, q5, q4 \n" // tmp[4] = a3 + a2
"vsub.s32 q3, q5, q4 \n" // tmp[12] = a3 - a2
"vadd.s32 q0, q2, q3 \n" // tmp[0] = a0 + a1
"vsub.s32 q2, q2, q3 \n" // tmp[8] = a0 - a1
"vadd.s32 q1, q11, q10 \n" // tmp[4] = a3 + a2
"vsub.s32 q3, q11, q10 \n" // tmp[12] = a3 - a2
// Transpose
// q0 = tmp[0, 4, 8, 12], q1 = tmp[2, 6, 10, 14]
// q2 = tmp[1, 5, 9, 13], q3 = tmp[3, 7, 11, 15]
"vswp d1, d4 \n" // vtrn.64 q0, q2
"vswp d3, d6 \n" // vtrn.64 q1, q3
"vswp d1, d4 \n" // vtrn.64 q0, q2
"vswp d3, d6 \n" // vtrn.64 q1, q3
"vtrn.32 q0, q1 \n"
"vtrn.32 q2, q3 \n"
"vmov.s32 q4, #3 \n" // dc = 3
"vadd.s32 q0, q0, q4 \n" // dc = tmp[0] + 3
"vadd.s32 q6, q0, q3 \n" // a0 = dc + tmp[3]
"vadd.s32 q7, q1, q2 \n" // a1 = tmp[1] + tmp[2]
"vsub.s32 q8, q1, q2 \n" // a2 = tmp[1] - tmp[2]
"vsub.s32 q9, q0, q3 \n" // a3 = dc - tmp[3]
"vmov.s32 q10, #3 \n" // dc = 3
"vadd.s32 q0, q0, q10 \n" // dc = tmp[0] + 3
"vadd.s32 q12, q0, q3 \n" // a0 = dc + tmp[3]
"vadd.s32 q13, q1, q2 \n" // a1 = tmp[1] + tmp[2]
"vsub.s32 q8, q1, q2 \n" // a2 = tmp[1] - tmp[2]
"vsub.s32 q9, q0, q3 \n" // a3 = dc - tmp[3]
"vadd.s32 q0, q6, q7 \n"
"vshrn.s32 d0, q0, #3 \n" // (a0 + a1) >> 3
"vadd.s32 q0, q12, q13 \n"
"vshrn.s32 d0, q0, #3 \n" // (a0 + a1) >> 3
"vadd.s32 q1, q9, q8 \n"
"vshrn.s32 d1, q1, #3 \n" // (a3 + a2) >> 3
"vsub.s32 q2, q6, q7 \n"
"vshrn.s32 d2, q2, #3 \n" // (a0 - a1) >> 3
"vshrn.s32 d1, q1, #3 \n" // (a3 + a2) >> 3
"vsub.s32 q2, q12, q13 \n"
"vshrn.s32 d2, q2, #3 \n" // (a0 - a1) >> 3
"vsub.s32 q3, q9, q8 \n"
"vshrn.s32 d3, q3, #3 \n" // (a3 - a2) >> 3
"vshrn.s32 d3, q3, #3 \n" // (a3 - a2) >> 3
// set the results to output
"vst1.16 d0[0], [%[out]], %[kStep] \n"
@ -376,8 +405,8 @@ static void TransformWHT(const int16_t* in, int16_t* out) {
: [out] "+r"(out) // modified registers
: [in] "r"(in), [kStep] "r"(kStep) // constants
: "memory", "q0", "q1", "q2", "q3", "q4",
"q5", "q6", "q7", "q8", "q9" // clobbered
: "memory", "q0", "q1", "q2", "q3",
"q8", "q9", "q10", "q11", "q12", "q13" // clobbered
);
}
@ -390,7 +419,9 @@ extern void VP8DspInitNEON(void);
void VP8DspInitNEON(void) {
#if defined(WEBP_USE_NEON)
VP8Transform = TransformTwoNEON;
VP8Transform = TransformTwo;
VP8TransformAC3 = TransformOne; // no special code here
VP8TransformDC = TransformDC;
VP8TransformWHT = TransformWHT;
VP8SimpleVFilter16 = SimpleVFilter16NEON;
@ -400,6 +431,3 @@ void VP8DspInitNEON(void) {
#endif // WEBP_USE_NEON
}
#if defined(__cplusplus) || defined(c_plusplus)
} // extern "C"
#endif

View File

@ -1,8 +1,10 @@
// Copyright 2011 Google Inc. All Rights Reserved.
//
// This code is licensed under the same terms as WebM:
// Software License Agreement: http://www.webmproject.org/license/software/
// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// SSE2 version of some decoding functions (idct, loop filtering).
@ -12,12 +14,12 @@
#include "./dsp.h"
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
#if defined(WEBP_USE_SSE2)
// The 3-coeff sparse transform in SSE2 is not really faster than the plain-C
// one it seems => disable it by default. Uncomment the following to enable:
// #define USE_TRANSFORM_AC3
#include <emmintrin.h>
#include "../dec/vp8i.h"
@ -199,16 +201,16 @@ static void TransformSSE2(const int16_t* in, uint8_t* dst, int do_two) {
__m128i dst0, dst1, dst2, dst3;
if (do_two) {
// Load eight bytes/pixels per line.
dst0 = _mm_loadl_epi64((__m128i*)&dst[0 * BPS]);
dst1 = _mm_loadl_epi64((__m128i*)&dst[1 * BPS]);
dst2 = _mm_loadl_epi64((__m128i*)&dst[2 * BPS]);
dst3 = _mm_loadl_epi64((__m128i*)&dst[3 * BPS]);
dst0 = _mm_loadl_epi64((__m128i*)(dst + 0 * BPS));
dst1 = _mm_loadl_epi64((__m128i*)(dst + 1 * BPS));
dst2 = _mm_loadl_epi64((__m128i*)(dst + 2 * BPS));
dst3 = _mm_loadl_epi64((__m128i*)(dst + 3 * BPS));
} else {
// Load four bytes/pixels per line.
dst0 = _mm_cvtsi32_si128(*(int*)&dst[0 * BPS]);
dst1 = _mm_cvtsi32_si128(*(int*)&dst[1 * BPS]);
dst2 = _mm_cvtsi32_si128(*(int*)&dst[2 * BPS]);
dst3 = _mm_cvtsi32_si128(*(int*)&dst[3 * BPS]);
dst0 = _mm_cvtsi32_si128(*(int*)(dst + 0 * BPS));
dst1 = _mm_cvtsi32_si128(*(int*)(dst + 1 * BPS));
dst2 = _mm_cvtsi32_si128(*(int*)(dst + 2 * BPS));
dst3 = _mm_cvtsi32_si128(*(int*)(dst + 3 * BPS));
}
// Convert to 16b.
dst0 = _mm_unpacklo_epi8(dst0, zero);
@ -228,20 +230,66 @@ static void TransformSSE2(const int16_t* in, uint8_t* dst, int do_two) {
// Store the results.
if (do_two) {
// Store eight bytes/pixels per line.
_mm_storel_epi64((__m128i*)&dst[0 * BPS], dst0);
_mm_storel_epi64((__m128i*)&dst[1 * BPS], dst1);
_mm_storel_epi64((__m128i*)&dst[2 * BPS], dst2);
_mm_storel_epi64((__m128i*)&dst[3 * BPS], dst3);
_mm_storel_epi64((__m128i*)(dst + 0 * BPS), dst0);
_mm_storel_epi64((__m128i*)(dst + 1 * BPS), dst1);
_mm_storel_epi64((__m128i*)(dst + 2 * BPS), dst2);
_mm_storel_epi64((__m128i*)(dst + 3 * BPS), dst3);
} else {
// Store four bytes/pixels per line.
*((int32_t *)&dst[0 * BPS]) = _mm_cvtsi128_si32(dst0);
*((int32_t *)&dst[1 * BPS]) = _mm_cvtsi128_si32(dst1);
*((int32_t *)&dst[2 * BPS]) = _mm_cvtsi128_si32(dst2);
*((int32_t *)&dst[3 * BPS]) = _mm_cvtsi128_si32(dst3);
*(int*)(dst + 0 * BPS) = _mm_cvtsi128_si32(dst0);
*(int*)(dst + 1 * BPS) = _mm_cvtsi128_si32(dst1);
*(int*)(dst + 2 * BPS) = _mm_cvtsi128_si32(dst2);
*(int*)(dst + 3 * BPS) = _mm_cvtsi128_si32(dst3);
}
}
}
#if defined(USE_TRANSFORM_AC3)
#define MUL(a, b) (((a) * (b)) >> 16)
static void TransformAC3SSE2(const int16_t* in, uint8_t* dst) {
static const int kC1 = 20091 + (1 << 16);
static const int kC2 = 35468;
const __m128i A = _mm_set1_epi16(in[0] + 4);
const __m128i c4 = _mm_set1_epi16(MUL(in[4], kC2));
const __m128i d4 = _mm_set1_epi16(MUL(in[4], kC1));
const int c1 = MUL(in[1], kC2);
const int d1 = MUL(in[1], kC1);
const __m128i CD = _mm_set_epi16(0, 0, 0, 0, -d1, -c1, c1, d1);
const __m128i B = _mm_adds_epi16(A, CD);
const __m128i m0 = _mm_adds_epi16(B, d4);
const __m128i m1 = _mm_adds_epi16(B, c4);
const __m128i m2 = _mm_subs_epi16(B, c4);
const __m128i m3 = _mm_subs_epi16(B, d4);
const __m128i zero = _mm_setzero_si128();
// Load the source pixels.
__m128i dst0 = _mm_cvtsi32_si128(*(int*)(dst + 0 * BPS));
__m128i dst1 = _mm_cvtsi32_si128(*(int*)(dst + 1 * BPS));
__m128i dst2 = _mm_cvtsi32_si128(*(int*)(dst + 2 * BPS));
__m128i dst3 = _mm_cvtsi32_si128(*(int*)(dst + 3 * BPS));
// Convert to 16b.
dst0 = _mm_unpacklo_epi8(dst0, zero);
dst1 = _mm_unpacklo_epi8(dst1, zero);
dst2 = _mm_unpacklo_epi8(dst2, zero);
dst3 = _mm_unpacklo_epi8(dst3, zero);
// Add the inverse transform.
dst0 = _mm_adds_epi16(dst0, _mm_srai_epi16(m0, 3));
dst1 = _mm_adds_epi16(dst1, _mm_srai_epi16(m1, 3));
dst2 = _mm_adds_epi16(dst2, _mm_srai_epi16(m2, 3));
dst3 = _mm_adds_epi16(dst3, _mm_srai_epi16(m3, 3));
// Unsigned saturate to 8b.
dst0 = _mm_packus_epi16(dst0, dst0);
dst1 = _mm_packus_epi16(dst1, dst1);
dst2 = _mm_packus_epi16(dst2, dst2);
dst3 = _mm_packus_epi16(dst3, dst3);
// Store the results.
*(int*)(dst + 0 * BPS) = _mm_cvtsi128_si32(dst0);
*(int*)(dst + 1 * BPS) = _mm_cvtsi128_si32(dst1);
*(int*)(dst + 2 * BPS) = _mm_cvtsi128_si32(dst2);
*(int*)(dst + 3 * BPS) = _mm_cvtsi128_si32(dst3);
}
#undef MUL
#endif // USE_TRANSFORM_AC3
//------------------------------------------------------------------------------
// Loop Filter (Paragraph 15)
@ -886,6 +934,9 @@ extern void VP8DspInitSSE2(void);
void VP8DspInitSSE2(void) {
#if defined(WEBP_USE_SSE2)
VP8Transform = TransformSSE2;
#if defined(USE_TRANSFORM_AC3)
VP8TransformAC3 = TransformAC3SSE2;
#endif
VP8VFilter16 = VFilter16SSE2;
VP8HFilter16 = HFilter16SSE2;
@ -903,6 +954,3 @@ void VP8DspInitSSE2(void) {
#endif // WEBP_USE_SSE2
}
#if defined(__cplusplus) || defined(c_plusplus)
} // extern "C"
#endif

View File

@ -1,8 +1,10 @@
// Copyright 2011 Google Inc. All Rights Reserved.
//
// This code is licensed under the same terms as WebM:
// Software License Agreement: http://www.webmproject.org/license/software/
// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// Speed-critical functions.
@ -14,14 +16,15 @@
#include "../webp/types.h"
#if defined(__cplusplus) || defined(c_plusplus)
#ifdef __cplusplus
extern "C" {
#endif
//------------------------------------------------------------------------------
// CPU detection
#if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_IX86))
#if defined(_MSC_VER) && _MSC_VER > 1310 && \
(defined(_M_X64) || defined(_M_IX86))
#define WEBP_MSC_SSE2 // Visual C++ SSE2 targets
#endif
@ -83,6 +86,11 @@ typedef int (*VP8QuantizeBlock)(int16_t in[16], int16_t out[16],
int n, const struct VP8Matrix* const mtx);
extern VP8QuantizeBlock VP8EncQuantizeBlock;
// specific to 2nd transform:
typedef int (*VP8QuantizeBlockWHT)(int16_t in[16], int16_t out[16],
const struct VP8Matrix* const mtx);
extern VP8QuantizeBlockWHT VP8EncQuantizeBlockWHT;
// Collect histogram for susceptibility calculation and accumulate in histo[].
struct VP8Histogram;
typedef void (*VP8CHisto)(const uint8_t* ref, const uint8_t* pred,
@ -100,6 +108,7 @@ typedef void (*VP8DecIdct)(const int16_t* coeffs, uint8_t* dst);
// when doing two transforms, coeffs is actually int16_t[2][16].
typedef void (*VP8DecIdct2)(const int16_t* coeffs, uint8_t* dst, int do_two);
extern VP8DecIdct2 VP8Transform;
extern VP8DecIdct VP8TransformAC3;
extern VP8DecIdct VP8TransformUV;
extern VP8DecIdct VP8TransformDC;
extern VP8DecIdct VP8TransformDCUV;
@ -144,6 +153,8 @@ void VP8DspInit(void);
#define FANCY_UPSAMPLING // undefined to remove fancy upsampling support
// Convert a pair of y/u/v lines together to the output rgb/a colorspace.
// bottom_y can be NULL if only one line of output is needed (at top/bottom).
typedef void (*WebPUpsampleLinePairFunc)(
const uint8_t* top_y, const uint8_t* bottom_y,
const uint8_t* top_u, const uint8_t* top_v,
@ -206,7 +217,7 @@ void WebPInitPremultiplyNEON(void);
//------------------------------------------------------------------------------
#if defined(__cplusplus) || defined(c_plusplus)
#ifdef __cplusplus
} // extern "C"
#endif

View File

@ -1,22 +1,22 @@
// Copyright 2011 Google Inc. All Rights Reserved.
//
// This code is licensed under the same terms as WebM:
// Software License Agreement: http://www.webmproject.org/license/software/
// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// Speed-critical encoding functions.
//
// Author: Skal (pascal.massimino@gmail.com)
#include <assert.h>
#include <stdlib.h> // for abs()
#include "./dsp.h"
#include "../enc/vp8enci.h"
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
static WEBP_INLINE uint8_t clip_8b(int v) {
return (!(v & ~0xff)) ? v : (v < 0) ? 0 : 255;
}
@ -142,9 +142,9 @@ static void FTransform(const uint8_t* src, const uint8_t* ref, int16_t* out) {
const int a1 = (d1 + d2);
const int a2 = (d1 - d2);
const int a3 = (d0 - d3);
tmp[0 + i * 4] = (a0 + a1) << 3; // 14b [-8160,8160]
tmp[0 + i * 4] = (a0 + a1) * 8; // 14b [-8160,8160]
tmp[1 + i * 4] = (a2 * 2217 + a3 * 5352 + 1812) >> 9; // [-7536,7542]
tmp[2 + i * 4] = (a0 - a1) << 3;
tmp[2 + i * 4] = (a0 - a1) * 8;
tmp[3 + i * 4] = (a3 * 2217 - a2 * 5352 + 937) >> 9;
}
for (i = 0; i < 4; ++i) {
@ -187,31 +187,32 @@ static void ITransformWHT(const int16_t* in, int16_t* out) {
}
static void FTransformWHT(const int16_t* in, int16_t* out) {
int tmp[16];
// input is 12b signed
int32_t tmp[16];
int i;
for (i = 0; i < 4; ++i, in += 64) {
const int a0 = (in[0 * 16] + in[2 * 16]) << 2;
const int a1 = (in[1 * 16] + in[3 * 16]) << 2;
const int a2 = (in[1 * 16] - in[3 * 16]) << 2;
const int a3 = (in[0 * 16] - in[2 * 16]) << 2;
tmp[0 + i * 4] = (a0 + a1) + (a0 != 0);
const int a0 = (in[0 * 16] + in[2 * 16]); // 13b
const int a1 = (in[1 * 16] + in[3 * 16]);
const int a2 = (in[1 * 16] - in[3 * 16]);
const int a3 = (in[0 * 16] - in[2 * 16]);
tmp[0 + i * 4] = a0 + a1; // 14b
tmp[1 + i * 4] = a3 + a2;
tmp[2 + i * 4] = a3 - a2;
tmp[3 + i * 4] = a0 - a1;
}
for (i = 0; i < 4; ++i) {
const int a0 = (tmp[0 + i] + tmp[8 + i]);
const int a0 = (tmp[0 + i] + tmp[8 + i]); // 15b
const int a1 = (tmp[4 + i] + tmp[12+ i]);
const int a2 = (tmp[4 + i] - tmp[12+ i]);
const int a3 = (tmp[0 + i] - tmp[8 + i]);
const int b0 = a0 + a1;
const int b0 = a0 + a1; // 16b
const int b1 = a3 + a2;
const int b2 = a3 - a2;
const int b3 = a0 - a1;
out[ 0 + i] = (b0 + (b0 > 0) + 3) >> 3;
out[ 4 + i] = (b1 + (b1 > 0) + 3) >> 3;
out[ 8 + i] = (b2 + (b2 > 0) + 3) >> 3;
out[12 + i] = (b3 + (b3 > 0) + 3) >> 3;
out[ 0 + i] = b0 >> 1; // 15b
out[ 4 + i] = b1 >> 1;
out[ 8 + i] = b2 >> 1;
out[12 + i] = b3 >> 1;
}
}
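
The bit-depth annotations added above can be sanity-checked with a tiny standalone program (a sketch under the stated assumption that the WHT inputs really are 12-bit signed):

#include <assert.h>
#include <stdint.h>

int main(void) {
  const int in_max  = 2047;               // 12b signed maximum
  const int a_max   = in_max + in_max;    // 13b: 4094
  const int tmp_max = a_max + a_max;      // 14b: 8188   (first pass)
  const int A_max   = tmp_max + tmp_max;  // 15b: 16376  (second pass)
  const int b_max   = A_max + A_max;      // 16b: 32752
  assert(b_max <= INT16_MAX);             // b0 still fits a signed 16-bit value
  assert((b_max >> 1) == 16376);          // out[] entries are 15b after >> 1
  return 0;
}
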
@ -649,6 +650,31 @@ static int QuantizeBlock(int16_t in[16], int16_t out[16],
return (last >= 0);
}
static int QuantizeBlockWHT(int16_t in[16], int16_t out[16],
const VP8Matrix* const mtx) {
int n, last = -1;
for (n = 0; n < 16; ++n) {
const int j = kZigzag[n];
const int sign = (in[j] < 0);
const int coeff = sign ? -in[j] : in[j];
assert(mtx->sharpen_[j] == 0);
if (coeff > mtx->zthresh_[j]) {
const int Q = mtx->q_[j];
const int iQ = mtx->iq_[j];
const int B = mtx->bias_[j];
out[n] = QUANTDIV(coeff, iQ, B);
if (out[n] > MAX_LEVEL) out[n] = MAX_LEVEL;
if (sign) out[n] = -out[n];
in[j] = out[n] * Q;
if (out[n]) last = n;
} else {
out[n] = 0;
in[j] = 0;
}
}
return (last >= 0);
}
//------------------------------------------------------------------------------
// Block copy
@ -683,6 +709,7 @@ VP8Metric VP8SSE4x4;
VP8WMetric VP8TDisto4x4;
VP8WMetric VP8TDisto16x16;
VP8QuantizeBlock VP8EncQuantizeBlock;
VP8QuantizeBlockWHT VP8EncQuantizeBlockWHT;
VP8BlockCopy VP8Copy4x4;
extern void VP8EncDspInitSSE2(void);
@ -707,6 +734,7 @@ void VP8EncDspInit(void) {
VP8TDisto4x4 = Disto4x4;
VP8TDisto16x16 = Disto16x16;
VP8EncQuantizeBlock = QuantizeBlock;
VP8EncQuantizeBlockWHT = QuantizeBlockWHT;
VP8Copy4x4 = Copy4x4;
// If defined, use CPUInfo() to overwrite some pointers with faster versions.
@ -723,6 +751,3 @@ void VP8EncDspInit(void) {
}
}
#if defined(__cplusplus) || defined(c_plusplus)
} // extern "C"
#endif

View File

@ -1,8 +1,10 @@
// Copyright 2012 Google Inc. All Rights Reserved.
//
// This code is licensed under the same terms as WebM:
// Software License Agreement: http://www.webmproject.org/license/software/
// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// ARM NEON version of speed-critical encoding functions.
@ -11,10 +13,6 @@
#include "./dsp.h"
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
#if defined(WEBP_USE_NEON)
#include "../enc/vp8enci.h"
@ -322,7 +320,7 @@ static void FTransform(const uint8_t* src, const uint8_t* ref,
"vmlal.s16 q11, d6, d17 \n" // c1*2217 + d1*5352 + 12000
"vmlsl.s16 q12, d6, d16 \n" // d1*2217 - c1*5352 + 51000
"vmvn.s16 d4, d4 \n"
"vmvn d4, d4 \n" // !(d1 == 0)
// op[4] = (c1*2217 + d1*5352 + 12000)>>16
"vshrn.s32 d1, q11, #16 \n"
// op[4] += (d1!=0)
@ -363,19 +361,12 @@ static void FTransformWHT(const int16_t* in, int16_t* out) {
"vld1.16 d2[3], [%[in]], %[kStep] \n"
"vld1.16 d3[3], [%[in]], %[kStep] \n"
"vaddl.s16 q2, d0, d2 \n"
"vshl.s32 q2, q2, #2 \n" // a0=(in[0*16]+in[2*16])<<2
"vaddl.s16 q3, d1, d3 \n"
"vshl.s32 q3, q3, #2 \n" // a1=(in[1*16]+in[3*16])<<2
"vsubl.s16 q4, d1, d3 \n"
"vshl.s32 q4, q4, #2 \n" // a2=(in[1*16]-in[3*16])<<2
"vsubl.s16 q5, d0, d2 \n"
"vshl.s32 q5, q5, #2 \n" // a3=(in[0*16]-in[2*16])<<2
"vaddl.s16 q2, d0, d2 \n" // a0=(in[0*16]+in[2*16])
"vaddl.s16 q3, d1, d3 \n" // a1=(in[1*16]+in[3*16])
"vsubl.s16 q4, d1, d3 \n" // a2=(in[1*16]-in[3*16])
"vsubl.s16 q5, d0, d2 \n" // a3=(in[0*16]-in[2*16])
"vceq.s32 q10, q2, #0 \n"
"vmvn.s32 q10, q10 \n" // (a0 != 0)
"vqadd.s32 q6, q2, q3 \n" // (a0 + a1)
"vqsub.s32 q6, q6, q10 \n" // (a0 + a1) + (a0 != 0)
"vqadd.s32 q6, q2, q3 \n" // a0 + a1
"vqadd.s32 q7, q5, q4 \n" // a3 + a2
"vqsub.s32 q8, q5, q4 \n" // a3 - a2
"vqsub.s32 q9, q2, q3 \n" // a0 - a1
@ -398,27 +389,10 @@ static void FTransformWHT(const int16_t* in, int16_t* out) {
"vqsub.s32 q6, q3, q2 \n" // b2 = a3 - a2
"vqsub.s32 q7, q0, q1 \n" // b3 = a0 - a1
"vmov.s32 q0, #3 \n" // q0 = 3
"vcgt.s32 q1, q4, #0 \n" // (b0>0)
"vqsub.s32 q2, q4, q1 \n" // (b0+(b0>0))
"vqadd.s32 q3, q2, q0 \n" // (b0+(b0>0)+3)
"vshrn.s32 d18, q3, #3 \n" // (b0+(b0>0)+3) >> 3
"vcgt.s32 q1, q5, #0 \n" // (b1>0)
"vqsub.s32 q2, q5, q1 \n" // (b1+(b1>0))
"vqadd.s32 q3, q2, q0 \n" // (b1+(b1>0)+3)
"vshrn.s32 d19, q3, #3 \n" // (b1+(b1>0)+3) >> 3
"vcgt.s32 q1, q6, #0 \n" // (b2>0)
"vqsub.s32 q2, q6, q1 \n" // (b2+(b2>0))
"vqadd.s32 q3, q2, q0 \n" // (b2+(b2>0)+3)
"vshrn.s32 d20, q3, #3 \n" // (b2+(b2>0)+3) >> 3
"vcgt.s32 q1, q7, #0 \n" // (b3>0)
"vqsub.s32 q2, q7, q1 \n" // (b3+(b3>0))
"vqadd.s32 q3, q2, q0 \n" // (b3+(b3>0)+3)
"vshrn.s32 d21, q3, #3 \n" // (b3+(b3>0)+3) >> 3
"vshrn.s32 d18, q4, #1 \n" // b0 >> 1
"vshrn.s32 d19, q5, #1 \n" // b1 >> 1
"vshrn.s32 d20, q6, #1 \n" // b2 >> 1
"vshrn.s32 d21, q7, #1 \n" // b3 >> 1
"vst1.16 {q9, q10}, [%[out]] \n"
@ -515,7 +489,7 @@ static int Disto4x4(const uint8_t* const a, const uint8_t* const b,
// q12/14 tmp[12-15]
// These are still in 01 45 23 67 order. We fix it easily in the addition
// case but the subtraction propegates them.
// case but the subtraction propagates them.
"vswp d3, d27 \n"
"vswp d19, d31 \n"
@ -656,6 +630,3 @@ void VP8EncDspInitNEON(void) {
#endif // WEBP_USE_NEON
}
#if defined(__cplusplus) || defined(c_plusplus)
} // extern "C"
#endif

View File

@ -1,8 +1,10 @@
// Copyright 2011 Google Inc. All Rights Reserved.
//
// This code is licensed under the same terms as WebM:
// Software License Agreement: http://www.webmproject.org/license/software/
// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// SSE2 version of speed-critical encoding functions.
@ -11,10 +13,6 @@
#include "./dsp.h"
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
#if defined(WEBP_USE_SSE2)
#include <stdlib.h> // for abs()
#include <emmintrin.h>
@ -453,6 +451,39 @@ static void FTransformSSE2(const uint8_t* src, const uint8_t* ref,
}
}
static void FTransformWHTSSE2(const int16_t* in, int16_t* out) {
int32_t tmp[16];
int i;
for (i = 0; i < 4; ++i, in += 64) {
const int a0 = (in[0 * 16] + in[2 * 16]);
const int a1 = (in[1 * 16] + in[3 * 16]);
const int a2 = (in[1 * 16] - in[3 * 16]);
const int a3 = (in[0 * 16] - in[2 * 16]);
tmp[0 + i * 4] = a0 + a1;
tmp[1 + i * 4] = a3 + a2;
tmp[2 + i * 4] = a3 - a2;
tmp[3 + i * 4] = a0 - a1;
}
{
const __m128i src0 = _mm_loadu_si128((__m128i*)&tmp[0]);
const __m128i src1 = _mm_loadu_si128((__m128i*)&tmp[4]);
const __m128i src2 = _mm_loadu_si128((__m128i*)&tmp[8]);
const __m128i src3 = _mm_loadu_si128((__m128i*)&tmp[12]);
const __m128i a0 = _mm_add_epi32(src0, src2);
const __m128i a1 = _mm_add_epi32(src1, src3);
const __m128i a2 = _mm_sub_epi32(src1, src3);
const __m128i a3 = _mm_sub_epi32(src0, src2);
const __m128i b0 = _mm_srai_epi32(_mm_add_epi32(a0, a1), 1);
const __m128i b1 = _mm_srai_epi32(_mm_add_epi32(a3, a2), 1);
const __m128i b2 = _mm_srai_epi32(_mm_sub_epi32(a3, a2), 1);
const __m128i b3 = _mm_srai_epi32(_mm_sub_epi32(a0, a1), 1);
const __m128i out0 = _mm_packs_epi32(b0, b1);
const __m128i out1 = _mm_packs_epi32(b2, b3);
_mm_storeu_si128((__m128i*)&out[0], out0);
_mm_storeu_si128((__m128i*)&out[8], out1);
}
}
//------------------------------------------------------------------------------
// Metric
@ -609,7 +640,7 @@ static int TTransformSSE2(const uint8_t* inA, const uint8_t* inB,
__m128i tmp_0, tmp_1, tmp_2, tmp_3;
const __m128i zero = _mm_setzero_si128();
// Load, combine and tranpose inputs.
// Load, combine and transpose inputs.
{
const __m128i inA_0 = _mm_loadl_epi64((__m128i*)&inA[BPS * 0]);
const __m128i inA_1 = _mm_loadl_epi64((__m128i*)&inA[BPS * 1]);
@ -795,8 +826,6 @@ static int QuantizeBlockSSE2(int16_t in[16], int16_t out[16],
const __m128i bias8 = _mm_loadu_si128((__m128i*)&mtx->bias_[8]);
const __m128i q0 = _mm_loadu_si128((__m128i*)&mtx->q_[0]);
const __m128i q8 = _mm_loadu_si128((__m128i*)&mtx->q_[8]);
const __m128i zthresh0 = _mm_loadu_si128((__m128i*)&mtx->zthresh_[0]);
const __m128i zthresh8 = _mm_loadu_si128((__m128i*)&mtx->zthresh_[8]);
// sign(in) = in >> 15 (0x0000 if positive, 0xffff if negative)
const __m128i sign0 = _mm_srai_epi16(in0, 15);
@ -859,17 +888,8 @@ static int QuantizeBlockSSE2(int16_t in[16], int16_t out[16],
in0 = _mm_mullo_epi16(out0, q0);
in8 = _mm_mullo_epi16(out8, q8);
// if (coeff <= mtx->zthresh_) {in=0; out=0;}
{
__m128i cmp0 = _mm_cmpgt_epi16(coeff0, zthresh0);
__m128i cmp8 = _mm_cmpgt_epi16(coeff8, zthresh8);
in0 = _mm_and_si128(in0, cmp0);
in8 = _mm_and_si128(in8, cmp8);
_mm_storeu_si128((__m128i*)&in[0], in0);
_mm_storeu_si128((__m128i*)&in[8], in8);
out0 = _mm_and_si128(out0, cmp0);
out8 = _mm_and_si128(out8, cmp8);
}
_mm_storeu_si128((__m128i*)&in[0], in0);
_mm_storeu_si128((__m128i*)&in[8], in8);
// zigzag the output before storing it.
//
@ -906,6 +926,11 @@ static int QuantizeBlockSSE2(int16_t in[16], int16_t out[16],
}
}
static int QuantizeBlockWHTSSE2(int16_t in[16], int16_t out[16],
const VP8Matrix* const mtx) {
return QuantizeBlockSSE2(in, out, 0, mtx);
}
#endif // WEBP_USE_SSE2
//------------------------------------------------------------------------------
@ -917,8 +942,10 @@ void VP8EncDspInitSSE2(void) {
#if defined(WEBP_USE_SSE2)
VP8CollectHistogram = CollectHistogramSSE2;
VP8EncQuantizeBlock = QuantizeBlockSSE2;
VP8EncQuantizeBlockWHT = QuantizeBlockWHTSSE2;
VP8ITransform = ITransformSSE2;
VP8FTransform = FTransformSSE2;
VP8FTransformWHT = FTransformWHTSSE2;
VP8SSE16x16 = SSE16x16SSE2;
VP8SSE16x8 = SSE16x8SSE2;
VP8SSE8x8 = SSE8x8SSE2;
@ -928,6 +955,3 @@ void VP8EncDspInitSSE2(void) {
#endif // WEBP_USE_SSE2
}
#if defined(__cplusplus) || defined(c_plusplus)
} // extern "C"
#endif

View File

@ -1,8 +1,10 @@
// Copyright 2012 Google Inc. All Rights Reserved.
//
// This code is licensed under the same terms as WebM:
// Software License Agreement: http://www.webmproject.org/license/software/
// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// Image transforms and color space conversion methods for lossless decoder.
@ -13,14 +15,7 @@
#include "./dsp.h"
// Define the following if target arch is sure to have SSE2
// #define WEBP_TARGET_HAS_SSE2
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
#if defined(WEBP_TARGET_HAS_SSE2)
#if defined(WEBP_USE_SSE2)
#include <emmintrin.h>
#endif
@ -233,6 +228,109 @@ const float kSLog2Table[LOG_LOOKUP_IDX_MAX] = {
2010.27454072f, 2019.69737440f, 2029.12591044f, 2038.56012640f
};
const VP8LPrefixCode kPrefixEncodeCode[PREFIX_LOOKUP_IDX_MAX] = {
{ 0, 0}, { 0, 0}, { 1, 0}, { 2, 0}, { 3, 0}, { 4, 1}, { 4, 1}, { 5, 1},
{ 5, 1}, { 6, 2}, { 6, 2}, { 6, 2}, { 6, 2}, { 7, 2}, { 7, 2}, { 7, 2},
{ 7, 2}, { 8, 3}, { 8, 3}, { 8, 3}, { 8, 3}, { 8, 3}, { 8, 3}, { 8, 3},
{ 8, 3}, { 9, 3}, { 9, 3}, { 9, 3}, { 9, 3}, { 9, 3}, { 9, 3}, { 9, 3},
{ 9, 3}, {10, 4}, {10, 4}, {10, 4}, {10, 4}, {10, 4}, {10, 4}, {10, 4},
{10, 4}, {10, 4}, {10, 4}, {10, 4}, {10, 4}, {10, 4}, {10, 4}, {10, 4},
{10, 4}, {11, 4}, {11, 4}, {11, 4}, {11, 4}, {11, 4}, {11, 4}, {11, 4},
{11, 4}, {11, 4}, {11, 4}, {11, 4}, {11, 4}, {11, 4}, {11, 4}, {11, 4},
{11, 4}, {12, 5}, {12, 5}, {12, 5}, {12, 5}, {12, 5}, {12, 5}, {12, 5},
{12, 5}, {12, 5}, {12, 5}, {12, 5}, {12, 5}, {12, 5}, {12, 5}, {12, 5},
{12, 5}, {12, 5}, {12, 5}, {12, 5}, {12, 5}, {12, 5}, {12, 5}, {12, 5},
{12, 5}, {12, 5}, {12, 5}, {12, 5}, {12, 5}, {12, 5}, {12, 5}, {12, 5},
{12, 5}, {13, 5}, {13, 5}, {13, 5}, {13, 5}, {13, 5}, {13, 5}, {13, 5},
{13, 5}, {13, 5}, {13, 5}, {13, 5}, {13, 5}, {13, 5}, {13, 5}, {13, 5},
{13, 5}, {13, 5}, {13, 5}, {13, 5}, {13, 5}, {13, 5}, {13, 5}, {13, 5},
{13, 5}, {13, 5}, {13, 5}, {13, 5}, {13, 5}, {13, 5}, {13, 5}, {13, 5},
{13, 5}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6},
{14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6},
{14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6},
{14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6},
{14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6},
{14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6},
{14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6},
{14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6},
{14, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6},
{15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6},
{15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6},
{15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6},
{15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6},
{15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6},
{15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6},
{15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6},
{15, 6}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7},
{16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7},
{16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7},
{16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7},
{16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7},
{16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7},
{16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7},
{16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7},
{16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7},
{16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7},
{16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7},
{16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7},
{16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7},
{16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7},
{16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7},
{16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7},
{16, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7},
{17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7},
{17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7},
{17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7},
{17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7},
{17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7},
{17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7},
{17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7},
{17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7},
{17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7},
{17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7},
{17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7},
{17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7},
{17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7},
{17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7},
{17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7},
};
const uint8_t kPrefixEncodeExtraBitsValue[PREFIX_LOOKUP_IDX_MAX] = {
0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 2, 3, 0, 1, 2, 3,
0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4, 5, 6, 7,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95,
96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111,
112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126,
127,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95,
96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111,
112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126
};
float VP8LFastSLog2Slow(int v) {
assert(v >= LOG_LOOKUP_IDX_MAX);
if (v < APPROX_LOG_MAX) {
@ -285,61 +383,6 @@ static WEBP_INLINE uint32_t Average4(uint32_t a0, uint32_t a1,
return Average2(Average2(a0, a1), Average2(a2, a3));
}
#if defined(WEBP_TARGET_HAS_SSE2)
static WEBP_INLINE uint32_t ClampedAddSubtractFull(uint32_t c0, uint32_t c1,
uint32_t c2) {
const __m128i zero = _mm_setzero_si128();
const __m128i C0 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(c0), zero);
const __m128i C1 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(c1), zero);
const __m128i C2 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(c2), zero);
const __m128i V1 = _mm_add_epi16(C0, C1);
const __m128i V2 = _mm_sub_epi16(V1, C2);
const __m128i b = _mm_packus_epi16(V2, V2);
const uint32_t output = _mm_cvtsi128_si32(b);
return output;
}
static WEBP_INLINE uint32_t ClampedAddSubtractHalf(uint32_t c0, uint32_t c1,
uint32_t c2) {
const uint32_t ave = Average2(c0, c1);
const __m128i zero = _mm_setzero_si128();
const __m128i A0 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(ave), zero);
const __m128i B0 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(c2), zero);
const __m128i A1 = _mm_sub_epi16(A0, B0);
const __m128i BgtA = _mm_cmpgt_epi16(B0, A0);
const __m128i A2 = _mm_sub_epi16(A1, BgtA);
const __m128i A3 = _mm_srai_epi16(A2, 1);
const __m128i A4 = _mm_add_epi16(A0, A3);
const __m128i A5 = _mm_packus_epi16(A4, A4);
const uint32_t output = _mm_cvtsi128_si32(A5);
return output;
}
static WEBP_INLINE uint32_t Select(uint32_t a, uint32_t b, uint32_t c) {
int pa_minus_pb;
const __m128i zero = _mm_setzero_si128();
const __m128i A0 = _mm_cvtsi32_si128(a);
const __m128i B0 = _mm_cvtsi32_si128(b);
const __m128i C0 = _mm_cvtsi32_si128(c);
const __m128i AC0 = _mm_subs_epu8(A0, C0);
const __m128i CA0 = _mm_subs_epu8(C0, A0);
const __m128i BC0 = _mm_subs_epu8(B0, C0);
const __m128i CB0 = _mm_subs_epu8(C0, B0);
const __m128i AC = _mm_or_si128(AC0, CA0);
const __m128i BC = _mm_or_si128(BC0, CB0);
const __m128i pa = _mm_unpacklo_epi8(AC, zero); // |a - c|
const __m128i pb = _mm_unpacklo_epi8(BC, zero); // |b - c|
const __m128i diff = _mm_sub_epi16(pb, pa);
{
int16_t out[8];
_mm_storeu_si128((__m128i*)out, diff);
pa_minus_pb = out[0] + out[1] + out[2] + out[3];
}
return (pa_minus_pb <= 0) ? a : b;
}
#else
static WEBP_INLINE uint32_t Clip255(uint32_t a) {
if (a < 256) {
return a;
@ -394,7 +437,6 @@ static WEBP_INLINE uint32_t Select(uint32_t a, uint32_t b, uint32_t c) {
Sub3((a ) & 0xff, (b ) & 0xff, (c ) & 0xff);
return (pa_minus_pb <= 0) ? a : b;
}
#endif
//------------------------------------------------------------------------------
// Predictors
@ -447,18 +489,19 @@ static uint32_t Predictor10(uint32_t left, const uint32_t* const top) {
return pred;
}
static uint32_t Predictor11(uint32_t left, const uint32_t* const top) {
const uint32_t pred = Select(top[0], left, top[-1]);
const uint32_t pred = VP8LSelect(top[0], left, top[-1]);
return pred;
}
static uint32_t Predictor12(uint32_t left, const uint32_t* const top) {
const uint32_t pred = ClampedAddSubtractFull(left, top[0], top[-1]);
const uint32_t pred = VP8LClampedAddSubtractFull(left, top[0], top[-1]);
return pred;
}
static uint32_t Predictor13(uint32_t left, const uint32_t* const top) {
const uint32_t pred = ClampedAddSubtractHalf(left, top[0], top[-1]);
const uint32_t pred = VP8LClampedAddSubtractHalf(left, top[0], top[-1]);
return pred;
}
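
For reference while reading the renamed predictor calls, a scalar sketch of the per-channel arithmetic behind the clamped add/subtract-full predictor, matching the SSE2 version removed above (left + top - top_left, saturated to [0, 255] on each channel; the helper name is illustrative):

#include <stdint.h>

static uint32_t ClampedAddSubtractFullSketch(uint32_t left, uint32_t top,
                                             uint32_t top_left) {
  uint32_t out = 0;
  int shift;
  for (shift = 0; shift < 32; shift += 8) {   // A, R, G, B channels
    const int v = (int)((left >> shift) & 0xff) +
                  (int)((top >> shift) & 0xff) -
                  (int)((top_left >> shift) & 0xff);
    const uint32_t clipped = (uint32_t)((v < 0) ? 0 : (v > 255) ? 255 : v);
    out |= clipped << shift;
  }
  return out;
}
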
// TODO(vikasa): Export the predictor array, to allow SSE2 variants.
typedef uint32_t (*PredictorFunc)(uint32_t left, const uint32_t* const top);
static const PredictorFunc kPredictors[16] = {
Predictor0, Predictor1, Predictor2, Predictor3,
@ -714,21 +757,8 @@ static void PredictorInverseTransform(const VP8LTransform* const transform,
}
}
void VP8LSubtractGreenFromBlueAndRed(uint32_t* argb_data, int num_pixs) {
static void SubtractGreenFromBlueAndRed(uint32_t* argb_data, int num_pixs) {
int i = 0;
#if defined(WEBP_TARGET_HAS_SSE2)
const __m128i mask = _mm_set1_epi32(0x0000ff00);
for (; i + 4 < num_pixs; i += 4) {
const __m128i in = _mm_loadu_si128((__m128i*)&argb_data[i]);
const __m128i in_00g0 = _mm_and_si128(in, mask); // 00g0|00g0|...
const __m128i in_0g00 = _mm_slli_epi32(in_00g0, 8); // 0g00|0g00|...
const __m128i in_000g = _mm_srli_epi32(in_00g0, 8); // 000g|000g|...
const __m128i in_0g0g = _mm_or_si128(in_0g00, in_000g);
const __m128i out = _mm_sub_epi8(in, in_0g0g);
_mm_storeu_si128((__m128i*)&argb_data[i], out);
}
// fallthrough and finish off with plain-C
#endif
for (; i < num_pixs; ++i) {
const uint32_t argb = argb_data[i];
const uint32_t green = (argb >> 8) & 0xff;
@ -740,23 +770,7 @@ void VP8LSubtractGreenFromBlueAndRed(uint32_t* argb_data, int num_pixs) {
// Add green to blue and red channels (i.e. perform the inverse transform of
// 'subtract green').
static void AddGreenToBlueAndRed(const VP8LTransform* const transform,
int y_start, int y_end, uint32_t* data) {
const int width = transform->xsize_;
const uint32_t* const data_end = data + (y_end - y_start) * width;
#if defined(WEBP_TARGET_HAS_SSE2)
const __m128i mask = _mm_set1_epi32(0x0000ff00);
for (; data + 4 < data_end; data += 4) {
const __m128i in = _mm_loadu_si128((__m128i*)data);
const __m128i in_00g0 = _mm_and_si128(in, mask); // 00g0|00g0|...
const __m128i in_0g00 = _mm_slli_epi32(in_00g0, 8); // 0g00|0g00|...
const __m128i in_000g = _mm_srli_epi32(in_00g0, 8); // 000g|000g|...
const __m128i in_0g0g = _mm_or_si128(in_0g00, in_000g);
const __m128i out = _mm_add_epi8(in, in_0g0g);
_mm_storeu_si128((__m128i*)data, out);
}
// fallthrough and finish off with plain-C
#endif
static void AddGreenToBlueAndRed(uint32_t* data, const uint32_t* data_end) {
while (data < data_end) {
const uint32_t argb = *data;
const uint32_t green = ((argb >> 8) & 0xff);
@ -1093,54 +1107,79 @@ static void ColorSpaceInverseTransform(const VP8LTransform* const transform,
}
// Separate out pixels packed together using pixel-bundling.
static void ColorIndexInverseTransform(
const VP8LTransform* const transform,
int y_start, int y_end, const uint32_t* src, uint32_t* dst) {
int y;
const int bits_per_pixel = 8 >> transform->bits_;
const int width = transform->xsize_;
const uint32_t* const color_map = transform->data_;
if (bits_per_pixel < 8) {
const int pixels_per_byte = 1 << transform->bits_;
const int count_mask = pixels_per_byte - 1;
const uint32_t bit_mask = (1 << bits_per_pixel) - 1;
for (y = y_start; y < y_end; ++y) {
uint32_t packed_pixels = 0;
int x;
for (x = 0; x < width; ++x) {
// We need to load fresh 'packed_pixels' once every 'pixels_per_byte'
// increments of x. Fortunately, pixels_per_byte is a power of 2, so
// can just use a mask for that, instead of decrementing a counter.
if ((x & count_mask) == 0) packed_pixels = ((*src++) >> 8) & 0xff;
*dst++ = color_map[packed_pixels & bit_mask];
packed_pixels >>= bits_per_pixel;
}
}
} else {
for (y = y_start; y < y_end; ++y) {
int x;
for (x = 0; x < width; ++x) {
*dst++ = color_map[((*src++) >> 8) & 0xff];
}
}
}
// We define two methods for ARGB data (uint32_t) and alpha-only data (uint8_t).
#define COLOR_INDEX_INVERSE(FUNC_NAME, TYPE, GET_INDEX, GET_VALUE) \
void FUNC_NAME(const VP8LTransform* const transform, \
int y_start, int y_end, const TYPE* src, TYPE* dst) { \
int y; \
const int bits_per_pixel = 8 >> transform->bits_; \
const int width = transform->xsize_; \
const uint32_t* const color_map = transform->data_; \
if (bits_per_pixel < 8) { \
const int pixels_per_byte = 1 << transform->bits_; \
const int count_mask = pixels_per_byte - 1; \
const uint32_t bit_mask = (1 << bits_per_pixel) - 1; \
for (y = y_start; y < y_end; ++y) { \
uint32_t packed_pixels = 0; \
int x; \
for (x = 0; x < width; ++x) { \
/* We need to load fresh 'packed_pixels' once every */ \
/* 'pixels_per_byte' increments of x. Fortunately, pixels_per_byte */ \
/* is a power of 2, so can just use a mask for that, instead of */ \
/* decrementing a counter. */ \
if ((x & count_mask) == 0) packed_pixels = GET_INDEX(*src++); \
*dst++ = GET_VALUE(color_map[packed_pixels & bit_mask]); \
packed_pixels >>= bits_per_pixel; \
} \
} \
} else { \
for (y = y_start; y < y_end; ++y) { \
int x; \
for (x = 0; x < width; ++x) { \
*dst++ = GET_VALUE(color_map[GET_INDEX(*src++)]); \
} \
} \
} \
}
static WEBP_INLINE uint32_t GetARGBIndex(uint32_t idx) {
return (idx >> 8) & 0xff;
}
static WEBP_INLINE uint8_t GetAlphaIndex(uint8_t idx) {
return idx;
}
static WEBP_INLINE uint32_t GetARGBValue(uint32_t val) {
return val;
}
static WEBP_INLINE uint8_t GetAlphaValue(uint32_t val) {
return (val >> 8) & 0xff;
}
static COLOR_INDEX_INVERSE(ColorIndexInverseTransform, uint32_t, GetARGBIndex,
GetARGBValue)
COLOR_INDEX_INVERSE(VP8LColorIndexInverseTransformAlpha, uint8_t, GetAlphaIndex,
GetAlphaValue)
#undef COLOR_INDEX_INVERSE
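// A standalone sketch (not from the libwebp sources) of the bit-unpacking that
// COLOR_INDEX_INVERSE performs when bits_per_pixel < 8. It assumes a
// hypothetical 4-entry palette and a transform with bits_ == 2, i.e. four
// 2-bit indices packed LSB-first into the green byte of one bundled pixel.
#include <stdint.h>
#include <stdio.h>
int main(void) {
  const uint32_t bundled_pixel = 0xff00e400u;   // green byte 0xe4 = 0b11100100
  const uint32_t palette[4] = { 0xff000000u, 0xff0000ffu,
                                0xff00ff00u, 0xffff0000u };
  const int bits_per_pixel = 2;                 // 8 >> bits_, with bits_ == 2
  const uint32_t bit_mask = (1 << bits_per_pixel) - 1;
  uint32_t packed_pixels = (bundled_pixel >> 8) & 0xff;   // as GetARGBIndex()
  int x;
  for (x = 0; x < 4; ++x) {                     // prints indices 0, 1, 2, 3
    printf("pixel %d -> 0x%08x\n", x,
           (unsigned)palette[packed_pixels & bit_mask]);
    packed_pixels >>= bits_per_pixel;
  }
  return 0;
}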
void VP8LInverseTransform(const VP8LTransform* const transform,
int row_start, int row_end,
const uint32_t* const in, uint32_t* const out) {
const int width = transform->xsize_;
assert(row_start < row_end);
assert(row_end <= transform->ysize_);
switch (transform->type_) {
case SUBTRACT_GREEN:
AddGreenToBlueAndRed(transform, row_start, row_end, out);
VP8LAddGreenToBlueAndRed(out, out + (row_end - row_start) * width);
break;
case PREDICTOR_TRANSFORM:
PredictorInverseTransform(transform, row_start, row_end, out);
if (row_end != transform->ysize_) {
// The last predicted row in this iteration will be the top-pred row
// for the first row in next iteration.
const int width = transform->xsize_;
memcpy(out - width, out + (row_end - row_start - 1) * width,
width * sizeof(*out));
}
@ -1155,7 +1194,7 @@ void VP8LInverseTransform(const VP8LTransform* const transform,
// Also, note that this is the only transform that applies on
// the effective width of VP8LSubSampleSize(xsize_, bits_). All other
// transforms work on effective width of xsize_.
const int out_stride = (row_end - row_start) * transform->xsize_;
const int out_stride = (row_end - row_start) * width;
const int in_stride = (row_end - row_start) *
VP8LSubSampleSize(transform->xsize_, transform->bits_);
uint32_t* const src = out + out_stride - in_stride;
@ -1254,11 +1293,12 @@ static void CopyOrSwap(const uint32_t* src, int num_pixels, uint8_t* dst,
while (src < src_end) {
uint32_t argb = *src++;
#if !defined(__BIG_ENDIAN__)
#if !defined(WEBP_REFERENCE_IMPLEMENTATION)
#if !defined(__BIG_ENDIAN__) && (defined(__i386__) || defined(__x86_64__))
#if defined(__i386__) || defined(__x86_64__)
__asm__ volatile("bswap %0" : "=r"(argb) : "0"(argb));
*(uint32_t*)dst = argb;
#elif !defined(__BIG_ENDIAN__) && defined(_MSC_VER)
#elif defined(_MSC_VER)
argb = _byteswap_ulong(argb);
*(uint32_t*)dst = argb;
#else
@ -1267,11 +1307,17 @@ static void CopyOrSwap(const uint32_t* src, int num_pixels, uint8_t* dst,
dst[2] = (argb >> 8) & 0xff;
dst[3] = (argb >> 0) & 0xff;
#endif
#else // WEBP_REFERENCE_IMPLEMENTATION
#else // WEBP_REFERENCE_IMPLEMENTATION
dst[0] = (argb >> 24) & 0xff;
dst[1] = (argb >> 16) & 0xff;
dst[2] = (argb >> 8) & 0xff;
dst[3] = (argb >> 0) & 0xff;
#endif
#else // __BIG_ENDIAN__
dst[0] = (argb >> 0) & 0xff;
dst[1] = (argb >> 8) & 0xff;
dst[2] = (argb >> 16) & 0xff;
dst[3] = (argb >> 24) & 0xff;
#endif
dst += sizeof(argb);
}
@ -1325,8 +1371,162 @@ void VP8LConvertFromBGRA(const uint32_t* const in_data, int num_pixels,
}
}
// Bundles multiple (1, 2, 4 or 8) pixels into a single pixel.
void VP8LBundleColorMap(const uint8_t* const row, int width,
int xbits, uint32_t* const dst) {
int x;
if (xbits > 0) {
const int bit_depth = 1 << (3 - xbits);
const int mask = (1 << xbits) - 1;
uint32_t code = 0xff000000;
for (x = 0; x < width; ++x) {
const int xsub = x & mask;
if (xsub == 0) {
code = 0xff000000;
}
code |= row[x] << (8 + bit_depth * xsub);
dst[x >> xbits] = code;
}
} else {
for (x = 0; x < width; ++x) dst[x] = 0xff000000 | (row[x] << 8);
}
}
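// A standalone sketch (not from the libwebp sources) of the bundling done by
// VP8LBundleColorMap() for xbits == 2: bit_depth is 2, so four hypothetical
// palette indices are packed LSB-first into the green byte of a single opaque
// pixel -- the mirror image of the color-index inverse transform above.
#include <stdint.h>
#include <stdio.h>
int main(void) {
  const uint8_t row[4] = { 0, 1, 2, 3 };    // one palette index per pixel
  const int xbits = 2;
  const int bit_depth = 1 << (3 - xbits);   // 2 bits per index
  const int mask = (1 << xbits) - 1;        // 4 indices per bundled pixel
  uint32_t code = 0xff000000u;
  int x;
  for (x = 0; x < 4; ++x) {
    code |= (uint32_t)row[x] << (8 + bit_depth * (x & mask));
  }
  printf("bundled pixel = 0x%08x\n", (unsigned)code);   // 0xff00e400
  return 0;
}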
//------------------------------------------------------------------------------
#if defined(__cplusplus) || defined(c_plusplus)
} // extern "C"
// TODO(vikasa): Move the SSE2 functions to lossless_dsp.c (new file), once
// color-space conversion methods (ConvertFromBGRA) are also updated for SSE2.
#if defined(WEBP_USE_SSE2)
static WEBP_INLINE uint32_t ClampedAddSubtractFullSSE2(uint32_t c0, uint32_t c1,
uint32_t c2) {
const __m128i zero = _mm_setzero_si128();
const __m128i C0 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(c0), zero);
const __m128i C1 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(c1), zero);
const __m128i C2 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(c2), zero);
const __m128i V1 = _mm_add_epi16(C0, C1);
const __m128i V2 = _mm_sub_epi16(V1, C2);
const __m128i b = _mm_packus_epi16(V2, V2);
const uint32_t output = _mm_cvtsi128_si32(b);
return output;
}
static WEBP_INLINE uint32_t ClampedAddSubtractHalfSSE2(uint32_t c0, uint32_t c1,
uint32_t c2) {
const uint32_t ave = Average2(c0, c1);
const __m128i zero = _mm_setzero_si128();
const __m128i A0 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(ave), zero);
const __m128i B0 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(c2), zero);
const __m128i A1 = _mm_sub_epi16(A0, B0);
const __m128i BgtA = _mm_cmpgt_epi16(B0, A0);
const __m128i A2 = _mm_sub_epi16(A1, BgtA);
const __m128i A3 = _mm_srai_epi16(A2, 1);
const __m128i A4 = _mm_add_epi16(A0, A3);
const __m128i A5 = _mm_packus_epi16(A4, A4);
const uint32_t output = _mm_cvtsi128_si32(A5);
return output;
}
static WEBP_INLINE uint32_t SelectSSE2(uint32_t a, uint32_t b, uint32_t c) {
int pa_minus_pb;
const __m128i zero = _mm_setzero_si128();
const __m128i A0 = _mm_cvtsi32_si128(a);
const __m128i B0 = _mm_cvtsi32_si128(b);
const __m128i C0 = _mm_cvtsi32_si128(c);
const __m128i AC0 = _mm_subs_epu8(A0, C0);
const __m128i CA0 = _mm_subs_epu8(C0, A0);
const __m128i BC0 = _mm_subs_epu8(B0, C0);
const __m128i CB0 = _mm_subs_epu8(C0, B0);
const __m128i AC = _mm_or_si128(AC0, CA0);
const __m128i BC = _mm_or_si128(BC0, CB0);
const __m128i pa = _mm_unpacklo_epi8(AC, zero); // |a - c|
const __m128i pb = _mm_unpacklo_epi8(BC, zero); // |b - c|
const __m128i diff = _mm_sub_epi16(pb, pa);
{
int16_t out[8];
_mm_storeu_si128((__m128i*)out, diff);
pa_minus_pb = out[0] + out[1] + out[2] + out[3];
}
return (pa_minus_pb <= 0) ? a : b;
}
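// A scalar sketch (not from the libwebp sources) of what SelectSSE2() computes.
// With the gradient prediction p = a + b - c, the penalty for picking 'a' is
// |p - a| = |b - c| and the penalty for picking 'b' is |p - b| = |a - c|; the
// penalties are summed over the four byte channels and the candidate with the
// smaller total is returned (ties go to 'a').
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
static uint32_t SelectScalar(uint32_t a, uint32_t b, uint32_t c) {
  int pa_minus_pb = 0;
  int shift;
  for (shift = 0; shift < 32; shift += 8) {
    const int a_ch = (a >> shift) & 0xff;
    const int b_ch = (b >> shift) & 0xff;
    const int c_ch = (c >> shift) & 0xff;
    pa_minus_pb += abs(b_ch - c_ch) - abs(a_ch - c_ch);
  }
  return (pa_minus_pb <= 0) ? a : b;
}
int main(void) {
  // left ~ top-left, so the gradient prediction tracks the top pixel: 'b' wins.
  printf("0x%08x\n",
         (unsigned)SelectScalar(0xff102030u, 0xff405060u, 0xff0f1f2fu));
  return 0;
}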
static void SubtractGreenFromBlueAndRedSSE2(uint32_t* argb_data, int num_pixs) {
int i = 0;
const __m128i mask = _mm_set1_epi32(0x0000ff00);
for (; i + 4 < num_pixs; i += 4) {
const __m128i in = _mm_loadu_si128((__m128i*)&argb_data[i]);
const __m128i in_00g0 = _mm_and_si128(in, mask); // 00g0|00g0|...
const __m128i in_0g00 = _mm_slli_epi32(in_00g0, 8); // 0g00|0g00|...
const __m128i in_000g = _mm_srli_epi32(in_00g0, 8); // 000g|000g|...
const __m128i in_0g0g = _mm_or_si128(in_0g00, in_000g);
const __m128i out = _mm_sub_epi8(in, in_0g0g);
_mm_storeu_si128((__m128i*)&argb_data[i], out);
}
// fallthrough and finish off with plain-C
for (; i < num_pixs; ++i) {
const uint32_t argb = argb_data[i];
const uint32_t green = (argb >> 8) & 0xff;
const uint32_t new_r = (((argb >> 16) & 0xff) - green) & 0xff;
const uint32_t new_b = ((argb & 0xff) - green) & 0xff;
argb_data[i] = (argb & 0xff00ff00) | (new_r << 16) | new_b;
}
}
static void AddGreenToBlueAndRedSSE2(uint32_t* data, const uint32_t* data_end) {
const __m128i mask = _mm_set1_epi32(0x0000ff00);
for (; data + 4 < data_end; data += 4) {
const __m128i in = _mm_loadu_si128((__m128i*)data);
const __m128i in_00g0 = _mm_and_si128(in, mask); // 00g0|00g0|...
const __m128i in_0g00 = _mm_slli_epi32(in_00g0, 8); // 0g00|0g00|...
const __m128i in_000g = _mm_srli_epi32(in_00g0, 8); // 000g|000g|...
const __m128i in_0g0g = _mm_or_si128(in_0g00, in_000g);
const __m128i out = _mm_add_epi8(in, in_0g0g);
_mm_storeu_si128((__m128i*)data, out);
}
// fallthrough and finish off with plain-C
while (data < data_end) {
const uint32_t argb = *data;
const uint32_t green = ((argb >> 8) & 0xff);
uint32_t red_blue = (argb & 0x00ff00ffu);
red_blue += (green << 16) | green;
red_blue &= 0x00ff00ffu;
*data++ = (argb & 0xff00ff00u) | red_blue;
}
}
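// A worked example (sketch, not from the libwebp sources) of the scalar tail
// above: green is added to red and blue in a single 32-bit add, and the
// 0x00ff00ff mask discards any carry so each channel wraps mod 256.
#include <stdint.h>
#include <stdio.h>
int main(void) {
  const uint32_t argb = 0x01ff1080u;            // a=01 r=ff g=10 b=80
  const uint32_t green = (argb >> 8) & 0xff;    // 0x10
  uint32_t red_blue = argb & 0x00ff00ffu;       // 0x00ff0080
  red_blue += (green << 16) | green;            // 0x010f0090 (red overflowed)
  red_blue &= 0x00ff00ffu;                      // 0x000f0090
  printf("0x%08x\n", (unsigned)((argb & 0xff00ff00u) | red_blue));  // 0x010f1090
  return 0;
}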
extern void VP8LDspInitSSE2(void);
void VP8LDspInitSSE2(void) {
VP8LClampedAddSubtractFull = ClampedAddSubtractFullSSE2;
VP8LClampedAddSubtractHalf = ClampedAddSubtractHalfSSE2;
VP8LSelect = SelectSSE2;
VP8LSubtractGreenFromBlueAndRed = SubtractGreenFromBlueAndRedSSE2;
VP8LAddGreenToBlueAndRed = AddGreenToBlueAndRedSSE2;
}
#endif
//------------------------------------------------------------------------------
VP8LPredClampedAddSubFunc VP8LClampedAddSubtractFull;
VP8LPredClampedAddSubFunc VP8LClampedAddSubtractHalf;
VP8LPredSelectFunc VP8LSelect;
VP8LSubtractGreenFromBlueAndRedFunc VP8LSubtractGreenFromBlueAndRed;
VP8LAddGreenToBlueAndRedFunc VP8LAddGreenToBlueAndRed;
void VP8LDspInit(void) {
VP8LClampedAddSubtractFull = ClampedAddSubtractFull;
VP8LClampedAddSubtractHalf = ClampedAddSubtractHalf;
VP8LSelect = Select;
VP8LSubtractGreenFromBlueAndRed = SubtractGreenFromBlueAndRed;
VP8LAddGreenToBlueAndRed = AddGreenToBlueAndRed;
// If defined, use CPUInfo() to overwrite some pointers with faster versions.
if (VP8GetCPUInfo != NULL) {
#if defined(WEBP_USE_SSE2)
if (VP8GetCPUInfo(kSSE2)) {
VP8LDspInitSSE2();
}
#endif
}
}
//------------------------------------------------------------------------------


@ -1,8 +1,10 @@
// Copyright 2012 Google Inc. All Rights Reserved.
//
// This code is licensed under the same terms as WebM:
// Software License Agreement: http://www.webmproject.org/license/software/
// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// Image transforms and color space conversion methods for lossless decoder.
@ -16,10 +18,30 @@
#include "../webp/types.h"
#include "../webp/decode.h"
#if defined(__cplusplus) || defined(c_plusplus)
#ifdef __cplusplus
extern "C" {
#endif
//------------------------------------------------------------------------------
//
typedef uint32_t (*VP8LPredClampedAddSubFunc)(uint32_t c0, uint32_t c1,
uint32_t c2);
typedef uint32_t (*VP8LPredSelectFunc)(uint32_t c0, uint32_t c1, uint32_t c2);
typedef void (*VP8LSubtractGreenFromBlueAndRedFunc)(uint32_t* argb_data,
int num_pixs);
typedef void (*VP8LAddGreenToBlueAndRedFunc)(uint32_t* data_start,
const uint32_t* data_end);
extern VP8LPredClampedAddSubFunc VP8LClampedAddSubtractFull;
extern VP8LPredClampedAddSubFunc VP8LClampedAddSubtractHalf;
extern VP8LPredSelectFunc VP8LSelect;
extern VP8LSubtractGreenFromBlueAndRedFunc VP8LSubtractGreenFromBlueAndRed;
extern VP8LAddGreenToBlueAndRedFunc VP8LAddGreenToBlueAndRed;
// Must be called before calling any of the above methods.
void VP8LDspInit(void);
//------------------------------------------------------------------------------
// Image transforms.
@ -33,8 +55,12 @@ void VP8LInverseTransform(const struct VP8LTransform* const transform,
int row_start, int row_end,
const uint32_t* const in, uint32_t* const out);
// Subtracts green from blue and red channels.
void VP8LSubtractGreenFromBlueAndRed(uint32_t* argb_data, int num_pixs);
// Similar to the static method ColorIndexInverseTransform() that is part of
// lossless.c, but used only for alpha decoding. It takes uint8_t (rather than
// uint32_t) arguments for 'src' and 'dst'.
void VP8LColorIndexInverseTransformAlpha(
const struct VP8LTransform* const transform, int y_start, int y_end,
const uint8_t* src, uint8_t* dst);
void VP8LResidualImage(int width, int height, int bits,
uint32_t* const argb, uint32_t* const argb_scratch,
@ -63,8 +89,8 @@ static WEBP_INLINE uint32_t VP8LSubSampleSize(uint32_t size,
#define LOG_LOOKUP_IDX_MAX 256
extern const float kLog2Table[LOG_LOOKUP_IDX_MAX];
extern const float kSLog2Table[LOG_LOOKUP_IDX_MAX];
extern float VP8LFastLog2Slow(int v);
extern float VP8LFastSLog2Slow(int v);
float VP8LFastLog2Slow(int v);
float VP8LFastSLog2Slow(int v);
static WEBP_INLINE float VP8LFastLog2(int v) {
return (v < LOG_LOOKUP_IDX_MAX) ? kLog2Table[v] : VP8LFastLog2Slow(v);
}
@ -73,6 +99,105 @@ static WEBP_INLINE float VP8LFastSLog2(int v) {
return (v < LOG_LOOKUP_IDX_MAX) ? kSLog2Table[v] : VP8LFastSLog2Slow(v);
}
// -----------------------------------------------------------------------------
// PrefixEncode()
// use GNU builtins where available.
#if defined(__GNUC__) && \
((__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || __GNUC__ >= 4)
static WEBP_INLINE int BitsLog2Floor(uint32_t n) {
return 31 ^ __builtin_clz(n);
}
#elif defined(_MSC_VER) && _MSC_VER > 1310 && \
(defined(_M_X64) || defined(_M_IX86))
#include <intrin.h>
#pragma intrinsic(_BitScanReverse)
static WEBP_INLINE int BitsLog2Floor(uint32_t n) {
unsigned long first_set_bit;
_BitScanReverse(&first_set_bit, n);
return first_set_bit;
}
#else
// Returns (int)floor(log2(n)). n must be > 0.
static WEBP_INLINE int BitsLog2Floor(uint32_t n) {
int log = 0;
uint32_t value = n;
int i;
for (i = 4; i >= 0; --i) {
const int shift = (1 << i);
const uint32_t x = value >> shift;
if (x != 0) {
value = x;
log += shift;
}
}
return log;
}
#endif
static WEBP_INLINE int VP8LBitsLog2Ceiling(uint32_t n) {
const int log_floor = BitsLog2Floor(n);
if (n == (n & ~(n - 1))) // zero or a power of two.
return log_floor;
else
return log_floor + 1;
}
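// A few spot checks (sketch, not from the libwebp sources) for the helpers
// above; a simple shift loop stands in for BitsLog2Floor() so the example
// builds with any compiler.
#include <stdint.h>
#include <stdio.h>
static int FloorLog2(uint32_t n) {     // stand-in for BitsLog2Floor(), n > 0
  int log = 0;
  while (n >>= 1) ++log;
  return log;
}
static int CeilLog2(uint32_t n) {      // stand-in for VP8LBitsLog2Ceiling()
  const int log_floor = FloorLog2(n);
  return (n == (n & ~(n - 1))) ? log_floor : log_floor + 1;
}
int main(void) {
  printf("floor: %d %d %d\n", FloorLog2(1), FloorLog2(255), FloorLog2(256));
  printf("ceil : %d %d %d\n", CeilLog2(1), CeilLog2(255), CeilLog2(256));
  // expected: floor: 0 7 8 / ceil: 0 8 8
  return 0;
}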
// Splitting of distance and length codes into prefixes and
// extra bits. The prefixes are encoded with an entropy code
// while the extra bits are stored just as normal bits.
static WEBP_INLINE void VP8LPrefixEncodeBitsNoLUT(int distance, int* const code,
int* const extra_bits) {
const int highest_bit = BitsLog2Floor(--distance);
const int second_highest_bit = (distance >> (highest_bit - 1)) & 1;
*extra_bits = highest_bit - 1;
*code = 2 * highest_bit + second_highest_bit;
}
static WEBP_INLINE void VP8LPrefixEncodeNoLUT(int distance, int* const code,
int* const extra_bits,
int* const extra_bits_value) {
const int highest_bit = BitsLog2Floor(--distance);
const int second_highest_bit = (distance >> (highest_bit - 1)) & 1;
*extra_bits = highest_bit - 1;
*extra_bits_value = distance & ((1 << *extra_bits) - 1);
*code = 2 * highest_bit + second_highest_bit;
}
#define PREFIX_LOOKUP_IDX_MAX 512
typedef struct {
int8_t code_;
int8_t extra_bits_;
} VP8LPrefixCode;
// These tables are derived using VP8LPrefixEncodeNoLUT.
extern const VP8LPrefixCode kPrefixEncodeCode[PREFIX_LOOKUP_IDX_MAX];
extern const uint8_t kPrefixEncodeExtraBitsValue[PREFIX_LOOKUP_IDX_MAX];
static WEBP_INLINE void VP8LPrefixEncodeBits(int distance, int* const code,
int* const extra_bits) {
if (distance < PREFIX_LOOKUP_IDX_MAX) {
const VP8LPrefixCode prefix_code = kPrefixEncodeCode[distance];
*code = prefix_code.code_;
*extra_bits = prefix_code.extra_bits_;
} else {
VP8LPrefixEncodeBitsNoLUT(distance, code, extra_bits);
}
}
static WEBP_INLINE void VP8LPrefixEncode(int distance, int* const code,
int* const extra_bits,
int* const extra_bits_value) {
if (distance < PREFIX_LOOKUP_IDX_MAX) {
const VP8LPrefixCode prefix_code = kPrefixEncodeCode[distance];
*code = prefix_code.code_;
*extra_bits = prefix_code.extra_bits_;
*extra_bits_value = kPrefixEncodeExtraBitsValue[distance];
} else {
VP8LPrefixEncodeNoLUT(distance, code, extra_bits, extra_bits_value);
}
}
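// A worked example (sketch, not from the libwebp sources) of the NoLUT path
// for a distance of 600, which falls outside the 512-entry lookup tables:
//   600 - 1 = 599 = 0b1001010111 -> highest_bit = 9, second_highest_bit = 0,
//   so code = 18, extra_bits = 8, extra_bits_value = 599 & 0xff = 87.
#include <stdint.h>
#include <stdio.h>
static int FloorLog2(uint32_t n) {   // stand-in for BitsLog2Floor(), n > 0
  int log = 0;
  while (n >>= 1) ++log;
  return log;
}
int main(void) {
  const int distance = 600;
  const int d = distance - 1;
  const int highest_bit = FloorLog2((uint32_t)d);
  const int second_highest_bit = (d >> (highest_bit - 1)) & 1;
  const int extra_bits = highest_bit - 1;
  const int extra_bits_value = d & ((1 << extra_bits) - 1);
  const int code = 2 * highest_bit + second_highest_bit;
  printf("code=%d extra_bits=%d value=%d\n", code, extra_bits, extra_bits_value);
  return 0;
}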
// In-place difference of each component with mod 256.
static WEBP_INLINE uint32_t VP8LSubPixels(uint32_t a, uint32_t b) {
@ -83,9 +208,12 @@ static WEBP_INLINE uint32_t VP8LSubPixels(uint32_t a, uint32_t b) {
return (alpha_and_green & 0xff00ff00u) | (red_and_blue & 0x00ff00ffu);
}
void VP8LBundleColorMap(const uint8_t* const row, int width,
int xbits, uint32_t* const dst);
//------------------------------------------------------------------------------
#if defined(__cplusplus) || defined(c_plusplus)
#ifdef __cplusplus
} // extern "C"
#endif


@ -1,8 +1,10 @@
// Copyright 2011 Google Inc. All Rights Reserved.
//
// This code is licensed under the same terms as WebM:
// Software License Agreement: http://www.webmproject.org/license/software/
// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// YUV to RGB upsampling functions.
@ -12,9 +14,7 @@
#include "./dsp.h"
#include "./yuv.h"
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
#include <assert.h>
//------------------------------------------------------------------------------
// Fancy upsampler
@ -43,11 +43,12 @@ static void FUNC_NAME(const uint8_t* top_y, const uint8_t* bottom_y, \
const int last_pixel_pair = (len - 1) >> 1; \
uint32_t tl_uv = LOAD_UV(top_u[0], top_v[0]); /* top-left sample */ \
uint32_t l_uv = LOAD_UV(cur_u[0], cur_v[0]); /* left-sample */ \
if (top_y) { \
assert(top_y != NULL); \
{ \
const uint32_t uv0 = (3 * tl_uv + l_uv + 0x00020002u) >> 2; \
FUNC(top_y[0], uv0 & 0xff, (uv0 >> 16), top_dst); \
} \
if (bottom_y) { \
if (bottom_y != NULL) { \
const uint32_t uv0 = (3 * l_uv + tl_uv + 0x00020002u) >> 2; \
FUNC(bottom_y[0], uv0 & 0xff, (uv0 >> 16), bottom_dst); \
} \
@ -58,7 +59,7 @@ static void FUNC_NAME(const uint8_t* top_y, const uint8_t* bottom_y, \
const uint32_t avg = tl_uv + t_uv + l_uv + uv + 0x00080008u; \
const uint32_t diag_12 = (avg + 2 * (t_uv + l_uv)) >> 3; \
const uint32_t diag_03 = (avg + 2 * (tl_uv + uv)) >> 3; \
if (top_y) { \
{ \
const uint32_t uv0 = (diag_12 + tl_uv) >> 1; \
const uint32_t uv1 = (diag_03 + t_uv) >> 1; \
FUNC(top_y[2 * x - 1], uv0 & 0xff, (uv0 >> 16), \
@ -66,7 +67,7 @@ static void FUNC_NAME(const uint8_t* top_y, const uint8_t* bottom_y, \
FUNC(top_y[2 * x - 0], uv1 & 0xff, (uv1 >> 16), \
top_dst + (2 * x - 0) * XSTEP); \
} \
if (bottom_y) { \
if (bottom_y != NULL) { \
const uint32_t uv0 = (diag_03 + l_uv) >> 1; \
const uint32_t uv1 = (diag_12 + uv) >> 1; \
FUNC(bottom_y[2 * x - 1], uv0 & 0xff, (uv0 >> 16), \
@ -78,12 +79,12 @@ static void FUNC_NAME(const uint8_t* top_y, const uint8_t* bottom_y, \
l_uv = uv; \
} \
if (!(len & 1)) { \
if (top_y) { \
{ \
const uint32_t uv0 = (3 * tl_uv + l_uv + 0x00020002u) >> 2; \
FUNC(top_y[len - 1], uv0 & 0xff, (uv0 >> 16), \
top_dst + (len - 1) * XSTEP); \
} \
if (bottom_y) { \
if (bottom_y != NULL) { \
const uint32_t uv0 = (3 * l_uv + tl_uv + 0x00020002u) >> 2; \
FUNC(bottom_y[len - 1], uv0 & 0xff, (uv0 >> 16), \
bottom_dst + (len - 1) * XSTEP); \
@ -166,7 +167,8 @@ static void FUNC_NAME(const uint8_t* top_y, const uint8_t* bot_y, \
uint8_t* top_dst, uint8_t* bot_dst, int len) { \
const int half_len = len >> 1; \
int x; \
if (top_dst != NULL) { \
assert(top_dst != NULL); \
{ \
for (x = 0; x < half_len; ++x) { \
FUNC(top_y[2 * x + 0], top_u[x], top_v[x], top_dst + 8 * x + 0); \
FUNC(top_y[2 * x + 1], top_u[x], top_v[x], top_dst + 8 * x + 4); \
@ -362,6 +364,3 @@ void WebPInitPremultiply(void) {
#endif // FANCY_UPSAMPLING
}
#if defined(__cplusplus) || defined(c_plusplus)
} // extern "C"
#endif


@ -1,8 +1,10 @@
// Copyright 2011 Google Inc. All Rights Reserved.
//
// This code is licensed under the same terms as WebM:
// Software License Agreement: http://www.webmproject.org/license/software/
// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// NEON version of YUV to RGB upsampling functions.
@ -12,10 +14,6 @@
#include "./dsp.h"
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
#if defined(WEBP_USE_NEON)
#include <assert.h>
@ -25,6 +23,9 @@ extern "C" {
#ifdef FANCY_UPSAMPLING
//-----------------------------------------------------------------------------
// U/V upsampling
// Loads 9 pixels each from rows r1 and r2 and generates 16 pixels.
#define UPSAMPLE_16PIXELS(r1, r2, out) { \
uint8x8_t a = vld1_u8(r1); \
@ -83,125 +84,90 @@ static void Upsample16Pixels(const uint8_t *r1, const uint8_t *r2,
Upsample16Pixels(r1, r2, out); \
}
#define CY 76283
#define CVR 89858
#define CUG 22014
#define CVG 45773
#define CUB 113618
//-----------------------------------------------------------------------------
// YUV->RGB conversion
static const int16_t coef[4] = { CVR / 4, CUG, CVG / 2, CUB / 4 };
#define CONVERT8(FMT, XSTEP, N, src_y, src_uv, out, cur_x) { \
int i; \
for (i = 0; i < N; i += 8) { \
int off = ((cur_x) + i) * XSTEP; \
uint8x8_t y = vld1_u8(src_y + (cur_x) + i); \
uint8x8_t u = vld1_u8((src_uv) + i); \
uint8x8_t v = vld1_u8((src_uv) + i + 16); \
int16x8_t yy = vreinterpretq_s16_u16(vsubl_u8(y, u16)); \
int16x8_t uu = vreinterpretq_s16_u16(vsubl_u8(u, u128)); \
int16x8_t vv = vreinterpretq_s16_u16(vsubl_u8(v, u128)); \
\
int16x8_t ud = vshlq_n_s16(uu, 1); \
int16x8_t vd = vshlq_n_s16(vv, 1); \
\
int32x4_t vrl = vqdmlal_lane_s16(vshll_n_s16(vget_low_s16(vv), 1), \
vget_low_s16(vd), cf16, 0); \
int32x4_t vrh = vqdmlal_lane_s16(vshll_n_s16(vget_high_s16(vv), 1), \
vget_high_s16(vd), cf16, 0); \
int16x8_t vr = vcombine_s16(vrshrn_n_s32(vrl, 16), \
vrshrn_n_s32(vrh, 16)); \
\
int32x4_t vl = vmovl_s16(vget_low_s16(vv)); \
int32x4_t vh = vmovl_s16(vget_high_s16(vv)); \
int32x4_t ugl = vmlal_lane_s16(vl, vget_low_s16(uu), cf16, 1); \
int32x4_t ugh = vmlal_lane_s16(vh, vget_high_s16(uu), cf16, 1); \
int32x4_t gcl = vqdmlal_lane_s16(ugl, vget_low_s16(vv), cf16, 2); \
int32x4_t gch = vqdmlal_lane_s16(ugh, vget_high_s16(vv), cf16, 2); \
int16x8_t gc = vcombine_s16(vrshrn_n_s32(gcl, 16), \
vrshrn_n_s32(gch, 16)); \
\
int32x4_t ubl = vqdmlal_lane_s16(vshll_n_s16(vget_low_s16(uu), 1), \
vget_low_s16(ud), cf16, 3); \
int32x4_t ubh = vqdmlal_lane_s16(vshll_n_s16(vget_high_s16(uu), 1), \
vget_high_s16(ud), cf16, 3); \
int16x8_t ub = vcombine_s16(vrshrn_n_s32(ubl, 16), \
vrshrn_n_s32(ubh, 16)); \
\
int32x4_t rl = vaddl_s16(vget_low_s16(yy), vget_low_s16(vr)); \
int32x4_t rh = vaddl_s16(vget_high_s16(yy), vget_high_s16(vr)); \
int32x4_t gl = vsubl_s16(vget_low_s16(yy), vget_low_s16(gc)); \
int32x4_t gh = vsubl_s16(vget_high_s16(yy), vget_high_s16(gc)); \
int32x4_t bl = vaddl_s16(vget_low_s16(yy), vget_low_s16(ub)); \
int32x4_t bh = vaddl_s16(vget_high_s16(yy), vget_high_s16(ub)); \
\
rl = vmulq_lane_s32(rl, cf32, 0); \
rh = vmulq_lane_s32(rh, cf32, 0); \
gl = vmulq_lane_s32(gl, cf32, 0); \
gh = vmulq_lane_s32(gh, cf32, 0); \
bl = vmulq_lane_s32(bl, cf32, 0); \
bh = vmulq_lane_s32(bh, cf32, 0); \
\
y = vqmovun_s16(vcombine_s16(vrshrn_n_s32(rl, 16), \
vrshrn_n_s32(rh, 16))); \
u = vqmovun_s16(vcombine_s16(vrshrn_n_s32(gl, 16), \
vrshrn_n_s32(gh, 16))); \
v = vqmovun_s16(vcombine_s16(vrshrn_n_s32(bl, 16), \
vrshrn_n_s32(bh, 16))); \
STR_ ## FMT(out + off, y, u, v); \
} \
}
static const int16_t kCoeffs[4] = { kYScale, kVToR, kUToG, kVToG };
#define v255 vmov_n_u8(255)
#define STR_Rgb(out, r, g, b) do { \
#define STORE_Rgb(out, r, g, b) do { \
const uint8x8x3_t r_g_b = {{ r, g, b }}; \
vst3_u8(out, r_g_b); \
} while (0)
#define STR_Bgr(out, r, g, b) do { \
#define STORE_Bgr(out, r, g, b) do { \
const uint8x8x3_t b_g_r = {{ b, g, r }}; \
vst3_u8(out, b_g_r); \
} while (0)
#define STR_Rgba(out, r, g, b) do { \
#define STORE_Rgba(out, r, g, b) do { \
const uint8x8x4_t r_g_b_v255 = {{ r, g, b, v255 }}; \
vst4_u8(out, r_g_b_v255); \
} while (0)
#define STR_Bgra(out, r, g, b) do { \
#define STORE_Bgra(out, r, g, b) do { \
const uint8x8x4_t b_g_r_v255 = {{ b, g, r, v255 }}; \
vst4_u8(out, b_g_r_v255); \
} while (0)
#define CONVERT1(FMT, XSTEP, N, src_y, src_uv, rgb, cur_x) { \
#define CONVERT8(FMT, XSTEP, N, src_y, src_uv, out, cur_x) { \
int i; \
for (i = 0; i < N; i += 8) { \
const int off = ((cur_x) + i) * XSTEP; \
uint8x8_t y = vld1_u8((src_y) + (cur_x) + i); \
uint8x8_t u = vld1_u8((src_uv) + i); \
uint8x8_t v = vld1_u8((src_uv) + i + 16); \
const int16x8_t yy = vreinterpretq_s16_u16(vsubl_u8(y, u16)); \
const int16x8_t uu = vreinterpretq_s16_u16(vsubl_u8(u, u128)); \
const int16x8_t vv = vreinterpretq_s16_u16(vsubl_u8(v, u128)); \
int32x4_t yl = vmull_lane_s16(vget_low_s16(yy), cf16, 0); \
int32x4_t yh = vmull_lane_s16(vget_high_s16(yy), cf16, 0); \
const int32x4_t rl = vmlal_lane_s16(yl, vget_low_s16(vv), cf16, 1);\
const int32x4_t rh = vmlal_lane_s16(yh, vget_high_s16(vv), cf16, 1);\
int32x4_t gl = vmlsl_lane_s16(yl, vget_low_s16(uu), cf16, 2); \
int32x4_t gh = vmlsl_lane_s16(yh, vget_high_s16(uu), cf16, 2); \
const int32x4_t bl = vmovl_s16(vget_low_s16(uu)); \
const int32x4_t bh = vmovl_s16(vget_high_s16(uu)); \
gl = vmlsl_lane_s16(gl, vget_low_s16(vv), cf16, 3); \
gh = vmlsl_lane_s16(gh, vget_high_s16(vv), cf16, 3); \
yl = vmlaq_lane_s32(yl, bl, cf32, 0); \
yh = vmlaq_lane_s32(yh, bh, cf32, 0); \
/* vrshrn_n_s32() already incorporates the rounding constant */ \
y = vqmovun_s16(vcombine_s16(vrshrn_n_s32(rl, YUV_FIX2), \
vrshrn_n_s32(rh, YUV_FIX2))); \
u = vqmovun_s16(vcombine_s16(vrshrn_n_s32(gl, YUV_FIX2), \
vrshrn_n_s32(gh, YUV_FIX2))); \
v = vqmovun_s16(vcombine_s16(vrshrn_n_s32(yl, YUV_FIX2), \
vrshrn_n_s32(yh, YUV_FIX2))); \
STORE_ ## FMT(out + off, y, u, v); \
} \
}
#define CONVERT1(FUNC, XSTEP, N, src_y, src_uv, rgb, cur_x) { \
int i; \
for (i = 0; i < N; i++) { \
int off = ((cur_x) + i) * XSTEP; \
int y = src_y[(cur_x) + i]; \
int u = (src_uv)[i]; \
int v = (src_uv)[i + 16]; \
VP8YuvTo ## FMT(y, u, v, rgb + off); \
const int off = ((cur_x) + i) * XSTEP; \
const int y = src_y[(cur_x) + i]; \
const int u = (src_uv)[i]; \
const int v = (src_uv)[i + 16]; \
FUNC(y, u, v, rgb + off); \
} \
}
#define CONVERT2RGB_8(FMT, XSTEP, top_y, bottom_y, uv, \
top_dst, bottom_dst, cur_x, len) { \
if (top_y) { \
CONVERT8(FMT, XSTEP, len, top_y, uv, top_dst, cur_x) \
} \
if (bottom_y) { \
CONVERT8(FMT, XSTEP, len, top_y, uv, top_dst, cur_x) \
if (bottom_y != NULL) { \
CONVERT8(FMT, XSTEP, len, bottom_y, (uv) + 32, bottom_dst, cur_x) \
} \
}
#define CONVERT2RGB_1(FMT, XSTEP, top_y, bottom_y, uv, \
#define CONVERT2RGB_1(FUNC, XSTEP, top_y, bottom_y, uv, \
top_dst, bottom_dst, cur_x, len) { \
if (top_y) { \
CONVERT1(FMT, XSTEP, len, top_y, uv, top_dst, cur_x); \
} \
if (bottom_y) { \
CONVERT1(FMT, XSTEP, len, bottom_y, (uv) + 32, bottom_dst, cur_x); \
CONVERT1(FUNC, XSTEP, len, top_y, uv, top_dst, cur_x); \
if (bottom_y != NULL) { \
CONVERT1(FUNC, XSTEP, len, bottom_y, (uv) + 32, bottom_dst, cur_x); \
} \
}
@ -223,18 +189,19 @@ static void FUNC_NAME(const uint8_t *top_y, const uint8_t *bottom_y, \
const int u_diag = ((top_u[0] + cur_u[0]) >> 1) + 1; \
const int v_diag = ((top_v[0] + cur_v[0]) >> 1) + 1; \
\
const int16x4_t cf16 = vld1_s16(coef); \
const int32x2_t cf32 = vmov_n_s32(CY); \
const int16x4_t cf16 = vld1_s16(kCoeffs); \
const int32x2_t cf32 = vmov_n_s32(kUToB); \
const uint8x8_t u16 = vmov_n_u8(16); \
const uint8x8_t u128 = vmov_n_u8(128); \
\
/* Treat the first pixel in regular way */ \
if (top_y) { \
assert(top_y != NULL); \
{ \
const int u0 = (top_u[0] + u_diag) >> 1; \
const int v0 = (top_v[0] + v_diag) >> 1; \
VP8YuvTo ## FMT(top_y[0], u0, v0, top_dst); \
} \
if (bottom_y) { \
if (bottom_y != NULL) { \
const int u0 = (cur_u[0] + u_diag) >> 1; \
const int v0 = (cur_v[0] + v_diag) >> 1; \
VP8YuvTo ## FMT(bottom_y[0], u0, v0, bottom_dst); \
@ -253,7 +220,7 @@ static void FUNC_NAME(const uint8_t *top_y, const uint8_t *bottom_y, \
\
UPSAMPLE_LAST_BLOCK(top_u, cur_u, leftover, r_uv); \
UPSAMPLE_LAST_BLOCK(top_v, cur_v, leftover, r_uv + 16); \
CONVERT2RGB_1(FMT, XSTEP, top_y, bottom_y, r_uv, \
CONVERT2RGB_1(VP8YuvTo ## FMT, XSTEP, top_y, bottom_y, r_uv, \
top_dst, bottom_dst, last_pos, len - last_pos); \
}
@ -269,6 +236,8 @@ NEON_UPSAMPLE_FUNC(UpsampleBgraLinePairNEON, Bgra, 4)
//------------------------------------------------------------------------------
#ifdef FANCY_UPSAMPLING
extern WebPUpsampleLinePairFunc WebPUpsamplers[/* MODE_LAST */];
void WebPInitUpsamplersNEON(void) {
@ -287,6 +256,10 @@ void WebPInitPremultiplyNEON(void) {
#endif // WEBP_USE_NEON
}
#if defined(__cplusplus) || defined(c_plusplus)
} // extern "C"
#endif
#else
// this empty function is to avoid an empty .o
void WebPInitPremultiplyNEON(void) {}
#endif // FANCY_UPSAMPLING


@ -1,8 +1,10 @@
// Copyright 2011 Google Inc. All Rights Reserved.
//
// This code is licensed under the same terms as WebM:
// Software License Agreement: http://www.webmproject.org/license/software/
// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// SSE2 version of YUV to RGB upsampling functions.
@ -11,10 +13,6 @@
#include "./dsp.h"
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
#if defined(WEBP_USE_SSE2)
#include <assert.h>
@ -49,7 +47,7 @@ extern "C" {
(out) = _mm_sub_epi8(tmp0, tmp4); /* (k + in + 1) / 2 - lsb_correction */ \
} while (0)
// pack and store two alterning pixel rows
// pack and store two alternating pixel rows
#define PACK_AND_STORE(a, b, da, db, out) do { \
const __m128i t_a = _mm_avg_epu8(a, da); /* (9a + 3b + 3c + d + 8) / 16 */ \
const __m128i t_b = _mm_avg_epu8(b, db); /* (3a + 9b + c + 3d + 8) / 16 */ \
@ -85,8 +83,8 @@ extern "C" {
GET_M(ad, s, diag2); /* diag2 = (3a + b + c + 3d) / 8 */ \
\
/* pack the alternate pixels */ \
PACK_AND_STORE(a, b, diag1, diag2, &(out)[0 * 32]); \
PACK_AND_STORE(c, d, diag2, diag1, &(out)[2 * 32]); \
PACK_AND_STORE(a, b, diag1, diag2, out + 0); /* store top */ \
PACK_AND_STORE(c, d, diag2, diag1, out + 2 * 32); /* store bottom */ \
}
// Turn the macro into a function for reducing code-size when non-critical
@ -106,69 +104,68 @@ static void Upsample32Pixels(const uint8_t r1[], const uint8_t r2[],
Upsample32Pixels(r1, r2, out); \
}
#define CONVERT2RGB(FUNC, XSTEP, top_y, bottom_y, uv, \
#define CONVERT2RGB(FUNC, XSTEP, top_y, bottom_y, \
top_dst, bottom_dst, cur_x, num_pixels) { \
int n; \
if (top_y) { \
for (n = 0; n < (num_pixels); ++n) { \
FUNC(top_y[(cur_x) + n], (uv)[n], (uv)[32 + n], \
top_dst + ((cur_x) + n) * XSTEP); \
} \
for (n = 0; n < (num_pixels); ++n) { \
FUNC(top_y[(cur_x) + n], r_u[n], r_v[n], \
top_dst + ((cur_x) + n) * XSTEP); \
} \
if (bottom_y) { \
if (bottom_y != NULL) { \
for (n = 0; n < (num_pixels); ++n) { \
FUNC(bottom_y[(cur_x) + n], (uv)[64 + n], (uv)[64 + 32 + n], \
FUNC(bottom_y[(cur_x) + n], r_u[64 + n], r_v[64 + n], \
bottom_dst + ((cur_x) + n) * XSTEP); \
} \
} \
}
#define CONVERT2RGB_32(FUNC, XSTEP, top_y, bottom_y, \
top_dst, bottom_dst, cur_x) do { \
FUNC##32(top_y + (cur_x), r_u, r_v, top_dst + (cur_x) * XSTEP); \
if (bottom_y != NULL) { \
FUNC##32(bottom_y + (cur_x), r_u + 64, r_v + 64, \
bottom_dst + (cur_x) * XSTEP); \
} \
} while (0)
#define SSE2_UPSAMPLE_FUNC(FUNC_NAME, FUNC, XSTEP) \
static void FUNC_NAME(const uint8_t* top_y, const uint8_t* bottom_y, \
const uint8_t* top_u, const uint8_t* top_v, \
const uint8_t* cur_u, const uint8_t* cur_v, \
uint8_t* top_dst, uint8_t* bottom_dst, int len) { \
int block; \
/* 16 byte aligned array to cache reconstructed u and v */ \
int uv_pos, pos; \
/* 16byte-aligned array to cache reconstructed u and v */ \
uint8_t uv_buf[4 * 32 + 15]; \
uint8_t* const r_uv = (uint8_t*)((uintptr_t)(uv_buf + 15) & ~15); \
const int uv_len = (len + 1) >> 1; \
/* 17 pixels must be read-able for each block */ \
const int num_blocks = (uv_len - 1) >> 4; \
const int leftover = uv_len - num_blocks * 16; \
const int last_pos = 1 + 32 * num_blocks; \
uint8_t* const r_u = (uint8_t*)((uintptr_t)(uv_buf + 15) & ~15); \
uint8_t* const r_v = r_u + 32; \
\
const int u_diag = ((top_u[0] + cur_u[0]) >> 1) + 1; \
const int v_diag = ((top_v[0] + cur_v[0]) >> 1) + 1; \
\
assert(len > 0); \
/* Treat the first pixel in regular way */ \
if (top_y) { \
const int u0 = (top_u[0] + u_diag) >> 1; \
const int v0 = (top_v[0] + v_diag) >> 1; \
FUNC(top_y[0], u0, v0, top_dst); \
assert(top_y != NULL); \
{ /* Treat the first pixel in regular way */ \
const int u_diag = ((top_u[0] + cur_u[0]) >> 1) + 1; \
const int v_diag = ((top_v[0] + cur_v[0]) >> 1) + 1; \
const int u0_t = (top_u[0] + u_diag) >> 1; \
const int v0_t = (top_v[0] + v_diag) >> 1; \
FUNC(top_y[0], u0_t, v0_t, top_dst); \
if (bottom_y != NULL) { \
const int u0_b = (cur_u[0] + u_diag) >> 1; \
const int v0_b = (cur_v[0] + v_diag) >> 1; \
FUNC(bottom_y[0], u0_b, v0_b, bottom_dst); \
} \
} \
if (bottom_y) { \
const int u0 = (cur_u[0] + u_diag) >> 1; \
const int v0 = (cur_v[0] + v_diag) >> 1; \
FUNC(bottom_y[0], u0, v0, bottom_dst); \
/* For UPSAMPLE_32PIXELS, 17 u/v values must be read-able for each block */ \
for (pos = 1, uv_pos = 0; pos + 32 + 1 <= len; pos += 32, uv_pos += 16) { \
UPSAMPLE_32PIXELS(top_u + uv_pos, cur_u + uv_pos, r_u); \
UPSAMPLE_32PIXELS(top_v + uv_pos, cur_v + uv_pos, r_v); \
CONVERT2RGB_32(FUNC, XSTEP, top_y, bottom_y, top_dst, bottom_dst, pos); \
} \
\
for (block = 0; block < num_blocks; ++block) { \
UPSAMPLE_32PIXELS(top_u, cur_u, r_uv + 0 * 32); \
UPSAMPLE_32PIXELS(top_v, cur_v, r_uv + 1 * 32); \
CONVERT2RGB(FUNC, XSTEP, top_y, bottom_y, r_uv, top_dst, bottom_dst, \
32 * block + 1, 32) \
top_u += 16; \
cur_u += 16; \
top_v += 16; \
cur_v += 16; \
if (len > 1) { \
const int left_over = ((len + 1) >> 1) - (pos >> 1); \
assert(left_over > 0); \
UPSAMPLE_LAST_BLOCK(top_u + uv_pos, cur_u + uv_pos, left_over, r_u); \
UPSAMPLE_LAST_BLOCK(top_v + uv_pos, cur_v + uv_pos, left_over, r_v); \
CONVERT2RGB(FUNC, XSTEP, top_y, bottom_y, top_dst, bottom_dst, \
pos, len - pos); \
} \
\
UPSAMPLE_LAST_BLOCK(top_u, cur_u, leftover, r_uv + 0 * 32); \
UPSAMPLE_LAST_BLOCK(top_v, cur_v, leftover, r_uv + 1 * 32); \
CONVERT2RGB(FUNC, XSTEP, top_y, bottom_y, r_uv, top_dst, bottom_dst, \
last_pos, len - last_pos); \
}
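// A small sketch (not from the libwebp sources) of the block schedule the
// macro above produces: pixel 0 is handled separately, then 32-pixel blocks
// run as long as 17 u/v samples can be read (pos + 32 + 1 <= len), and a
// leftover tail finishes the row. For a hypothetical len of 100 this prints
// blocks at pos 1, 33 and 65, then a 3-pixel tail with left_over = 2.
#include <stdio.h>
int main(void) {
  const int len = 100;
  int pos, uv_pos;
  for (pos = 1, uv_pos = 0; pos + 32 + 1 <= len; pos += 32, uv_pos += 16) {
    printf("block: pos=%d uv_pos=%d\n", pos, uv_pos);
  }
  if (len > 1) {
    const int left_over = ((len + 1) >> 1) - (pos >> 1);
    printf("tail : pos=%d pixels=%d left_over=%d\n", pos, len - pos, left_over);
  }
  return 0;
}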
// SSE2 variants of the fancy upsampler.
@ -182,6 +179,7 @@ SSE2_UPSAMPLE_FUNC(UpsampleBgraLinePairSSE2, VP8YuvToBgra, 4)
#undef UPSAMPLE_32PIXELS
#undef UPSAMPLE_LAST_BLOCK
#undef CONVERT2RGB
#undef CONVERT2RGB_32
#undef SSE2_UPSAMPLE_FUNC
#endif // FANCY_UPSAMPLING
@ -190,10 +188,13 @@ SSE2_UPSAMPLE_FUNC(UpsampleBgraLinePairSSE2, VP8YuvToBgra, 4)
//------------------------------------------------------------------------------
#ifdef FANCY_UPSAMPLING
extern WebPUpsampleLinePairFunc WebPUpsamplers[/* MODE_LAST */];
void WebPInitUpsamplersSSE2(void) {
#if defined(WEBP_USE_SSE2)
VP8YUVInitSSE2();
WebPUpsamplers[MODE_RGB] = UpsampleRgbLinePairSSE2;
WebPUpsamplers[MODE_RGBA] = UpsampleRgbaLinePairSSE2;
WebPUpsamplers[MODE_BGR] = UpsampleBgrLinePairSSE2;
@ -208,8 +209,10 @@ void WebPInitPremultiplySSE2(void) {
#endif // WEBP_USE_SSE2
}
#if defined(__cplusplus) || defined(c_plusplus)
} // extern "C"
#endif
#else
// this empty function is to avoid an empty .o
void WebPInitPremultiplySSE2(void) {}
#endif // FANCY_UPSAMPLING


@ -1,8 +1,10 @@
// Copyright 2010 Google Inc. All Rights Reserved.
//
// This code is licensed under the same terms as WebM:
// Software License Agreement: http://www.webmproject.org/license/software/
// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// YUV->RGB conversion function
@ -11,16 +13,8 @@
#include "./yuv.h"
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
#ifdef WEBP_YUV_USE_TABLE
int16_t VP8kVToR[256], VP8kUToB[256];
int32_t VP8kVToG[256], VP8kUToG[256];
uint8_t VP8kClip[YUV_RANGE_MAX - YUV_RANGE_MIN];
uint8_t VP8kClip4Bits[YUV_RANGE_MAX - YUV_RANGE_MIN];
#if defined(WEBP_YUV_USE_TABLE)
static int done = 0;
@ -28,6 +22,11 @@ static WEBP_INLINE uint8_t clip(int v, int max_value) {
return v < 0 ? 0 : v > max_value ? max_value : v;
}
int16_t VP8kVToR[256], VP8kUToB[256];
int32_t VP8kVToG[256], VP8kUToG[256];
uint8_t VP8kClip[YUV_RANGE_MAX - YUV_RANGE_MIN];
uint8_t VP8kClip4Bits[YUV_RANGE_MAX - YUV_RANGE_MIN];
void VP8YUVInit(void) {
int i;
if (done) {
@ -68,6 +67,141 @@ void VP8YUVInit(void) {}
#endif // WEBP_YUV_USE_TABLE
#if defined(__cplusplus) || defined(c_plusplus)
} // extern "C"
#endif
//-----------------------------------------------------------------------------
// SSE2 extras
#if defined(WEBP_USE_SSE2)
#ifdef FANCY_UPSAMPLING
#include <emmintrin.h>
#include <string.h> // for memcpy
typedef union { // handy struct for converting SSE2 registers
int32_t i32[4];
uint8_t u8[16];
__m128i m;
} VP8kCstSSE2;
static int done_sse2 = 0;
static VP8kCstSSE2 VP8kUtoRGBA[256], VP8kVtoRGBA[256], VP8kYtoRGBA[256];
void VP8YUVInitSSE2(void) {
if (!done_sse2) {
int i;
for (i = 0; i < 256; ++i) {
VP8kYtoRGBA[i].i32[0] =
VP8kYtoRGBA[i].i32[1] =
VP8kYtoRGBA[i].i32[2] = (i - 16) * kYScale + YUV_HALF2;
VP8kYtoRGBA[i].i32[3] = 0xff << YUV_FIX2;
VP8kUtoRGBA[i].i32[0] = 0;
VP8kUtoRGBA[i].i32[1] = -kUToG * (i - 128);
VP8kUtoRGBA[i].i32[2] = kUToB * (i - 128);
VP8kUtoRGBA[i].i32[3] = 0;
VP8kVtoRGBA[i].i32[0] = kVToR * (i - 128);
VP8kVtoRGBA[i].i32[1] = -kVToG * (i - 128);
VP8kVtoRGBA[i].i32[2] = 0;
VP8kVtoRGBA[i].i32[3] = 0;
}
done_sse2 = 1;
}
}
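// A scalar sketch (not from the libwebp sources) of the additive decomposition
// the tables above rely on, shown for the red channel only: R is recovered as
// (YtoRGBA[y] + VtoRGBA[v]) >> YUV_FIX2, i.e.
// (kYScale * (y - 16) + kVToR * (v - 128) + YUV_HALF2) >> 14, then clamped.
#include <stdint.h>
#include <stdio.h>
int main(void) {
  const int kYScale = 19077, kVToR = 26149;   // 14-bit BT.601 constants
  const int YUV_FIX2 = 14, YUV_HALF2 = 1 << 13;
  const int y = 150, v = 200;                 // arbitrary sample values
  const int y_part = (y - 16) * kYScale + YUV_HALF2;   // VP8kYtoRGBA[y].i32[0]
  const int v_part = kVToR * (v - 128);                // VP8kVtoRGBA[v].i32[0]
  int r = (y_part + v_part) >> YUV_FIX2;
  if (r < 0) r = 0; else if (r > 255) r = 255;
  printf("R = %d\n", r);   // ~156 + ~115 = 271 before clamping, so 255
  return 0;
}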
static WEBP_INLINE __m128i VP8GetRGBA32b(int y, int u, int v) {
const __m128i u_part = _mm_loadu_si128(&VP8kUtoRGBA[u].m);
const __m128i v_part = _mm_loadu_si128(&VP8kVtoRGBA[v].m);
const __m128i y_part = _mm_loadu_si128(&VP8kYtoRGBA[y].m);
const __m128i uv_part = _mm_add_epi32(u_part, v_part);
const __m128i rgba1 = _mm_add_epi32(y_part, uv_part);
const __m128i rgba2 = _mm_srai_epi32(rgba1, YUV_FIX2);
return rgba2;
}
static WEBP_INLINE void VP8YuvToRgbSSE2(uint8_t y, uint8_t u, uint8_t v,
uint8_t* const rgb) {
const __m128i tmp0 = VP8GetRGBA32b(y, u, v);
const __m128i tmp1 = _mm_packs_epi32(tmp0, tmp0);
const __m128i tmp2 = _mm_packus_epi16(tmp1, tmp1);
// Note: we store 8 bytes at a time, not 3 bytes! -> memory stomp
_mm_storel_epi64((__m128i*)rgb, tmp2);
}
static WEBP_INLINE void VP8YuvToBgrSSE2(uint8_t y, uint8_t u, uint8_t v,
uint8_t* const bgr) {
const __m128i tmp0 = VP8GetRGBA32b(y, u, v);
const __m128i tmp1 = _mm_shuffle_epi32(tmp0, _MM_SHUFFLE(3, 0, 1, 2));
const __m128i tmp2 = _mm_packs_epi32(tmp1, tmp1);
const __m128i tmp3 = _mm_packus_epi16(tmp2, tmp2);
// Note: we store 8 bytes at a time, not 3 bytes! -> memory stomp
_mm_storel_epi64((__m128i*)bgr, tmp3);
}
void VP8YuvToRgba32(const uint8_t* y, const uint8_t* u, const uint8_t* v,
uint8_t* dst) {
int n;
for (n = 0; n < 32; n += 4) {
const __m128i tmp0_1 = VP8GetRGBA32b(y[n + 0], u[n + 0], v[n + 0]);
const __m128i tmp0_2 = VP8GetRGBA32b(y[n + 1], u[n + 1], v[n + 1]);
const __m128i tmp0_3 = VP8GetRGBA32b(y[n + 2], u[n + 2], v[n + 2]);
const __m128i tmp0_4 = VP8GetRGBA32b(y[n + 3], u[n + 3], v[n + 3]);
const __m128i tmp1_1 = _mm_packs_epi32(tmp0_1, tmp0_2);
const __m128i tmp1_2 = _mm_packs_epi32(tmp0_3, tmp0_4);
const __m128i tmp2 = _mm_packus_epi16(tmp1_1, tmp1_2);
_mm_storeu_si128((__m128i*)dst, tmp2);
dst += 4 * 4;
}
}
void VP8YuvToBgra32(const uint8_t* y, const uint8_t* u, const uint8_t* v,
uint8_t* dst) {
int n;
for (n = 0; n < 32; n += 2) {
const __m128i tmp0_1 = VP8GetRGBA32b(y[n + 0], u[n + 0], v[n + 0]);
const __m128i tmp0_2 = VP8GetRGBA32b(y[n + 1], u[n + 1], v[n + 1]);
const __m128i tmp1_1 = _mm_shuffle_epi32(tmp0_1, _MM_SHUFFLE(3, 0, 1, 2));
const __m128i tmp1_2 = _mm_shuffle_epi32(tmp0_2, _MM_SHUFFLE(3, 0, 1, 2));
const __m128i tmp2_1 = _mm_packs_epi32(tmp1_1, tmp1_2);
const __m128i tmp3 = _mm_packus_epi16(tmp2_1, tmp2_1);
_mm_storel_epi64((__m128i*)dst, tmp3);
dst += 4 * 2;
}
}
void VP8YuvToRgb32(const uint8_t* y, const uint8_t* u, const uint8_t* v,
uint8_t* dst) {
int n;
uint8_t tmp0[2 * 3 + 5 + 15];
uint8_t* const tmp = (uint8_t*)((uintptr_t)(tmp0 + 15) & ~15); // align
for (n = 0; n < 30; ++n) { // we directly stomp the *dst memory
VP8YuvToRgbSSE2(y[n], u[n], v[n], dst + n * 3);
}
// Last two pixels are special: we write in a tmp buffer before sending
// to dst.
VP8YuvToRgbSSE2(y[n + 0], u[n + 0], v[n + 0], tmp + 0);
VP8YuvToRgbSSE2(y[n + 1], u[n + 1], v[n + 1], tmp + 3);
memcpy(dst + n * 3, tmp, 2 * 3);
}
void VP8YuvToBgr32(const uint8_t* y, const uint8_t* u, const uint8_t* v,
uint8_t* dst) {
int n;
uint8_t tmp0[2 * 3 + 5 + 15];
uint8_t* const tmp = (uint8_t*)((uintptr_t)(tmp0 + 15) & ~15); // align
for (n = 0; n < 30; ++n) {
VP8YuvToBgrSSE2(y[n], u[n], v[n], dst + n * 3);
}
VP8YuvToBgrSSE2(y[n + 0], u[n + 0], v[n + 0], tmp + 0);
VP8YuvToBgrSSE2(y[n + 1], u[n + 1], v[n + 1], tmp + 3);
memcpy(dst + n * 3, tmp, 2 * 3);
}
#else
void VP8YUVInitSSE2(void) {}
#endif // FANCY_UPSAMPLING
#endif // WEBP_USE_SSE2


@ -1,8 +1,10 @@
// Copyright 2010 Google Inc. All Rights Reserved.
//
// This code is licensed under the same terms as WebM:
// Software License Agreement: http://www.webmproject.org/license/software/
// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// inline YUV<->RGB conversion function
@ -12,7 +14,7 @@
// Y = 0.2569 * R + 0.5044 * G + 0.0979 * B + 16
// U = -0.1483 * R - 0.2911 * G + 0.4394 * B + 128
// V = 0.4394 * R - 0.3679 * G - 0.0715 * B + 128
// We use 16bit fixed point operations for RGB->YUV conversion.
// We use 16bit fixed point operations for RGB->YUV conversion (YUV_FIX).
//
// For the Y'CbCr to RGB conversion, the BT.601 specification reads:
// R = 1.164 * (Y-16) + 1.596 * (V-128)
@ -21,21 +23,24 @@
// where Y is in the [16,235] range, and U/V in the [16,240] range.
// In the table-lookup version (WEBP_YUV_USE_TABLE), the common factor
// "1.164 * (Y-16)" can be handled as an offset in the VP8kClip[] table.
// So in this case the formulae should be read as:
// So in this case the formulae should read:
// R = 1.164 * [Y + 1.371 * (V-128) ] - 18.624
// G = 1.164 * [Y - 0.698 * (V-128) - 0.336 * (U-128)] - 18.624
// B = 1.164 * [Y + 1.733 * (U-128)] - 18.624
// once factorized. Here too, 16bit fixed precision is used.
// once factorized.
// For YUV->RGB conversion, only 14bit fixed precision is used (YUV_FIX2).
// That's the maximum possible for a convenient ARM implementation.
//
// Author: Skal (pascal.massimino@gmail.com)
#ifndef WEBP_DSP_YUV_H_
#define WEBP_DSP_YUV_H_
#include "./dsp.h"
#include "../dec/decode_vp8.h"
// Define the following to use the LUT-based code:
#define WEBP_YUV_USE_TABLE
// #define WEBP_YUV_USE_TABLE
#if defined(WEBP_EXPERIMENTAL_FEATURES)
// Do NOT activate this feature for real compression. This is only experimental!
@ -50,53 +55,75 @@
//------------------------------------------------------------------------------
// YUV -> RGB conversion
#if defined(__cplusplus) || defined(c_plusplus)
#ifdef __cplusplus
extern "C" {
#endif
enum { YUV_FIX = 16, // fixed-point precision
YUV_HALF = 1 << (YUV_FIX - 1),
YUV_MASK = (256 << YUV_FIX) - 1,
YUV_RANGE_MIN = -227, // min value of r/g/b output
YUV_RANGE_MAX = 256 + 226 // max value of r/g/b output
enum {
YUV_FIX = 16, // fixed-point precision for RGB->YUV
YUV_HALF = 1 << (YUV_FIX - 1),
YUV_MASK = (256 << YUV_FIX) - 1,
YUV_RANGE_MIN = -227, // min value of r/g/b output
YUV_RANGE_MAX = 256 + 226, // max value of r/g/b output
YUV_FIX2 = 14, // fixed-point precision for YUV->RGB
YUV_HALF2 = 1 << (YUV_FIX2 - 1),
YUV_MASK2 = (256 << YUV_FIX2) - 1
};
#ifdef WEBP_YUV_USE_TABLE
// These constants are 14b fixed-point version of ITU-R BT.601 constants.
#define kYScale 19077 // 1.164 = 255 / 219
#define kVToR 26149 // 1.596 = 255 / 112 * 0.701
#define kUToG 6419 // 0.391 = 255 / 112 * 0.886 * 0.114 / 0.587
#define kVToG 13320 // 0.813 = 255 / 112 * 0.701 * 0.299 / 0.587
#define kUToB 33050 // 2.018 = 255 / 112 * 0.886
#define kRCst (-kYScale * 16 - kVToR * 128 + YUV_HALF2)
#define kGCst (-kYScale * 16 + kUToG * 128 + kVToG * 128 + YUV_HALF2)
#define kBCst (-kYScale * 16 - kUToB * 128 + YUV_HALF2)
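// How the 14-bit constants above can be reproduced (sketch, not from the
// libwebp sources): each BT.601 factor is scaled by 2^YUV_FIX2 = 16384 and
// rounded, e.g. 1.596 * 16384 = 26148.9 -> 26149 (kVToR) and
// (255 / 219) * 16384 = 19077.2 -> 19077 (kYScale).
#include <math.h>
#include <stdio.h>
int main(void) {
  printf("kYScale = %.0f\n", floor(255.0 / 219.0 * 16384.0 + 0.5));          // 19077
  printf("kVToR   = %.0f\n", floor(255.0 / 112.0 * 0.701 * 16384.0 + 0.5));  // 26149
  printf("kUToB   = %.0f\n", floor(255.0 / 112.0 * 0.886 * 16384.0 + 0.5));  // 33050
  return 0;
}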
extern int16_t VP8kVToR[256], VP8kUToB[256];
extern int32_t VP8kVToG[256], VP8kUToG[256];
extern uint8_t VP8kClip[YUV_RANGE_MAX - YUV_RANGE_MIN];
extern uint8_t VP8kClip4Bits[YUV_RANGE_MAX - YUV_RANGE_MIN];
//------------------------------------------------------------------------------
static WEBP_INLINE void VP8YuvToRgb(uint8_t y, uint8_t u, uint8_t v,
#if !defined(WEBP_YUV_USE_TABLE)
// slower on x86 by ~7-8%, but bit-exact with the SSE2 version
static WEBP_INLINE int VP8Clip8(int v) {
return ((v & ~YUV_MASK2) == 0) ? (v >> YUV_FIX2) : (v < 0) ? 0 : 255;
}
static WEBP_INLINE int VP8YUVToR(int y, int v) {
return VP8Clip8(kYScale * y + kVToR * v + kRCst);
}
static WEBP_INLINE int VP8YUVToG(int y, int u, int v) {
return VP8Clip8(kYScale * y - kUToG * u - kVToG * v + kGCst);
}
static WEBP_INLINE int VP8YUVToB(int y, int u) {
return VP8Clip8(kYScale * y + kUToB * u + kBCst);
}
static WEBP_INLINE void VP8YuvToRgb(int y, int u, int v,
uint8_t* const rgb) {
const int r_off = VP8kVToR[v];
const int g_off = (VP8kVToG[v] + VP8kUToG[u]) >> YUV_FIX;
const int b_off = VP8kUToB[u];
rgb[0] = VP8kClip[y + r_off - YUV_RANGE_MIN];
rgb[1] = VP8kClip[y + g_off - YUV_RANGE_MIN];
rgb[2] = VP8kClip[y + b_off - YUV_RANGE_MIN];
rgb[0] = VP8YUVToR(y, v);
rgb[1] = VP8YUVToG(y, u, v);
rgb[2] = VP8YUVToB(y, u);
}
static WEBP_INLINE void VP8YuvToBgr(uint8_t y, uint8_t u, uint8_t v,
static WEBP_INLINE void VP8YuvToBgr(int y, int u, int v,
uint8_t* const bgr) {
const int r_off = VP8kVToR[v];
const int g_off = (VP8kVToG[v] + VP8kUToG[u]) >> YUV_FIX;
const int b_off = VP8kUToB[u];
bgr[0] = VP8kClip[y + b_off - YUV_RANGE_MIN];
bgr[1] = VP8kClip[y + g_off - YUV_RANGE_MIN];
bgr[2] = VP8kClip[y + r_off - YUV_RANGE_MIN];
bgr[0] = VP8YUVToB(y, u);
bgr[1] = VP8YUVToG(y, u, v);
bgr[2] = VP8YUVToR(y, v);
}
static WEBP_INLINE void VP8YuvToRgb565(uint8_t y, uint8_t u, uint8_t v,
static WEBP_INLINE void VP8YuvToRgb565(int y, int u, int v,
uint8_t* const rgb) {
const int r_off = VP8kVToR[v];
const int g_off = (VP8kVToG[v] + VP8kUToG[u]) >> YUV_FIX;
const int b_off = VP8kUToB[u];
const uint8_t rg = ((VP8kClip[y + r_off - YUV_RANGE_MIN] & 0xf8) |
(VP8kClip[y + g_off - YUV_RANGE_MIN] >> 5));
const uint8_t gb = (((VP8kClip[y + g_off - YUV_RANGE_MIN] << 3) & 0xe0) |
(VP8kClip[y + b_off - YUV_RANGE_MIN] >> 3));
const int r = VP8YUVToR(y, v); // 5 usable bits
const int g = VP8YUVToG(y, u, v); // 6 usable bits
const int b = VP8YUVToB(y, u); // 5 usable bits
const int rg = (r & 0xf8) | (g >> 5);
const int gb = ((g << 3) & 0xe0) | (b >> 3);
#ifdef WEBP_SWAP_16BIT_CSP
rgb[0] = gb;
rgb[1] = rg;
@ -106,14 +133,13 @@ static WEBP_INLINE void VP8YuvToRgb565(uint8_t y, uint8_t u, uint8_t v,
#endif
}
static WEBP_INLINE void VP8YuvToRgba4444(uint8_t y, uint8_t u, uint8_t v,
static WEBP_INLINE void VP8YuvToRgba4444(int y, int u, int v,
uint8_t* const argb) {
const int r_off = VP8kVToR[v];
const int g_off = (VP8kVToG[v] + VP8kUToG[u]) >> YUV_FIX;
const int b_off = VP8kUToB[u];
const uint8_t rg = ((VP8kClip4Bits[y + r_off - YUV_RANGE_MIN] << 4) |
VP8kClip4Bits[y + g_off - YUV_RANGE_MIN]);
const uint8_t ba = (VP8kClip4Bits[y + b_off - YUV_RANGE_MIN] << 4) | 0x0f;
const int r = VP8YUVToR(y, v); // 4 usable bits
const int g = VP8YUVToG(y, u, v); // 4 usable bits
const int b = VP8YUVToB(y, u); // 4 usable bits
const int rg = (r & 0xf0) | (g >> 4);
const int ba = (b & 0xf0) | 0x0f; // overwrite the lower 4 bits
#ifdef WEBP_SWAP_16BIT_CSP
argb[0] = ba;
argb[1] = rg;
@ -123,61 +149,45 @@ static WEBP_INLINE void VP8YuvToRgba4444(uint8_t y, uint8_t u, uint8_t v,
#endif
}
#else // Table-free version (slower on x86)
#else
// These constants are 16b fixed-point version of ITU-R BT.601 constants
#define kYScale 76309 // 1.164 = 255 / 219
#define kVToR 104597 // 1.596 = 255 / 112 * 0.701
#define kUToG 25674 // 0.391 = 255 / 112 * 0.886 * 0.114 / 0.587
#define kVToG 53278 // 0.813 = 255 / 112 * 0.701 * 0.299 / 0.587
#define kUToB 132201 // 2.018 = 255 / 112 * 0.886
#define kRCst (-kYScale * 16 - kVToR * 128 + YUV_HALF)
#define kGCst (-kYScale * 16 + kUToG * 128 + kVToG * 128 + YUV_HALF)
#define kBCst (-kYScale * 16 - kUToB * 128 + YUV_HALF)
// Table-based version, not totally equivalent to the SSE2 version.
// Rounding diff is only +/-1 though.
static WEBP_INLINE uint8_t VP8Clip8(int v) {
return ((v & ~YUV_MASK) == 0) ? (uint8_t)(v >> YUV_FIX)
: (v < 0) ? 0u : 255u;
}
extern int16_t VP8kVToR[256], VP8kUToB[256];
extern int32_t VP8kVToG[256], VP8kUToG[256];
extern uint8_t VP8kClip[YUV_RANGE_MAX - YUV_RANGE_MIN];
extern uint8_t VP8kClip4Bits[YUV_RANGE_MAX - YUV_RANGE_MIN];
static WEBP_INLINE uint8_t VP8ClipN(int v, int N) { // clip to N bits
return ((v & ~YUV_MASK) == 0) ? (uint8_t)(v >> (YUV_FIX + (8 - N)))
: (v < 0) ? 0u : (255u >> (8 - N));
}
static WEBP_INLINE int VP8YUVToR(int y, int v) {
return kYScale * y + kVToR * v + kRCst;
}
static WEBP_INLINE int VP8YUVToG(int y, int u, int v) {
return kYScale * y - kUToG * u - kVToG * v + kGCst;
}
static WEBP_INLINE int VP8YUVToB(int y, int u) {
return kYScale * y + kUToB * u + kBCst;
}
static WEBP_INLINE void VP8YuvToRgb(uint8_t y, uint8_t u, uint8_t v,
static WEBP_INLINE void VP8YuvToRgb(int y, int u, int v,
uint8_t* const rgb) {
rgb[0] = VP8Clip8(VP8YUVToR(y, v));
rgb[1] = VP8Clip8(VP8YUVToG(y, u, v));
rgb[2] = VP8Clip8(VP8YUVToB(y, u));
const int r_off = VP8kVToR[v];
const int g_off = (VP8kVToG[v] + VP8kUToG[u]) >> YUV_FIX;
const int b_off = VP8kUToB[u];
rgb[0] = VP8kClip[y + r_off - YUV_RANGE_MIN];
rgb[1] = VP8kClip[y + g_off - YUV_RANGE_MIN];
rgb[2] = VP8kClip[y + b_off - YUV_RANGE_MIN];
}
static WEBP_INLINE void VP8YuvToBgr(uint8_t y, uint8_t u, uint8_t v,
static WEBP_INLINE void VP8YuvToBgr(int y, int u, int v,
uint8_t* const bgr) {
bgr[0] = VP8Clip8(VP8YUVToB(y, u));
bgr[1] = VP8Clip8(VP8YUVToG(y, u, v));
bgr[2] = VP8Clip8(VP8YUVToR(y, v));
const int r_off = VP8kVToR[v];
const int g_off = (VP8kVToG[v] + VP8kUToG[u]) >> YUV_FIX;
const int b_off = VP8kUToB[u];
bgr[0] = VP8kClip[y + b_off - YUV_RANGE_MIN];
bgr[1] = VP8kClip[y + g_off - YUV_RANGE_MIN];
bgr[2] = VP8kClip[y + r_off - YUV_RANGE_MIN];
}
static WEBP_INLINE void VP8YuvToRgb565(uint8_t y, uint8_t u, uint8_t v,
static WEBP_INLINE void VP8YuvToRgb565(int y, int u, int v,
uint8_t* const rgb) {
const int r = VP8Clip8(VP8YUVToR(y, u));
const int g = VP8ClipN(VP8YUVToG(y, u, v), 6);
const int b = VP8ClipN(VP8YUVToB(y, v), 5);
const uint8_t rg = (r & 0xf8) | (g >> 3);
const uint8_t gb = (g << 5) | b;
const int r_off = VP8kVToR[v];
const int g_off = (VP8kVToG[v] + VP8kUToG[u]) >> YUV_FIX;
const int b_off = VP8kUToB[u];
const int rg = ((VP8kClip[y + r_off - YUV_RANGE_MIN] & 0xf8) |
(VP8kClip[y + g_off - YUV_RANGE_MIN] >> 5));
const int gb = (((VP8kClip[y + g_off - YUV_RANGE_MIN] << 3) & 0xe0) |
(VP8kClip[y + b_off - YUV_RANGE_MIN] >> 3));
#ifdef WEBP_SWAP_16BIT_CSP
rgb[0] = gb;
rgb[1] = rg;
@ -187,13 +197,14 @@ static WEBP_INLINE void VP8YuvToRgb565(uint8_t y, uint8_t u, uint8_t v,
#endif
}
static WEBP_INLINE void VP8YuvToRgba4444(uint8_t y, uint8_t u, uint8_t v,
static WEBP_INLINE void VP8YuvToRgba4444(int y, int u, int v,
uint8_t* const argb) {
const int r = VP8Clip8(VP8YUVToR(y, u));
const int g = VP8ClipN(VP8YUVToG(y, u, v), 4);
const int b = VP8Clip8(VP8YUVToB(y, v));
const uint8_t rg = (r & 0xf0) | g;
const uint8_t ba = b | 0x0f; // overwrite the lower 4 bits
const int r_off = VP8kVToR[v];
const int g_off = (VP8kVToG[v] + VP8kUToG[u]) >> YUV_FIX;
const int b_off = VP8kUToB[u];
const int rg = ((VP8kClip4Bits[y + r_off - YUV_RANGE_MIN] << 4) |
VP8kClip4Bits[y + g_off - YUV_RANGE_MIN]);
const int ba = (VP8kClip4Bits[y + b_off - YUV_RANGE_MIN] << 4) | 0x0f;
#ifdef WEBP_SWAP_16BIT_CSP
argb[0] = ba;
argb[1] = rg;
@ -205,6 +216,9 @@ static WEBP_INLINE void VP8YuvToRgba4444(uint8_t y, uint8_t u, uint8_t v,
#endif // WEBP_YUV_USE_TABLE
//-----------------------------------------------------------------------------
// Alpha handling variants
static WEBP_INLINE void VP8YuvToArgb(uint8_t y, uint8_t u, uint8_t v,
uint8_t* const argb) {
argb[0] = 0xff;
@ -226,56 +240,77 @@ static WEBP_INLINE void VP8YuvToRgba(uint8_t y, uint8_t u, uint8_t v,
// Must be called before everything, to initialize the tables.
void VP8YUVInit(void);
//-----------------------------------------------------------------------------
// SSE2 extra functions (mostly for upsampling_sse2.c)
#if defined(WEBP_USE_SSE2)
#if defined(FANCY_UPSAMPLING)
// Process 32 pixels and store the result (24b or 32b per pixel) in *dst.
void VP8YuvToRgba32(const uint8_t* y, const uint8_t* u, const uint8_t* v,
uint8_t* dst);
void VP8YuvToRgb32(const uint8_t* y, const uint8_t* u, const uint8_t* v,
uint8_t* dst);
void VP8YuvToBgra32(const uint8_t* y, const uint8_t* u, const uint8_t* v,
uint8_t* dst);
void VP8YuvToBgr32(const uint8_t* y, const uint8_t* u, const uint8_t* v,
uint8_t* dst);
#endif // FANCY_UPSAMPLING
// Must be called to initialize tables before using the functions.
void VP8YUVInitSSE2(void);
#endif // WEBP_USE_SSE2
//------------------------------------------------------------------------------
// RGB -> YUV conversion
static WEBP_INLINE int VP8ClipUV(int v) {
v = (v + (257 << (YUV_FIX + 2 - 1))) >> (YUV_FIX + 2);
return ((v & ~0xff) == 0) ? v : (v < 0) ? 0 : 255;
// Stub functions that can be called with various rounding values:
static WEBP_INLINE int VP8ClipUV(int uv, int rounding) {
uv = (uv + rounding + (128 << (YUV_FIX + 2))) >> (YUV_FIX + 2);
return ((uv & ~0xff) == 0) ? uv : (uv < 0) ? 0 : 255;
}
#ifndef USE_YUVj
static WEBP_INLINE int VP8RGBToY(int r, int g, int b) {
const int kRound = (1 << (YUV_FIX - 1)) + (16 << YUV_FIX);
static WEBP_INLINE int VP8RGBToY(int r, int g, int b, int rounding) {
const int luma = 16839 * r + 33059 * g + 6420 * b;
return (luma + kRound) >> YUV_FIX; // no need to clip
return (luma + rounding + (16 << YUV_FIX)) >> YUV_FIX; // no need to clip
}
static WEBP_INLINE int VP8RGBToU(int r, int g, int b) {
static WEBP_INLINE int VP8RGBToU(int r, int g, int b, int rounding) {
const int u = -9719 * r - 19081 * g + 28800 * b;
return VP8ClipUV(u);
return VP8ClipUV(u, rounding);
}
static WEBP_INLINE int VP8RGBToV(int r, int g, int b) {
static WEBP_INLINE int VP8RGBToV(int r, int g, int b, int rounding) {
const int v = +28800 * r - 24116 * g - 4684 * b;
return VP8ClipUV(v);
return VP8ClipUV(v, rounding);
}
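// A worked example (sketch, not from the libwebp sources) of the Rec.601 path
// above, assuming the caller passes the usual rounding term YUV_HALF = 1 << 15:
// pure white (255,255,255) lands on Y = 235, the top of the studio-swing
// [16,235] range quoted at the top of this header.
#include <stdio.h>
int main(void) {
  const int YUV_FIX = 16;
  const int r = 255, g = 255, b = 255;
  const int rounding = 1 << (YUV_FIX - 1);
  const int luma = 16839 * r + 33059 * g + 6420 * b;
  printf("Y = %d\n", (luma + rounding + (16 << YUV_FIX)) >> YUV_FIX);  // 235
  return 0;
}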
#else
// This JPEG-YUV colorspace, only for comparison!
// These are also 16-bit precision coefficients from Rec.601, but with full
// These are also 16bit precision coefficients from Rec.601, but with full
// [0..255] output range.
static WEBP_INLINE int VP8RGBToY(int r, int g, int b) {
const int kRound = (1 << (YUV_FIX - 1));
static WEBP_INLINE int VP8RGBToY(int r, int g, int b, int rounding) {
const int luma = 19595 * r + 38470 * g + 7471 * b;
return (luma + kRound) >> YUV_FIX; // no need to clip
return (luma + rounding) >> YUV_FIX; // no need to clip
}
static WEBP_INLINE int VP8RGBToU(int r, int g, int b) {
static WEBP_INLINE int VP8_RGB_TO_U(int r, int g, int b, int rounding) {
const int u = -11058 * r - 21710 * g + 32768 * b;
return VP8ClipUV(u);
return VP8ClipUV(u, rounding);
}
static WEBP_INLINE int VP8RGBToV(int r, int g, int b) {
static WEBP_INLINE int VP8_RGB_TO_V(int r, int g, int b, int rounding) {
const int v = 32768 * r - 27439 * g - 5329 * b;
return VP8ClipUV(v);
return VP8ClipUV(v, rounding);
}
#endif // USE_YUVj
#if defined(__cplusplus) || defined(c_plusplus)
#ifdef __cplusplus
} // extern "C"
#endif

View File

@ -1,8 +1,10 @@
// Copyright 2011 Google Inc. All Rights Reserved.
//
// This code is licensed under the same terms as WebM:
// Software License Agreement: http://www.webmproject.org/license/software/
// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// Alpha-plane compression.
@ -17,10 +19,6 @@
#include "../utils/quant_levels.h"
#include "../webp/format_constants.h"
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
// -----------------------------------------------------------------------------
// Encodes the given alpha data via specified compression method 'method'.
// The pre-processing (quantization) is performed if 'quality' is less than 100.
@ -69,7 +67,7 @@ static int EncodeLossless(const uint8_t* const data, int width, int height,
const uint8_t* src = data;
for (j = 0; j < picture.height; ++j) {
for (i = 0; i < picture.width; ++i) {
dst[i] = (src[i] << 8) | 0xff000000u;
dst[i] = src[i] << 8; // we leave A/R/B channels zero'd.
}
src += width;
dst += picture.argb_stride;
@ -79,8 +77,10 @@ static int EncodeLossless(const uint8_t* const data, int width, int height,
WebPConfigInit(&config);
config.lossless = 1;
config.method = effort_level; // impact is very small
// Set a moderate default quality setting for alpha.
config.quality = 5.f * effort_level;
// Set a low default quality for encoding alpha. Ensure that Alpha quality at
// lower methods (3 and below) is less than the threshold for triggering
// costly 'BackwardReferencesTraceBackwards'.
config.quality = 8.f * effort_level;
assert(config.quality >= 0 && config.quality <= 100.f);
ok = VP8LBitWriterInit(&tmp_bw, (width * height) >> 3);
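Concretely (cross-referencing a later hunk in this same change): with config.method set to effort_level above, an effort_level of 3 yields an alpha quality of 8 * 3 = 24, which stays just under the quality >= 25 gate that enables the costly TraceBackwards search in VP8LGetBackwardReferences(), while effort_level 4 and above (quality 32 and up) opt in.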
@ -97,12 +97,19 @@ static int EncodeLossless(const uint8_t* const data, int width, int height,
// -----------------------------------------------------------------------------
// Small struct to hold the result of a filter mode compression attempt.
typedef struct {
size_t score;
VP8BitWriter bw;
WebPAuxStats stats;
} FilterTrial;
// This function always returns an initialized 'bw' object, even upon error.
static int EncodeAlphaInternal(const uint8_t* const data, int width, int height,
int method, int filter, int reduce_levels,
int effort_level, // in [0..6] range
uint8_t* const tmp_alpha,
VP8BitWriter* const bw,
WebPAuxStats* const stats) {
FilterTrial* result) {
int ok = 0;
const uint8_t* alpha_src;
WebPFilterFunc filter_func;
@ -123,8 +130,8 @@ static int EncodeAlphaInternal(const uint8_t* const data, int width, int height,
header = method | (filter << 2);
if (reduce_levels) header |= ALPHA_PREPROCESSED_LEVELS << 4;
VP8BitWriterInit(bw, expected_size);
VP8BitWriterAppend(bw, &header, ALPHA_HEADER_LEN);
VP8BitWriterInit(&result->bw, expected_size);
VP8BitWriterAppend(&result->bw, &header, ALPHA_HEADER_LEN);
filter_func = WebPFilters[filter];
if (filter_func != NULL) {
@ -135,12 +142,14 @@ static int EncodeAlphaInternal(const uint8_t* const data, int width, int height,
}
if (method == ALPHA_NO_COMPRESSION) {
ok = VP8BitWriterAppend(bw, alpha_src, width * height);
ok = ok && !bw->error_;
ok = VP8BitWriterAppend(&result->bw, alpha_src, width * height);
ok = ok && !result->bw.error_;
} else {
ok = EncodeLossless(alpha_src, width, height, effort_level, bw, stats);
VP8BitWriterFinish(bw);
ok = EncodeLossless(alpha_src, width, height, effort_level,
&result->bw, &result->stats);
VP8BitWriterFinish(&result->bw);
}
result->score = VP8BitWriterSize(&result->bw);
return ok;
}
@ -156,6 +165,104 @@ static void CopyPlane(const uint8_t* src, int src_stride,
}
}
static int GetNumColors(const uint8_t* data, int width, int height,
int stride) {
int j;
int colors = 0;
uint8_t color[256] = { 0 };
for (j = 0; j < height; ++j) {
int i;
const uint8_t* const p = data + j * stride;
for (i = 0; i < width; ++i) {
color[p[i]] = 1;
}
}
for (j = 0; j < 256; ++j) {
if (color[j] > 0) ++colors;
}
return colors;
}
#define FILTER_TRY_NONE (1 << WEBP_FILTER_NONE)
#define FILTER_TRY_ALL ((1 << WEBP_FILTER_LAST) - 1)
// Given the input 'filter' option, return an OR'd bit-set of filters to try.
static uint32_t GetFilterMap(const uint8_t* alpha, int width, int height,
int filter, int effort_level) {
uint32_t bit_map = 0U;
if (filter == WEBP_FILTER_FAST) {
// Quick estimate of the best candidate.
int try_filter_none = (effort_level > 3);
const int kMinColorsForFilterNone = 16;
const int kMaxColorsForFilterNone = 192;
const int num_colors = GetNumColors(alpha, width, height, width);
// For a low number of colors, NONE yields better compression.
filter = (num_colors <= kMinColorsForFilterNone) ? WEBP_FILTER_NONE :
EstimateBestFilter(alpha, width, height, width);
bit_map |= 1 << filter;
// For a large number of colors, try FILTER_NONE in addition to the best
// filter.
if (try_filter_none || num_colors > kMaxColorsForFilterNone) {
bit_map |= FILTER_TRY_NONE;
}
} else if (filter == WEBP_FILTER_NONE) {
bit_map = FILTER_TRY_NONE;
} else { // WEBP_FILTER_BEST -> try all
bit_map = FILTER_TRY_ALL;
}
return bit_map;
}
static void InitFilterTrial(FilterTrial* const score) {
score->score = (size_t)~0U;
VP8BitWriterInit(&score->bw, 0);
}
static int ApplyFiltersAndEncode(const uint8_t* alpha, int width, int height,
size_t data_size, int method, int filter,
int reduce_levels, int effort_level,
uint8_t** const output,
size_t* const output_size,
WebPAuxStats* const stats) {
int ok = 1;
FilterTrial best;
uint32_t try_map =
GetFilterMap(alpha, width, height, filter, effort_level);
InitFilterTrial(&best);
if (try_map != FILTER_TRY_NONE) {
uint8_t* filtered_alpha = (uint8_t*)malloc(data_size);
if (filtered_alpha == NULL) return 0;
for (filter = WEBP_FILTER_NONE; ok && try_map; ++filter, try_map >>= 1) {
if (try_map & 1) {
FilterTrial trial;
ok = EncodeAlphaInternal(alpha, width, height, method, filter,
reduce_levels, effort_level, filtered_alpha,
&trial);
if (ok && trial.score < best.score) {
VP8BitWriterWipeOut(&best.bw);
best = trial;
} else {
VP8BitWriterWipeOut(&trial.bw);
}
}
}
free(filtered_alpha);
} else {
ok = EncodeAlphaInternal(alpha, width, height, method, WEBP_FILTER_NONE,
reduce_levels, effort_level, NULL, &best);
}
if (ok) {
if (stats != NULL) *stats = best.stats;
*output_size = VP8BitWriterSize(&best.bw);
*output = VP8BitWriterBuf(&best.bw);
} else {
VP8BitWriterWipeOut(&best.bw);
}
return ok;
}
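For example, with WEBP_FILTER_BEST the loop above walks every bit of FILTER_TRY_ALL, i.e. all filters up to WEBP_FILTER_GRADIENT (NONE, HORIZONTAL, VERTICAL and GRADIENT in the enum), keeps the bit-writer of the trial with the smallest score and wipes out the losers; only the surviving best.bw buffer is handed back through *output and *output_size.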
static int EncodeAlpha(VP8Encoder* const enc,
int quality, int method, int filter,
int effort_level,
@ -186,6 +293,11 @@ static int EncodeAlpha(VP8Encoder* const enc,
return 0;
}
if (method == ALPHA_NO_COMPRESSION) {
// Don't filter, as filtering will make no impact on compressed size.
filter = WEBP_FILTER_NONE;
}
quant_alpha = (uint8_t*)malloc(data_size);
if (quant_alpha == NULL) {
return 0;
@ -204,85 +316,19 @@ static int EncodeAlpha(VP8Encoder* const enc,
}
if (ok) {
VP8BitWriter bw;
int test_filter;
uint8_t* filtered_alpha = NULL;
// We always test WEBP_FILTER_NONE first.
ok = EncodeAlphaInternal(quant_alpha, width, height,
method, WEBP_FILTER_NONE, reduce_levels,
effort_level, NULL, &bw, pic->stats);
if (!ok) {
VP8BitWriterWipeOut(&bw);
goto End;
ok = ApplyFiltersAndEncode(quant_alpha, width, height, data_size, method,
filter, reduce_levels, effort_level, output,
output_size, pic->stats);
if (pic->stats != NULL) { // need stats?
pic->stats->coded_size += (int)(*output_size);
enc->sse_[3] = sse;
}
if (filter == WEBP_FILTER_FAST) { // Quick estimate of a second candidate?
filter = EstimateBestFilter(quant_alpha, width, height, width);
}
// Stop?
if (filter == WEBP_FILTER_NONE) {
goto Ok;
}
filtered_alpha = (uint8_t*)malloc(data_size);
ok = (filtered_alpha != NULL);
if (!ok) {
goto End;
}
// Try the other mode(s).
{
WebPAuxStats best_stats;
size_t best_score = VP8BitWriterSize(&bw);
memset(&best_stats, 0, sizeof(best_stats)); // prevent spurious warning
if (pic->stats != NULL) best_stats = *pic->stats;
for (test_filter = WEBP_FILTER_HORIZONTAL;
ok && (test_filter <= WEBP_FILTER_GRADIENT);
++test_filter) {
VP8BitWriter tmp_bw;
if (filter != WEBP_FILTER_BEST && test_filter != filter) {
continue;
}
ok = EncodeAlphaInternal(quant_alpha, width, height,
method, test_filter, reduce_levels,
effort_level, filtered_alpha, &tmp_bw,
pic->stats);
if (ok) {
const size_t score = VP8BitWriterSize(&tmp_bw);
if (score < best_score) {
// swap bitwriter objects.
VP8BitWriter tmp = tmp_bw;
tmp_bw = bw;
bw = tmp;
best_score = score;
if (pic->stats != NULL) best_stats = *pic->stats;
}
} else {
VP8BitWriterWipeOut(&bw);
}
VP8BitWriterWipeOut(&tmp_bw);
}
if (pic->stats != NULL) *pic->stats = best_stats;
}
Ok:
if (ok) {
*output_size = VP8BitWriterSize(&bw);
*output = VP8BitWriterBuf(&bw);
if (pic->stats != NULL) { // need stats?
pic->stats->coded_size += (int)(*output_size);
enc->sse_[3] = sse;
}
}
free(filtered_alpha);
}
End:
free(quant_alpha);
return ok;
}
//------------------------------------------------------------------------------
// Main calls
@ -362,6 +408,3 @@ int VP8EncDeleteAlpha(VP8Encoder* const enc) {
return ok;
}
#if defined(__cplusplus) || defined(c_plusplus)
} // extern "C"
#endif

View File

@ -1,8 +1,10 @@
// Copyright 2011 Google Inc. All Rights Reserved.
//
// This code is licensed under the same terms as WebM:
// Software License Agreement: http://www.webmproject.org/license/software/
// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// Macroblock analysis
@ -17,10 +19,6 @@
#include "./cost.h"
#include "../utils/utils.h"
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
#define MAX_ITERS_K_MEANS 6
//------------------------------------------------------------------------------
@ -53,6 +51,7 @@ static void SmoothSegmentMap(VP8Encoder* const enc) {
for (n = 0; n < NUM_MB_SEGMENTS; ++n) {
if (cnt[n] >= majority_cnt_3_x_3_grid) {
majority_seg = n;
break;
}
}
tmp[x + y * w] = majority_seg;
@ -151,6 +150,8 @@ static void AssignSegments(VP8Encoder* const enc,
// 'int' type is ok for histo, and won't overflow
int accum[NUM_MB_SEGMENTS], dist_accum[NUM_MB_SEGMENTS];
assert(nb >= 1);
// bracket the input
for (n = 0; n <= MAX_ALPHA && alphas[n] == 0; ++n) {}
min_a = n;
@ -159,8 +160,9 @@ static void AssignSegments(VP8Encoder* const enc,
range_a = max_a - min_a;
// Spread initial centers evenly
for (n = 1, k = 0; n < 2 * nb; n += 2) {
centers[k++] = min_a + (n * range_a) / (2 * nb);
for (k = 0, n = 1; k < nb; ++k, n += 2) {
assert(n < 2 * nb);
centers[k] = min_a + (n * range_a) / (2 * nb);
}
for (k = 0; k < MAX_ITERS_K_MEANS; ++k) { // few iters are enough
@ -175,7 +177,7 @@ static void AssignSegments(VP8Encoder* const enc,
n = 0; // track the nearest center for current 'a'
for (a = min_a; a <= max_a; ++a) {
if (alphas[a]) {
while (n < nb - 1 && abs(a - centers[n + 1]) < abs(a - centers[n])) {
while (n + 1 < nb && abs(a - centers[n + 1]) < abs(a - centers[n])) {
n++;
}
map[a] = n;
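The even center spreading a few lines above places the nb initial k-means centers on the odd multiples of range_a / (2 * nb). A small illustrative helper (hypothetical, not in the tree), with made-up bounds:

/* Illustrative only: mirrors the initialization loop in AssignSegments(). */
static void SpreadCenters(int min_a, int max_a, int nb, int centers[]) {
  const int range_a = max_a - min_a;
  int k, n;
  for (k = 0, n = 1; k < nb; ++k, n += 2) {  /* n = 1, 3, 5, ... */
    centers[k] = min_a + (n * range_a) / (2 * nb);
  }
}
/* e.g. min_a = 0, max_a = 255, nb = 4  ->  centers = { 31, 95, 159, 223 } */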
@ -382,38 +384,114 @@ static void ResetAllMBInfo(VP8Encoder* const enc) {
// Default susceptibilities.
enc->dqm_[0].alpha_ = 0;
enc->dqm_[0].beta_ = 0;
// Note: we can't compute this alpha_ / uv_alpha_.
// Note: we can't compute this alpha_ / uv_alpha_ -> set to default value.
enc->alpha_ = 0;
enc->uv_alpha_ = 0;
WebPReportProgress(enc->pic_, enc->percent_ + 20, &enc->percent_);
}
// struct used to collect job result
typedef struct {
WebPWorker worker;
int alphas[MAX_ALPHA + 1];
int alpha, uv_alpha;
VP8EncIterator it;
int delta_progress;
} SegmentJob;
// main work call
static int DoSegmentsJob(SegmentJob* const job, VP8EncIterator* const it) {
int ok = 1;
if (!VP8IteratorIsDone(it)) {
uint8_t tmp[32 + ALIGN_CST];
uint8_t* const scratch = (uint8_t*)DO_ALIGN(tmp);
do {
// Let's pretend we have perfect lossless reconstruction.
VP8IteratorImport(it, scratch);
MBAnalyze(it, job->alphas, &job->alpha, &job->uv_alpha);
ok = VP8IteratorProgress(it, job->delta_progress);
} while (ok && VP8IteratorNext(it));
}
return ok;
}
static void MergeJobs(const SegmentJob* const src, SegmentJob* const dst) {
int i;
for (i = 0; i <= MAX_ALPHA; ++i) dst->alphas[i] += src->alphas[i];
dst->alpha += src->alpha;
dst->uv_alpha += src->uv_alpha;
}
// initialize the job struct with some TODOs
static void InitSegmentJob(VP8Encoder* const enc, SegmentJob* const job,
int start_row, int end_row) {
WebPWorkerInit(&job->worker);
job->worker.data1 = job;
job->worker.data2 = &job->it;
job->worker.hook = (WebPWorkerHook)DoSegmentsJob;
VP8IteratorInit(enc, &job->it);
VP8IteratorSetRow(&job->it, start_row);
VP8IteratorSetCountDown(&job->it, (end_row - start_row) * enc->mb_w_);
memset(job->alphas, 0, sizeof(job->alphas));
job->alpha = 0;
job->uv_alpha = 0;
// only one of the two jobs can record the progress, since we don't
// expect the user's hook to be thread-safe
job->delta_progress = (start_row == 0) ? 20 : 0;
}
// main entry point
int VP8EncAnalyze(VP8Encoder* const enc) {
int ok = 1;
const int do_segments =
enc->config_->emulate_jpeg_size || // We need the complexity evaluation.
(enc->segment_hdr_.num_segments_ > 1) ||
(enc->method_ == 0); // for method 0, we need preds_[] to be filled.
enc->alpha_ = 0;
enc->uv_alpha_ = 0;
if (do_segments) {
int alphas[MAX_ALPHA + 1] = { 0 };
VP8EncIterator it;
VP8IteratorInit(enc, &it);
do {
VP8IteratorImport(&it);
MBAnalyze(&it, alphas, &enc->alpha_, &enc->uv_alpha_);
ok = VP8IteratorProgress(&it, 20);
// Let's pretend we have perfect lossless reconstruction.
} while (ok && VP8IteratorNext(&it, it.yuv_in_));
enc->alpha_ /= enc->mb_w_ * enc->mb_h_;
enc->uv_alpha_ /= enc->mb_w_ * enc->mb_h_;
if (ok) AssignSegments(enc, alphas);
const int last_row = enc->mb_h_;
// We give the main thread a little more than half of the work.
const int split_row = (9 * last_row + 15) >> 4;
const int total_mb = last_row * enc->mb_w_;
#ifdef WEBP_USE_THREAD
const int kMinSplitRow = 2; // minimal rows needed for mt to be worth it
const int do_mt = (enc->thread_level_ > 0) && (split_row >= kMinSplitRow);
#else
const int do_mt = 0;
#endif
SegmentJob main_job;
if (do_mt) {
SegmentJob side_job;
// Note the use of '&' instead of '&&' because we must call the functions
// no matter what.
InitSegmentJob(enc, &main_job, 0, split_row);
InitSegmentJob(enc, &side_job, split_row, last_row);
// we don't need to call Reset() on main_job.worker, since we're calling
// WebPWorkerExecute() on it
ok &= WebPWorkerReset(&side_job.worker);
// launch the two jobs in parallel
if (ok) {
WebPWorkerLaunch(&side_job.worker);
WebPWorkerExecute(&main_job.worker);
ok &= WebPWorkerSync(&side_job.worker);
ok &= WebPWorkerSync(&main_job.worker);
}
WebPWorkerEnd(&side_job.worker);
if (ok) MergeJobs(&side_job, &main_job); // merge results together
} else {
// Even for single-thread case, we use the generic Worker tools.
InitSegmentJob(enc, &main_job, 0, last_row);
WebPWorkerExecute(&main_job.worker);
ok &= WebPWorkerSync(&main_job.worker);
}
WebPWorkerEnd(&main_job.worker);
if (ok) {
enc->alpha_ = main_job.alpha / total_mb;
enc->uv_alpha_ = main_job.uv_alpha / total_mb;
AssignSegments(enc, main_job.alphas);
}
} else { // Use only one default segment.
ResetAllMBInfo(enc);
}
return ok;
}
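A small numeric sketch (helper name hypothetical) of the row split used above; as the comment notes, the main thread keeps a little more than half of the macroblock rows:

static int SplitRow(int last_row) {
  return (9 * last_row + 15) >> 4;   /* same formula as in VP8EncAnalyze() */
}
/* e.g. last_row = 100 -> 57: the main job analyzes rows [0, 57),
   the side worker rows [57, 100). */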
#if defined(__cplusplus) || defined(c_plusplus)
} // extern "C"
#endif

View File

@ -1,8 +1,10 @@
// Copyright 2012 Google Inc. All Rights Reserved.
//
// This code is licensed under the same terms as WebM:
// Software License Agreement: http://www.webmproject.org/license/software/
// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// Author: Jyrki Alakuijala (jyrki@google.com)
@ -142,9 +144,10 @@ static void HashChainInsert(HashChain* const p,
}
static void GetParamsForHashChainFindCopy(int quality, int xsize,
int* window_size, int* iter_pos,
int* iter_limit) {
int cache_bits, int* window_size,
int* iter_pos, int* iter_limit) {
const int iter_mult = (quality < 27) ? 1 : 1 + ((quality - 27) >> 4);
const int iter_neg = -iter_mult * (quality >> 1);
// Limit the backward-ref window size for lower qualities.
const int max_window_size = (quality > 50) ? WINDOW_SIZE
: (quality > 25) ? (xsize << 8)
@ -152,77 +155,83 @@ static void GetParamsForHashChainFindCopy(int quality, int xsize,
assert(xsize > 0);
*window_size = (max_window_size > WINDOW_SIZE) ? WINDOW_SIZE
: max_window_size;
*iter_pos = 5 + (quality >> 3);
*iter_limit = -quality * iter_mult;
*iter_pos = 8 + (quality >> 3);
// For lower entropy images, the rigorous search loop in HashChainFindCopy
// can be relaxed.
*iter_limit = (cache_bits > 0) ? iter_neg : iter_neg / 2;
}
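As an illustration of the new tuning: at quality 75 with a non-zero cache_bits, iter_mult = 1 + ((75 - 27) >> 4) = 4 and iter_neg = -4 * (75 >> 1) = -148, so the search starts at iter_pos = 8 + (75 >> 3) = 17 and runs down to iter_limit = -148 (it would be halved to -74 when cache_bits == 0), while the window stays clamped at the full WINDOW_SIZE since quality > 50.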
static int HashChainFindCopy(const HashChain* const p,
int base_position, int xsize,
const uint32_t* const argb, int maxlen,
int base_position, int xsize_signed,
const uint32_t* const argb, int max_len,
int window_size, int iter_pos, int iter_limit,
int* const distance_ptr,
int* const length_ptr) {
const uint64_t hash_code = GetPixPairHash64(&argb[base_position]);
int prev_length = 0;
int64_t best_val = 0;
int best_length = 0;
int best_distance = 0;
const uint32_t* const argb_start = argb + base_position;
uint64_t best_val = 0;
uint32_t best_length = 1;
uint32_t best_distance = 0;
const uint32_t xsize = (uint32_t)xsize_signed;
const int min_pos =
(base_position > window_size) ? base_position - window_size : 0;
int pos;
assert(xsize > 0);
for (pos = p->hash_to_first_index_[hash_code];
if (max_len > MAX_LENGTH) {
max_len = MAX_LENGTH;
}
for (pos = p->hash_to_first_index_[GetPixPairHash64(argb_start)];
pos >= min_pos;
pos = p->chain_[pos]) {
int64_t val;
int curr_length;
uint64_t val;
uint32_t curr_length;
uint32_t distance;
const uint64_t* const ptr1 =
(const uint64_t*)(argb + pos + best_length - 1);
const uint64_t* const ptr2 =
(const uint64_t*)(argb_start + best_length - 1);
if (iter_pos < 0) {
if (iter_pos < iter_limit || best_val >= 0xff0000) {
break;
}
}
--iter_pos;
if (best_length != 0 &&
argb[pos + best_length - 1] != argb_start[best_length - 1]) {
continue;
}
curr_length = FindMatchLength(argb + pos, argb_start, maxlen);
if (curr_length < prev_length) {
continue;
}
val = 65536 * curr_length;
// Before 'expensive' linear match, check if the two arrays match at the
// current best length index and also for the succeeding elements.
if (*ptr1 != *ptr2) continue;
curr_length = FindMatchLength(argb + pos, argb_start, max_len);
if (curr_length < best_length) continue;
distance = (uint32_t)(base_position - pos);
val = curr_length << 16;
// Favoring 2d locality here gives savings for certain images.
if (base_position - pos < 9 * xsize) {
const int y = (base_position - pos) / xsize;
int x = (base_position - pos) % xsize;
if (x > xsize / 2) {
if (distance < 9 * xsize) {
const uint32_t y = distance / xsize;
uint32_t x = distance % xsize;
if (x > (xsize >> 1)) {
x = xsize - x;
}
if (x <= 7 && x >= -8) {
if (x <= 7) {
val += 9 * 9 + 9 * 9;
val -= y * y + x * x;
} else {
val -= 9 * 9 + 9 * 9;
}
} else {
val -= 9 * 9 + 9 * 9;
}
if (best_val < val) {
prev_length = curr_length;
best_val = val;
best_length = curr_length;
best_distance = base_position - pos;
if (curr_length >= MAX_LENGTH) {
best_distance = distance;
if (curr_length >= (uint32_t)max_len) {
break;
}
if ((best_distance == 1 || best_distance == xsize) &&
if ((best_distance == 1 || distance == xsize) &&
best_length >= 128) {
break;
}
}
}
*distance_ptr = best_distance;
*distance_ptr = (int)best_distance;
*length_ptr = best_length;
return (best_length >= MIN_LENGTH);
}
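To make the scoring above easier to follow, here is a hedged standalone sketch (helper name hypothetical) of just the 2-D locality term: a candidate scores length << 16, gains a bonus when it lies within a +/-7-column, less-than-9-row neighborhood of the current pixel, and takes a flat penalty otherwise.

#include <stdint.h>

static uint64_t LocalityScore(uint32_t curr_length, uint32_t distance,
                              uint32_t xsize) {
  uint64_t val = (uint64_t)curr_length << 16;
  if (distance < 9 * xsize) {
    const uint32_t y = distance / xsize;
    uint32_t x = distance % xsize;
    if (x > (xsize >> 1)) x = xsize - x;   /* fold the wrap-around column offset */
    if (x <= 7) {
      val += 9 * 9 + 9 * 9;                /* close match: bonus minus x*x + y*y */
      val -= y * y + x * x;
    } else {
      val -= 9 * 9 + 9 * 9;                /* nearby rows, but too far sideways */
    }
  } else {
    val -= 9 * 9 + 9 * 9;                  /* nine or more rows away */
  }
  return val;
}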
@ -284,18 +293,15 @@ static int BackwardReferencesHashChain(int xsize, int ysize,
if (!HashChainInit(hash_chain, pix_count)) goto Error;
refs->size = 0;
GetParamsForHashChainFindCopy(quality, xsize, &window_size, &iter_pos,
&iter_limit);
GetParamsForHashChainFindCopy(quality, xsize, cache_bits,
&window_size, &iter_pos, &iter_limit);
for (i = 0; i < pix_count; ) {
// Alternative#1: Code the pixels starting at 'i' using backward reference.
int offset = 0;
int len = 0;
if (i < pix_count - 1) { // FindCopy(i,..) reads pixels at [i] and [i + 1].
int maxlen = pix_count - i;
if (maxlen > MAX_LENGTH) {
maxlen = MAX_LENGTH;
}
HashChainFindCopy(hash_chain, i, xsize, argb, maxlen,
int max_len = pix_count - i;
HashChainFindCopy(hash_chain, i, xsize, argb, max_len,
window_size, iter_pos, iter_limit,
&offset, &len);
}
@ -307,11 +313,8 @@ static int BackwardReferencesHashChain(int xsize, int ysize,
int k;
HashChainInsert(hash_chain, &argb[i], i);
if (i < pix_count - 2) { // FindCopy(i+1,..) reads [i + 1] and [i + 2].
int maxlen = pix_count - (i + 1);
if (maxlen > MAX_LENGTH) {
maxlen = MAX_LENGTH;
}
HashChainFindCopy(hash_chain, i + 1, xsize, argb, maxlen,
int max_len = pix_count - (i + 1);
HashChainFindCopy(hash_chain, i + 1, xsize, argb, max_len,
window_size, iter_pos, iter_limit,
&offset2, &len2);
if (len2 > len + 1) {
@ -321,10 +324,10 @@ static int BackwardReferencesHashChain(int xsize, int ysize,
const int ix = VP8LColorCacheGetIndex(&hashers, pixel);
refs->refs[refs->size] = PixOrCopyCreateCacheIdx(ix);
} else {
if (use_color_cache) VP8LColorCacheInsert(&hashers, pixel);
refs->refs[refs->size] = PixOrCopyCreateLiteral(pixel);
}
++refs->size;
if (use_color_cache) VP8LColorCacheInsert(&hashers, pixel);
i++; // Backward reference to be done for next pixel.
len = len2;
offset = offset2;
@ -354,10 +357,10 @@ static int BackwardReferencesHashChain(int xsize, int ysize,
const int ix = VP8LColorCacheGetIndex(&hashers, pixel);
refs->refs[refs->size] = PixOrCopyCreateCacheIdx(ix);
} else {
if (use_color_cache) VP8LColorCacheInsert(&hashers, pixel);
refs->refs[refs->size] = PixOrCopyCreateLiteral(pixel);
}
++refs->size;
if (use_color_cache) VP8LColorCacheInsert(&hashers, pixel);
if (i + 1 < pix_count) {
HashChainInsert(hash_chain, &argb[i], i);
}
@ -459,16 +462,16 @@ static WEBP_INLINE double GetCacheCost(const CostModel* const m, uint32_t idx) {
static WEBP_INLINE double GetLengthCost(const CostModel* const m,
uint32_t length) {
int code, extra_bits_count, extra_bits_value;
PrefixEncode(length, &code, &extra_bits_count, &extra_bits_value);
return m->literal_[VALUES_IN_BYTE + code] + extra_bits_count;
int code, extra_bits;
VP8LPrefixEncodeBits(length, &code, &extra_bits);
return m->literal_[VALUES_IN_BYTE + code] + extra_bits;
}
static WEBP_INLINE double GetDistanceCost(const CostModel* const m,
uint32_t distance) {
int code, extra_bits_count, extra_bits_value;
PrefixEncode(distance, &code, &extra_bits_count, &extra_bits_value);
return m->distance_[code] + extra_bits_count;
int code, extra_bits;
VP8LPrefixEncodeBits(distance, &code, &extra_bits);
return m->distance_[code] + extra_bits;
}
static int BackwardReferencesHashChainDistanceOnly(
@ -510,8 +513,8 @@ static int BackwardReferencesHashChainDistanceOnly(
// We loop one pixel at a time, but store all currently best points to
// non-processed locations from this point.
dist_array[0] = 0;
GetParamsForHashChainFindCopy(quality, xsize, &window_size, &iter_pos,
&iter_limit);
GetParamsForHashChainFindCopy(quality, xsize, cache_bits,
&window_size, &iter_pos, &iter_limit);
for (i = 0; i < pix_count; ++i) {
double prev_cost = 0.0;
int shortmax;
@ -522,11 +525,8 @@ static int BackwardReferencesHashChainDistanceOnly(
int offset = 0;
int len = 0;
if (i < pix_count - 1) { // FindCopy reads pixels at [i] and [i + 1].
int maxlen = shortmax ? 2 : MAX_LENGTH;
if (maxlen > pix_count - i) {
maxlen = pix_count - i;
}
HashChainFindCopy(hash_chain, i, xsize, argb, maxlen,
int max_len = shortmax ? 2 : pix_count - i;
HashChainFindCopy(hash_chain, i, xsize, argb, max_len,
window_size, iter_pos, iter_limit,
&offset, &len);
}
@ -577,13 +577,13 @@ static int BackwardReferencesHashChainDistanceOnly(
const int ix = VP8LColorCacheGetIndex(&hashers, argb[i]);
cost_val += GetCacheCost(cost_model, ix) * mul0;
} else {
if (use_color_cache) VP8LColorCacheInsert(&hashers, argb[i]);
cost_val += GetLiteralCost(cost_model, argb[i]) * mul1;
}
if (cost[i] > cost_val) {
cost[i] = (float)cost_val;
dist_array[i] = 1; // only one is inserted.
}
if (use_color_cache) VP8LColorCacheInsert(&hashers, argb[i]);
}
next_symbol: ;
}
@ -645,17 +645,17 @@ static int BackwardReferencesHashChainFollowChosenPath(
}
refs->size = 0;
GetParamsForHashChainFindCopy(quality, xsize, &window_size, &iter_pos,
&iter_limit);
GetParamsForHashChainFindCopy(quality, xsize, cache_bits,
&window_size, &iter_pos, &iter_limit);
for (ix = 0; ix < chosen_path_size; ++ix, ++size) {
int offset = 0;
int len = 0;
int maxlen = chosen_path[ix];
if (maxlen != 1) {
HashChainFindCopy(hash_chain, i, xsize, argb, maxlen,
int max_len = chosen_path[ix];
if (max_len != 1) {
HashChainFindCopy(hash_chain, i, xsize, argb, max_len,
window_size, iter_pos, iter_limit,
&offset, &len);
assert(len == maxlen);
assert(len == max_len);
refs->refs[size] = PixOrCopyCreateCopy(offset, len);
if (use_color_cache) {
for (k = 0; k < len; ++k) {
@ -675,9 +675,9 @@ static int BackwardReferencesHashChainFollowChosenPath(
const int idx = VP8LColorCacheGetIndex(&hashers, argb[i]);
refs->refs[size] = PixOrCopyCreateCacheIdx(idx);
} else {
if (use_color_cache) VP8LColorCacheInsert(&hashers, argb[i]);
refs->refs[size] = PixOrCopyCreateLiteral(argb[i]);
}
if (use_color_cache) VP8LColorCacheInsert(&hashers, argb[i]);
if (i + 1 < pix_count) {
HashChainInsert(hash_chain, &argb[i], i);
}
@ -780,12 +780,14 @@ int VP8LGetBackwardReferences(int width, int height,
// Choose appropriate backward reference.
if (lz77_is_useful) {
// TraceBackwards is costly. Don't execute it at lower quality (q <= 10).
const int try_lz77_trace_backwards = (quality > 10);
// TraceBackwards is costly. Don't execute it at lower quality.
const int try_lz77_trace_backwards = (quality >= 25);
*best = refs_lz77; // default guess: lz77 is better
VP8LClearBackwardRefs(&refs_rle);
if (try_lz77_trace_backwards) {
const int recursion_level = (num_pix < 320 * 200) ? 1 : 0;
// Set recursion level for large images using a color cache.
const int recursion_level =
(num_pix < 320 * 200) && (cache_bits > 0) ? 1 : 0;
VP8LBackwardRefs refs_trace;
if (!VP8LBackwardRefsAlloc(&refs_trace, num_pix)) {
goto End;

View File

@ -1,8 +1,10 @@
// Copyright 2012 Google Inc. All Rights Reserved.
//
// This code is licensed under the same terms as WebM:
// Software License Agreement: http://www.webmproject.org/license/software/
// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// Author: Jyrki Alakuijala (jyrki@google.com)
@ -16,7 +18,7 @@
#include "../webp/types.h"
#include "../webp/format_constants.h"
#if defined(__cplusplus) || defined(c_plusplus)
#ifdef __cplusplus
extern "C" {
#endif
@ -28,73 +30,6 @@ extern "C" {
#define PIX_OR_COPY_CODES_MAX \
(NUM_LITERAL_CODES + NUM_LENGTH_CODES + (1 << MAX_COLOR_CACHE_BITS))
// -----------------------------------------------------------------------------
// PrefixEncode()
// use GNU builtins where available.
#if defined(__GNUC__) && \
((__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || __GNUC__ >= 4)
static WEBP_INLINE int BitsLog2Floor(uint32_t n) {
assert(n != 0);
return 31 ^ __builtin_clz(n);
}
#elif defined(_MSC_VER) && (defined(_M_X64) || defined(_M_IX86))
#include <intrin.h>
#pragma intrinsic(_BitScanReverse)
static WEBP_INLINE int BitsLog2Floor(uint32_t n) {
unsigned long first_set_bit;
assert(n != 0);
_BitScanReverse(&first_set_bit, n);
return first_set_bit;
}
#else
// Returns (int)floor(log2(n)). n must be > 0.
static WEBP_INLINE int BitsLog2Floor(uint32_t n) {
int log = 0;
uint32_t value = n;
int i;
assert(n != 0);
for (i = 4; i >= 0; --i) {
const int shift = (1 << i);
const uint32_t x = value >> shift;
if (x != 0) {
value = x;
log += shift;
}
}
return log;
}
#endif
static WEBP_INLINE int VP8LBitsLog2Ceiling(uint32_t n) {
const int log_floor = BitsLog2Floor(n);
if (n == (n & ~(n - 1))) // zero or a power of two.
return log_floor;
else
return log_floor + 1;
}
// Splitting of distance and length codes into prefixes and
// extra bits. The prefixes are encoded with an entropy code
// while the extra bits are stored just as normal bits.
static WEBP_INLINE void PrefixEncode(int distance, int* const code,
int* const extra_bits_count,
int* const extra_bits_value) {
if (distance > 2) { // Collect the two most significant bits.
const int highest_bit = BitsLog2Floor(--distance);
const int second_highest_bit = (distance >> (highest_bit - 1)) & 1;
*extra_bits_count = highest_bit - 1;
*extra_bits_value = distance & ((1 << *extra_bits_count) - 1);
*code = 2 * highest_bit + second_highest_bit;
} else {
*extra_bits_count = 0;
*extra_bits_value = 0;
*code = (distance == 2) ? 1 : 0;
}
}
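As a worked illustration (values chosen arbitrarily): PrefixEncode() maps a distance of 300 by first decrementing it to 299 (binary 100101011), so highest_bit = 8 and the second-highest bit is 0; that gives code = 2 * 8 + 0 = 16, extra_bits_count = 7 and extra_bits_value = 299 & 0x7f = 43, i.e. prefix code 16 is entropy-coded and the remaining 7 bits store 43 verbatim. The VP8LPrefixEncodeBits() call that replaces it in GetLengthCost()/GetDistanceCost() earlier in this change only needs the (code, extra-bit count) pair, which is why the extra-bit value argument disappears there.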
// -----------------------------------------------------------------------------
// PixOrCopy
@ -210,7 +145,7 @@ int VP8LCalculateEstimateForCacheSize(const uint32_t* const argb,
int xsize, int ysize,
int* const best_cache_bits);
#if defined(__cplusplus) || defined(c_plusplus)
#ifdef __cplusplus
}
#endif

View File

@ -1,8 +1,10 @@
// Copyright 2011 Google Inc. All Rights Reserved.
//
// This code is licensed under the same terms as WebM:
// Software License Agreement: http://www.webmproject.org/license/software/
// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// Coding tools configuration
@ -11,10 +13,6 @@
#include "../webp/encode.h"
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
//------------------------------------------------------------------------------
// WebPConfig
//------------------------------------------------------------------------------
@ -31,7 +29,7 @@ int WebPConfigInitInternal(WebPConfig* config,
config->target_PSNR = 0.;
config->method = 4;
config->sns_strength = 50;
config->filter_strength = 60; // rather high filtering, helps w/ gradients.
config->filter_strength = 60; // mid-filtering
config->filter_sharpness = 0;
config->filter_type = 1; // default: strong (so U/V is filtered too)
config->partitions = 0;
@ -56,11 +54,13 @@ int WebPConfigInitInternal(WebPConfig* config,
config->sns_strength = 80;
config->filter_sharpness = 4;
config->filter_strength = 35;
config->preprocessing &= ~2; // no dithering
break;
case WEBP_PRESET_PHOTO:
config->sns_strength = 80;
config->filter_sharpness = 3;
config->filter_strength = 30;
config->preprocessing |= 2;
break;
case WEBP_PRESET_DRAWING:
config->sns_strength = 25;
@ -70,10 +70,12 @@ int WebPConfigInitInternal(WebPConfig* config,
case WEBP_PRESET_ICON:
config->sns_strength = 0;
config->filter_strength = 0; // disable filtering to retain sharpness
config->preprocessing &= ~2; // no dithering
break;
case WEBP_PRESET_TEXT:
config->sns_strength = 0;
config->filter_strength = 0; // disable filtering to retain sharpness
config->preprocessing &= ~2; // no dithering
config->segments = 2;
break;
case WEBP_PRESET_DEFAULT:
@ -109,7 +111,7 @@ int WebPValidateConfig(const WebPConfig* config) {
return 0;
if (config->show_compressed < 0 || config->show_compressed > 1)
return 0;
if (config->preprocessing < 0 || config->preprocessing > 1)
if (config->preprocessing < 0 || config->preprocessing > 3)
return 0;
if (config->partitions < 0 || config->partitions > 3)
return 0;
@ -136,6 +138,3 @@ int WebPValidateConfig(const WebPConfig* config) {
//------------------------------------------------------------------------------
#if defined(__cplusplus) || defined(c_plusplus)
} // extern "C"
#endif

View File

@ -1,8 +1,10 @@
// Copyright 2011 Google Inc. All Rights Reserved.
//
// This code is licensed under the same terms as WebM:
// Software License Agreement: http://www.webmproject.org/license/software/
// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// Cost tables for level and modes
@ -11,10 +13,6 @@
#include "./cost.h"
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
//------------------------------------------------------------------------------
// Boolean-cost cost table
@ -385,110 +383,107 @@ const uint16_t VP8FixedCostsUV[4] = { 302, 984, 439, 642 };
// note: these values include the fixed VP8BitCost(1, 145) mode selection cost.
const uint16_t VP8FixedCostsI16[4] = { 663, 919, 872, 919 };
const uint16_t VP8FixedCostsI4[NUM_BMODES][NUM_BMODES][NUM_BMODES] = {
{ { 251, 1362, 1934, 2085, 2314, 2230, 1839, 1988, 2437, 2348 },
{ 403, 680, 1507, 1519, 2060, 2005, 1992, 1914, 1924, 1733 },
{ 353, 1121, 973, 1895, 2060, 1787, 1671, 1516, 2012, 1868 },
{ 770, 852, 1581, 632, 1393, 1780, 1823, 1936, 1074, 1218 },
{ 510, 1270, 1467, 1319, 847, 1279, 1792, 2094, 1080, 1353 },
{ 488, 1322, 918, 1573, 1300, 883, 1814, 1752, 1756, 1502 },
{ 425, 992, 1820, 1514, 1843, 2440, 937, 1771, 1924, 1129 },
{ 363, 1248, 1257, 1970, 2194, 2385, 1569, 953, 1951, 1601 },
{ 723, 1257, 1631, 964, 963, 1508, 1697, 1824, 671, 1418 },
{ 635, 1038, 1573, 930, 1673, 1413, 1410, 1687, 1410, 749 } },
{ { 451, 613, 1345, 1702, 1870, 1716, 1728, 1766, 2190, 2310 },
{ 678, 453, 1171, 1443, 1925, 1831, 2045, 1781, 1887, 1602 },
{ 711, 666, 674, 1718, 1910, 1493, 1775, 1193, 2325, 2325 },
{ 883, 854, 1583, 542, 1800, 1878, 1664, 2149, 1207, 1087 },
{ 669, 994, 1248, 1122, 949, 1179, 1376, 1729, 1070, 1244 },
{ 715, 1026, 715, 1350, 1430, 930, 1717, 1296, 1479, 1479 },
{ 544, 841, 1656, 1450, 2094, 3883, 1010, 1759, 2076, 809 },
{ 610, 855, 957, 1553, 2067, 1561, 1704, 824, 2066, 1226 },
{ 833, 960, 1416, 819, 1277, 1619, 1501, 1617, 757, 1182 },
{ 711, 964, 1252, 879, 1441, 1828, 1508, 1636, 1594, 734 } },
{ { 605, 764, 734, 1713, 1747, 1192, 1819, 1353, 1877, 2392 },
{ 866, 641, 586, 1622, 2072, 1431, 1888, 1346, 2189, 1764 },
{ 901, 851, 456, 2165, 2281, 1405, 1739, 1193, 2183, 2443 },
{ 770, 1045, 952, 1078, 1342, 1191, 1436, 1063, 1303, 995 },
{ 901, 1086, 727, 1170, 884, 1105, 1267, 1401, 1739, 1337 },
{ 951, 1162, 595, 1488, 1388, 703, 1790, 1366, 2057, 1724 },
{ 534, 986, 1273, 1987, 3273, 1485, 1024, 1399, 1583, 866 },
{ 699, 1182, 695, 1978, 1726, 1986, 1326, 714, 1750, 1672 },
{ 951, 1217, 1209, 920, 1062, 1441, 1548, 999, 952, 932 },
{ 733, 1284, 784, 1256, 1557, 1098, 1257, 1357, 1414, 908 } },
{ { 316, 1075, 1653, 1220, 2145, 2051, 1730, 2131, 1884, 1790 },
{ 745, 516, 1404, 894, 1599, 2375, 2013, 2105, 1475, 1381 },
{ 516, 729, 1088, 1319, 1637, 3426, 1636, 1275, 1531, 1453 },
{ 894, 943, 2138, 468, 1704, 2259, 2069, 1763, 1266, 1158 },
{ 605, 1025, 1235, 871, 1170, 1767, 1493, 1500, 1104, 1258 },
{ 739, 826, 1207, 1151, 1412, 846, 1305, 2726, 1014, 1569 },
{ 558, 825, 1820, 1398, 3344, 1556, 1218, 1550, 1228, 878 },
{ 429, 951, 1089, 1816, 3861, 3861, 1556, 969, 1568, 1828 },
{ 883, 961, 1752, 769, 1468, 1810, 2081, 2346, 613, 1298 },
{ 803, 895, 1372, 641, 1303, 1708, 1686, 1700, 1306, 1033 } },
{ { 439, 1267, 1270, 1579, 963, 1193, 1723, 1729, 1198, 1993 },
{ 705, 725, 1029, 1153, 1176, 1103, 1821, 1567, 1259, 1574 },
{ 723, 859, 802, 1253, 972, 1202, 1407, 1665, 1520, 1674 },
{ 894, 960, 1254, 887, 1052, 1607, 1344, 1349, 865, 1150 },
{ 833, 1312, 1337, 1205, 572, 1288, 1414, 1529, 1088, 1430 },
{ 842, 1279, 1068, 1861, 862, 688, 1861, 1630, 1039, 1381 },
{ 766, 938, 1279, 1546, 3338, 1550, 1031, 1542, 1288, 640 },
{ 715, 1090, 835, 1609, 1100, 1100, 1603, 1019, 1102, 1617 },
{ 894, 1813, 1500, 1188, 789, 1194, 1491, 1919, 617, 1333 },
{ 610, 1076, 1644, 1281, 1283, 975, 1179, 1688, 1434, 889 } },
{ { 544, 971, 1146, 1849, 1221, 740, 1857, 1621, 1683, 2430 },
{ 723, 705, 961, 1371, 1426, 821, 2081, 2079, 1839, 1380 },
{ 783, 857, 703, 2145, 1419, 814, 1791, 1310, 1609, 2206 },
{ 997, 1000, 1153, 792, 1229, 1162, 1810, 1418, 942, 979 },
{ 901, 1226, 883, 1289, 793, 715, 1904, 1649, 1319, 3108 },
{ 979, 1478, 782, 2216, 1454, 455, 3092, 1591, 1997, 1664 },
{ 663, 1110, 1504, 1114, 1522, 3311, 676, 1522, 1530, 1024 },
{ 605, 1138, 1153, 1314, 1569, 1315, 1157, 804, 1574, 1320 },
{ 770, 1216, 1218, 1227, 869, 1384, 1232, 1375, 834, 1239 },
{ 775, 1007, 843, 1216, 1225, 1074, 2527, 1479, 1149, 975 } },
{ { 477, 817, 1309, 1439, 1708, 1454, 1159, 1241, 1945, 1672 },
{ 577, 796, 1112, 1271, 1618, 1458, 1087, 1345, 1831, 1265 },
{ 663, 776, 753, 1940, 1690, 1690, 1227, 1097, 3149, 1361 },
{ 766, 1299, 1744, 1161, 1565, 1106, 1045, 1230, 1232, 707 },
{ 915, 1026, 1404, 1182, 1184, 851, 1428, 2425, 1043, 789 },
{ 883, 1456, 790, 1082, 1086, 985, 1083, 1484, 1238, 1160 },
{ 507, 1345, 2261, 1995, 1847, 3636, 653, 1761, 2287, 933 },
{ 553, 1193, 1470, 2057, 2059, 2059, 833, 779, 2058, 1263 },
{ 766, 1275, 1515, 1039, 957, 1554, 1286, 1540, 1289, 705 },
{ 499, 1378, 1496, 1385, 1850, 1850, 1044, 2465, 1515, 720 } },
{ { 553, 930, 978, 2077, 1968, 1481, 1457, 761, 1957, 2362 },
{ 694, 864, 905, 1720, 1670, 1621, 1429, 718, 2125, 1477 },
{ 699, 968, 658, 3190, 2024, 1479, 1865, 750, 2060, 2320 },
{ 733, 1308, 1296, 1062, 1576, 1322, 1062, 1112, 1172, 816 },
{ 920, 927, 1052, 939, 947, 1156, 1152, 1073, 3056, 1268 },
{ 723, 1534, 711, 1547, 1294, 892, 1553, 928, 1815, 1561 },
{ 663, 1366, 1583, 2111, 1712, 3501, 522, 1155, 2130, 1133 },
{ 614, 1731, 1188, 2343, 1944, 3733, 1287, 487, 3546, 1758 },
{ 770, 1585, 1312, 826, 884, 2673, 1185, 1006, 1195, 1195 },
{ 758, 1333, 1273, 1023, 1621, 1162, 1351, 833, 1479, 862 } },
{ { 376, 1193, 1446, 1149, 1545, 1577, 1870, 1789, 1175, 1823 },
{ 803, 633, 1136, 1058, 1350, 1323, 1598, 2247, 1072, 1252 },
{ 614, 1048, 943, 981, 1152, 1869, 1461, 1020, 1618, 1618 },
{ 1107, 1085, 1282, 592, 1779, 1933, 1648, 2403, 691, 1246 },
{ 851, 1309, 1223, 1243, 895, 1593, 1792, 2317, 627, 1076 },
{ 770, 1216, 1030, 1125, 921, 981, 1629, 1131, 1049, 1646 },
{ 626, 1469, 1456, 1081, 1489, 3278, 981, 1232, 1498, 733 },
{ 617, 1201, 812, 1220, 1476, 1476, 1478, 970, 1228, 1488 },
{ 1179, 1393, 1540, 999, 1243, 1503, 1916, 1925, 414, 1614 },
{ 943, 1088, 1490, 682, 1112, 1372, 1756, 1505, 966, 966 } },
{ { 322, 1142, 1589, 1396, 2144, 1859, 1359, 1925, 2084, 1518 },
{ 617, 625, 1241, 1234, 2121, 1615, 1524, 1858, 1720, 1004 },
{ 553, 851, 786, 1299, 1452, 1560, 1372, 1561, 1967, 1713 },
{ 770, 977, 1396, 568, 1893, 1639, 1540, 2108, 1430, 1013 },
{ 684, 1120, 1375, 982, 930, 2719, 1638, 1643, 933, 993 },
{ 553, 1103, 996, 1356, 1361, 1005, 1507, 1761, 1184, 1268 },
{ 419, 1247, 1537, 1554, 1817, 3606, 1026, 1666, 1829, 923 },
{ 439, 1139, 1101, 1257, 3710, 1922, 1205, 1040, 1931, 1529 },
{ 979, 935, 1269, 847, 1202, 1286, 1530, 1535, 827, 1036 },
{ 516, 1378, 1569, 1110, 1798, 1798, 1198, 2199, 1543, 712 } },
{ { 40, 1151, 1723, 1874, 2103, 2019, 1628, 1777, 2226, 2137 },
{ 192, 469, 1296, 1308, 1849, 1794, 1781, 1703, 1713, 1522 },
{ 142, 910, 762, 1684, 1849, 1576, 1460, 1305, 1801, 1657 },
{ 559, 641, 1370, 421, 1182, 1569, 1612, 1725, 863, 1007 },
{ 299, 1059, 1256, 1108, 636, 1068, 1581, 1883, 869, 1142 },
{ 277, 1111, 707, 1362, 1089, 672, 1603, 1541, 1545, 1291 },
{ 214, 781, 1609, 1303, 1632, 2229, 726, 1560, 1713, 918 },
{ 152, 1037, 1046, 1759, 1983, 2174, 1358, 742, 1740, 1390 },
{ 512, 1046, 1420, 753, 752, 1297, 1486, 1613, 460, 1207 },
{ 424, 827, 1362, 719, 1462, 1202, 1199, 1476, 1199, 538 } },
{ { 240, 402, 1134, 1491, 1659, 1505, 1517, 1555, 1979, 2099 },
{ 467, 242, 960, 1232, 1714, 1620, 1834, 1570, 1676, 1391 },
{ 500, 455, 463, 1507, 1699, 1282, 1564, 982, 2114, 2114 },
{ 672, 643, 1372, 331, 1589, 1667, 1453, 1938, 996, 876 },
{ 458, 783, 1037, 911, 738, 968, 1165, 1518, 859, 1033 },
{ 504, 815, 504, 1139, 1219, 719, 1506, 1085, 1268, 1268 },
{ 333, 630, 1445, 1239, 1883, 3672, 799, 1548, 1865, 598 },
{ 399, 644, 746, 1342, 1856, 1350, 1493, 613, 1855, 1015 },
{ 622, 749, 1205, 608, 1066, 1408, 1290, 1406, 546, 971 },
{ 500, 753, 1041, 668, 1230, 1617, 1297, 1425, 1383, 523 } },
{ { 394, 553, 523, 1502, 1536, 981, 1608, 1142, 1666, 2181 },
{ 655, 430, 375, 1411, 1861, 1220, 1677, 1135, 1978, 1553 },
{ 690, 640, 245, 1954, 2070, 1194, 1528, 982, 1972, 2232 },
{ 559, 834, 741, 867, 1131, 980, 1225, 852, 1092, 784 },
{ 690, 875, 516, 959, 673, 894, 1056, 1190, 1528, 1126 },
{ 740, 951, 384, 1277, 1177, 492, 1579, 1155, 1846, 1513 },
{ 323, 775, 1062, 1776, 3062, 1274, 813, 1188, 1372, 655 },
{ 488, 971, 484, 1767, 1515, 1775, 1115, 503, 1539, 1461 },
{ 740, 1006, 998, 709, 851, 1230, 1337, 788, 741, 721 },
{ 522, 1073, 573, 1045, 1346, 887, 1046, 1146, 1203, 697 } },
{ { 105, 864, 1442, 1009, 1934, 1840, 1519, 1920, 1673, 1579 },
{ 534, 305, 1193, 683, 1388, 2164, 1802, 1894, 1264, 1170 },
{ 305, 518, 877, 1108, 1426, 3215, 1425, 1064, 1320, 1242 },
{ 683, 732, 1927, 257, 1493, 2048, 1858, 1552, 1055, 947 },
{ 394, 814, 1024, 660, 959, 1556, 1282, 1289, 893, 1047 },
{ 528, 615, 996, 940, 1201, 635, 1094, 2515, 803, 1358 },
{ 347, 614, 1609, 1187, 3133, 1345, 1007, 1339, 1017, 667 },
{ 218, 740, 878, 1605, 3650, 3650, 1345, 758, 1357, 1617 },
{ 672, 750, 1541, 558, 1257, 1599, 1870, 2135, 402, 1087 },
{ 592, 684, 1161, 430, 1092, 1497, 1475, 1489, 1095, 822 } },
{ { 228, 1056, 1059, 1368, 752, 982, 1512, 1518, 987, 1782 },
{ 494, 514, 818, 942, 965, 892, 1610, 1356, 1048, 1363 },
{ 512, 648, 591, 1042, 761, 991, 1196, 1454, 1309, 1463 },
{ 683, 749, 1043, 676, 841, 1396, 1133, 1138, 654, 939 },
{ 622, 1101, 1126, 994, 361, 1077, 1203, 1318, 877, 1219 },
{ 631, 1068, 857, 1650, 651, 477, 1650, 1419, 828, 1170 },
{ 555, 727, 1068, 1335, 3127, 1339, 820, 1331, 1077, 429 },
{ 504, 879, 624, 1398, 889, 889, 1392, 808, 891, 1406 },
{ 683, 1602, 1289, 977, 578, 983, 1280, 1708, 406, 1122 },
{ 399, 865, 1433, 1070, 1072, 764, 968, 1477, 1223, 678 } },
{ { 333, 760, 935, 1638, 1010, 529, 1646, 1410, 1472, 2219 },
{ 512, 494, 750, 1160, 1215, 610, 1870, 1868, 1628, 1169 },
{ 572, 646, 492, 1934, 1208, 603, 1580, 1099, 1398, 1995 },
{ 786, 789, 942, 581, 1018, 951, 1599, 1207, 731, 768 },
{ 690, 1015, 672, 1078, 582, 504, 1693, 1438, 1108, 2897 },
{ 768, 1267, 571, 2005, 1243, 244, 2881, 1380, 1786, 1453 },
{ 452, 899, 1293, 903, 1311, 3100, 465, 1311, 1319, 813 },
{ 394, 927, 942, 1103, 1358, 1104, 946, 593, 1363, 1109 },
{ 559, 1005, 1007, 1016, 658, 1173, 1021, 1164, 623, 1028 },
{ 564, 796, 632, 1005, 1014, 863, 2316, 1268, 938, 764 } },
{ { 266, 606, 1098, 1228, 1497, 1243, 948, 1030, 1734, 1461 },
{ 366, 585, 901, 1060, 1407, 1247, 876, 1134, 1620, 1054 },
{ 452, 565, 542, 1729, 1479, 1479, 1016, 886, 2938, 1150 },
{ 555, 1088, 1533, 950, 1354, 895, 834, 1019, 1021, 496 },
{ 704, 815, 1193, 971, 973, 640, 1217, 2214, 832, 578 },
{ 672, 1245, 579, 871, 875, 774, 872, 1273, 1027, 949 },
{ 296, 1134, 2050, 1784, 1636, 3425, 442, 1550, 2076, 722 },
{ 342, 982, 1259, 1846, 1848, 1848, 622, 568, 1847, 1052 },
{ 555, 1064, 1304, 828, 746, 1343, 1075, 1329, 1078, 494 },
{ 288, 1167, 1285, 1174, 1639, 1639, 833, 2254, 1304, 509 } },
{ { 342, 719, 767, 1866, 1757, 1270, 1246, 550, 1746, 2151 },
{ 483, 653, 694, 1509, 1459, 1410, 1218, 507, 1914, 1266 },
{ 488, 757, 447, 2979, 1813, 1268, 1654, 539, 1849, 2109 },
{ 522, 1097, 1085, 851, 1365, 1111, 851, 901, 961, 605 },
{ 709, 716, 841, 728, 736, 945, 941, 862, 2845, 1057 },
{ 512, 1323, 500, 1336, 1083, 681, 1342, 717, 1604, 1350 },
{ 452, 1155, 1372, 1900, 1501, 3290, 311, 944, 1919, 922 },
{ 403, 1520, 977, 2132, 1733, 3522, 1076, 276, 3335, 1547 },
{ 559, 1374, 1101, 615, 673, 2462, 974, 795, 984, 984 },
{ 547, 1122, 1062, 812, 1410, 951, 1140, 622, 1268, 651 } },
{ { 165, 982, 1235, 938, 1334, 1366, 1659, 1578, 964, 1612 },
{ 592, 422, 925, 847, 1139, 1112, 1387, 2036, 861, 1041 },
{ 403, 837, 732, 770, 941, 1658, 1250, 809, 1407, 1407 },
{ 896, 874, 1071, 381, 1568, 1722, 1437, 2192, 480, 1035 },
{ 640, 1098, 1012, 1032, 684, 1382, 1581, 2106, 416, 865 },
{ 559, 1005, 819, 914, 710, 770, 1418, 920, 838, 1435 },
{ 415, 1258, 1245, 870, 1278, 3067, 770, 1021, 1287, 522 },
{ 406, 990, 601, 1009, 1265, 1265, 1267, 759, 1017, 1277 },
{ 968, 1182, 1329, 788, 1032, 1292, 1705, 1714, 203, 1403 },
{ 732, 877, 1279, 471, 901, 1161, 1545, 1294, 755, 755 } },
{ { 111, 931, 1378, 1185, 1933, 1648, 1148, 1714, 1873, 1307 },
{ 406, 414, 1030, 1023, 1910, 1404, 1313, 1647, 1509, 793 },
{ 342, 640, 575, 1088, 1241, 1349, 1161, 1350, 1756, 1502 },
{ 559, 766, 1185, 357, 1682, 1428, 1329, 1897, 1219, 802 },
{ 473, 909, 1164, 771, 719, 2508, 1427, 1432, 722, 782 },
{ 342, 892, 785, 1145, 1150, 794, 1296, 1550, 973, 1057 },
{ 208, 1036, 1326, 1343, 1606, 3395, 815, 1455, 1618, 712 },
{ 228, 928, 890, 1046, 3499, 1711, 994, 829, 1720, 1318 },
{ 768, 724, 1058, 636, 991, 1075, 1319, 1324, 616, 825 },
{ 305, 1167, 1358, 899, 1587, 1587, 987, 1988, 1332, 501 } }
};
//------------------------------------------------------------------------------
#if defined(__cplusplus) || defined(c_plusplus)
} // extern "C"
#endif

View File

@ -1,8 +1,10 @@
// Copyright 2011 Google Inc. All Rights Reserved.
//
// This code is licensed under the same terms as WebM:
// Software License Agreement: http://www.webmproject.org/license/software/
// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// Cost tables for level and modes.
@ -14,7 +16,7 @@
#include "./vp8enci.h"
#if defined(__cplusplus) || defined(c_plusplus)
#ifdef __cplusplus
extern "C" {
#endif
@ -42,7 +44,7 @@ extern const uint16_t VP8FixedCostsI4[NUM_BMODES][NUM_BMODES][NUM_BMODES];
//------------------------------------------------------------------------------
#if defined(__cplusplus) || defined(c_plusplus)
#ifdef __cplusplus
} // extern "C"
#endif

View File

@ -1,20 +1,67 @@
// Copyright 2011 Google Inc. All Rights Reserved.
//
// This code is licensed under the same terms as WebM:
// Software License Agreement: http://www.webmproject.org/license/software/
// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// Selecting filter level
//
// Author: somnath@google.com (Somnath Banerjee)
#include <assert.h>
#include "./vp8enci.h"
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
// This table gives, for a given sharpness, the filtering strength to be
// used (at least) in order to filter a given edge step delta.
// This is constructed by brute force inspection: for all delta, we iterate
// over all possible filtering strength / thresh until needs_filter() returns
// true.
#define MAX_DELTA_SIZE 64
static const uint8_t kLevelsFromDelta[8][MAX_DELTA_SIZE] = {
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63 },
{ 0, 1, 2, 3, 5, 6, 7, 8, 9, 11, 12, 13, 14, 15, 17, 18,
20, 21, 23, 24, 26, 27, 29, 30, 32, 33, 35, 36, 38, 39, 41, 42,
44, 45, 47, 48, 50, 51, 53, 54, 56, 57, 59, 60, 62, 63, 63, 63,
63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63 },
{ 0, 1, 2, 3, 5, 6, 7, 8, 9, 11, 12, 13, 14, 16, 17, 19,
20, 22, 23, 25, 26, 28, 29, 31, 32, 34, 35, 37, 38, 40, 41, 43,
44, 46, 47, 49, 50, 52, 53, 55, 56, 58, 59, 61, 62, 63, 63, 63,
63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63 },
{ 0, 1, 2, 3, 5, 6, 7, 8, 9, 11, 12, 13, 15, 16, 18, 19,
21, 22, 24, 25, 27, 28, 30, 31, 33, 34, 36, 37, 39, 40, 42, 43,
45, 46, 48, 49, 51, 52, 54, 55, 57, 58, 60, 61, 63, 63, 63, 63,
63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63 },
{ 0, 1, 2, 3, 5, 6, 7, 8, 9, 11, 12, 14, 15, 17, 18, 20,
21, 23, 24, 26, 27, 29, 30, 32, 33, 35, 36, 38, 39, 41, 42, 44,
45, 47, 48, 50, 51, 53, 54, 56, 57, 59, 60, 62, 63, 63, 63, 63,
63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63 },
{ 0, 1, 2, 4, 5, 7, 8, 9, 11, 12, 13, 15, 16, 17, 19, 20,
22, 23, 25, 26, 28, 29, 31, 32, 34, 35, 37, 38, 40, 41, 43, 44,
46, 47, 49, 50, 52, 53, 55, 56, 58, 59, 61, 62, 63, 63, 63, 63,
63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63 },
{ 0, 1, 2, 4, 5, 7, 8, 9, 11, 12, 13, 15, 16, 18, 19, 21,
22, 24, 25, 27, 28, 30, 31, 33, 34, 36, 37, 39, 40, 42, 43, 45,
46, 48, 49, 51, 52, 54, 55, 57, 58, 60, 61, 63, 63, 63, 63, 63,
63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63 },
{ 0, 1, 2, 4, 5, 7, 8, 9, 11, 12, 14, 15, 17, 18, 20, 21,
23, 24, 26, 27, 29, 30, 32, 33, 35, 36, 38, 39, 41, 42, 44, 45,
47, 48, 50, 51, 53, 54, 56, 57, 59, 60, 62, 63, 63, 63, 63, 63,
63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63 }
};
int VP8FilterStrengthFromDelta(int sharpness, int delta) {
const int pos = (delta < MAX_DELTA_SIZE) ? delta : MAX_DELTA_SIZE - 1;
assert(sharpness >= 0 && sharpness <= 7);
return kLevelsFromDelta[sharpness][pos];
}
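For example, at sharpness 7 an edge-step delta of 10 maps to kLevelsFromDelta[7][10] = 14, the smallest strength for which needs_filter() fires; VP8AdjustFilterStrength() later in this change feeds this lookup deltas of the form (dqm->max_edge_ * dqm->y2_.q_[1]) >> 3 when it->lf_stats_ is NULL.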
// -----------------------------------------------------------------------------
// NOTE: clip1, tables and InitTables are duplicates of those in dsp.c
static uint8_t abs0[255 + 255 + 1]; // abs(i)
static uint8_t abs1[255 + 255 + 1]; // abs(i)>>1
@ -338,28 +385,29 @@ static double GetMBSSIM(const uint8_t* yuv1, const uint8_t* yuv2) {
// loop filter strength
void VP8InitFilter(VP8EncIterator* const it) {
int s, i;
if (!it->lf_stats_) return;
InitTables();
for (s = 0; s < NUM_MB_SEGMENTS; s++) {
for (i = 0; i < MAX_LF_LEVELS; i++) {
(*it->lf_stats_)[s][i] = 0;
if (it->lf_stats_ != NULL) {
int s, i;
InitTables();
for (s = 0; s < NUM_MB_SEGMENTS; s++) {
for (i = 0; i < MAX_LF_LEVELS; i++) {
(*it->lf_stats_)[s][i] = 0;
}
}
}
}
void VP8StoreFilterStats(VP8EncIterator* const it) {
int d;
VP8Encoder* const enc = it->enc_;
const int s = it->mb_->segment_;
const int level0 = it->enc_->dqm_[s].fstrength_; // TODO: ref_lf_delta[]
const int level0 = enc->dqm_[s].fstrength_; // TODO: ref_lf_delta[]
// explore +/-quant range of values around level0
const int delta_min = -it->enc_->dqm_[s].quant_;
const int delta_max = it->enc_->dqm_[s].quant_;
const int delta_min = -enc->dqm_[s].quant_;
const int delta_max = enc->dqm_[s].quant_;
const int step_size = (delta_max - delta_min >= 4) ? 4 : 1;
if (!it->lf_stats_) return;
if (it->lf_stats_ == NULL) return;
// NOTE: Currently we are applying the filter only across the sub-block edges.
// There are two reasons for that.
@ -383,27 +431,41 @@ void VP8StoreFilterStats(VP8EncIterator* const it) {
}
void VP8AdjustFilterStrength(VP8EncIterator* const it) {
int s;
VP8Encoder* const enc = it->enc_;
if (!it->lf_stats_) {
return;
}
for (s = 0; s < NUM_MB_SEGMENTS; s++) {
int i, best_level = 0;
// Improvement over filter level 0 should be at least 1e-5 (relatively)
double best_v = 1.00001 * (*it->lf_stats_)[s][0];
for (i = 1; i < MAX_LF_LEVELS; i++) {
const double v = (*it->lf_stats_)[s][i];
if (v > best_v) {
best_v = v;
best_level = i;
if (it->lf_stats_ != NULL) {
int s;
for (s = 0; s < NUM_MB_SEGMENTS; s++) {
int i, best_level = 0;
// Improvement over filter level 0 should be at least 1e-5 (relatively)
double best_v = 1.00001 * (*it->lf_stats_)[s][0];
for (i = 1; i < MAX_LF_LEVELS; i++) {
const double v = (*it->lf_stats_)[s][i];
if (v > best_v) {
best_v = v;
best_level = i;
}
}
enc->dqm_[s].fstrength_ = best_level;
}
} else if (enc->config_->filter_strength > 0) {
int max_level = 0;
int s;
for (s = 0; s < NUM_MB_SEGMENTS; s++) {
VP8SegmentInfo* const dqm = &enc->dqm_[s];
// this '>> 3' accounts for some inverse WHT scaling
const int delta = (dqm->max_edge_ * dqm->y2_.q_[1]) >> 3;
const int level =
VP8FilterStrengthFromDelta(enc->filter_hdr_.sharpness_, delta);
if (level > dqm->fstrength_) {
dqm->fstrength_ = level;
}
if (max_level < dqm->fstrength_) {
max_level = dqm->fstrength_;
}
}
enc->dqm_[s].fstrength_ = best_level;
enc->filter_hdr_.level_ = max_level;
}
}
#if defined(__cplusplus) || defined(c_plusplus)
} // extern "C"
#endif
// -----------------------------------------------------------------------------

View File

@ -1,8 +1,10 @@
// Copyright 2011 Google Inc. All Rights Reserved.
//
// This code is licensed under the same terms as WebM:
// Software License Agreement: http://www.webmproject.org/license/software/
// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// frame coding and analysis
@ -16,10 +18,7 @@
#include "./vp8enci.h"
#include "./cost.h"
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
#include "../webp/format_constants.h" // RIFF constants
#define SEGMENT_VISU 0
#define DEBUG_SEARCH 0 // useful to track search convergence
@ -37,6 +36,63 @@ typedef struct {
CostArray* cost;
} VP8Residual;
//------------------------------------------------------------------------------
// multi-pass convergence
#define HEADER_SIZE_ESTIMATE (RIFF_HEADER_SIZE + CHUNK_HEADER_SIZE + \
VP8_FRAME_HEADER_SIZE)
#define DQ_LIMIT 0.4 // convergence is considered reached if dq < DQ_LIMIT
// we allow 2k of extra head-room in PARTITION0 limit.
#define PARTITION0_SIZE_LIMIT ((VP8_MAX_PARTITION0_SIZE - 2048ULL) << 11)
typedef struct { // struct for organizing convergence in either size or PSNR
int is_first;
float dq;
float q, last_q;
double value, last_value; // PSNR or size
double target;
int do_size_search;
} PassStats;
static int InitPassStats(const VP8Encoder* const enc, PassStats* const s) {
const uint64_t target_size = (uint64_t)enc->config_->target_size;
const int do_size_search = (target_size != 0);
const float target_PSNR = enc->config_->target_PSNR;
s->is_first = 1;
s->dq = 10.f;
s->q = s->last_q = enc->config_->quality;
s->target = do_size_search ? (double)target_size
: (target_PSNR > 0.) ? target_PSNR
: 40.; // default, just in case
s->value = s->last_value = 0.;
s->do_size_search = do_size_search;
return do_size_search;
}
static float Clamp(float v, float min, float max) {
return (v < min) ? min : (v > max) ? max : v;
}
static float ComputeNextQ(PassStats* const s) {
float dq;
if (s->is_first) {
dq = (s->value > s->target) ? -s->dq : s->dq;
s->is_first = 0;
} else if (s->value != s->last_value) {
const double slope = (s->target - s->value) / (s->last_value - s->value);
dq = (float)(slope * (s->last_q - s->q));
} else {
dq = 0.; // we're done?!
}
// Limit variable to avoid large swings.
s->dq = Clamp(dq, -30.f, 30.f);
s->last_q = s->q;
s->last_value = s->value;
s->q = Clamp(s->q + s->dq, 0.f, 100.f);
return s->q;
}
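The q update above is a secant-style step toward the target. A standalone toy (not libwebp code; it assumes a made-up linear size model size(q) = 20000 + 800 * q) shows how the loop converges on a hypothetical 60000-byte target in a couple of passes:

#include <math.h>
#include <stdio.h>

static float ClampQ(float v, float min, float max) {
  return (v < min) ? min : (v > max) ? max : v;
}

int main(void) {
  const double target = 60000.;          // hypothetical target size in bytes
  float q = 75.f, last_q = 75.f, dq = 10.f;
  double value = 0., last_value = 0.;
  int is_first = 1, pass;
  for (pass = 0; pass < 10; ++pass) {
    value = 20000. + 800. * q;           // made-up monotonic size model size(q)
    if (is_first) {
      dq = (value > target) ? -dq : dq;  // first step: just pick a direction
      is_first = 0;
    } else if (value != last_value) {
      const double slope = (target - value) / (last_value - value);
      dq = (float)(slope * (last_q - q)); // secant step, as in ComputeNextQ()
    } else {
      dq = 0.f;
    }
    dq = ClampQ(dq, -30.f, 30.f);
    last_q = q;
    last_value = value;
    q = ClampQ(q + dq, 0.f, 100.f);
    printf("pass %d: size=%.0f next q=%.2f dq=%.2f\n", pass, value, q, dq);
    if (fabs(dq) <= 0.4) break;          // DQ_LIMIT-style stop
  }
  return 0;
}

With a linear model the secant step lands on the target after a single refinement; real encodes need a few more passes, which is what DQ_LIMIT and the pass count bound.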
//------------------------------------------------------------------------------
// Tables for level coding
@ -290,31 +346,20 @@ static int GetResidualCost(int ctx0, const VP8Residual* const res) {
if (res->last < 0) {
return VP8BitCost(0, p0);
}
cost = 0;
while (n < res->last) {
int v = res->coeffs[n];
cost = VP8BitCost(1, p0);
for (; n < res->last; ++n) {
const int v = abs(res->coeffs[n]);
const int b = VP8EncBands[n + 1];
++n;
if (v == 0) {
// short-case for VP8LevelCost(t, 0) (note: VP8LevelFixedCosts[0] == 0):
cost += t[0];
t = res->cost[b][0];
continue;
}
v = abs(v);
cost += VP8BitCost(1, p0);
const int ctx = (v >= 2) ? 2 : v;
cost += VP8LevelCost(t, v);
{
const int ctx = (v == 1) ? 1 : 2;
p0 = res->prob[b][ctx][0];
t = res->cost[b][ctx];
}
t = res->cost[b][ctx];
// the masking trick is faster than "if (v) cost += ..." with clang
cost += (v ? ~0U : 0) & VP8BitCost(1, res->prob[b][ctx][0]);
}
// Last coefficient is always non-zero
{
const int v = abs(res->coeffs[n]);
assert(v != 0);
cost += VP8BitCost(1, p0);
cost += VP8LevelCost(t, v);
if (n < 15) {
const int b = VP8EncBands[n + 1];
@ -683,81 +728,83 @@ static void StoreSideInfo(const VP8EncIterator* const it) {
#endif
}
static double GetPSNR(uint64_t mse, uint64_t size) {
return (mse > 0 && size > 0) ? 10. * log10(255. * 255. * size / mse) : 99;
}
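As a sanity check of GetPSNR(): with 1200 macroblocks, size = 1200 * 384 = 460800 samples (Y+U+V), and an accumulated squared error of 2996352, the result is 10 * log10(255 * 255 * 460800 / 2996352) = 40.0 dB; a zero mse or size falls back to the 99 dB cap.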
//------------------------------------------------------------------------------
// StatLoop(): only collect statistics (number of skips, token usage, ...).
// This is used for deciding optimal probabilities. It also modifies the
// quantizer value if some target (size, PNSR) was specified.
#define kHeaderSizeEstimate (15 + 20 + 10) // TODO: fix better
// quantizer value if some target (size, PSNR) was specified.
static void SetLoopParams(VP8Encoder* const enc, float q) {
// Make sure the quality parameter is inside valid bounds
if (q < 0.) {
q = 0;
} else if (q > 100.) {
q = 100;
}
q = Clamp(q, 0.f, 100.f);
VP8SetSegmentParams(enc, q); // setup segment quantizations and filters
SetSegmentProbas(enc); // compute segment probabilities
ResetStats(enc);
ResetTokenStats(enc);
ResetSSE(enc);
}
static int OneStatPass(VP8Encoder* const enc, float q, VP8RDLevel rd_opt,
int nb_mbs, float* const PSNR, int percent_delta) {
static uint64_t OneStatPass(VP8Encoder* const enc, VP8RDLevel rd_opt,
int nb_mbs, int percent_delta,
PassStats* const s) {
VP8EncIterator it;
uint64_t size = 0;
uint64_t size_p0 = 0;
uint64_t distortion = 0;
const uint64_t pixel_count = nb_mbs * 384;
SetLoopParams(enc, q);
VP8IteratorInit(enc, &it);
SetLoopParams(enc, s->q);
do {
VP8ModeScore info;
VP8IteratorImport(&it);
VP8IteratorImport(&it, NULL);
if (VP8Decimate(&it, &info, rd_opt)) {
// Just record the number of skips and act like skip_proba is not used.
enc->proba_.nb_skip_++;
}
RecordResiduals(&it, &info);
size += info.R;
size += info.R + info.H;
size_p0 += info.H;
distortion += info.D;
if (percent_delta && !VP8IteratorProgress(&it, percent_delta))
return 0;
} while (VP8IteratorNext(&it, it.yuv_out_) && --nb_mbs > 0);
size += FinalizeSkipProba(enc);
size += FinalizeTokenProbas(&enc->proba_);
size += enc->segment_hdr_.size_;
size = ((size + 1024) >> 11) + kHeaderSizeEstimate;
VP8IteratorSaveBoundary(&it);
} while (VP8IteratorNext(&it) && --nb_mbs > 0);
if (PSNR) {
*PSNR = (float)(10.* log10(255. * 255. * pixel_count / distortion));
size_p0 += enc->segment_hdr_.size_;
if (s->do_size_search) {
size += FinalizeSkipProba(enc);
size += FinalizeTokenProbas(&enc->proba_);
size = ((size + size_p0 + 1024) >> 11) + HEADER_SIZE_ESTIMATE;
s->value = (double)size;
} else {
s->value = GetPSNR(distortion, pixel_count);
}
return (int)size;
return size_p0;
}
// successive refinement increments.
static const int dqs[] = { 20, 15, 10, 8, 6, 4, 2, 1, 0 };
static int StatLoop(VP8Encoder* const enc) {
const int method = enc->method_;
const int do_search = enc->do_search_;
const int fast_probe = ((method == 0 || method == 3) && !do_search);
float q = enc->config_->quality;
const int max_passes = enc->config_->pass;
int num_pass_left = enc->config_->pass;
const int task_percent = 20;
const int percent_per_pass = (task_percent + max_passes / 2) / max_passes;
const int percent_per_pass =
(task_percent + num_pass_left / 2) / num_pass_left;
const int final_percent = enc->percent_ + task_percent;
int pass;
int nb_mbs;
const VP8RDLevel rd_opt =
(method >= 3 || do_search) ? RD_OPT_BASIC : RD_OPT_NONE;
int nb_mbs = enc->mb_w_ * enc->mb_h_;
PassStats stats;
InitPassStats(enc, &stats);
ResetTokenStats(enc);
// Fast mode: quick analysis pass over a few mbs. Better than nothing.
nb_mbs = enc->mb_w_ * enc->mb_h_;
if (fast_probe) {
if (method == 3) { // we need more stats for method 3 to be reliable.
nb_mbs = (nb_mbs > 200) ? nb_mbs >> 1 : 100;
@ -766,37 +813,35 @@ static int StatLoop(VP8Encoder* const enc) {
}
}
// No target size: just do several passes without changing 'q'
if (!do_search) {
for (pass = 0; pass < max_passes; ++pass) {
const VP8RDLevel rd_opt = (method >= 3) ? RD_OPT_BASIC : RD_OPT_NONE;
if (!OneStatPass(enc, q, rd_opt, nb_mbs, NULL, percent_per_pass)) {
return 0;
}
}
} else {
// binary search for a size close to target
for (pass = 0; pass < max_passes && (dqs[pass] > 0); ++pass) {
float PSNR;
int criterion;
const int size = OneStatPass(enc, q, RD_OPT_BASIC, nb_mbs, &PSNR,
percent_per_pass);
#if DEBUG_SEARCH
printf("#%d size=%d PSNR=%.2f q=%.2f\n", pass, size, PSNR, q);
while (num_pass_left-- > 0) {
const int is_last_pass = (fabs(stats.dq) <= DQ_LIMIT) ||
(num_pass_left == 0) ||
(enc->max_i4_header_bits_ == 0);
const uint64_t size_p0 =
OneStatPass(enc, rd_opt, nb_mbs, percent_per_pass, &stats);
if (size_p0 == 0) return 0;
#if (DEBUG_SEARCH > 0)
printf("#%d value:%.1lf -> %.1lf q:%.2f -> %.2f\n",
num_pass_left, stats.last_value, stats.value, stats.last_q, stats.q);
#endif
if (size == 0) return 0;
if (enc->config_->target_PSNR > 0) {
criterion = (PSNR < enc->config_->target_PSNR);
} else {
criterion = (size < enc->config_->target_size);
}
// dichotomize
if (criterion) {
q += dqs[pass];
} else {
q -= dqs[pass];
}
if (enc->max_i4_header_bits_ > 0 && size_p0 > PARTITION0_SIZE_LIMIT) {
++num_pass_left;
enc->max_i4_header_bits_ >>= 1; // strengthen header bit limitation...
continue; // ...and start over
}
if (is_last_pass) {
break;
}
// If no target size: just do several passes without changing 'q'
if (do_search) {
ComputeNextQ(&stats);
if (fabs(stats.dq) <= DQ_LIMIT) break;
}
}
if (!do_search || !stats.do_size_search) {
// Need to finalize probas now, since it wasn't done during the search.
FinalizeSkipProba(enc);
FinalizeTokenProbas(&enc->proba_);
}
VP8CalculateLevelCosts(&enc->proba_); // finalize costs
return WebPReportProgress(enc->pic_, final_percent, &enc->percent_);
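A note on units in OneStatPass()/StatLoop() above: the rate terms accumulated there (info.R, info.H, the Finalize*Proba() results) appear to be kept in 1/256-bit units, which is why '>> 11' (8 bits/byte x 256 = 2048 units per byte) turns an accumulated cost into bytes and why PARTITION0_SIZE_LIMIT shifts a byte budget left by 11. A minimal sketch under that assumption (hypothetical helpers, not libwebp API):

#include <stdint.h>

// Convert an accumulated bit-cost (assumed 1/256-bit units) to bytes,
// rounding to the nearest byte, and a byte budget back to the same units.
static uint64_t CostToBytes(uint64_t cost_units) {
  return (cost_units + 1024) >> 11;
}
static uint64_t BytesToCost(uint64_t bytes) {
  return bytes << 11;
}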
@ -833,7 +878,7 @@ static int PostLoopFinalize(VP8EncIterator* const it, int ok) {
}
if (ok) { // All good. Finish up.
if (enc->pic_->stats) { // finalize byte counters...
if (enc->pic_->stats != NULL) { // finalize byte counters...
int i, s;
for (i = 0; i <= 2; ++i) {
for (s = 0; s < NUM_MB_SEGMENTS; ++s) {
@ -875,7 +920,7 @@ int VP8EncLoop(VP8Encoder* const enc) {
const int dont_use_skip = !enc->proba_.use_skip_proba_;
const VP8RDLevel rd_opt = enc->rd_opt_level_;
VP8IteratorImport(&it);
VP8IteratorImport(&it, NULL);
// Warning! order is important: first call VP8Decimate() and
// *then* decide how to code the skip decision if there's one.
if (!VP8Decimate(&it, &info, rd_opt) || dont_use_skip) {
@ -892,7 +937,8 @@ int VP8EncLoop(VP8Encoder* const enc) {
VP8StoreFilterStats(&it);
VP8IteratorExport(&it);
ok = VP8IteratorProgress(&it, 20);
} while (ok && VP8IteratorNext(&it, it.yuv_out_));
VP8IteratorSaveBoundary(&it);
} while (ok && VP8IteratorNext(&it));
return PostLoopFinalize(&it, ok);
}
@ -902,62 +948,110 @@ int VP8EncLoop(VP8Encoder* const enc) {
#if !defined(DISABLE_TOKEN_BUFFER)
#define MIN_COUNT 96 // minimum number of macroblocks before updating stats
int VP8EncTokenLoop(VP8Encoder* const enc) {
int ok;
// Roughly refresh the proba height times per pass
// Roughly refresh the proba eight times per pass
int max_count = (enc->mb_w_ * enc->mb_h_) >> 3;
int cnt;
int num_pass_left = enc->config_->pass;
const int do_search = enc->do_search_;
VP8EncIterator it;
VP8Proba* const proba = &enc->proba_;
const VP8RDLevel rd_opt = enc->rd_opt_level_;
const uint64_t pixel_count = enc->mb_w_ * enc->mb_h_ * 384;
PassStats stats;
int ok;
InitPassStats(enc, &stats);
ok = PreLoopInitialize(enc);
if (!ok) return 0;
if (max_count < MIN_COUNT) max_count = MIN_COUNT;
cnt = max_count;
assert(enc->num_parts_ == 1);
assert(enc->use_tokens_);
assert(proba->use_skip_proba_ == 0);
assert(rd_opt >= RD_OPT_BASIC); // otherwise, token-buffer won't be useful
assert(!enc->do_search_); // TODO(skal): handle pass and dichotomy
assert(num_pass_left > 0);
SetLoopParams(enc, enc->config_->quality);
ok = PreLoopInitialize(enc);
if (!ok) return 0;
VP8IteratorInit(enc, &it);
VP8InitFilter(&it);
do {
VP8ModeScore info;
VP8IteratorImport(&it);
if (--cnt < 0) {
FinalizeTokenProbas(proba);
VP8CalculateLevelCosts(proba); // refresh cost tables for rd-opt
cnt = max_count;
while (ok && num_pass_left-- > 0) {
const int is_last_pass = (fabs(stats.dq) <= DQ_LIMIT) ||
(num_pass_left == 0) ||
(enc->max_i4_header_bits_ == 0);
uint64_t size_p0 = 0;
uint64_t distortion = 0;
int cnt = max_count;
VP8IteratorInit(enc, &it);
SetLoopParams(enc, stats.q);
if (is_last_pass) {
ResetTokenStats(enc);
VP8InitFilter(&it); // don't collect stats until last pass (too costly)
}
VP8Decimate(&it, &info, rd_opt);
RecordTokens(&it, &info, &enc->tokens_);
VP8TBufferClear(&enc->tokens_);
do {
VP8ModeScore info;
VP8IteratorImport(&it, NULL);
if (--cnt < 0) {
FinalizeTokenProbas(proba);
VP8CalculateLevelCosts(proba); // refresh cost tables for rd-opt
cnt = max_count;
}
VP8Decimate(&it, &info, rd_opt);
RecordTokens(&it, &info, &enc->tokens_);
size_p0 += info.H;
distortion += info.D;
#ifdef WEBP_EXPERIMENTAL_FEATURES
if (enc->use_layer_) {
VP8EncCodeLayerBlock(&it);
}
#endif
StoreSideInfo(&it);
VP8StoreFilterStats(&it);
VP8IteratorExport(&it);
ok = VP8IteratorProgress(&it, 20);
} while (ok && VP8IteratorNext(&it, it.yuv_out_));
if (is_last_pass) {
StoreSideInfo(&it);
VP8StoreFilterStats(&it);
VP8IteratorExport(&it);
ok = VP8IteratorProgress(&it, 20);
}
VP8IteratorSaveBoundary(&it);
} while (ok && VP8IteratorNext(&it));
if (!ok) break;
ok = ok && WebPReportProgress(enc->pic_, enc->percent_ + 20, &enc->percent_);
size_p0 += enc->segment_hdr_.size_;
if (stats.do_size_search) {
uint64_t size = FinalizeTokenProbas(&enc->proba_);
size += VP8EstimateTokenSize(&enc->tokens_,
(const uint8_t*)proba->coeffs_);
size = (size + size_p0 + 1024) >> 11; // -> size in bytes
size += HEADER_SIZE_ESTIMATE;
stats.value = (double)size;
} else { // compute and store PSNR
stats.value = GetPSNR(distortion, pixel_count);
}
#if (DEBUG_SEARCH > 0)
printf("#%2d metric:%.1lf -> %.1lf last_q=%.2lf q=%.2lf dq=%.2lf\n",
num_pass_left, stats.last_value, stats.value,
stats.last_q, stats.q, stats.dq);
#endif
if (size_p0 > PARTITION0_SIZE_LIMIT) {
++num_pass_left;
enc->max_i4_header_bits_ >>= 1; // strengthen header bit limitation...
continue; // ...and start over
}
if (is_last_pass) {
break; // done
}
if (do_search) {
ComputeNextQ(&stats); // Adjust q
}
}
if (ok) {
FinalizeTokenProbas(proba);
if (!stats.do_size_search) {
FinalizeTokenProbas(&enc->proba_);
}
ok = VP8EmitTokens(&enc->tokens_, enc->parts_ + 0,
(const uint8_t*)proba->coeffs_, 1);
}
ok = ok && WebPReportProgress(enc->pic_, enc->percent_ + 20, &enc->percent_);
return PostLoopFinalize(&it, ok);
}
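For scale on the refresh cadence above: a 640x480 picture has 40 x 30 = 1200 macroblocks, so max_count = 1200 >> 3 = 150 and the token probabilities are re-derived roughly every 150 macroblocks; the MIN_COUNT = 96 floor keeps very small pictures from refreshing more often than that.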
@ -972,6 +1066,3 @@ int VP8EncTokenLoop(VP8Encoder* const enc) {
//------------------------------------------------------------------------------
#if defined(__cplusplus) || defined(c_plusplus)
} // extern "C"
#endif

View File

@ -1,8 +1,10 @@
// Copyright 2012 Google Inc. All Rights Reserved.
//
// This code is licensed under the same terms as WebM:
// Software License Agreement: http://www.webmproject.org/license/software/
// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// Author: Jyrki Alakuijala (jyrki@google.com)
@ -88,12 +90,10 @@ void VP8LHistogramAddSinglePixOrCopy(VP8LHistogram* const histo,
int literal_ix = 256 + NUM_LENGTH_CODES + PixOrCopyCacheIdx(v);
++histo->literal_[literal_ix];
} else {
int code, extra_bits_count, extra_bits_value;
PrefixEncode(PixOrCopyLength(v),
&code, &extra_bits_count, &extra_bits_value);
int code, extra_bits;
VP8LPrefixEncodeBits(PixOrCopyLength(v), &code, &extra_bits);
++histo->literal_[256 + code];
PrefixEncode(PixOrCopyDistance(v),
&code, &extra_bits_count, &extra_bits_value);
VP8LPrefixEncodeBits(PixOrCopyDistance(v), &code, &extra_bits);
++histo->distance_[code];
}
}

View File

@ -1,8 +1,10 @@
// Copyright 2012 Google Inc. All Rights Reserved.
//
// This code is licensed under the same terms as WebM:
// Software License Agreement: http://www.webmproject.org/license/software/
// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// Author: Jyrki Alakuijala (jyrki@google.com)
@ -22,7 +24,7 @@
#include "../webp/format_constants.h"
#include "../webp/types.h"
#if defined(__cplusplus) || defined(c_plusplus)
#ifdef __cplusplus
extern "C" {
#endif
@ -92,7 +94,7 @@ int VP8LGetHistoImageSymbols(int xsize, int ysize,
VP8LHistogramSet* const image_in,
uint16_t* const histogram_symbols);
#if defined(__cplusplus) || defined(c_plusplus)
#ifdef __cplusplus
}
#endif

View File

@ -1,8 +1,10 @@
// Copyright 2011 Google Inc. All Rights Reserved.
//
// This code is licensed under the same terms as WebM:
// Software License Agreement: http://www.webmproject.org/license/software/
// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// VP8Iterator: block iterator
@ -13,21 +15,16 @@
#include "./vp8enci.h"
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
//------------------------------------------------------------------------------
// VP8Iterator
//------------------------------------------------------------------------------
static void InitLeft(VP8EncIterator* const it) {
const VP8Encoder* const enc = it->enc_;
enc->y_left_[-1] = enc->u_left_[-1] = enc->v_left_[-1] =
it->y_left_[-1] = it->u_left_[-1] = it->v_left_[-1] =
(it->y_ > 0) ? 129 : 127;
memset(enc->y_left_, 129, 16);
memset(enc->u_left_, 129, 8);
memset(enc->v_left_, 129, 8);
memset(it->y_left_, 129, 16);
memset(it->u_left_, 129, 8);
memset(it->v_left_, 129, 8);
it->left_nz_[8] = 0;
}
@ -38,43 +35,60 @@ static void InitTop(VP8EncIterator* const it) {
memset(enc->nz_, 0, enc->mb_w_ * sizeof(*enc->nz_));
}
void VP8IteratorReset(VP8EncIterator* const it) {
void VP8IteratorSetRow(VP8EncIterator* const it, int y) {
VP8Encoder* const enc = it->enc_;
it->x_ = 0;
it->y_ = 0;
it->y_offset_ = 0;
it->uv_offset_ = 0;
it->mb_ = enc->mb_info_;
it->preds_ = enc->preds_;
it->y_ = y;
it->bw_ = &enc->parts_[y & (enc->num_parts_ - 1)];
it->preds_ = enc->preds_ + y * 4 * enc->preds_w_;
it->nz_ = enc->nz_;
it->bw_ = &enc->parts_[0];
it->done_ = enc->mb_w_* enc->mb_h_;
it->mb_ = enc->mb_info_ + y * enc->mb_w_;
it->y_top_ = enc->y_top_;
it->uv_top_ = enc->uv_top_;
InitLeft(it);
}
void VP8IteratorReset(VP8EncIterator* const it) {
VP8Encoder* const enc = it->enc_;
VP8IteratorSetRow(it, 0);
VP8IteratorSetCountDown(it, enc->mb_w_ * enc->mb_h_); // default
InitTop(it);
InitLeft(it);
memset(it->bit_count_, 0, sizeof(it->bit_count_));
it->do_trellis_ = 0;
}
void VP8IteratorSetCountDown(VP8EncIterator* const it, int count_down) {
it->count_down_ = it->count_down0_ = count_down;
}
int VP8IteratorIsDone(const VP8EncIterator* const it) {
return (it->count_down_ <= 0);
}
void VP8IteratorInit(VP8Encoder* const enc, VP8EncIterator* const it) {
it->enc_ = enc;
it->y_stride_ = enc->pic_->y_stride;
it->uv_stride_ = enc->pic_->uv_stride;
// TODO(later): for multithreading, these should be owned by 'it'.
it->yuv_in_ = enc->yuv_in_;
it->yuv_out_ = enc->yuv_out_;
it->yuv_out2_ = enc->yuv_out2_;
it->yuv_p_ = enc->yuv_p_;
it->yuv_in_ = (uint8_t*)DO_ALIGN(it->yuv_mem_);
it->yuv_out_ = it->yuv_in_ + YUV_SIZE;
it->yuv_out2_ = it->yuv_out_ + YUV_SIZE;
it->yuv_p_ = it->yuv_out2_ + YUV_SIZE;
it->lf_stats_ = enc->lf_stats_;
it->percent0_ = enc->percent_;
it->y_left_ = (uint8_t*)DO_ALIGN(it->yuv_left_mem_ + 1);
it->u_left_ = it->y_left_ + 16 + 16;
it->v_left_ = it->u_left_ + 16;
VP8IteratorReset(it);
}
int VP8IteratorProgress(const VP8EncIterator* const it, int delta) {
VP8Encoder* const enc = it->enc_;
if (delta && enc->pic_->progress_hook) {
const int percent = (enc->mb_h_ <= 1)
if (delta && enc->pic_->progress_hook != NULL) {
const int done = it->count_down0_ - it->count_down_;
const int percent = (it->count_down0_ <= 0)
? it->percent0_
: it->percent0_ + delta * it->y_ / (enc->mb_h_ - 1);
: it->percent0_ + delta * done / it->count_down0_;
return WebPReportProgress(enc->pic_, percent, &enc->percent_);
}
return 1;
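Worked example for the progress formula above: if the loop was started with VP8IteratorSetCountDown(it, 1200) and 300 macroblocks remain, done = 900 and the reported value is percent0_ + delta * 900 / 1200, i.e. three quarters of this loop's 'delta' share of the progress bar.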
@ -84,6 +98,8 @@ int VP8IteratorProgress(const VP8EncIterator* const it, int delta) {
// Import the source samples into the cache. Takes care of replicating
// boundary pixels if necessary.
static WEBP_INLINE int MinSize(int a, int b) { return (a < b) ? a : b; }
static void ImportBlock(const uint8_t* src, int src_stride,
uint8_t* dst, int w, int h, int size) {
int i;
@ -101,30 +117,55 @@ static void ImportBlock(const uint8_t* src, int src_stride,
}
}
void VP8IteratorImport(const VP8EncIterator* const it) {
static void ImportLine(const uint8_t* src, int src_stride,
uint8_t* dst, int len, int total_len) {
int i;
for (i = 0; i < len; ++i, src += src_stride) dst[i] = *src;
for (; i < total_len; ++i) dst[i] = dst[len - 1];
}
void VP8IteratorImport(VP8EncIterator* const it, uint8_t* tmp_32) {
const VP8Encoder* const enc = it->enc_;
const int x = it->x_, y = it->y_;
const WebPPicture* const pic = enc->pic_;
const uint8_t* const ysrc = pic->y + (y * pic->y_stride + x) * 16;
const uint8_t* const usrc = pic->u + (y * pic->uv_stride + x) * 8;
const uint8_t* const vsrc = pic->v + (y * pic->uv_stride + x) * 8;
uint8_t* const ydst = it->yuv_in_ + Y_OFF;
uint8_t* const udst = it->yuv_in_ + U_OFF;
uint8_t* const vdst = it->yuv_in_ + V_OFF;
int w = (pic->width - x * 16);
int h = (pic->height - y * 16);
const int w = MinSize(pic->width - x * 16, 16);
const int h = MinSize(pic->height - y * 16, 16);
const int uv_w = (w + 1) >> 1;
const int uv_h = (h + 1) >> 1;
if (w > 16) w = 16;
if (h > 16) h = 16;
ImportBlock(ysrc, pic->y_stride, it->yuv_in_ + Y_OFF, w, h, 16);
ImportBlock(usrc, pic->uv_stride, it->yuv_in_ + U_OFF, uv_w, uv_h, 8);
ImportBlock(vsrc, pic->uv_stride, it->yuv_in_ + V_OFF, uv_w, uv_h, 8);
// Luma plane
ImportBlock(ysrc, pic->y_stride, ydst, w, h, 16);
if (tmp_32 == NULL) return;
{ // U/V planes
const int uv_w = (w + 1) >> 1;
const int uv_h = (h + 1) >> 1;
ImportBlock(usrc, pic->uv_stride, udst, uv_w, uv_h, 8);
ImportBlock(vsrc, pic->uv_stride, vdst, uv_w, uv_h, 8);
// Import source (uncompressed) samples into boundary.
if (x == 0) {
InitLeft(it);
} else {
if (y == 0) {
it->y_left_[-1] = it->u_left_[-1] = it->v_left_[-1] = 127;
} else {
it->y_left_[-1] = ysrc[- 1 - pic->y_stride];
it->u_left_[-1] = usrc[- 1 - pic->uv_stride];
it->v_left_[-1] = vsrc[- 1 - pic->uv_stride];
}
ImportLine(ysrc - 1, pic->y_stride, it->y_left_, h, 16);
ImportLine(usrc - 1, pic->uv_stride, it->u_left_, uv_h, 8);
ImportLine(vsrc - 1, pic->uv_stride, it->v_left_, uv_h, 8);
}
it->y_top_ = tmp_32 + 0;
it->uv_top_ = tmp_32 + 16;
if (y == 0) {
memset(tmp_32, 127, 32 * sizeof(*tmp_32));
} else {
ImportLine(ysrc - pic->y_stride, 1, tmp_32, w, 16);
ImportLine(usrc - pic->uv_stride, 1, tmp_32 + 16, uv_w, 8);
ImportLine(vsrc - pic->uv_stride, 1, tmp_32 + 16 + 8, uv_w, 8);
}
}
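Boundary replication example for the code above: on a picture 21 pixels tall, the bottom macroblock row has h = MinSize(21 - 16, 16) = 5, so only 5 left-boundary luma samples exist; ImportLine() copies those 5 and replicates the last one into the remaining 11 entries of the 16-sample y_left_ buffer (ImportBlock() does the same padding for the partial 16x16 source block).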
@ -240,48 +281,44 @@ void VP8IteratorBytesToNz(VP8EncIterator* const it) {
#undef BIT
//------------------------------------------------------------------------------
// Advance to the next position, doing the bookeeping.
// Advance to the next position, doing the bookkeeping.
int VP8IteratorNext(VP8EncIterator* const it,
const uint8_t* const block_to_save) {
void VP8IteratorSaveBoundary(VP8EncIterator* const it) {
VP8Encoder* const enc = it->enc_;
if (block_to_save) {
const int x = it->x_, y = it->y_;
const uint8_t* const ysrc = block_to_save + Y_OFF;
const uint8_t* const usrc = block_to_save + U_OFF;
if (x < enc->mb_w_ - 1) { // left
int i;
for (i = 0; i < 16; ++i) {
enc->y_left_[i] = ysrc[15 + i * BPS];
}
for (i = 0; i < 8; ++i) {
enc->u_left_[i] = usrc[7 + i * BPS];
enc->v_left_[i] = usrc[15 + i * BPS];
}
// top-left (before 'top'!)
enc->y_left_[-1] = enc->y_top_[x * 16 + 15];
enc->u_left_[-1] = enc->uv_top_[x * 16 + 0 + 7];
enc->v_left_[-1] = enc->uv_top_[x * 16 + 8 + 7];
const int x = it->x_, y = it->y_;
const uint8_t* const ysrc = it->yuv_out_ + Y_OFF;
const uint8_t* const uvsrc = it->yuv_out_ + U_OFF;
if (x < enc->mb_w_ - 1) { // left
int i;
for (i = 0; i < 16; ++i) {
it->y_left_[i] = ysrc[15 + i * BPS];
}
if (y < enc->mb_h_ - 1) { // top
memcpy(enc->y_top_ + x * 16, ysrc + 15 * BPS, 16);
memcpy(enc->uv_top_ + x * 16, usrc + 7 * BPS, 8 + 8);
for (i = 0; i < 8; ++i) {
it->u_left_[i] = uvsrc[7 + i * BPS];
it->v_left_[i] = uvsrc[15 + i * BPS];
}
// top-left (before 'top'!)
it->y_left_[-1] = it->y_top_[15];
it->u_left_[-1] = it->uv_top_[0 + 7];
it->v_left_[-1] = it->uv_top_[8 + 7];
}
if (y < enc->mb_h_ - 1) { // top
memcpy(it->y_top_, ysrc + 15 * BPS, 16);
memcpy(it->uv_top_, uvsrc + 7 * BPS, 8 + 8);
}
}
it->mb_++;
int VP8IteratorNext(VP8EncIterator* const it) {
it->preds_ += 4;
it->nz_++;
it->x_++;
if (it->x_ == enc->mb_w_) {
it->x_ = 0;
it->y_++;
it->bw_ = &enc->parts_[it->y_ & (enc->num_parts_ - 1)];
it->preds_ = enc->preds_ + it->y_ * 4 * enc->preds_w_;
it->nz_ = enc->nz_;
InitLeft(it);
it->mb_ += 1;
it->nz_ += 1;
it->y_top_ += 16;
it->uv_top_ += 16;
it->x_ += 1;
if (it->x_ == it->enc_->mb_w_) {
VP8IteratorSetRow(it, ++it->y_);
}
return (0 < --it->done_);
return (0 < --it->count_down_);
}
//------------------------------------------------------------------------------
@ -368,15 +405,15 @@ void VP8IteratorStartI4(VP8EncIterator* const it) {
// Import the boundary samples
for (i = 0; i < 17; ++i) { // left
it->i4_boundary_[i] = enc->y_left_[15 - i];
it->i4_boundary_[i] = it->y_left_[15 - i];
}
for (i = 0; i < 16; ++i) { // top
it->i4_boundary_[17 + i] = enc->y_top_[it->x_ * 16 + i];
it->i4_boundary_[17 + i] = it->y_top_[i];
}
// top-right samples have a special case on the far right of the picture
if (it->x_ < enc->mb_w_ - 1) {
for (i = 16; i < 16 + 4; ++i) {
it->i4_boundary_[17 + i] = enc->y_top_[it->x_ * 16 + i];
it->i4_boundary_[17 + i] = it->y_top_[i];
}
} else { // else, replicate the last valid pixel four times
for (i = 16; i < 16 + 4; ++i) {
@ -417,6 +454,3 @@ int VP8IteratorRotateI4(VP8EncIterator* const it,
//------------------------------------------------------------------------------
#if defined(__cplusplus) || defined(c_plusplus)
} // extern "C"
#endif

View File

@ -1,8 +1,10 @@
// Copyright 2011 Google Inc. All Rights Reserved.
//
// This code is licensed under the same terms as WebM:
// Software License Agreement: http://www.webmproject.org/license/software/
// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// Enhancement layer (for YUV444/422)
@ -13,10 +15,6 @@
#include "./vp8enci.h"
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
//------------------------------------------------------------------------------
void VP8EncInitLayer(VP8Encoder* const enc) {
@ -44,6 +42,3 @@ void VP8EncDeleteLayer(VP8Encoder* enc) {
free(enc->layer_data_);
}
#if defined(__cplusplus) || defined(c_plusplus)
} // extern "C"
#endif

View File

@ -1,8 +1,10 @@
// Copyright 2011 Google Inc. All Rights Reserved.
//
// This code is licensed under the same terms as WebM:
// Software License Agreement: http://www.webmproject.org/license/software/
// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// WebPPicture utils: colorspace conversion, crop, ...
@ -14,14 +16,15 @@
#include <math.h>
#include "./vp8enci.h"
#include "../utils/alpha_processing.h"
#include "../utils/random.h"
#include "../utils/rescaler.h"
#include "../utils/utils.h"
#include "../dsp/dsp.h"
#include "../dsp/yuv.h"
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
// Uncomment to disable gamma-compression during RGB->U/V averaging
#define USE_GAMMA_COMPRESSION
#define HALVE(x) (((x) + 1) >> 1)
#define IS_YUV_CSP(csp, YUV_CSP) (((csp) & WEBP_CSP_UV_MASK) == (YUV_CSP))
@ -32,6 +35,10 @@ static const union {
} test_endian = { 0xff000000u };
#define ALPHA_IS_LAST (test_endian.bytes[3] == 0xff)
static WEBP_INLINE uint32_t MakeARGB32(int r, int g, int b) {
return (0xff000000u | (r << 16) | (g << 8) | b);
}
//------------------------------------------------------------------------------
// WebPPicture
//------------------------------------------------------------------------------
@ -116,6 +123,7 @@ int WebPPictureAlloc(WebPPicture* picture) {
picture->v0 = mem;
mem += uv0_size;
}
(void)mem; // makes the static analyzer happy
} else {
void* memory;
const uint64_t argb_size = (uint64_t)width * height;
@ -393,6 +401,28 @@ static void RescalePlane(const uint8_t* src,
}
}
static void AlphaMultiplyARGB(WebPPicture* const pic, int inverse) {
uint32_t* ptr = pic->argb;
int y;
for (y = 0; y < pic->height; ++y) {
WebPMultARGBRow(ptr, pic->width, inverse);
ptr += pic->argb_stride;
}
}
static void AlphaMultiplyY(WebPPicture* const pic, int inverse) {
const uint8_t* ptr_a = pic->a;
if (ptr_a != NULL) {
uint8_t* ptr_y = pic->y;
int y;
for (y = 0; y < pic->height; ++y) {
WebPMultRow(ptr_y, ptr_a, pic->width, inverse);
ptr_y += pic->y_stride;
ptr_a += pic->a_stride;
}
}
}
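Why the premultiply helpers above matter for rescaling: consider averaging a fully transparent red pixel (a=0, r=255, g=b=0) with an opaque white neighbor during downscale. Averaged directly, the invisible red bleeds into the result; multiplying each channel by its alpha first zeroes the transparent pixel's contribution, and the inverse multiply applied to the rescaled output restores the visible pixels' colors.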
int WebPPictureRescale(WebPPicture* pic, int width, int height) {
WebPPicture tmp;
int prev_width, prev_height;
@ -423,9 +453,19 @@ int WebPPictureRescale(WebPPicture* pic, int width, int height) {
WebPPictureFree(&tmp);
return 0;
}
// If present, we need to rescale alpha first (for AlphaMultiplyY).
if (pic->a != NULL) {
RescalePlane(pic->a, prev_width, prev_height, pic->a_stride,
tmp.a, width, height, tmp.a_stride, work, 1);
}
// We take transparency into account on the luma plane only. That's not
// totally exact blending, but it is still a good approximation.
AlphaMultiplyY(pic, 0);
RescalePlane(pic->y, prev_width, prev_height, pic->y_stride,
tmp.y, width, height, tmp.y_stride, work, 1);
AlphaMultiplyY(&tmp, 1);
RescalePlane(pic->u,
HALVE(prev_width), HALVE(prev_height), pic->uv_stride,
tmp.u,
@ -435,10 +475,6 @@ int WebPPictureRescale(WebPPicture* pic, int width, int height) {
tmp.v,
HALVE(width), HALVE(height), tmp.uv_stride, work, 1);
if (tmp.a != NULL) {
RescalePlane(pic->a, prev_width, prev_height, pic->a_stride,
tmp.a, width, height, tmp.a_stride, work, 1);
}
#ifdef WEBP_EXPERIMENTAL_FEATURES
if (tmp.u0 != NULL) {
const int s = IS_YUV_CSP(tmp.colorspace, WEBP_YUV422) ? 2 : 1;
@ -456,12 +492,16 @@ int WebPPictureRescale(WebPPicture* pic, int width, int height) {
WebPPictureFree(&tmp);
return 0;
}
// In order to correctly interpolate colors, we need to apply the alpha
// weighting first (black-matting), scale the RGB values, and remove
// the premultiplication afterward (while preserving the alpha channel).
AlphaMultiplyARGB(pic, 0);
RescalePlane((const uint8_t*)pic->argb, prev_width, prev_height,
pic->argb_stride * 4,
(uint8_t*)tmp.argb, width, height,
tmp.argb_stride * 4,
work, 4);
AlphaMultiplyARGB(&tmp, 1);
}
WebPPictureFree(pic);
free(work);
@ -550,20 +590,101 @@ int WebPPictureHasTransparency(const WebPPicture* picture) {
//------------------------------------------------------------------------------
// RGB -> YUV conversion
// TODO: we can do better than simply 2x2 averaging on U/V samples.
#define SUM4(ptr) ((ptr)[0] + (ptr)[step] + \
(ptr)[rgb_stride] + (ptr)[rgb_stride + step])
#define SUM2H(ptr) (2 * (ptr)[0] + 2 * (ptr)[step])
#define SUM2V(ptr) (2 * (ptr)[0] + 2 * (ptr)[rgb_stride])
#define SUM1(ptr) (4 * (ptr)[0])
static int RGBToY(int r, int g, int b, VP8Random* const rg) {
return VP8RGBToY(r, g, b, VP8RandomBits(rg, YUV_FIX));
}
static int RGBToU(int r, int g, int b, VP8Random* const rg) {
return VP8RGBToU(r, g, b, VP8RandomBits(rg, YUV_FIX + 2));
}
static int RGBToV(int r, int g, int b, VP8Random* const rg) {
return VP8RGBToV(r, g, b, VP8RandomBits(rg, YUV_FIX + 2));
}
//------------------------------------------------------------------------------
#if defined(USE_GAMMA_COMPRESSION)
// gamma-compensates loss of resolution during chroma subsampling
#define kGamma 0.80
#define kGammaFix 12 // fixed-point precision for linear values
#define kGammaScale ((1 << kGammaFix) - 1)
#define kGammaTabFix 7 // fixed-point fractional bits precision
#define kGammaTabScale (1 << kGammaTabFix)
#define kGammaTabRounder (kGammaTabScale >> 1)
#define kGammaTabSize (1 << (kGammaFix - kGammaTabFix))
static int kLinearToGammaTab[kGammaTabSize + 1];
static uint16_t kGammaToLinearTab[256];
static int kGammaTablesOk = 0;
static void InitGammaTables(void) {
if (!kGammaTablesOk) {
int v;
const double scale = 1. / kGammaScale;
for (v = 0; v <= 255; ++v) {
kGammaToLinearTab[v] =
(uint16_t)(pow(v / 255., kGamma) * kGammaScale + .5);
}
for (v = 0; v <= kGammaTabSize; ++v) {
const double x = scale * (v << kGammaTabFix);
kLinearToGammaTab[v] = (int)(pow(x, 1. / kGamma) * 255. + .5);
}
kGammaTablesOk = 1;
}
}
static WEBP_INLINE uint32_t GammaToLinear(uint8_t v) {
return kGammaToLinearTab[v];
}
// Convert a linear value 'v' to YUV_FIX+2 fixed-point precision
// U/V value, suitable for RGBToU/V calls.
static WEBP_INLINE int LinearToGamma(uint32_t base_value, int shift) {
const int v = base_value << shift; // final uplifted value
const int tab_pos = v >> (kGammaTabFix + 2); // integer part
const int x = v & ((kGammaTabScale << 2) - 1); // fractional part
const int v0 = kLinearToGammaTab[tab_pos];
const int v1 = kLinearToGammaTab[tab_pos + 1];
const int y = v1 * x + v0 * ((kGammaTabScale << 2) - x); // interpolate
return (y + kGammaTabRounder) >> kGammaTabFix; // descale
}
#else
static void InitGammaTables(void) {}
static WEBP_INLINE uint32_t GammaToLinear(uint8_t v) { return v; }
static WEBP_INLINE int LinearToGamma(uint32_t base_value, int shift) {
return (int)(base_value << shift);
}
#endif // USE_GAMMA_COMPRESSION
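A small numeric check of the gamma-compensated averaging above (standalone, not libwebp code; it reproduces the kGamma = 0.80 round trip in floating point rather than with the fixed-point tables):

#include <math.h>
#include <stdio.h>

int main(void) {
  const double kGamma = 0.80;
  // "Linearize" two extreme samples, average them, and map back to 8 bits,
  // mirroring what GammaToLinear()/LinearToGamma() above do with tables.
  const double a = pow(0. / 255., kGamma);
  const double b = pow(255. / 255., kGamma);
  const double back = pow((a + b) / 2., 1. / kGamma) * 255.;
  printf("plain average: 127, gamma-aware average: %.0f\n", back);  // ~107
  return 0;
}

So the subsampled average of 0 and 255 comes out near 107 rather than the plain 127, which is the behavioral difference the tables introduce during 2x2 chroma averaging.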
//------------------------------------------------------------------------------
#define SUM4(ptr) LinearToGamma( \
GammaToLinear((ptr)[0]) + \
GammaToLinear((ptr)[step]) + \
GammaToLinear((ptr)[rgb_stride]) + \
GammaToLinear((ptr)[rgb_stride + step]), 0) \
#define SUM2H(ptr) \
LinearToGamma(GammaToLinear((ptr)[0]) + GammaToLinear((ptr)[step]), 1)
#define SUM2V(ptr) \
LinearToGamma(GammaToLinear((ptr)[0]) + GammaToLinear((ptr)[rgb_stride]), 1)
#define SUM1(ptr) \
LinearToGamma(GammaToLinear((ptr)[0]), 2)
#define RGB_TO_UV(x, y, SUM) { \
const int src = (2 * (step * (x) + (y) * rgb_stride)); \
const int dst = (x) + (y) * picture->uv_stride; \
const int r = SUM(r_ptr + src); \
const int g = SUM(g_ptr + src); \
const int b = SUM(b_ptr + src); \
picture->u[dst] = VP8RGBToU(r, g, b); \
picture->v[dst] = VP8RGBToV(r, g, b); \
picture->u[dst] = RGBToU(r, g, b, &rg); \
picture->v[dst] = RGBToV(r, g, b, &rg); \
}
#define RGB_TO_UV0(x_in, x_out, y, SUM) { \
@ -572,8 +693,8 @@ int WebPPictureHasTransparency(const WebPPicture* picture) {
const int r = SUM(r_ptr + src); \
const int g = SUM(g_ptr + src); \
const int b = SUM(b_ptr + src); \
picture->u0[dst] = VP8RGBToU(r, g, b); \
picture->v0[dst] = VP8RGBToV(r, g, b); \
picture->u0[dst] = RGBToU(r, g, b, &rg); \
picture->v0[dst] = RGBToV(r, g, b, &rg); \
}
static void MakeGray(WebPPicture* const picture) {
@ -592,12 +713,14 @@ static int ImportYUVAFromRGBA(const uint8_t* const r_ptr,
const uint8_t* const a_ptr,
int step, // bytes per pixel
int rgb_stride, // bytes per scanline
float dithering,
WebPPicture* const picture) {
const WebPEncCSP uv_csp = picture->colorspace & WEBP_CSP_UV_MASK;
int x, y;
const int width = picture->width;
const int height = picture->height;
const int has_alpha = CheckNonOpaque(a_ptr, width, height, step, rgb_stride);
VP8Random rg;
picture->colorspace = uv_csp;
picture->use_argb = 0;
@ -606,12 +729,15 @@ static int ImportYUVAFromRGBA(const uint8_t* const r_ptr,
}
if (!WebPPictureAlloc(picture)) return 0;
VP8InitRandom(&rg, dithering);
InitGammaTables();
// Import luma plane
for (y = 0; y < height; ++y) {
for (x = 0; x < width; ++x) {
const int offset = step * x + y * rgb_stride;
picture->y[x + y * picture->y_stride] =
VP8RGBToY(r_ptr[offset], g_ptr[offset], b_ptr[offset]);
RGBToY(r_ptr[offset], g_ptr[offset], b_ptr[offset], &rg);
}
}
@ -659,6 +785,7 @@ static int ImportYUVAFromRGBA(const uint8_t* const r_ptr,
if (has_alpha) {
assert(step >= 4);
assert(picture->a != NULL);
for (y = 0; y < height; ++y) {
for (x = 0; x < width; ++x) {
picture->a[x + y * picture->a_stride] =
@ -681,7 +808,7 @@ static int Import(WebPPicture* const picture,
if (!picture->use_argb) {
return ImportYUVAFromRGBA(r_ptr, g_ptr, b_ptr, a_ptr, step, rgb_stride,
picture);
0.f /* no dithering */, picture);
}
if (import_alpha) {
picture->colorspace |= WEBP_CSP_ALPHA_BIT;
@ -696,10 +823,7 @@ static int Import(WebPPicture* const picture,
for (x = 0; x < width; ++x) {
const int offset = step * x + y * rgb_stride;
const uint32_t argb =
0xff000000u |
(r_ptr[offset] << 16) |
(g_ptr[offset] << 8) |
(b_ptr[offset]);
MakeARGB32(r_ptr[offset], g_ptr[offset], b_ptr[offset]);
picture->argb[x + y * picture->argb_stride] = argb;
}
}
@ -709,7 +833,7 @@ static int Import(WebPPicture* const picture,
for (y = 0; y < height; ++y) {
for (x = 0; x < width; ++x) {
const int offset = step * x + y * rgb_stride;
const uint32_t argb = (a_ptr[offset] << 24) |
const uint32_t argb = ((uint32_t)a_ptr[offset] << 24) |
(r_ptr[offset] << 16) |
(g_ptr[offset] << 8) |
(b_ptr[offset]);
@ -760,8 +884,7 @@ int WebPPictureImportBGRX(WebPPicture* picture,
int WebPPictureYUVAToARGB(WebPPicture* picture) {
if (picture == NULL) return 0;
if (picture->memory_ == NULL || picture->y == NULL ||
picture->u == NULL || picture->v == NULL) {
if (picture->y == NULL || picture->u == NULL || picture->v == NULL) {
return WebPEncodingSetError(picture, VP8_ENC_ERROR_NULL_PARAMETER);
}
if ((picture->colorspace & WEBP_CSP_ALPHA_BIT) && picture->a == NULL) {
@ -784,7 +907,7 @@ int WebPPictureYUVAToARGB(WebPPicture* picture) {
WebPUpsampleLinePairFunc upsample = WebPGetLinePairConverter(ALPHA_IS_LAST);
// First row, with replicated top samples.
upsample(NULL, cur_y, cur_u, cur_v, cur_u, cur_v, NULL, dst, width);
upsample(cur_y, NULL, cur_u, cur_v, cur_u, cur_v, dst, NULL, width);
cur_y += picture->y_stride;
dst += argb_stride;
// Center rows.
@ -809,7 +932,7 @@ int WebPPictureYUVAToARGB(WebPPicture* picture) {
const uint8_t* const src = picture->a + y * picture->a_stride;
int x;
for (x = 0; x < width; ++x) {
argb_dst[x] = (argb_dst[x] & 0x00ffffffu) | (src[x] << 24);
argb_dst[x] = (argb_dst[x] & 0x00ffffffu) | ((uint32_t)src[x] << 24);
}
}
}
@ -817,7 +940,8 @@ int WebPPictureYUVAToARGB(WebPPicture* picture) {
return 1;
}
int WebPPictureARGBToYUVA(WebPPicture* picture, WebPEncCSP colorspace) {
int WebPPictureARGBToYUVADithered(WebPPicture* picture, WebPEncCSP colorspace,
float dithering) {
if (picture == NULL) return 0;
if (picture->argb == NULL) {
return WebPEncodingSetError(picture, VP8_ENC_ERROR_NULL_PARAMETER);
@ -833,7 +957,8 @@ int WebPPictureARGBToYUVA(WebPPicture* picture, WebPEncCSP colorspace) {
PictureResetARGB(&tmp); // reset ARGB buffer so that it's not free()'d.
tmp.use_argb = 0;
tmp.colorspace = colorspace & WEBP_CSP_UV_MASK;
if (!ImportYUVAFromRGBA(r, g, b, a, 4, 4 * picture->argb_stride, &tmp)) {
if (!ImportYUVAFromRGBA(r, g, b, a, 4, 4 * picture->argb_stride, dithering,
&tmp)) {
return WebPEncodingSetError(picture, VP8_ENC_ERROR_OUT_OF_MEMORY);
}
// Copy back the YUV specs into 'picture'.
@ -845,6 +970,10 @@ int WebPPictureARGBToYUVA(WebPPicture* picture, WebPEncCSP colorspace) {
return 1;
}
int WebPPictureARGBToYUVA(WebPPicture* picture, WebPEncCSP colorspace) {
return WebPPictureARGBToYUVADithered(picture, colorspace, 0.f);
}
//------------------------------------------------------------------------------
// Helper: clean up fully transparent area to help compressibility.
@ -910,6 +1039,91 @@ void WebPCleanupTransparentArea(WebPPicture* pic) {
#undef SIZE
#undef SIZE2
//------------------------------------------------------------------------------
// Blend color and remove transparency info
#define BLEND(V0, V1, ALPHA) \
((((V0) * (255 - (ALPHA)) + (V1) * (ALPHA)) * 0x101) >> 16)
#define BLEND_10BIT(V0, V1, ALPHA) \
((((V0) * (1020 - (ALPHA)) + (V1) * (ALPHA)) * 0x101) >> 18)
void WebPBlendAlpha(WebPPicture* pic, uint32_t background_rgb) {
const int red = (background_rgb >> 16) & 0xff;
const int green = (background_rgb >> 8) & 0xff;
const int blue = (background_rgb >> 0) & 0xff;
VP8Random rg;
int x, y;
if (pic == NULL) return;
VP8InitRandom(&rg, 0.f);
if (!pic->use_argb) {
const int uv_width = (pic->width >> 1); // omit last pixel during u/v loop
const int Y0 = RGBToY(red, green, blue, &rg);
// VP8RGBToU/V expects the u/v values summed over four pixels
const int U0 = RGBToU(4 * red, 4 * green, 4 * blue, &rg);
const int V0 = RGBToV(4 * red, 4 * green, 4 * blue, &rg);
const int has_alpha = pic->colorspace & WEBP_CSP_ALPHA_BIT;
if (!has_alpha || pic->a == NULL) return; // nothing to do
for (y = 0; y < pic->height; ++y) {
// Luma blending
uint8_t* const y_ptr = pic->y + y * pic->y_stride;
uint8_t* const a_ptr = pic->a + y * pic->a_stride;
for (x = 0; x < pic->width; ++x) {
const int alpha = a_ptr[x];
if (alpha < 0xff) {
y_ptr[x] = BLEND(Y0, y_ptr[x], a_ptr[x]);
}
}
// Chroma blending every even line
if ((y & 1) == 0) {
uint8_t* const u = pic->u + (y >> 1) * pic->uv_stride;
uint8_t* const v = pic->v + (y >> 1) * pic->uv_stride;
uint8_t* const a_ptr2 =
(y + 1 == pic->height) ? a_ptr : a_ptr + pic->a_stride;
for (x = 0; x < uv_width; ++x) {
// Average four alpha values into a single blending weight.
// TODO(skal): might lead to visible contouring. Can we do better?
const int alpha =
a_ptr[2 * x + 0] + a_ptr[2 * x + 1] +
a_ptr2[2 * x + 0] + a_ptr2[2 * x + 1];
u[x] = BLEND_10BIT(U0, u[x], alpha);
v[x] = BLEND_10BIT(V0, v[x], alpha);
}
if (pic->width & 1) { // rightmost pixel
const int alpha = 2 * (a_ptr[2 * x + 0] + a_ptr2[2 * x + 0]);
u[x] = BLEND_10BIT(U0, u[x], alpha);
v[x] = BLEND_10BIT(V0, v[x], alpha);
}
}
memset(a_ptr, 0xff, pic->width);
}
} else {
uint32_t* argb = pic->argb;
const uint32_t background = MakeARGB32(red, green, blue);
for (y = 0; y < pic->height; ++y) {
for (x = 0; x < pic->width; ++x) {
const int alpha = (argb[x] >> 24) & 0xff;
if (alpha != 0xff) {
if (alpha > 0) {
int r = (argb[x] >> 16) & 0xff;
int g = (argb[x] >> 8) & 0xff;
int b = (argb[x] >> 0) & 0xff;
r = BLEND(red, r, alpha);
g = BLEND(green, g, alpha);
b = BLEND(blue, b, alpha);
argb[x] = MakeARGB32(r, g, b);
} else {
argb[x] = background;
}
}
}
argb += pic->argb_stride;
}
}
}
#undef BLEND
#undef BLEND_10BIT
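The '* 0x101 >> 16' in BLEND() above (and '>> 18' in BLEND_10BIT()) is a multiply-and-shift stand-in for dividing by 255 (resp. 1020), since 255 * 0x101 = 65535 is nearly 2^16. A quick standalone check (not libwebp code) of how close it stays; it can land one below the exact quotient when the input is an exact multiple of 255, which is harmless at 8-bit precision:

#include <stdio.h>

int main(void) {
  const int samples[] = { 0, 31800, 255 * 51, 255 * 255 };
  int i;
  for (i = 0; i < 4; ++i) {
    const int x = samples[i];
    printf("x=%6d  x/255=%4d  (x*0x101)>>16=%4d\n",
           x, x / 255, (x * 0x101) >> 16);
  }
  return 0;
}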
//------------------------------------------------------------------------------
// local-min distortion
//
@ -1086,10 +1300,10 @@ size_t NAME(const uint8_t* in, int w, int h, int bps, float q, \
return Encode(in, w, h, bps, IMPORTER, q, 0, out); \
}
ENCODE_FUNC(WebPEncodeRGB, WebPPictureImportRGB);
ENCODE_FUNC(WebPEncodeBGR, WebPPictureImportBGR);
ENCODE_FUNC(WebPEncodeRGBA, WebPPictureImportRGBA);
ENCODE_FUNC(WebPEncodeBGRA, WebPPictureImportBGRA);
ENCODE_FUNC(WebPEncodeRGB, WebPPictureImportRGB)
ENCODE_FUNC(WebPEncodeBGR, WebPPictureImportBGR)
ENCODE_FUNC(WebPEncodeRGBA, WebPPictureImportRGBA)
ENCODE_FUNC(WebPEncodeBGRA, WebPPictureImportBGRA)
#undef ENCODE_FUNC
@ -1099,15 +1313,12 @@ size_t NAME(const uint8_t* in, int w, int h, int bps, uint8_t** out) { \
return Encode(in, w, h, bps, IMPORTER, LOSSLESS_DEFAULT_QUALITY, 1, out); \
}
LOSSLESS_ENCODE_FUNC(WebPEncodeLosslessRGB, WebPPictureImportRGB);
LOSSLESS_ENCODE_FUNC(WebPEncodeLosslessBGR, WebPPictureImportBGR);
LOSSLESS_ENCODE_FUNC(WebPEncodeLosslessRGBA, WebPPictureImportRGBA);
LOSSLESS_ENCODE_FUNC(WebPEncodeLosslessBGRA, WebPPictureImportBGRA);
LOSSLESS_ENCODE_FUNC(WebPEncodeLosslessRGB, WebPPictureImportRGB)
LOSSLESS_ENCODE_FUNC(WebPEncodeLosslessBGR, WebPPictureImportBGR)
LOSSLESS_ENCODE_FUNC(WebPEncodeLosslessRGBA, WebPPictureImportRGBA)
LOSSLESS_ENCODE_FUNC(WebPEncodeLosslessBGRA, WebPPictureImportBGRA)
#undef LOSSLESS_ENCODE_FUNC
//------------------------------------------------------------------------------
#if defined(__cplusplus) || defined(c_plusplus)
} // extern "C"
#endif

View File

@ -1,8 +1,10 @@
// Copyright 2011 Google Inc. All Rights Reserved.
//
// This code is licensed under the same terms as WebM:
// Software License Agreement: http://www.webmproject.org/license/software/
// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// Quantization
@ -11,6 +13,7 @@
#include <assert.h>
#include <math.h>
#include <stdlib.h> // for abs()
#include "./vp8enci.h"
#include "./cost.h"
@ -22,18 +25,78 @@
#define MID_ALPHA 64 // neutral value for susceptibility
#define MIN_ALPHA 30 // lowest usable value for susceptibility
#define MAX_ALPHA 100 // higher meaninful value for susceptibility
#define MAX_ALPHA 100 // higher meaningful value for susceptibility
#define SNS_TO_DQ 0.9 // Scaling constant between the sns value and the QP
// power-law modulation. Must be strictly less than 1.
#define I4_PENALTY 4000 // Rate-penalty for quick i4/i16 decision
// number of non-zero coeffs below which we consider the block very flat
// (and apply a penalty to complex predictions)
#define FLATNESS_LIMIT_I16 10 // I16 mode
#define FLATNESS_LIMIT_I4 3 // I4 mode
#define FLATNESS_LIMIT_UV 2 // UV mode
#define FLATNESS_PENALTY 140 // roughly ~1bit per block
#define MULT_8B(a, b) (((a) * (b) + 128) >> 8)
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
// #define DEBUG_BLOCK
//------------------------------------------------------------------------------
#if defined(DEBUG_BLOCK)
#include <stdio.h>
#include <stdlib.h>
static void PrintBlockInfo(const VP8EncIterator* const it,
const VP8ModeScore* const rd) {
int i, j;
const int is_i16 = (it->mb_->type_ == 1);
printf("SOURCE / OUTPUT / ABS DELTA\n");
for (j = 0; j < 24; ++j) {
if (j == 16) printf("\n"); // newline before the U/V block
for (i = 0; i < 16; ++i) printf("%3d ", it->yuv_in_[i + j * BPS]);
printf(" ");
for (i = 0; i < 16; ++i) printf("%3d ", it->yuv_out_[i + j * BPS]);
printf(" ");
for (i = 0; i < 16; ++i) {
printf("%1d ", abs(it->yuv_out_[i + j * BPS] - it->yuv_in_[i + j * BPS]));
}
printf("\n");
}
printf("\nD:%d SD:%d R:%d H:%d nz:0x%x score:%d\n",
(int)rd->D, (int)rd->SD, (int)rd->R, (int)rd->H, (int)rd->nz,
(int)rd->score);
if (is_i16) {
printf("Mode: %d\n", rd->mode_i16);
printf("y_dc_levels:");
for (i = 0; i < 16; ++i) printf("%3d ", rd->y_dc_levels[i]);
printf("\n");
} else {
printf("Modes[16]: ");
for (i = 0; i < 16; ++i) printf("%d ", rd->modes_i4[i]);
printf("\n");
}
printf("y_ac_levels:\n");
for (j = 0; j < 16; ++j) {
for (i = is_i16 ? 1 : 0; i < 16; ++i) {
printf("%4d ", rd->y_ac_levels[j][i]);
}
printf("\n");
}
printf("\n");
printf("uv_levels (mode=%d):\n", rd->mode_uv);
for (j = 0; j < 8; ++j) {
for (i = 0; i < 16; ++i) {
printf("%4d ", rd->uv_levels[j][i]);
}
printf("\n");
}
}
#endif // DEBUG_BLOCK
//------------------------------------------------------------------------------
@ -102,31 +165,13 @@ static const uint16_t kAcTable2[128] = {
385, 393, 401, 409, 416, 424, 432, 440
};
static const uint16_t kCoeffThresh[16] = {
0, 10, 20, 30,
10, 20, 30, 30,
20, 30, 30, 30,
30, 30, 30, 30
static const uint8_t kBiasMatrices[3][2] = { // [luma-ac,luma-dc,chroma][dc,ac]
{ 96, 110 }, { 96, 108 }, { 110, 115 }
};
// TODO(skal): tune more. Coeff thresholding?
static const uint8_t kBiasMatrices[3][16] = { // [3] = [luma-ac,luma-dc,chroma]
{ 96, 96, 96, 96,
96, 96, 96, 96,
96, 96, 96, 96,
96, 96, 96, 96 },
{ 96, 96, 96, 96,
96, 96, 96, 96,
96, 96, 96, 96,
96, 96, 96, 96 },
{ 96, 96, 96, 96,
96, 96, 96, 96,
96, 96, 96, 96,
96, 96, 96, 96 }
};
// Sharpening by (slightly) raising the hi-frequency coeffs (only for trellis).
// Sharpening by (slightly) raising the hi-frequency coeffs.
// Hack-ish but helpful for mid-bitrate range. Use with care.
#define SHARPEN_BITS 11 // number of descaling bits for sharpening bias
static const uint8_t kFreqSharpening[16] = {
0, 30, 60, 90,
30, 60, 90, 90,
@ -139,20 +184,30 @@ static const uint8_t kFreqSharpening[16] = {
// Returns the average quantizer
static int ExpandMatrix(VP8Matrix* const m, int type) {
int i;
int sum = 0;
int i, sum;
for (i = 0; i < 2; ++i) {
const int is_ac_coeff = (i > 0);
const int bias = kBiasMatrices[type][is_ac_coeff];
m->iq_[i] = (1 << QFIX) / m->q_[i];
m->bias_[i] = BIAS(bias);
// zthresh_ is the exact value such that QUANTDIV(coeff, iQ, B) is:
// * zero if coeff <= zthresh
// * non-zero if coeff > zthresh
m->zthresh_[i] = ((1 << QFIX) - 1 - m->bias_[i]) / m->iq_[i];
}
for (i = 2; i < 16; ++i) {
m->q_[i] = m->q_[1];
m->iq_[i] = m->iq_[1];
m->bias_[i] = m->bias_[1];
m->zthresh_[i] = m->zthresh_[1];
}
for (i = 0; i < 16; ++i) {
const int j = kZigzag[i];
const int bias = kBiasMatrices[type][j];
m->iq_[j] = (1 << QFIX) / m->q_[j];
m->bias_[j] = BIAS(bias);
// TODO(skal): tune kCoeffThresh[]
m->zthresh_[j] = ((256 /*+ kCoeffThresh[j]*/ - bias) * m->q_[j] + 127) >> 8;
m->sharpen_[j] = (kFreqSharpening[j] * m->q_[j]) >> 11;
sum += m->q_[j];
for (sum = 0, i = 0; i < 16; ++i) {
if (type == 0) { // we only use sharpening for AC luma coeffs
m->sharpen_[i] = (kFreqSharpening[i] * m->q_[i]) >> SHARPEN_BITS;
} else {
m->sharpen_[i] = 0;
}
sum += m->q_[i];
}
return (sum + 8) >> 4;
}
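Worked example for the zthresh_ computation above, assuming QFIX == 17, BIAS(b) == (b) << (QFIX - 8) and QUANTDIV(n, iQ, B) == ((n) * (iQ) + (B)) >> QFIX as defined in vp8enci.h (standalone sketch, not libwebp code):

#include <stdio.h>

#define QFIX 17
#define BIAS(b) ((b) << (QFIX - 8))
#define QUANTDIV(n, iQ, B) (((n) * (iQ) + (B)) >> QFIX)

int main(void) {
  const int q = 40;                                    // quantizer step
  const int iq = (1 << QFIX) / q;                      // 3276
  const int bias = BIAS(96);                           // 49152
  const int zthresh = ((1 << QFIX) - 1 - bias) / iq;   // 25
  // Coefficients up to zthresh quantize to zero, the next one to non-zero.
  printf("zthresh=%d  Q(%d)=%d  Q(%d)=%d\n", zthresh,
         zthresh, QUANTDIV(zthresh, iq, bias),
         zthresh + 1, QUANTDIV(zthresh + 1, iq, bias));
  return 0;
}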
@ -180,17 +235,17 @@ static void SetupMatrices(VP8Encoder* enc) {
q16 = ExpandMatrix(&m->y2_, 1);
quv = ExpandMatrix(&m->uv_, 2);
// TODO: Switch to kLambda*[] tables?
{
m->lambda_i4_ = (3 * q4 * q4) >> 7;
m->lambda_i16_ = (3 * q16 * q16);
m->lambda_uv_ = (3 * quv * quv) >> 6;
m->lambda_mode_ = (1 * q4 * q4) >> 7;
m->lambda_trellis_i4_ = (7 * q4 * q4) >> 3;
m->lambda_trellis_i16_ = (q16 * q16) >> 2;
m->lambda_trellis_uv_ = (quv *quv) << 1;
m->tlambda_ = (tlambda_scale * q4) >> 5;
}
m->lambda_i4_ = (3 * q4 * q4) >> 7;
m->lambda_i16_ = (3 * q16 * q16);
m->lambda_uv_ = (3 * quv * quv) >> 6;
m->lambda_mode_ = (1 * q4 * q4) >> 7;
m->lambda_trellis_i4_ = (7 * q4 * q4) >> 3;
m->lambda_trellis_i16_ = (q16 * q16) >> 2;
m->lambda_trellis_uv_ = (quv *quv) << 1;
m->tlambda_ = (tlambda_scale * q4) >> 5;
m->min_disto_ = 10 * m->y1_.q_[0]; // quantization-aware min disto
m->max_edge_ = 0;
}
}
@ -199,16 +254,21 @@ static void SetupMatrices(VP8Encoder* enc) {
// Very small filter-strength values have close to no visual effect. So we can
// save a little decoding-CPU by turning filtering off for these.
#define FSTRENGTH_CUTOFF 3
#define FSTRENGTH_CUTOFF 2
static void SetupFilterStrength(VP8Encoder* const enc) {
int i;
const int level0 = enc->config_->filter_strength;
// level0 is in [0..500]. Using '-f 50' as filter_strength is mid-filtering.
const int level0 = 5 * enc->config_->filter_strength;
for (i = 0; i < NUM_MB_SEGMENTS; ++i) {
// Segments with lower quantizer will be less filtered. TODO: tune (wrt SNS)
const int level = level0 * 256 * enc->dqm_[i].quant_ / 128;
const int f = level / (256 + enc->dqm_[i].beta_);
enc->dqm_[i].fstrength_ = (f < FSTRENGTH_CUTOFF) ? 0 : (f > 63) ? 63 : f;
VP8SegmentInfo* const m = &enc->dqm_[i];
// We focus on the quantization of AC coeffs.
const int qstep = kAcTable[clip(m->quant_, 0, 127)] >> 2;
const int base_strength =
VP8FilterStrengthFromDelta(enc->filter_hdr_.sharpness_, qstep);
// Segments with lower complexity ('beta') will be less filtered.
const int f = base_strength * level0 / (256 + m->beta_);
m->fstrength_ = (f < FSTRENGTH_CUTOFF) ? 0 : (f > 63) ? 63 : f;
}
// We record the initial strength (mainly for the case of 1-segment only).
enc->filter_hdr_.level_ = enc->dqm_[0].fstrength_;
@ -232,7 +292,7 @@ static double QualityToCompression(double c) {
// exponent is somewhere between 2.8 and 3.2, but we're mostly interested
// in the mid-quant range. So we scale the compressibility inversely to
// this power-law: quant ~= compression ^ 1/3. This law holds well for
// low quant. Finer modelling for high-quant would make use of kAcTable[]
// low quant. Finer modeling for high-quant would make use of kAcTable[]
// more explicitly.
const double v = pow(linear_c, 1 / 3.);
return v;
@ -365,16 +425,14 @@ const int VP8I4ModeOffsets[NUM_BMODES] = {
};
void VP8MakeLuma16Preds(const VP8EncIterator* const it) {
const VP8Encoder* const enc = it->enc_;
const uint8_t* const left = it->x_ ? enc->y_left_ : NULL;
const uint8_t* const top = it->y_ ? enc->y_top_ + it->x_ * 16 : NULL;
const uint8_t* const left = it->x_ ? it->y_left_ : NULL;
const uint8_t* const top = it->y_ ? it->y_top_ : NULL;
VP8EncPredLuma16(it->yuv_p_, left, top);
}
void VP8MakeChroma8Preds(const VP8EncIterator* const it) {
const VP8Encoder* const enc = it->enc_;
const uint8_t* const left = it->x_ ? enc->u_left_ : NULL;
const uint8_t* const top = it->y_ ? enc->uv_top_ + it->x_ * 16 : NULL;
const uint8_t* const left = it->x_ ? it->u_left_ : NULL;
const uint8_t* const top = it->y_ ? it->uv_top_ : NULL;
VP8EncPredChroma8(it->yuv_p_, left, top);
}
@ -430,6 +488,7 @@ static void InitScore(VP8ModeScore* const rd) {
rd->D = 0;
rd->SD = 0;
rd->R = 0;
rd->H = 0;
rd->nz = 0;
rd->score = MAX_COST;
}
@ -438,6 +497,7 @@ static void CopyScore(VP8ModeScore* const dst, const VP8ModeScore* const src) {
dst->D = src->D;
dst->SD = src->SD;
dst->R = src->R;
dst->H = src->H;
dst->nz = src->nz; // note that nz is not accumulated, but just copied.
dst->score = src->score;
}
@ -446,6 +506,7 @@ static void AddScore(VP8ModeScore* const dst, const VP8ModeScore* const src) {
dst->D += src->D;
dst->SD += src->SD;
dst->R += src->R;
dst->H += src->H;
dst->nz |= src->nz; // here, new nz bits are accumulated.
dst->score += src->score;
}
@ -474,7 +535,7 @@ typedef struct {
static WEBP_INLINE void SetRDScore(int lambda, VP8ModeScore* const rd) {
// TODO: incorporate the "* 256" in the tables?
rd->score = rd->R * lambda + 256 * (rd->D + rd->SD);
rd->score = (rd->R + rd->H) * lambda + 256 * (rd->D + rd->SD);
}
static WEBP_INLINE score_t RDScoreTrellis(int lambda, score_t rate,
@ -537,11 +598,10 @@ static int TrellisQuantizeBlock(const VP8EncIterator* const it,
// note: it's important to take sign of the _original_ coeff,
// so we don't have to consider level < 0 afterward.
const int sign = (in[j] < 0);
int coeff0 = (sign ? -in[j] : in[j]) + mtx->sharpen_[j];
int level0;
if (coeff0 > 2047) coeff0 = 2047;
const int coeff0 = (sign ? -in[j] : in[j]) + mtx->sharpen_[j];
int level0 = QUANTDIV(coeff0, iQ, B);
if (level0 > MAX_LEVEL) level0 = MAX_LEVEL;
level0 = QUANTDIV(coeff0, iQ, B);
// test all alternate level values around level0.
for (m = -MIN_DELTA; m <= MAX_DELTA; ++m) {
Node* const cur = &NODE(n, m);
@ -553,7 +613,7 @@ static int TrellisQuantizeBlock(const VP8EncIterator* const it,
cur->sign = sign;
cur->level = level;
cur->ctx = (level == 0) ? 0 : (level == 1) ? 1 : 2;
if (level >= 2048 || level < 0) { // node is dead?
if (level > MAX_LEVEL || level < 0) { // node is dead?
cur->cost = MAX_COST;
continue;
}
@ -646,10 +706,10 @@ static int ReconstructIntra16(VP8EncIterator* const it,
VP8ModeScore* const rd,
uint8_t* const yuv_out,
int mode) {
const VP8Encoder* const enc = it->enc_;
VP8Encoder* const enc = it->enc_;
const uint8_t* const ref = it->yuv_p_ + VP8I16ModeOffsets[mode];
const uint8_t* const src = it->yuv_in_ + Y_OFF;
const VP8SegmentInfo* const dqm = &enc->dqm_[it->mb_->segment_];
VP8SegmentInfo* const dqm = &enc->dqm_[it->mb_->segment_];
int nz = 0;
int n;
int16_t tmp[16][16], dc_tmp[16];
@ -658,7 +718,7 @@ static int ReconstructIntra16(VP8EncIterator* const it,
VP8FTransform(src + VP8Scan[n], ref + VP8Scan[n], tmp[n]);
}
VP8FTransformWHT(tmp[0], dc_tmp);
nz |= VP8EncQuantizeBlock(dc_tmp, rd->y_dc_levels, 0, &dqm->y2_) << 24;
nz |= VP8EncQuantizeBlockWHT(dc_tmp, rd->y_dc_levels, &dqm->y2_) << 24;
if (DO_TRELLIS_I16 && it->do_trellis_) {
int x, y;
@ -753,7 +813,18 @@ static int ReconstructUV(VP8EncIterator* const it, VP8ModeScore* const rd,
//------------------------------------------------------------------------------
// RD-opt decision. Reconstruct each mode, evaluate distortion and bit-cost.
// Pick the mode is lower RD-cost = Rate + lamba * Distortion.
// Pick the mode with the lower RD-cost = Rate + lambda * Distortion.
static void StoreMaxDelta(VP8SegmentInfo* const dqm, const int16_t DCs[16]) {
// We look at the first three AC coefficients to determine the average
// delta between the sub-4x4 blocks.
const int v0 = abs(DCs[1]);
const int v1 = abs(DCs[4]);
const int v2 = abs(DCs[5]);
int max_v = (v0 > v1) ? v0 : v1;
max_v = (v2 > max_v) ? v2 : max_v;
if (max_v > dqm->max_edge_) dqm->max_edge_ = max_v;
}
static void SwapPtr(uint8_t** a, uint8_t** b) {
uint8_t* const tmp = *a;
@ -765,9 +836,23 @@ static void SwapOut(VP8EncIterator* const it) {
SwapPtr(&it->yuv_out_, &it->yuv_out2_);
}
static score_t IsFlat(const int16_t* levels, int num_blocks, score_t thresh) {
score_t score = 0;
while (num_blocks-- > 0) { // TODO(skal): refine positional scoring?
int i;
for (i = 1; i < 16; ++i) { // omit DC, we're only interested in AC
score += (levels[i] != 0);
if (score > thresh) return 0;
}
levels += 16;
}
return 1;
}
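IsFlat() above returns 1 only when the blocks carry at most 'thresh' non-zero AC coefficients in total. The callers below then add FLATNESS_PENALTY (140) per block to the rate of every non-DC prediction mode (140 * 16 = 2240 for a whole i16 macroblock, 140 per single i4 block), so flat areas are steered toward the cheaper, simpler modes.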
static void PickBestIntra16(VP8EncIterator* const it, VP8ModeScore* const rd) {
const VP8Encoder* const enc = it->enc_;
const VP8SegmentInfo* const dqm = &enc->dqm_[it->mb_->segment_];
const int kNumBlocks = 16;
VP8Encoder* const enc = it->enc_;
VP8SegmentInfo* const dqm = &enc->dqm_[it->mb_->segment_];
const int lambda = dqm->lambda_i16_;
const int tlambda = dqm->tlambda_;
const uint8_t* const src = it->yuv_in_ + Y_OFF;
@ -786,8 +871,13 @@ static void PickBestIntra16(VP8EncIterator* const it, VP8ModeScore* const rd) {
rd16.D = VP8SSE16x16(src, tmp_dst);
rd16.SD = tlambda ? MULT_8B(tlambda, VP8TDisto16x16(src, tmp_dst, kWeightY))
: 0;
rd16.H = VP8FixedCostsI16[mode];
rd16.R = VP8GetCostLuma16(it, &rd16);
rd16.R += VP8FixedCostsI16[mode];
if (mode > 0 &&
IsFlat(rd16.y_ac_levels[0], kNumBlocks, FLATNESS_LIMIT_I16)) {
// penalty to avoid flat area to be mispredicted by complex mode
rd16.R += FLATNESS_PENALTY * kNumBlocks;
}
// Since we always examine Intra16 first, we can overwrite *rd directly.
SetRDScore(lambda, &rd16);
@ -802,6 +892,13 @@ static void PickBestIntra16(VP8EncIterator* const it, VP8ModeScore* const rd) {
}
SetRDScore(dqm->lambda_mode_, rd); // finalize score for mode decision.
VP8SetIntra16Mode(it, rd->mode_i16);
// we have a blocky macroblock (only DCs are non-zero) with fairly high
// distortion, record max delta so we can later adjust the minimal filtering
// strength needed to smooth these blocks out.
if ((rd->nz & 0xffff) == 0 && rd->D > dqm->min_disto_) {
StoreMaxDelta(dqm, rd->y_dc_levels);
}
}
//------------------------------------------------------------------------------
@ -831,9 +928,11 @@ static int PickBestIntra4(VP8EncIterator* const it, VP8ModeScore* const rd) {
}
InitScore(&rd_best);
rd_best.score = 211; // '211' is the value of VP8BitCost(0, 145)
rd_best.H = 211; // '211' is the value of VP8BitCost(0, 145)
SetRDScore(dqm->lambda_mode_, &rd_best);
VP8IteratorStartI4(it);
do {
const int kNumBlocks = 1;
VP8ModeScore rd_i4;
int mode;
int best_mode = -1;
@ -857,8 +956,11 @@ static int PickBestIntra4(VP8EncIterator* const it, VP8ModeScore* const rd) {
rd_tmp.SD =
tlambda ? MULT_8B(tlambda, VP8TDisto4x4(src, tmp_dst, kWeightY))
: 0;
rd_tmp.H = mode_costs[mode];
rd_tmp.R = VP8GetCostLuma4(it, tmp_levels);
rd_tmp.R += mode_costs[mode];
if (mode > 0 && IsFlat(tmp_levels, kNumBlocks, FLATNESS_LIMIT_I4)) {
rd_tmp.R += FLATNESS_PENALTY * kNumBlocks;
}
SetRDScore(lambda, &rd_tmp);
if (best_mode < 0 || rd_tmp.score < rd_i4.score) {
@ -870,14 +972,17 @@ static int PickBestIntra4(VP8EncIterator* const it, VP8ModeScore* const rd) {
}
SetRDScore(dqm->lambda_mode_, &rd_i4);
AddScore(&rd_best, &rd_i4);
total_header_bits += mode_costs[best_mode];
if (rd_best.score >= rd->score ||
total_header_bits > enc->max_i4_header_bits_) {
if (rd_best.score >= rd->score) {
return 0;
}
total_header_bits += (int)rd_i4.H; // <- equal to mode_costs[best_mode];
if (total_header_bits > enc->max_i4_header_bits_) {
return 0;
}
// Copy selected samples if not in the right place already.
if (best_block != best_blocks + VP8Scan[it->i4_])
if (best_block != best_blocks + VP8Scan[it->i4_]) {
VP8Copy4x4(best_block, best_blocks + VP8Scan[it->i4_]);
}
rd->modes_i4[it->i4_] = best_mode;
it->top_nz_[it->i4_ & 3] = it->left_nz_[it->i4_ >> 2] = (rd_i4.nz ? 1 : 0);
} while (VP8IteratorRotateI4(it, best_blocks));
@ -893,6 +998,7 @@ static int PickBestIntra4(VP8EncIterator* const it, VP8ModeScore* const rd) {
//------------------------------------------------------------------------------
static void PickBestUV(VP8EncIterator* const it, VP8ModeScore* const rd) {
const int kNumBlocks = 8;
const VP8Encoder* const enc = it->enc_;
const VP8SegmentInfo* const dqm = &enc->dqm_[it->mb_->segment_];
const int lambda = dqm->lambda_uv_;
@ -913,8 +1019,11 @@ static void PickBestUV(VP8EncIterator* const it, VP8ModeScore* const rd) {
// Compute RD-score
rd_uv.D = VP8SSE16x8(src, tmp_dst);
rd_uv.SD = 0; // TODO: should we call TDisto? it tends to flatten areas.
rd_uv.H = VP8FixedCostsUV[mode];
rd_uv.R = VP8GetCostUV(it, &rd_uv);
rd_uv.R += VP8FixedCostsUV[mode];
if (mode > 0 && IsFlat(rd_uv.uv_levels[0], kNumBlocks, FLATNESS_LIMIT_UV)) {
rd_uv.R += FLATNESS_PENALTY * kNumBlocks;
}
SetRDScore(lambda, &rd_uv);
if (mode == 0 || rd_uv.score < rd_best.score) {
@ -1045,6 +1154,3 @@ int VP8Decimate(VP8EncIterator* const it, VP8ModeScore* const rd,
return is_skipped;
}
#if defined(__cplusplus) || defined(c_plusplus)
} // extern "C"
#endif


@ -1,8 +1,10 @@
// Copyright 2011 Google Inc. All Rights Reserved.
//
// This code is licensed under the same terms as WebM:
// Software License Agreement: http://www.webmproject.org/license/software/
// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// Header syntax writing
@ -16,10 +18,6 @@
#include "../webp/mux_types.h" // ALPHA_FLAG
#include "./vp8enci.h"
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
//------------------------------------------------------------------------------
// Helper functions
@ -423,6 +421,3 @@ int VP8EncWrite(VP8Encoder* const enc) {
//------------------------------------------------------------------------------
#if defined(__cplusplus) || defined(c_plusplus)
} // extern "C"
#endif


@ -1,8 +1,10 @@
// Copyright 2011 Google Inc. All Rights Reserved.
//
// This code is licensed under the same terms as WebM:
// Software License Agreement: http://www.webmproject.org/license/software/
// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// Paginated token buffer
@ -18,12 +20,9 @@
#include <stdlib.h>
#include <string.h>
#include "./cost.h"
#include "./vp8enci.h"
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
#if !defined(DISABLE_TOKEN_BUFFER)
// we use pages to reduce the number of memcpy()
@ -236,6 +235,29 @@ int VP8EmitTokens(VP8TBuffer* const b, VP8BitWriter* const bw,
return 1;
}
// Size estimation
size_t VP8EstimateTokenSize(VP8TBuffer* const b, const uint8_t* const probas) {
size_t size = 0;
const VP8Tokens* p = b->pages_;
if (b->error_) return 0;
while (p != NULL) {
const VP8Tokens* const next = p->next_;
const int N = (next == NULL) ? b->left_ : 0;
int n = MAX_NUM_TOKEN;
while (n-- > N) {
const uint16_t token = p->tokens_[n];
const int bit = token & (1 << 15);
if (token & FIXED_PROBA_BIT) {
size += VP8BitCost(bit, token & 0xffu);
} else {
size += VP8BitCost(bit, probas[token & 0x3fffu]);
}
}
p = next;
}
return size;
}
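// Illustration (not from the patch): each recorded token is a uint16_t where
// bit 15 carries the coded bit, FIXED_PROBA_BIT marks tokens whose low byte is
// a literal probability, and the remaining tokens keep a 14-bit index into
// 'probas'. The per-token cost summed by the loop above is therefore:
static WEBP_INLINE size_t TokenCostSketch(uint16_t token,
                                          const uint8_t* const probas) {
  const int bit = token & (1 << 15);
  return (token & FIXED_PROBA_BIT) ? VP8BitCost(bit, token & 0xffu)
                                   : VP8BitCost(bit, probas[token & 0x3fffu]);
}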
//------------------------------------------------------------------------------
#else // DISABLE_TOKEN_BUFFER
@ -249,6 +271,3 @@ void VP8TBufferClear(VP8TBuffer* const b) {
#endif // !DISABLE_TOKEN_BUFFER
#if defined(__cplusplus) || defined(c_plusplus)
} // extern "C"
#endif


@ -1,27 +1,24 @@
// Copyright 2011 Google Inc. All Rights Reserved.
//
// This code is licensed under the same terms as WebM:
// Software License Agreement: http://www.webmproject.org/license/software/
// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// Token probabilities
// Coding of token probabilities, intra modes and segments.
//
// Author: Skal (pascal.massimino@gmail.com)
#include "./vp8enci.h"
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
//------------------------------------------------------------------------------
// Default probabilities
// Paragraph 13.5
const uint8_t
VP8CoeffsProba0[NUM_TYPES][NUM_BANDS][NUM_CTX][NUM_PROBAS] = {
// generated using vp8_default_coef_probs() in entropy.c:129
{ { { 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128 },
{ 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128 },
{ 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128 }
@ -318,7 +315,7 @@ void VP8CodeIntraModes(VP8Encoder* const enc) {
VP8EncIterator it;
VP8IteratorInit(enc, &it);
do {
const VP8MBInfo* mb = it.mb_;
const VP8MBInfo* const mb = it.mb_;
const uint8_t* preds = it.preds_;
if (enc->segment_hdr_.update_map_) {
PutSegment(bw, mb->segment_, enc->proba_.segments_);
@ -343,7 +340,7 @@ void VP8CodeIntraModes(VP8Encoder* const enc) {
}
}
PutUVMode(bw, mb->uv_mode_);
} while (VP8IteratorNext(&it, 0));
} while (VP8IteratorNext(&it));
}
//------------------------------------------------------------------------------
@ -505,6 +502,3 @@ void VP8WriteProbas(VP8BitWriter* const bw, const VP8Proba* const probas) {
}
}
#if defined(__cplusplus) || defined(c_plusplus)
} // extern "C"
#endif


@ -1,8 +1,10 @@
// Copyright 2011 Google Inc. All Rights Reserved.
//
// This code is licensed under the same terms as WebM:
// Software License Agreement: http://www.webmproject.org/license/software/
// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// WebP encoder: internal header.
@ -18,7 +20,7 @@
#include "../utils/bit_writer.h"
#include "../utils/thread.h"
#if defined(__cplusplus) || defined(c_plusplus)
#ifdef __cplusplus
extern "C" {
#endif
@ -27,7 +29,7 @@ extern "C" {
// version numbers
#define ENC_MAJ_VERSION 0
#define ENC_MIN_VERSION 3
#define ENC_MIN_VERSION 4
#define ENC_REV_VERSION 0
// intra prediction modes
@ -72,7 +74,7 @@ typedef enum { // Rate-distortion optimization levels
// The predicted blocks can be accessed using offsets to yuv_p_ and
// the arrays VP8*ModeOffsets[];
// +----+ YUV Samples area. See VP8Scan[] for accessing the blocks.
// Y_OFF |YYYY| <- original samples (enc->yuv_in_)
// Y_OFF |YYYY| <- original samples ('yuv_in_')
// |YYYY|
// |YYYY|
// |YYYY|
@ -246,16 +248,19 @@ typedef struct {
int beta_; // filter-susceptibility, range [0,255].
int quant_; // final segment quantizer.
int fstrength_; // final in-loop filtering strength
int max_edge_; // max edge delta (for filtering strength)
int min_disto_; // minimum distortion required to trigger filtering record
// reactivities
int lambda_i16_, lambda_i4_, lambda_uv_;
int lambda_mode_, lambda_trellis_, tlambda_;
int lambda_trellis_i16_, lambda_trellis_i4_, lambda_trellis_uv_;
} VP8SegmentInfo;
// Handy transcient struct to accumulate score and info during RD-optimization
// Handy transient struct to accumulate score and info during RD-optimization
// and mode evaluation.
typedef struct {
score_t D, SD, R, score; // Distortion, spectral distortion, rate, score.
score_t D, SD; // Distortion, spectral distortion
score_t H, R, score; // header bits, rate, score.
int16_t y_dc_levels[16]; // Quantized levels for luma-DC, luma-AC, chroma.
int16_t y_ac_levels[16][16];
int16_t uv_levels[4 + 4][16];
@ -269,12 +274,11 @@ typedef struct {
// right neighbouring data (samples, predictions, contexts, ...)
typedef struct {
int x_, y_; // current macroblock
int y_offset_, uv_offset_; // offset to the luma / chroma planes
int y_stride_, uv_stride_; // respective strides
uint8_t* yuv_in_; // borrowed from enc_ (for now)
uint8_t* yuv_out_; // ''
uint8_t* yuv_out2_; // ''
uint8_t* yuv_p_; // ''
uint8_t* yuv_in_; // input samples
uint8_t* yuv_out_; // output samples
uint8_t* yuv_out2_; // secondary buffer swapped with yuv_out_.
uint8_t* yuv_p_; // scratch buffer for prediction
VP8Encoder* enc_; // back-pointer
VP8MBInfo* mb_; // current macroblock
VP8BitWriter* bw_; // current bit-writer
@ -290,24 +294,43 @@ typedef struct {
uint64_t uv_bits_; // macroblock bit-cost for chroma
LFStats* lf_stats_; // filter stats (borrowed from enc_)
int do_trellis_; // if true, perform extra level optimisation
int done_; // true when scan is finished
int count_down_; // number of mb still to be processed
int count_down0_; // starting counter value (for progress)
int percent0_; // saved initial progress percent
uint8_t* y_left_; // left luma samples (addressable from index -1 to 15).
uint8_t* u_left_; // left u samples (addressable from index -1 to 7)
uint8_t* v_left_; // left v samples (addressable from index -1 to 7)
uint8_t* y_top_; // top luma samples at position 'x_'
uint8_t* uv_top_; // top u/v samples at position 'x_', packed as 16 bytes
// memory for storing y/u/v_left_ and yuv_in_/out_*
uint8_t yuv_left_mem_[17 + 16 + 16 + 8 + ALIGN_CST]; // memory for *_left_
uint8_t yuv_mem_[3 * YUV_SIZE + PRED_SIZE + ALIGN_CST]; // memory for yuv_*
} VP8EncIterator;
// in iterator.c
// must be called first.
// must be called first
void VP8IteratorInit(VP8Encoder* const enc, VP8EncIterator* const it);
// restart a scan.
// restart a scan
void VP8IteratorReset(VP8EncIterator* const it);
// import samples from source
void VP8IteratorImport(const VP8EncIterator* const it);
// reset iterator position to row 'y'
void VP8IteratorSetRow(VP8EncIterator* const it, int y);
// set count down (=number of iterations to go)
void VP8IteratorSetCountDown(VP8EncIterator* const it, int count_down);
// return true if iteration is finished
int VP8IteratorIsDone(const VP8EncIterator* const it);
// Import uncompressed samples from source.
// If tmp_32 is not NULL, import boundary samples too.
// tmp_32 is a 32-bytes scratch buffer that must be aligned in memory.
void VP8IteratorImport(VP8EncIterator* const it, uint8_t* tmp_32);
// export decimated samples
void VP8IteratorExport(const VP8EncIterator* const it);
// go to next macroblock. Returns !done_. If *block_to_save is non-null, will
// save the boundary values to top_/left_ arrays. block_to_save can be
// it->yuv_out_ or it->yuv_in_.
int VP8IteratorNext(VP8EncIterator* const it,
const uint8_t* const block_to_save);
// go to next macroblock. Returns false if not finished.
int VP8IteratorNext(VP8EncIterator* const it);
// save the yuv_out_ boundary values to top_/left_ arrays for next iterations.
void VP8IteratorSaveBoundary(VP8EncIterator* const it);
// Report progression based on macroblock rows. Return 0 for user-abort request.
int VP8IteratorProgress(const VP8EncIterator* const it,
int final_delta_percent);
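// Illustration (not from the patch): a minimal sketch of how the reworked
// iterator API above is meant to be driven. The loop shape is an assumption
// based on the comments here; the real encode loops live elsewhere.
//
//   VP8EncIterator it;
//   VP8IteratorInit(enc, &it);
//   VP8IteratorSetCountDown(&it, enc->mb_w_ * enc->mb_h_);  // whole picture
//   while (!VP8IteratorIsDone(&it)) {
//     VP8IteratorImport(&it, NULL);    // no boundary scratch buffer needed
//     // ... encode/reconstruct the current macroblock into it.yuv_out_ ...
//     VP8IteratorSaveBoundary(&it);    // keep top/left context for neighbours
//     VP8IteratorNext(&it);
//   }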
@ -358,6 +381,9 @@ int VP8RecordCoeffTokens(int ctx, int coeff_type, int first, int last,
const int16_t* const coeffs,
VP8TBuffer* const tokens);
// Estimate the final coded size given a set of 'probas'.
size_t VP8EstimateTokenSize(VP8TBuffer* const b, const uint8_t* const probas);
// unused for now
void VP8TokenToStats(const VP8TBuffer* const b, proba_t* const stats);
@ -433,17 +459,9 @@ struct VP8Encoder {
VP8MBInfo* mb_info_; // contextual macroblock infos (mb_w_ + 1)
uint8_t* preds_; // predictions modes: (4*mb_w+1) * (4*mb_h+1)
uint32_t* nz_; // non-zero bit context: mb_w+1
uint8_t* yuv_in_; // input samples
uint8_t* yuv_out_; // output samples
uint8_t* yuv_out2_; // secondary scratch out-buffer. swapped with yuv_out_.
uint8_t* yuv_p_; // scratch buffer for prediction
uint8_t *y_top_; // top luma samples.
uint8_t *uv_top_; // top u/v samples.
// U and V are packed into 16 pixels (8 U + 8 V)
uint8_t *y_left_; // left luma samples (adressable from index -1 to 15).
uint8_t *u_left_; // left u samples (adressable from index -1 to 7)
uint8_t *v_left_; // left v samples (adressable from index -1 to 7)
// U and V are packed into 16 bytes (8 U + 8 V)
LFStats *lf_stats_; // autofilter stats (if NULL, autofilter is off)
};
@ -539,9 +557,13 @@ void VP8InitFilter(VP8EncIterator* const it);
void VP8StoreFilterStats(VP8EncIterator* const it);
void VP8AdjustFilterStrength(VP8EncIterator* const it);
// returns the approximate filtering strength needed to smooth an edge
// step of 'delta', given a sharpness parameter 'sharpness'.
int VP8FilterStrengthFromDelta(int sharpness, int delta);
//------------------------------------------------------------------------------
#if defined(__cplusplus) || defined(c_plusplus)
#ifdef __cplusplus
} // extern "C"
#endif


@ -1,8 +1,10 @@
// Copyright 2012 Google Inc. All Rights Reserved.
//
// This code is licensed under the same terms as WebM:
// Software License Agreement: http://www.webmproject.org/license/software/
// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// main entry for the lossless encoder.
@ -23,10 +25,6 @@
#include "../utils/utils.h"
#include "../webp/format_constants.h"
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
#define PALETTE_KEY_RIGHT_SHIFT 22 // Key for 1K buffer.
#define MAX_HUFF_IMAGE_SIZE (16 * 1024 * 1024)
#define MAX_COLORS_FOR_GRAPH 64
@ -86,7 +84,7 @@ static int AnalyzeAndCreatePalette(const WebPPicture* const pic,
argb += pic->argb_stride;
}
// TODO(skal): could we reuse in_use[] to speed up ApplyPalette()?
// TODO(skal): could we reuse in_use[] to speed up EncodePalette()?
num_colors = 0;
for (i = 0; i < (int)(sizeof(in_use) / sizeof(in_use[0])); ++i) {
if (in_use[i]) {
@ -166,9 +164,6 @@ static int VP8LEncAnalyze(VP8LEncoder* const enc, WebPImageHint image_hint) {
}
if (pred_entropy < 0.95 * non_pred_entropy) {
enc->use_predict_ = 1;
// TODO(vikasa): Observed some correlation of cross_color transform with
// predict. Need to investigate this further and add separate heuristic
// for setting use_cross_color flag.
enc->use_cross_color_ = 1;
}
}
@ -449,12 +444,12 @@ static void StoreImageToBitMask(
int bits, n_bits;
int code, distance;
PrefixEncode(v->len, &code, &n_bits, &bits);
VP8LPrefixEncode(v->len, &code, &n_bits, &bits);
WriteHuffmanCode(bw, codes, 256 + code);
VP8LWriteBits(bw, n_bits, bits);
distance = PixOrCopyDistance(v);
PrefixEncode(distance, &code, &n_bits, &bits);
VP8LPrefixEncode(distance, &code, &n_bits, &bits);
WriteHuffmanCode(bw, codes + 4, code);
VP8LWriteBits(bw, n_bits, bits);
}
@ -700,7 +695,7 @@ static int ApplyCrossColorFilter(const VP8LEncoder* const enc,
const int ccolor_transform_bits = enc->transform_bits_;
const int transform_width = VP8LSubSampleSize(width, ccolor_transform_bits);
const int transform_height = VP8LSubSampleSize(height, ccolor_transform_bits);
const int step = (quality == 0) ? 32 : 8;
const int step = (quality < 25) ? 32 : (quality > 50) ? 8 : 16;
VP8LColorSpaceTransform(width, height, ccolor_transform_bits, step,
enc->argb_, enc->transform_data_);
@ -811,34 +806,66 @@ static WebPEncodingError AllocateTransformBuffer(VP8LEncoder* const enc,
return err;
}
// Bundles multiple (1, 2, 4 or 8) pixels into a single pixel.
static void BundleColorMap(const uint8_t* const row, int width,
int xbits, uint32_t* const dst) {
int x;
if (xbits > 0) {
const int bit_depth = 1 << (3 - xbits);
const int mask = (1 << xbits) - 1;
uint32_t code = 0xff000000;
for (x = 0; x < width; ++x) {
const int xsub = x & mask;
if (xsub == 0) {
code = 0xff000000;
static void ApplyPalette(uint32_t* src, uint32_t* dst,
uint32_t src_stride, uint32_t dst_stride,
const uint32_t* palette, int palette_size,
int width, int height, int xbits, uint8_t* row) {
int i, x, y;
int use_LUT = 1;
for (i = 0; i < palette_size; ++i) {
if ((palette[i] & 0xffff00ffu) != 0) {
use_LUT = 0;
break;
}
}
if (use_LUT) {
uint8_t inv_palette[MAX_PALETTE_SIZE] = { 0 };
for (i = 0; i < palette_size; ++i) {
const int color = (palette[i] >> 8) & 0xff;
inv_palette[color] = i;
}
for (y = 0; y < height; ++y) {
for (x = 0; x < width; ++x) {
const int color = (src[x] >> 8) & 0xff;
row[x] = inv_palette[color];
}
code |= row[x] << (8 + bit_depth * xsub);
dst[x >> xbits] = code;
VP8LBundleColorMap(row, width, xbits, dst);
src += src_stride;
dst += dst_stride;
}
} else {
for (x = 0; x < width; ++x) dst[x] = 0xff000000 | (row[x] << 8);
// Use 1 pixel cache for ARGB pixels.
uint32_t last_pix = palette[0];
int last_idx = 0;
for (y = 0; y < height; ++y) {
for (x = 0; x < width; ++x) {
const uint32_t pix = src[x];
if (pix != last_pix) {
for (i = 0; i < palette_size; ++i) {
if (pix == palette[i]) {
last_idx = i;
last_pix = pix;
break;
}
}
}
row[x] = last_idx;
}
VP8LBundleColorMap(row, width, xbits, dst);
src += src_stride;
dst += dst_stride;
}
}
}
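// Illustration (not from the patch): the LUT fast path above requires every
// palette entry to fit entirely in the green byte (palette[i] & 0xffff00ffu
// must be 0), so that byte alone identifies the color and can index a
// 256-entry inverse table. With a hypothetical two-color palette:
//   palette[] = { 0x00001400u, 0x00002a00u };   // green bytes 0x14 and 0x2a
//   inv_palette[0x14] -> 0,  inv_palette[0x2a] -> 1
// each source pixel then maps to its palette index with one table lookup
// instead of the linear search (plus one-pixel cache) used in the slow path.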
// Note: Expects "enc->palette_" to be set properly.
// Also, "enc->palette_" will be modified after this call and should not be used
// later.
static WebPEncodingError ApplyPalette(VP8LBitWriter* const bw,
VP8LEncoder* const enc, int quality) {
static WebPEncodingError EncodePalette(VP8LBitWriter* const bw,
VP8LEncoder* const enc, int quality) {
WebPEncodingError err = VP8_ENC_OK;
int i, x, y;
int i;
const WebPPicture* const pic = enc->pic_;
uint32_t* src = pic->argb;
uint32_t* dst;
@ -861,23 +888,11 @@ static WebPEncodingError ApplyPalette(VP8LBitWriter* const bw,
if (err != VP8_ENC_OK) goto Error;
dst = enc->argb_;
row = WebPSafeMalloc((uint64_t)width, sizeof(*row));
row = (uint8_t*)WebPSafeMalloc((uint64_t)width, sizeof(*row));
if (row == NULL) return VP8_ENC_ERROR_OUT_OF_MEMORY;
for (y = 0; y < height; ++y) {
for (x = 0; x < width; ++x) {
const uint32_t pix = src[x];
for (i = 0; i < palette_size; ++i) {
if (pix == palette[i]) {
row[x] = i;
break;
}
}
}
BundleColorMap(row, width, xbits, dst);
src += pic->argb_stride;
dst += enc->current_width_;
}
ApplyPalette(src, dst, pic->argb_stride, enc->current_width_,
palette, palette_size, width, height, xbits, row);
// Save palette to bitstream.
VP8LWriteBits(bw, 1, TRANSFORM_PRESENT);
@ -899,13 +914,10 @@ static WebPEncodingError ApplyPalette(VP8LBitWriter* const bw,
// -----------------------------------------------------------------------------
static int GetHistoBits(const WebPConfig* const config,
const WebPPicture* const pic) {
const int width = pic->width;
const int height = pic->height;
static int GetHistoBits(int method, int use_palette, int width, int height) {
const uint64_t hist_size = sizeof(VP8LHistogram);
// Make tile size a function of encoding method (Range: 0 to 6).
int histo_bits = 7 - config->method;
int histo_bits = (use_palette ? 9 : 7) - method;
while (1) {
const uint64_t huff_image_size = VP8LSubSampleSize(width, histo_bits) *
VP8LSubSampleSize(height, histo_bits) *
@ -917,13 +929,14 @@ static int GetHistoBits(const WebPConfig* const config,
(histo_bits > MAX_HUFFMAN_BITS) ? MAX_HUFFMAN_BITS : histo_bits;
}
static void InitEncParams(VP8LEncoder* const enc) {
static void FinishEncParams(VP8LEncoder* const enc) {
const WebPConfig* const config = enc->config_;
const WebPPicture* const picture = enc->pic_;
const WebPPicture* const pic = enc->pic_;
const int method = config->method;
const float quality = config->quality;
const int use_palette = enc->use_palette_;
enc->transform_bits_ = (method < 4) ? 5 : (method > 4) ? 3 : 4;
enc->histo_bits_ = GetHistoBits(config, picture);
enc->histo_bits_ = GetHistoBits(method, use_palette, pic->width, pic->height);
enc->cache_bits_ = (quality <= 25.f) ? 0 : 7;
}
@ -939,6 +952,9 @@ static VP8LEncoder* VP8LEncoderNew(const WebPConfig* const config,
}
enc->config_ = config;
enc->pic_ = picture;
VP8LDspInit();
return enc;
}
@ -965,8 +981,6 @@ WebPEncodingError VP8LEncodeStream(const WebPConfig* const config,
goto Error;
}
InitEncParams(enc);
// ---------------------------------------------------------------------------
// Analyze image (entropy, num_palettes etc)
@ -975,8 +989,10 @@ WebPEncodingError VP8LEncodeStream(const WebPConfig* const config,
goto Error;
}
FinishEncParams(enc);
if (enc->use_palette_) {
err = ApplyPalette(bw, enc, quality);
err = EncodePalette(bw, enc, quality);
if (err != VP8_ENC_OK) goto Error;
// Color cache is disabled for palette.
enc->cache_bits_ = 0;
@ -1150,6 +1166,3 @@ int VP8LEncodeImage(const WebPConfig* const config,
//------------------------------------------------------------------------------
#if defined(__cplusplus) || defined(c_plusplus)
} // extern "C"
#endif


@ -1,8 +1,10 @@
// Copyright 2012 Google Inc. All Rights Reserved.
//
// This code is licensed under the same terms as WebM:
// Software License Agreement: http://www.webmproject.org/license/software/
// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// Lossless encoder: internal header.
@ -17,7 +19,7 @@
#include "../webp/encode.h"
#include "../webp/format_constants.h"
#if defined(__cplusplus) || defined(c_plusplus)
#ifdef __cplusplus
extern "C" {
#endif
@ -61,7 +63,7 @@ WebPEncodingError VP8LEncodeStream(const WebPConfig* const config,
//------------------------------------------------------------------------------
#if defined(__cplusplus) || defined(c_plusplus)
#ifdef __cplusplus
} // extern "C"
#endif


@ -1,8 +1,10 @@
// Copyright 2011 Google Inc. All Rights Reserved.
//
// This code is licensed under the same terms as WebM:
// Software License Agreement: http://www.webmproject.org/license/software/
// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// WebP encoder: main entry point
@ -20,10 +22,6 @@
// #define PRINT_MEMORY_INFO
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
#ifdef PRINT_MEMORY_INFO
#include <stdio.h>
#endif
@ -134,7 +132,7 @@ static void MapConfigToTools(VP8Encoder* const enc) {
enc->do_search_ = (config->target_size > 0 || config->target_PSNR > 0);
if (!config->low_memory) {
#if !defined(DISABLE_TOKEN_BUFFER)
enc->use_tokens_ = (method >= 3) && !enc->do_search_;
enc->use_tokens_ = (enc->rd_opt_level_ >= RD_OPT_BASIC); // need rd stats
#endif
if (enc->use_tokens_) {
enc->num_parts_ = 1; // doesn't work with multi-partition
@ -155,7 +153,7 @@ static void MapConfigToTools(VP8Encoder* const enc) {
// non-zero: 196
// lf-stats: 2048
// total: 68635
// Transcient object sizes:
// Transient object sizes:
// VP8EncIterator: 352
// VP8ModeScore: 912
// VP8SegmentInfo: 532
@ -173,20 +171,16 @@ static VP8Encoder* InitVP8Encoder(const WebPConfig* const config,
const int preds_h = 4 * mb_h + 1;
const size_t preds_size = preds_w * preds_h * sizeof(uint8_t);
const int top_stride = mb_w * 16;
const size_t nz_size = (mb_w + 1) * sizeof(uint32_t);
const size_t cache_size = (3 * YUV_SIZE + PRED_SIZE) * sizeof(uint8_t);
const size_t nz_size = (mb_w + 1) * sizeof(uint32_t) + ALIGN_CST;
const size_t info_size = mb_w * mb_h * sizeof(VP8MBInfo);
const size_t samples_size = (2 * top_stride + // top-luma/u/v
16 + 16 + 16 + 8 + 1 + // left y/u/v
2 * ALIGN_CST) // align all
* sizeof(uint8_t);
const size_t samples_size = 2 * top_stride * sizeof(uint8_t) // top-luma/u/v
+ ALIGN_CST; // align all
const size_t lf_stats_size =
config->autofilter ? sizeof(LFStats) + ALIGN_CST : 0;
VP8Encoder* enc;
uint8_t* mem;
const uint64_t size = (uint64_t)sizeof(VP8Encoder) // main struct
+ ALIGN_CST // cache alignment
+ cache_size // working caches
+ info_size // modes info
+ preds_size // prediction modes
+ samples_size // top/left samples
@ -197,16 +191,15 @@ static VP8Encoder* InitVP8Encoder(const WebPConfig* const config,
printf("===================================\n");
printf("Memory used:\n"
" encoder: %ld\n"
" block cache: %ld\n"
" info: %ld\n"
" preds: %ld\n"
" top samples: %ld\n"
" non-zero: %ld\n"
" lf-stats: %ld\n"
" total: %ld\n",
sizeof(VP8Encoder) + ALIGN_CST, cache_size, info_size,
sizeof(VP8Encoder) + ALIGN_CST, info_size,
preds_size, samples_size, nz_size, lf_stats_size, size);
printf("Transcient object sizes:\n"
printf("Transient object sizes:\n"
" VP8EncIterator: %ld\n"
" VP8ModeScore: %ld\n"
" VP8SegmentInfo: %ld\n"
@ -231,19 +224,11 @@ static VP8Encoder* InitVP8Encoder(const WebPConfig* const config,
enc->mb_w_ = mb_w;
enc->mb_h_ = mb_h;
enc->preds_w_ = preds_w;
enc->yuv_in_ = (uint8_t*)mem;
mem += YUV_SIZE;
enc->yuv_out_ = (uint8_t*)mem;
mem += YUV_SIZE;
enc->yuv_out2_ = (uint8_t*)mem;
mem += YUV_SIZE;
enc->yuv_p_ = (uint8_t*)mem;
mem += PRED_SIZE;
enc->mb_info_ = (VP8MBInfo*)mem;
mem += info_size;
enc->preds_ = ((uint8_t*)mem) + 1 + enc->preds_w_;
mem += preds_w * preds_h * sizeof(uint8_t);
enc->nz_ = 1 + (uint32_t*)mem;
enc->nz_ = 1 + (uint32_t*)DO_ALIGN(mem);
mem += nz_size;
enc->lf_stats_ = lf_stats_size ? (LFStats*)DO_ALIGN(mem) : NULL;
mem += lf_stats_size;
@ -253,13 +238,7 @@ static VP8Encoder* InitVP8Encoder(const WebPConfig* const config,
enc->y_top_ = (uint8_t*)mem;
enc->uv_top_ = enc->y_top_ + top_stride;
mem += 2 * top_stride;
mem = (uint8_t*)DO_ALIGN(mem + 1);
enc->y_left_ = (uint8_t*)mem;
mem += 16 + 16;
enc->u_left_ = (uint8_t*)mem;
mem += 16;
enc->v_left_ = (uint8_t*)mem;
mem += 8;
assert(mem <= (uint8_t*)enc + size);
enc->config_ = config;
enc->profile_ = use_filter ? ((config->filter_type == 1) ? 0 : 1) : 2;
@ -298,7 +277,7 @@ static int DeleteVP8Encoder(VP8Encoder* enc) {
//------------------------------------------------------------------------------
static double GetPSNR(uint64_t err, uint64_t size) {
return err ? 10. * log10(255. * 255. * size / err) : 99.;
return (err > 0 && size > 0) ? 10. * log10(255. * 255. * size / err) : 99.;
}
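// Illustration (not from the patch): 'err' is a sum of squared sample errors
// over 'size' samples, so this is the usual 10 * log10(MAX^2 / MSE) with
// MAX = 255. For example, size = 100000 and err = 2500000 give MSE = 25 and
// 10 * log10(65025 / 25) ~= 34.2 dB; err == 0 (or, after this change,
// size == 0) is reported as the 99 dB cap instead of risking a division by
// zero or log10(0).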
static void FinalizePSNR(const VP8Encoder* const enc) {
@ -375,7 +354,17 @@ int WebPEncode(const WebPConfig* config, WebPPicture* pic) {
VP8Encoder* enc = NULL;
if (pic->y == NULL || pic->u == NULL || pic->v == NULL) {
// Make sure we have YUVA samples.
if (!WebPPictureARGBToYUVA(pic, WEBP_YUV420)) return 0;
float dithering = 0.f;
if (config->preprocessing & 2) {
const float x = config->quality / 100.f;
const float x2 = x * x;
// slowly decreasing from max dithering at low quality (q->0)
// to 0.5 dithering amplitude at high quality (q->100)
dithering = 1.0f + (0.5f - 1.0f) * x2 * x2;
}
if (!WebPPictureARGBToYUVADithered(pic, WEBP_YUV420, dithering)) {
return 0;
}
}
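// Illustration (not from the patch): with x = quality / 100, the amplitude
// computed above is 1.0 - 0.5 * x^4, i.e. it decays slowly from full-strength
// dithering and only reaches 0.5 at the top quality setting:
//   q =   0  ->  1.0
//   q =  50  ->  1.0 - 0.5 * 0.5^4 = 0.96875
//   q =  80  ->  1.0 - 0.5 * 0.8^4 = 0.7952
//   q = 100  ->  0.5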
enc = InitVP8Encoder(config, pic);
@ -386,9 +375,9 @@ int WebPEncode(const WebPConfig* config, WebPPicture* pic) {
// Analysis is done, proceed to actual coding.
ok = ok && VP8EncStartAlpha(enc); // possibly done in parallel
if (!enc->use_tokens_) {
ok = VP8EncLoop(enc);
ok = ok && VP8EncLoop(enc);
} else {
ok = VP8EncTokenLoop(enc);
ok = ok && VP8EncTokenLoop(enc);
}
ok = ok && VP8EncFinishAlpha(enc);
#ifdef WEBP_EXPERIMENTAL_FEATURES
@ -413,6 +402,3 @@ int WebPEncode(const WebPConfig* config, WebPPicture* pic) {
return ok;
}
#if defined(__cplusplus) || defined(c_plusplus)
} // extern "C"
#endif


@ -13,6 +13,6 @@ libwebpmuxinclude_HEADERS += ../webp/mux_types.h
libwebpmuxinclude_HEADERS += ../webp/types.h
libwebpmux_la_LIBADD = ../libwebp.la
libwebpmux_la_LDFLAGS = -no-undefined -version-info 0:0:0
libwebpmux_la_LDFLAGS = -no-undefined -version-info 1:0:0
libwebpmuxincludedir = $(includedir)/webp
pkgconfig_DATA = libwebpmux.pc


@ -1,8 +1,10 @@
// Copyright 2011 Google Inc. All Rights Reserved.
//
// This code is licensed under the same terms as WebM:
// Software License Agreement: http://www.webmproject.org/license/software/
// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// Set and delete APIs for mux.
@ -14,10 +16,6 @@
#include "./muxi.h"
#include "../utils/utils.h"
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
//------------------------------------------------------------------------------
// Life of a mux object.
@ -37,21 +35,22 @@ WebPMux* WebPNewInternal(int version) {
}
}
static void DeleteAllChunks(WebPChunk** const chunk_list) {
while (*chunk_list) {
*chunk_list = ChunkDelete(*chunk_list);
// Delete all images in 'wpi_list'.
static void DeleteAllImages(WebPMuxImage** const wpi_list) {
while (*wpi_list != NULL) {
*wpi_list = MuxImageDelete(*wpi_list);
}
}
static void MuxRelease(WebPMux* const mux) {
if (mux == NULL) return;
MuxImageDeleteAll(&mux->images_);
DeleteAllChunks(&mux->vp8x_);
DeleteAllChunks(&mux->iccp_);
DeleteAllChunks(&mux->anim_);
DeleteAllChunks(&mux->exif_);
DeleteAllChunks(&mux->xmp_);
DeleteAllChunks(&mux->unknown_);
DeleteAllImages(&mux->images_);
ChunkListDelete(&mux->vp8x_);
ChunkListDelete(&mux->iccp_);
ChunkListDelete(&mux->anim_);
ChunkListDelete(&mux->exif_);
ChunkListDelete(&mux->xmp_);
ChunkListDelete(&mux->unknown_);
}
void WebPMuxDelete(WebPMux* mux) {
@ -66,79 +65,56 @@ void WebPMuxDelete(WebPMux* mux) {
// Handy MACRO, makes MuxSet() very symmetric to MuxGet().
#define SWITCH_ID_LIST(INDEX, LIST) \
if (idx == (INDEX)) { \
err = ChunkAssignData(&chunk, data, copy_data, kChunks[(INDEX)].tag); \
err = ChunkAssignData(&chunk, data, copy_data, tag); \
if (err == WEBP_MUX_OK) { \
err = ChunkSetNth(&chunk, (LIST), nth); \
} \
return err; \
}
static WebPMuxError MuxSet(WebPMux* const mux, CHUNK_INDEX idx, uint32_t nth,
static WebPMuxError MuxSet(WebPMux* const mux, uint32_t tag, uint32_t nth,
const WebPData* const data, int copy_data) {
WebPChunk chunk;
WebPMuxError err = WEBP_MUX_NOT_FOUND;
const CHUNK_INDEX idx = ChunkGetIndexFromTag(tag);
assert(mux != NULL);
assert(!IsWPI(kChunks[idx].id));
ChunkInit(&chunk);
SWITCH_ID_LIST(IDX_VP8X, &mux->vp8x_);
SWITCH_ID_LIST(IDX_ICCP, &mux->iccp_);
SWITCH_ID_LIST(IDX_ANIM, &mux->anim_);
SWITCH_ID_LIST(IDX_EXIF, &mux->exif_);
SWITCH_ID_LIST(IDX_XMP, &mux->xmp_);
if (idx == IDX_UNKNOWN && data->size > TAG_SIZE) {
// For raw-data unknown chunk, the first four bytes should be the tag to be
// used for the chunk.
const WebPData tmp = { data->bytes + TAG_SIZE, data->size - TAG_SIZE };
err = ChunkAssignData(&chunk, &tmp, copy_data, GetLE32(data->bytes + 0));
if (err == WEBP_MUX_OK)
err = ChunkSetNth(&chunk, &mux->unknown_, nth);
}
SWITCH_ID_LIST(IDX_VP8X, &mux->vp8x_);
SWITCH_ID_LIST(IDX_ICCP, &mux->iccp_);
SWITCH_ID_LIST(IDX_ANIM, &mux->anim_);
SWITCH_ID_LIST(IDX_EXIF, &mux->exif_);
SWITCH_ID_LIST(IDX_XMP, &mux->xmp_);
SWITCH_ID_LIST(IDX_UNKNOWN, &mux->unknown_);
return err;
}
#undef SWITCH_ID_LIST
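// Illustration (not from the patch): with the new 'tag' parameter, each
// SWITCH_ID_LIST() line in MuxSet() expands to roughly the following
// (shown here for IDX_ICCP):
//   if (idx == IDX_ICCP) {
//     err = ChunkAssignData(&chunk, data, copy_data, tag);
//     if (err == WEBP_MUX_OK) {
//       err = ChunkSetNth(&chunk, &mux->iccp_, nth);
//     }
//     return err;
//   }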
static WebPMuxError MuxAddChunk(WebPMux* const mux, uint32_t nth, uint32_t tag,
const uint8_t* data, size_t size,
int copy_data) {
const CHUNK_INDEX idx = ChunkGetIndexFromTag(tag);
const WebPData chunk_data = { data, size };
assert(mux != NULL);
assert(size <= MAX_CHUNK_PAYLOAD);
assert(idx != IDX_NIL);
return MuxSet(mux, idx, nth, &chunk_data, copy_data);
}
// Create data for frame/fragment given image data, offsets and duration.
static WebPMuxError CreateFrameFragmentData(
const WebPData* const image, int x_offset, int y_offset, int duration,
WebPMuxAnimDispose dispose_method, int is_lossless, int is_frame,
int width, int height, const WebPMuxFrameInfo* const info, int is_frame,
WebPData* const frame_frgm) {
int width;
int height;
uint8_t* frame_frgm_bytes;
const size_t frame_frgm_size = kChunks[is_frame ? IDX_ANMF : IDX_FRGM].size;
const int ok = is_lossless ?
VP8LGetInfo(image->bytes, image->size, &width, &height, NULL) :
VP8GetInfo(image->bytes, image->size, image->size, &width, &height);
if (!ok) return WEBP_MUX_INVALID_ARGUMENT;
assert(width > 0 && height > 0 && duration >= 0);
assert(dispose_method == (dispose_method & 1));
assert(width > 0 && height > 0 && info->duration >= 0);
assert(info->dispose_method == (info->dispose_method & 1));
// Note: assertion on upper bounds is done in PutLE24().
frame_frgm_bytes = (uint8_t*)malloc(frame_frgm_size);
if (frame_frgm_bytes == NULL) return WEBP_MUX_MEMORY_ERROR;
PutLE24(frame_frgm_bytes + 0, x_offset / 2);
PutLE24(frame_frgm_bytes + 3, y_offset / 2);
PutLE24(frame_frgm_bytes + 0, info->x_offset / 2);
PutLE24(frame_frgm_bytes + 3, info->y_offset / 2);
if (is_frame) {
PutLE24(frame_frgm_bytes + 6, width - 1);
PutLE24(frame_frgm_bytes + 9, height - 1);
PutLE24(frame_frgm_bytes + 12, duration);
frame_frgm_bytes[15] = (dispose_method & 1);
PutLE24(frame_frgm_bytes + 12, info->duration);
frame_frgm_bytes[15] =
(info->blend_method == WEBP_MUX_NO_BLEND ? 2 : 0) |
(info->dispose_method == WEBP_MUX_DISPOSE_BACKGROUND ? 1 : 0);
}
frame_frgm->bytes = frame_frgm_bytes;
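// Illustration (not from the patch): byte layout of the payload assembled
// above (the FRGM variant stops after the two offsets):
//   bytes  0.. 2   x_offset / 2      (24-bit little-endian)
//   bytes  3.. 5   y_offset / 2
//   bytes  6.. 8   frame width  - 1
//   bytes  9..11   frame height - 1
//   bytes 12..14   duration
//   byte  15       flags: bit 1 = do not blend, bit 0 = dispose to background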
@ -191,15 +167,9 @@ static WebPMuxError DeleteChunks(WebPChunk** chunk_list, uint32_t tag) {
static WebPMuxError MuxDeleteAllNamedData(WebPMux* const mux, uint32_t tag) {
const WebPChunkId id = ChunkGetIdFromTag(tag);
WebPChunk** chunk_list;
assert(mux != NULL);
if (IsWPI(id)) return WEBP_MUX_INVALID_ARGUMENT;
chunk_list = MuxGetChunkListFromId(mux, id);
if (chunk_list == NULL) return WEBP_MUX_INVALID_ARGUMENT;
return DeleteChunks(chunk_list, tag);
return DeleteChunks(MuxGetChunkListFromId(mux, id), tag);
}
//------------------------------------------------------------------------------
@ -207,14 +177,12 @@ static WebPMuxError MuxDeleteAllNamedData(WebPMux* const mux, uint32_t tag) {
WebPMuxError WebPMuxSetChunk(WebPMux* mux, const char fourcc[4],
const WebPData* chunk_data, int copy_data) {
CHUNK_INDEX idx;
uint32_t tag;
WebPMuxError err;
if (mux == NULL || fourcc == NULL || chunk_data == NULL ||
chunk_data->bytes == NULL || chunk_data->size > MAX_CHUNK_PAYLOAD) {
return WEBP_MUX_INVALID_ARGUMENT;
}
idx = ChunkGetIndexFromFourCC(fourcc);
tag = ChunkGetTagFromFourCC(fourcc);
// Delete existing chunk(s) with the same 'fourcc'.
@ -222,7 +190,7 @@ WebPMuxError WebPMuxSetChunk(WebPMux* mux, const char fourcc[4],
if (err != WEBP_MUX_OK && err != WEBP_MUX_NOT_FOUND) return err;
// Add the given chunk.
return MuxSet(mux, idx, 1, chunk_data, copy_data);
return MuxSet(mux, tag, 1, chunk_data, copy_data);
}
// Creates a chunk from given 'data' and sets it as 1st chunk in 'chunk_list'.
@ -257,7 +225,9 @@ static WebPMuxError SetAlphaAndImageChunks(
&wpi->alpha_);
if (err != WEBP_MUX_OK) return err;
}
return AddDataToChunkList(&image, copy_data, image_tag, &wpi->img_);
err = AddDataToChunkList(&image, copy_data, image_tag, &wpi->img_);
if (err != WEBP_MUX_OK) return err;
return MuxImageFinalize(wpi) ? WEBP_MUX_OK : WEBP_MUX_INVALID_ARGUMENT;
}
WebPMuxError WebPMuxSetImage(WebPMux* mux, const WebPData* bitstream,
@ -273,7 +243,7 @@ WebPMuxError WebPMuxSetImage(WebPMux* mux, const WebPData* bitstream,
if (mux->images_ != NULL) {
// Only one 'simple image' can be added in mux. So, remove present images.
MuxImageDeleteAll(&mux->images_);
DeleteAllImages(&mux->images_);
}
MuxImageInit(&wpi);
@ -331,24 +301,25 @@ WebPMuxError WebPMuxPushFrame(WebPMux* mux, const WebPMuxFrameInfo* frame,
assert(wpi.img_ != NULL); // As SetAlphaAndImageChunks() was successful.
{
const int is_lossless = (wpi.img_->tag_ == kChunks[IDX_VP8L].tag);
const int x_offset = frame->x_offset & ~1; // Snap offsets to even.
const int y_offset = frame->y_offset & ~1;
const int duration = is_frame ? frame->duration : 1 /* unused */;
const WebPMuxAnimDispose dispose_method =
is_frame ? frame->dispose_method : 0 /* unused */;
const uint32_t tag = kChunks[is_frame ? IDX_ANMF : IDX_FRGM].tag;
WebPData frame_frgm;
if (x_offset < 0 || x_offset >= MAX_POSITION_OFFSET ||
y_offset < 0 || y_offset >= MAX_POSITION_OFFSET ||
(duration < 0 || duration >= MAX_DURATION) ||
dispose_method != (dispose_method & 1)) {
const uint32_t tag = kChunks[is_frame ? IDX_ANMF : IDX_FRGM].tag;
WebPMuxFrameInfo tmp = *frame;
tmp.x_offset &= ~1; // Snap offsets to even.
tmp.y_offset &= ~1;
if (!is_frame) { // Reset unused values.
tmp.duration = 1;
tmp.dispose_method = WEBP_MUX_DISPOSE_NONE;
tmp.blend_method = WEBP_MUX_BLEND;
}
if (tmp.x_offset < 0 || tmp.x_offset >= MAX_POSITION_OFFSET ||
tmp.y_offset < 0 || tmp.y_offset >= MAX_POSITION_OFFSET ||
(tmp.duration < 0 || tmp.duration >= MAX_DURATION) ||
tmp.dispose_method != (tmp.dispose_method & 1)) {
err = WEBP_MUX_INVALID_ARGUMENT;
goto Err;
}
err = CreateFrameFragmentData(&wpi.img_->data_, x_offset, y_offset,
duration, dispose_method, is_lossless,
is_frame, &frame_frgm);
err = CreateFrameFragmentData(wpi.width_, wpi.height_, &tmp, is_frame,
&frame_frgm);
if (err != WEBP_MUX_OK) goto Err;
// Add frame/fragment chunk (with copy_data = 1).
err = AddDataToChunkList(&frame_frgm, 1, tag, &wpi.header_);
@ -372,6 +343,7 @@ WebPMuxError WebPMuxSetAnimationParams(WebPMux* mux,
const WebPMuxAnimParams* params) {
WebPMuxError err;
uint8_t data[ANIM_CHUNK_SIZE];
const WebPData anim = { data, ANIM_CHUNK_SIZE };
if (mux == NULL || params == NULL) return WEBP_MUX_INVALID_ARGUMENT;
if (params->loop_count < 0 || params->loop_count >= MAX_LOOP_COUNT) {
@ -385,7 +357,7 @@ WebPMuxError WebPMuxSetAnimationParams(WebPMux* mux,
// Set the animation parameters.
PutLE32(data, params->bgcolor);
PutLE16(data + 4, params->loop_count);
return MuxAddChunk(mux, 1, kChunks[IDX_ANIM].tag, data, sizeof(data), 1);
return MuxSet(mux, kChunks[IDX_ANIM].tag, 1, &anim, 1);
}
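// Illustration (not from the patch): the ANIM payload written above is 6
// bytes: a little-endian background color followed by a 16-bit loop count
// (0 meaning "loop forever" in the container spec). For instance, an opaque
// black background (bgcolor == 0xff000000u) with loop_count == 3 serializes
// as:
//   data[] = { 0x00, 0x00, 0x00, 0xff, 0x03, 0x00 }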
//------------------------------------------------------------------------------
@ -422,40 +394,23 @@ static WebPMuxError GetFrameFragmentInfo(
return WEBP_MUX_OK;
}
WebPMuxError MuxGetImageWidthHeight(const WebPChunk* const image_chunk,
int* const width, int* const height) {
const uint32_t tag = image_chunk->tag_;
const WebPData* const data = &image_chunk->data_;
int w, h;
int ok;
assert(image_chunk != NULL);
assert(tag == kChunks[IDX_VP8].tag || tag == kChunks[IDX_VP8L].tag);
ok = (tag == kChunks[IDX_VP8].tag) ?
VP8GetInfo(data->bytes, data->size, data->size, &w, &h) :
VP8LGetInfo(data->bytes, data->size, &w, &h, NULL);
if (ok) {
*width = w;
*height = h;
return WEBP_MUX_OK;
} else {
return WEBP_MUX_BAD_DATA;
}
}
static WebPMuxError GetImageInfo(const WebPMuxImage* const wpi,
int* const x_offset, int* const y_offset,
int* const duration,
int* const width, int* const height) {
const WebPChunk* const image_chunk = wpi->img_;
const WebPChunk* const frame_frgm_chunk = wpi->header_;
WebPMuxError err;
assert(wpi != NULL);
assert(frame_frgm_chunk != NULL);
// Get offsets and duration from ANMF/FRGM chunk.
const WebPMuxError err =
GetFrameFragmentInfo(frame_frgm_chunk, x_offset, y_offset, duration);
err = GetFrameFragmentInfo(frame_frgm_chunk, x_offset, y_offset, duration);
if (err != WEBP_MUX_OK) return err;
// Get width and height from VP8/VP8L chunk.
return MuxGetImageWidthHeight(image_chunk, width, height);
if (width != NULL) *width = wpi->width_;
if (height != NULL) *height = wpi->height_;
return WEBP_MUX_OK;
}
static WebPMuxError GetImageCanvasWidthHeight(
@ -469,13 +424,15 @@ static WebPMuxError GetImageCanvasWidthHeight(
assert(wpi != NULL);
assert(wpi->img_ != NULL);
if (wpi->next_) {
if (wpi->next_ != NULL) {
int max_x = 0;
int max_y = 0;
int64_t image_area = 0;
// if we have a chain of wpi's, header_ is necessarily set
assert(wpi->header_ != NULL);
// Aggregate the bounding box for animation frames & fragmented images.
for (; wpi != NULL; wpi = wpi->next_) {
int x_offset, y_offset, duration, w, h;
int x_offset = 0, y_offset = 0, duration = 0, w = 0, h = 0;
const WebPMuxError err = GetImageInfo(wpi, &x_offset, &y_offset,
&duration, &w, &h);
const int max_x_pos = x_offset + w;
@ -500,13 +457,9 @@ static WebPMuxError GetImageCanvasWidthHeight(
return WEBP_MUX_INVALID_ARGUMENT;
}
} else {
// For a single image, extract the width & height from VP8/VP8L image-data.
int w, h;
const WebPChunk* const image_chunk = wpi->img_;
const WebPMuxError err = MuxGetImageWidthHeight(image_chunk, &w, &h);
if (err != WEBP_MUX_OK) return err;
*width = w;
*height = h;
// For a single image, canvas dimensions are same as image dimensions.
*width = wpi->width_;
*height = wpi->height_;
}
return WEBP_MUX_OK;
}
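// Illustration (not from the patch): for a frame list, the canvas has to cover
// the aggregated bounding box computed above. E.g. a first frame at (0,0)
// sized 100x80 and a second at (50,40) sized 120x100 give
//   max_x = max(0 + 100, 50 + 120) = 170,  max_y = max(80, 140) = 140
// so the reported canvas is at least 170x140 (subject to the additional area
// checks that follow in this function).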
@ -522,7 +475,7 @@ static WebPMuxError CreateVP8XChunk(WebPMux* const mux) {
int width = 0;
int height = 0;
uint8_t data[VP8X_CHUNK_SIZE];
const size_t data_size = VP8X_CHUNK_SIZE;
const WebPData vp8x = { data, VP8X_CHUNK_SIZE };
const WebPMuxImage* images = NULL;
assert(mux != NULL);
@ -575,9 +528,8 @@ static WebPMuxError CreateVP8XChunk(WebPMux* const mux) {
return WEBP_MUX_INVALID_ARGUMENT;
}
if (MuxHasLosslessImages(images)) {
// We have a file with a VP8X chunk having some lossless images.
// As lossless images implicitly contain alpha, force ALPHA_FLAG to be true.
if (MuxHasAlpha(images)) {
// This means some frames explicitly/implicitly contain alpha.
// Note: This 'flags' update must NOT be done for a lossless image
// without a VP8X chunk!
flags |= ALPHA_FLAG;
@ -587,8 +539,7 @@ static WebPMuxError CreateVP8XChunk(WebPMux* const mux) {
PutLE24(data + 4, width - 1); // canvas width.
PutLE24(data + 7, height - 1); // canvas height.
err = MuxAddChunk(mux, 1, kChunks[IDX_VP8X].tag, data, data_size, 1);
return err;
return MuxSet(mux, kChunks[IDX_VP8X].tag, 1, &vp8x, 1);
}
// Cleans up 'mux' by removing any unnecessary chunks.
@ -627,6 +578,25 @@ static WebPMuxError MuxCleanup(WebPMux* const mux) {
return WEBP_MUX_OK;
}
// Total size of a list of images.
static size_t ImageListDiskSize(const WebPMuxImage* wpi_list) {
size_t size = 0;
while (wpi_list != NULL) {
size += MuxImageDiskSize(wpi_list);
wpi_list = wpi_list->next_;
}
return size;
}
// Write out the given list of images into 'dst'.
static uint8_t* ImageListEmit(const WebPMuxImage* wpi_list, uint8_t* dst) {
while (wpi_list != NULL) {
dst = MuxImageEmit(wpi_list, dst);
wpi_list = wpi_list->next_;
}
return dst;
}
WebPMuxError WebPMuxAssemble(WebPMux* mux, WebPData* assembled_data) {
size_t size = 0;
uint8_t* data = NULL;
@ -644,10 +614,10 @@ WebPMuxError WebPMuxAssemble(WebPMux* mux, WebPData* assembled_data) {
if (err != WEBP_MUX_OK) return err;
// Allocate data.
size = ChunksListDiskSize(mux->vp8x_) + ChunksListDiskSize(mux->iccp_)
+ ChunksListDiskSize(mux->anim_) + MuxImageListDiskSize(mux->images_)
+ ChunksListDiskSize(mux->exif_) + ChunksListDiskSize(mux->xmp_)
+ ChunksListDiskSize(mux->unknown_) + RIFF_HEADER_SIZE;
size = ChunkListDiskSize(mux->vp8x_) + ChunkListDiskSize(mux->iccp_)
+ ChunkListDiskSize(mux->anim_) + ImageListDiskSize(mux->images_)
+ ChunkListDiskSize(mux->exif_) + ChunkListDiskSize(mux->xmp_)
+ ChunkListDiskSize(mux->unknown_) + RIFF_HEADER_SIZE;
data = (uint8_t*)malloc(size);
if (data == NULL) return WEBP_MUX_MEMORY_ERROR;
@ -657,7 +627,7 @@ WebPMuxError WebPMuxAssemble(WebPMux* mux, WebPData* assembled_data) {
dst = ChunkListEmit(mux->vp8x_, dst);
dst = ChunkListEmit(mux->iccp_, dst);
dst = ChunkListEmit(mux->anim_, dst);
dst = MuxImageListEmit(mux->images_, dst);
dst = ImageListEmit(mux->images_, dst);
dst = ChunkListEmit(mux->exif_, dst);
dst = ChunkListEmit(mux->xmp_, dst);
dst = ChunkListEmit(mux->unknown_, dst);
@ -680,6 +650,3 @@ WebPMuxError WebPMuxAssemble(WebPMux* mux, WebPData* assembled_data) {
//------------------------------------------------------------------------------
#if defined(__cplusplus) || defined(c_plusplus)
} // extern "C"
#endif


@ -1,8 +1,10 @@
// Copyright 2011 Google Inc. All Rights Reserved.
//
// This code is licensed under the same terms as WebM:
// Software License Agreement: http://www.webmproject.org/license/software/
// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// Internal header for mux library.
@ -17,7 +19,7 @@
#include "../dec/vp8li.h"
#include "../webp/mux.h"
#if defined(__cplusplus) || defined(c_plusplus)
#ifdef __cplusplus
extern "C" {
#endif
@ -25,7 +27,7 @@ extern "C" {
// Defines and constants.
#define MUX_MAJ_VERSION 0
#define MUX_MIN_VERSION 1
#define MUX_MIN_VERSION 2
#define MUX_REV_VERSION 0
// Chunk object.
@ -46,6 +48,10 @@ struct WebPMuxImage {
WebPChunk* header_; // Corresponds to WEBP_CHUNK_ANMF/WEBP_CHUNK_FRGM.
WebPChunk* alpha_; // Corresponds to WEBP_CHUNK_ALPHA.
WebPChunk* img_; // Corresponds to WEBP_CHUNK_IMAGE.
WebPChunk* unknown_; // Corresponds to WEBP_CHUNK_UNKNOWN.
int width_;
int height_;
int has_alpha_; // Through ALPH chunk or as part of VP8L.
int is_partial_; // True if only some of the chunks are filled.
WebPMuxImage* next_;
};
@ -99,10 +105,10 @@ extern const ChunkInfo kChunks[IDX_LAST_CHUNK];
// Initialize.
void ChunkInit(WebPChunk* const chunk);
// Get chunk index from chunk tag. Returns IDX_NIL if not found.
// Get chunk index from chunk tag. Returns IDX_UNKNOWN if not found.
CHUNK_INDEX ChunkGetIndexFromTag(uint32_t tag);
// Get chunk id from chunk tag. Returns WEBP_CHUNK_NIL if not found.
// Get chunk id from chunk tag. Returns WEBP_CHUNK_UNKNOWN if not found.
WebPChunkId ChunkGetIdFromTag(uint32_t tag);
// Convert a fourcc string to a tag.
@ -131,6 +137,9 @@ WebPChunk* ChunkRelease(WebPChunk* const chunk);
// Deletes given chunk & returns chunk->next_.
WebPChunk* ChunkDelete(WebPChunk* const chunk);
// Deletes all chunks in the given chunk list.
void ChunkListDelete(WebPChunk** const chunk_list);
// Returns size of the chunk including chunk header and padding byte (if any).
static WEBP_INLINE size_t SizeWithPadding(size_t chunk_size) {
return CHUNK_HEADER_SIZE + ((chunk_size + 1) & ~1U);
@ -144,15 +153,11 @@ static WEBP_INLINE size_t ChunkDiskSize(const WebPChunk* chunk) {
}
// Total size of a list of chunks.
size_t ChunksListDiskSize(const WebPChunk* chunk_list);
size_t ChunkListDiskSize(const WebPChunk* chunk_list);
// Write out the given list of chunks into 'dst'.
uint8_t* ChunkListEmit(const WebPChunk* chunk_list, uint8_t* dst);
// Get the width & height of image stored in 'image_chunk'.
WebPMuxError MuxGetImageWidthHeight(const WebPChunk* const image_chunk,
int* const width, int* const height);
//------------------------------------------------------------------------------
// MuxImage object management.
@ -166,13 +171,14 @@ WebPMuxImage* MuxImageRelease(WebPMuxImage* const wpi);
// 'wpi' can be NULL.
WebPMuxImage* MuxImageDelete(WebPMuxImage* const wpi);
// Delete all images in 'wpi_list'.
void MuxImageDeleteAll(WebPMuxImage** const wpi_list);
// Count number of images matching the given tag id in the 'wpi_list'.
// If id == WEBP_CHUNK_NIL, all images will be matched.
int MuxImageCount(const WebPMuxImage* wpi_list, WebPChunkId id);
// Update width/height/has_alpha info from chunks within wpi.
// Also remove ALPH chunk if not needed.
int MuxImageFinalize(WebPMuxImage* const wpi);
// Check if given ID corresponds to an image related chunk.
static WEBP_INLINE int IsWPI(WebPChunkId id) {
switch (id) {
@ -184,19 +190,6 @@ static WEBP_INLINE int IsWPI(WebPChunkId id) {
}
}
// Get a reference to appropriate chunk list within an image given chunk tag.
static WEBP_INLINE WebPChunk** MuxImageGetListFromId(
const WebPMuxImage* const wpi, WebPChunkId id) {
assert(wpi != NULL);
switch (id) {
case WEBP_CHUNK_ANMF:
case WEBP_CHUNK_FRGM: return (WebPChunk**)&wpi->header_;
case WEBP_CHUNK_ALPHA: return (WebPChunk**)&wpi->alpha_;
case WEBP_CHUNK_IMAGE: return (WebPChunk**)&wpi->img_;
default: return NULL;
}
}
// Pushes 'wpi' at the end of 'wpi_list'.
WebPMuxError MuxImagePush(const WebPMuxImage* wpi, WebPMuxImage** wpi_list);
@ -210,38 +203,27 @@ WebPMuxError MuxImageGetNth(const WebPMuxImage** wpi_list, uint32_t nth,
// Total size of the given image.
size_t MuxImageDiskSize(const WebPMuxImage* const wpi);
// Total size of a list of images.
size_t MuxImageListDiskSize(const WebPMuxImage* wpi_list);
// Write out the given image into 'dst'.
uint8_t* MuxImageEmit(const WebPMuxImage* const wpi, uint8_t* dst);
// Write out the given list of images into 'dst'.
uint8_t* MuxImageListEmit(const WebPMuxImage* wpi_list, uint8_t* dst);
//------------------------------------------------------------------------------
// Helper methods for mux.
// Checks if the given image list contains at least one lossless image.
int MuxHasLosslessImages(const WebPMuxImage* images);
// Checks if the given image list contains at least one image with alpha.
int MuxHasAlpha(const WebPMuxImage* images);
// Write out RIFF header into 'data', given total data size 'size'.
uint8_t* MuxEmitRiffHeader(uint8_t* const data, size_t size);
// Returns the list where chunk with given ID is to be inserted in mux.
// Return value is NULL if this chunk should be inserted in mux->images_ list
// or if 'id' is not known.
WebPChunk** MuxGetChunkListFromId(const WebPMux* mux, WebPChunkId id);
// Validates that the given mux has a single image.
WebPMuxError MuxValidateForImage(const WebPMux* const mux);
// Validates the given mux object.
WebPMuxError MuxValidate(const WebPMux* const mux);
//------------------------------------------------------------------------------
#if defined(__cplusplus) || defined(c_plusplus)
#ifdef __cplusplus
} // extern "C"
#endif


@ -1,8 +1,10 @@
// Copyright 2011 Google Inc. All Rights Reserved.
//
// This code is licensed under the same terms as WebM:
// Software License Agreement: http://www.webmproject.org/license/software/
// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// Internal objects and utils for mux.
@ -14,10 +16,6 @@
#include "./muxi.h"
#include "../utils/utils.h"
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
#define UNDEFINED_CHUNK_SIZE (-1)
const ChunkInfo kChunks[] = {
@ -31,7 +29,7 @@ const ChunkInfo kChunks[] = {
{ MKFOURCC('V', 'P', '8', 'L'), WEBP_CHUNK_IMAGE, UNDEFINED_CHUNK_SIZE },
{ MKFOURCC('E', 'X', 'I', 'F'), WEBP_CHUNK_EXIF, UNDEFINED_CHUNK_SIZE },
{ MKFOURCC('X', 'M', 'P', ' '), WEBP_CHUNK_XMP, UNDEFINED_CHUNK_SIZE },
{ MKFOURCC('U', 'N', 'K', 'N'), WEBP_CHUNK_UNKNOWN, UNDEFINED_CHUNK_SIZE },
{ NIL_TAG, WEBP_CHUNK_UNKNOWN, UNDEFINED_CHUNK_SIZE },
{ NIL_TAG, WEBP_CHUNK_NIL, UNDEFINED_CHUNK_SIZE }
};
@ -68,9 +66,9 @@ WebPChunk* ChunkRelease(WebPChunk* const chunk) {
CHUNK_INDEX ChunkGetIndexFromTag(uint32_t tag) {
int i;
for (i = 0; kChunks[i].tag != NIL_TAG; ++i) {
if (tag == kChunks[i].tag) return i;
if (tag == kChunks[i].tag) return (CHUNK_INDEX)i;
}
return IDX_NIL;
return IDX_UNKNOWN;
}
WebPChunkId ChunkGetIdFromTag(uint32_t tag) {
@ -78,7 +76,7 @@ WebPChunkId ChunkGetIdFromTag(uint32_t tag) {
for (i = 0; kChunks[i].tag != NIL_TAG; ++i) {
if (tag == kChunks[i].tag) return kChunks[i].id;
}
return WEBP_CHUNK_NIL;
return WEBP_CHUNK_UNKNOWN;
}
uint32_t ChunkGetTagFromFourCC(const char fourcc[4]) {
@ -87,8 +85,7 @@ uint32_t ChunkGetTagFromFourCC(const char fourcc[4]) {
CHUNK_INDEX ChunkGetIndexFromFourCC(const char fourcc[4]) {
const uint32_t tag = ChunkGetTagFromFourCC(fourcc);
const CHUNK_INDEX idx = ChunkGetIndexFromTag(tag);
return (idx == IDX_NIL) ? IDX_UNKNOWN : idx;
return ChunkGetIndexFromTag(tag);
}
//------------------------------------------------------------------------------
@ -186,18 +183,15 @@ WebPChunk* ChunkDelete(WebPChunk* const chunk) {
return next;
}
void ChunkListDelete(WebPChunk** const chunk_list) {
while (*chunk_list != NULL) {
*chunk_list = ChunkDelete(*chunk_list);
}
}
//------------------------------------------------------------------------------
// Chunk serialization methods.
size_t ChunksListDiskSize(const WebPChunk* chunk_list) {
size_t size = 0;
while (chunk_list != NULL) {
size += ChunkDiskSize(chunk_list);
chunk_list = chunk_list->next_;
}
return size;
}
static uint8_t* ChunkEmit(const WebPChunk* const chunk, uint8_t* dst) {
const size_t chunk_size = chunk->data_.size;
assert(chunk);
@@ -219,6 +213,15 @@ uint8_t* ChunkListEmit(const WebPChunk* chunk_list, uint8_t* dst) {
return dst;
}
size_t ChunkListDiskSize(const WebPChunk* chunk_list) {
size_t size = 0;
while (chunk_list != NULL) {
size += ChunkDiskSize(chunk_list);
chunk_list = chunk_list->next_;
}
return size;
}
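As a worked example of the sizes accumulated here, assuming the usual RIFF layout used by ChunkDiskSize() (8-byte header, payload padded to an even length):

//   7-byte payload : 4 (fourcc) + 4 (size) + 7 + 1 (pad) = 16 bytes
//   8-byte payload : 4 (fourcc) + 4 (size) + 8           = 16 bytes
//   ChunkListDiskSize() of a list holding both           = 32 bytes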
//------------------------------------------------------------------------------
// Life of a MuxImage object.
@@ -233,6 +236,7 @@ WebPMuxImage* MuxImageRelease(WebPMuxImage* const wpi) {
ChunkDelete(wpi->header_);
ChunkDelete(wpi->alpha_);
ChunkDelete(wpi->img_);
ChunkListDelete(&wpi->unknown_);
next = wpi->next_;
MuxImageInit(wpi);
@@ -242,6 +246,19 @@ WebPMuxImage* MuxImageRelease(WebPMuxImage* const wpi) {
//------------------------------------------------------------------------------
// MuxImage search methods.
// Get a reference to appropriate chunk list within an image given chunk tag.
static WebPChunk** GetChunkListFromId(const WebPMuxImage* const wpi,
WebPChunkId id) {
assert(wpi != NULL);
switch (id) {
case WEBP_CHUNK_ANMF:
case WEBP_CHUNK_FRGM: return (WebPChunk**)&wpi->header_;
case WEBP_CHUNK_ALPHA: return (WebPChunk**)&wpi->alpha_;
case WEBP_CHUNK_IMAGE: return (WebPChunk**)&wpi->img_;
default: return NULL;
}
}
int MuxImageCount(const WebPMuxImage* wpi_list, WebPChunkId id) {
int count = 0;
const WebPMuxImage* current;
@@ -249,7 +266,7 @@ int MuxImageCount(const WebPMuxImage* wpi_list, WebPChunkId id) {
if (id == WEBP_CHUNK_NIL) {
++count; // Special case: count all images.
} else {
const WebPChunk* const wpi_chunk = *MuxImageGetListFromId(current, id);
const WebPChunk* const wpi_chunk = *GetChunkListFromId(current, id);
if (wpi_chunk != NULL) {
const WebPChunkId wpi_chunk_id = ChunkGetIdFromTag(wpi_chunk->tag_);
if (wpi_chunk_id == id) ++count; // Count images with a matching 'id'.
@@ -318,12 +335,6 @@ WebPMuxImage* MuxImageDelete(WebPMuxImage* const wpi) {
return next;
}
void MuxImageDeleteAll(WebPMuxImage** const wpi_list) {
while (*wpi_list != NULL) {
*wpi_list = MuxImageDelete(*wpi_list);
}
}
WebPMuxError MuxImageDeleteNth(WebPMuxImage** wpi_list, uint32_t nth) {
assert(wpi_list);
if (!SearchImageToGetOrDelete(wpi_list, nth, &wpi_list)) {
@@ -357,15 +368,7 @@ size_t MuxImageDiskSize(const WebPMuxImage* const wpi) {
if (wpi->header_ != NULL) size += ChunkDiskSize(wpi->header_);
if (wpi->alpha_ != NULL) size += ChunkDiskSize(wpi->alpha_);
if (wpi->img_ != NULL) size += ChunkDiskSize(wpi->img_);
return size;
}
size_t MuxImageListDiskSize(const WebPMuxImage* wpi_list) {
size_t size = 0;
while (wpi_list != NULL) {
size += MuxImageDiskSize(wpi_list);
wpi_list = wpi_list->next_;
}
if (wpi->unknown_ != NULL) size += ChunkListDiskSize(wpi->unknown_);
return size;
}
@@ -397,26 +400,16 @@ uint8_t* MuxImageEmit(const WebPMuxImage* const wpi, uint8_t* dst) {
}
if (wpi->alpha_ != NULL) dst = ChunkEmit(wpi->alpha_, dst);
if (wpi->img_ != NULL) dst = ChunkEmit(wpi->img_, dst);
return dst;
}
uint8_t* MuxImageListEmit(const WebPMuxImage* wpi_list, uint8_t* dst) {
while (wpi_list != NULL) {
dst = MuxImageEmit(wpi_list, dst);
wpi_list = wpi_list->next_;
}
if (wpi->unknown_ != NULL) dst = ChunkListEmit(wpi->unknown_, dst);
return dst;
}
//------------------------------------------------------------------------------
// Helper methods for mux.
int MuxHasLosslessImages(const WebPMuxImage* images) {
int MuxHasAlpha(const WebPMuxImage* images) {
while (images != NULL) {
assert(images->img_ != NULL);
if (images->img_->tag_ == kChunks[IDX_VP8L].tag) {
return 1;
}
if (images->has_alpha_) return 1;
images = images->next_;
}
return 0;
@@ -438,25 +431,7 @@ WebPChunk** MuxGetChunkListFromId(const WebPMux* mux, WebPChunkId id) {
case WEBP_CHUNK_ANIM: return (WebPChunk**)&mux->anim_;
case WEBP_CHUNK_EXIF: return (WebPChunk**)&mux->exif_;
case WEBP_CHUNK_XMP: return (WebPChunk**)&mux->xmp_;
case WEBP_CHUNK_UNKNOWN: return (WebPChunk**)&mux->unknown_;
default: return NULL;
}
}
WebPMuxError MuxValidateForImage(const WebPMux* const mux) {
const int num_images = MuxImageCount(mux->images_, WEBP_CHUNK_IMAGE);
const int num_frames = MuxImageCount(mux->images_, WEBP_CHUNK_ANMF);
const int num_fragments = MuxImageCount(mux->images_, WEBP_CHUNK_FRGM);
if (num_images == 0) {
// No images in mux.
return WEBP_MUX_NOT_FOUND;
} else if (num_images == 1 && num_frames == 0 && num_fragments == 0) {
// Valid case (single image).
return WEBP_MUX_OK;
} else {
// Frame/Fragment case OR an invalid mux.
return WEBP_MUX_INVALID_ARGUMENT;
default: return (WebPChunk**)&mux->unknown_;
}
}
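A minimal sketch of the effect of the new default case (assumes a parsed 'mux'; the local name is illustrative):

WebPChunk** const list = MuxGetChunkListFromId(mux, WEBP_CHUNK_UNKNOWN);
// 'list' now resolves to &mux->unknown_ instead of NULL, which is why the
// NULL check in WebPMuxNumChunks() can be dropped further down in this diff.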
@@ -472,7 +447,7 @@ static int IsNotCompatible(int feature, int num_items) {
// On success returns WEBP_MUX_OK and stores the chunk count in *num.
static WebPMuxError ValidateChunk(const WebPMux* const mux, CHUNK_INDEX idx,
WebPFeatureFlags feature,
WebPFeatureFlags vp8x_flags,
uint32_t vp8x_flags,
int max, int* num) {
const WebPMuxError err =
WebPMuxNumChunks(mux, kChunks[idx].id, num);
@@ -548,14 +523,18 @@ WebPMuxError MuxValidate(const WebPMux* const mux) {
if (num_vp8x == 0 && num_images != 1) return WEBP_MUX_INVALID_ARGUMENT;
// ALPHA_FLAG & alpha chunk(s) are consistent.
if (MuxHasLosslessImages(mux->images_)) {
if (MuxHasAlpha(mux->images_)) {
if (num_vp8x > 0) {
// Special case: we have a VP8X chunk as well as some lossless images.
// VP8X chunk is present, so it should contain ALPHA_FLAG.
if (!(flags & ALPHA_FLAG)) return WEBP_MUX_INVALID_ARGUMENT;
}
} else {
err = ValidateChunk(mux, IDX_ALPHA, ALPHA_FLAG, flags, -1, &num_alpha);
} else {
// VP8X chunk is not present, so ALPH chunks should NOT be present either.
err = WebPMuxNumChunks(mux, WEBP_CHUNK_ALPHA, &num_alpha);
if (err != WEBP_MUX_OK) return err;
if (num_alpha > 0) return WEBP_MUX_INVALID_ARGUMENT;
}
} else { // Mux doesn't need alpha. So, ALPHA_FLAG should NOT be present.
if (flags & ALPHA_FLAG) return WEBP_MUX_INVALID_ARGUMENT;
}
// num_fragments & num_images are consistent.
@@ -570,6 +549,3 @@ WebPMuxError MuxValidate(const WebPMux* const mux) {
//------------------------------------------------------------------------------
#if defined(__cplusplus) || defined(c_plusplus)
} // extern "C"
#endif


@@ -1,8 +1,10 @@
// Copyright 2011 Google Inc. All Rights Reserved.
//
// This code is licensed under the same terms as WebM:
// Software License Agreement: http://www.webmproject.org/license/software/
// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// Read APIs for mux.
@@ -14,10 +16,6 @@
#include "./muxi.h"
#include "../utils/utils.h"
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
//------------------------------------------------------------------------------
// Helper method(s).
@@ -74,6 +72,29 @@ static WebPMuxError ChunkVerifyAndAssign(WebPChunk* chunk,
return ChunkAssignData(chunk, &chunk_data, copy_data, GetLE32(data + 0));
}
int MuxImageFinalize(WebPMuxImage* const wpi) {
const WebPChunk* const img = wpi->img_;
const WebPData* const image = &img->data_;
const int is_lossless = (img->tag_ == kChunks[IDX_VP8L].tag);
int w, h;
int vp8l_has_alpha = 0;
const int ok = is_lossless ?
VP8LGetInfo(image->bytes, image->size, &w, &h, &vp8l_has_alpha) :
VP8GetInfo(image->bytes, image->size, image->size, &w, &h);
assert(img != NULL);
if (ok) {
// Ignore ALPH chunk accompanying VP8L.
if (is_lossless && (wpi->alpha_ != NULL)) {
ChunkDelete(wpi->alpha_);
wpi->alpha_ = NULL;
}
wpi->width_ = w;
wpi->height_ = h;
wpi->has_alpha_ = vp8l_has_alpha || (wpi->alpha_ != NULL);
}
return ok;
}
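A rough usage sketch for the new helper (error handling elided; the flow mirrors the parsing code below):

WebPMuxImage wpi;
MuxImageInit(&wpi);
// ... assign a VP8 or VP8L chunk to wpi.img_ (e.g. via ChunkSetNth) ...
if (MuxImageFinalize(&wpi)) {
  // wpi.width_, wpi.height_ and wpi.has_alpha_ now describe the image, and a
  // redundant ALPH chunk accompanying a VP8L payload has been dropped.
}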
static int MuxImageParse(const WebPChunk* const chunk, int copy_data,
WebPMuxImage* const wpi) {
const uint8_t* bytes = chunk->data_.bytes;
@@ -119,8 +140,14 @@ static int MuxImageParse(const WebPChunk* const chunk, int copy_data,
break;
case WEBP_CHUNK_IMAGE:
if (ChunkSetNth(&subchunk, &wpi->img_, 1) != WEBP_MUX_OK) goto Fail;
if (!MuxImageFinalize(wpi)) goto Fail;
wpi->is_partial_ = 0; // wpi is completely filled.
break;
case WEBP_CHUNK_UNKNOWN:
if (wpi->is_partial_) goto Fail; // Encountered an unknown chunk
// before some image chunks.
if (ChunkSetNth(&subchunk, &wpi->unknown_, 0) != WEBP_MUX_OK) goto Fail;
break;
default:
goto Fail;
break;
@@ -216,6 +243,7 @@ WebPMux* WebPMuxCreateInternal(const WebPData* bitstream, int copy_data,
break;
case WEBP_CHUNK_IMAGE:
if (ChunkSetNth(&chunk, &wpi->img_, 1) != WEBP_MUX_OK) goto Err;
if (!MuxImageFinalize(wpi)) goto Err;
wpi->is_partial_ = 0; // wpi is completely filled.
PushImage:
// Add this to mux->images_ list.
@@ -235,7 +263,6 @@ WebPMux* WebPMuxCreateInternal(const WebPData* bitstream, int copy_data,
if (wpi->is_partial_) goto Err; // Encountered a non-image chunk before
// getting all chunks of an image.
chunk_list = MuxGetChunkListFromId(mux, id); // List to add this chunk.
if (chunk_list == NULL) chunk_list = &mux->unknown_;
if (ChunkSetNth(&chunk, chunk_list, 0) != WEBP_MUX_OK) goto Err;
break;
}
@@ -260,35 +287,68 @@ WebPMux* WebPMuxCreateInternal(const WebPData* bitstream, int copy_data,
//------------------------------------------------------------------------------
// Get API(s).
WebPMuxError WebPMuxGetFeatures(const WebPMux* mux, uint32_t* flags) {
WebPData data;
// Validates that the given mux has a single image.
static WebPMuxError ValidateForSingleImage(const WebPMux* const mux) {
const int num_images = MuxImageCount(mux->images_, WEBP_CHUNK_IMAGE);
const int num_frames = MuxImageCount(mux->images_, WEBP_CHUNK_ANMF);
const int num_fragments = MuxImageCount(mux->images_, WEBP_CHUNK_FRGM);
if (mux == NULL || flags == NULL) return WEBP_MUX_INVALID_ARGUMENT;
*flags = 0;
if (num_images == 0) {
// No images in mux.
return WEBP_MUX_NOT_FOUND;
} else if (num_images == 1 && num_frames == 0 && num_fragments == 0) {
// Valid case (single image).
return WEBP_MUX_OK;
} else {
// Frame/Fragment case OR an invalid mux.
return WEBP_MUX_INVALID_ARGUMENT;
}
}
// Get the canvas width, height and flags after validating that VP8X/VP8/VP8L
// chunk and canvas size are valid.
static WebPMuxError MuxGetCanvasInfo(const WebPMux* const mux,
int* width, int* height, uint32_t* flags) {
int w, h;
uint32_t f = 0;
WebPData data;
assert(mux != NULL);
// Check if VP8X chunk is present.
if (MuxGet(mux, IDX_VP8X, 1, &data) == WEBP_MUX_OK) {
if (data.size < CHUNK_SIZE_BYTES) return WEBP_MUX_BAD_DATA;
*flags = GetLE32(data.bytes); // All OK. Fill up flags.
} else {
WebPMuxError err = MuxValidateForImage(mux); // Check for single image.
if (data.size < VP8X_CHUNK_SIZE) return WEBP_MUX_BAD_DATA;
f = GetLE32(data.bytes + 0);
w = GetLE24(data.bytes + 4) + 1;
h = GetLE24(data.bytes + 7) + 1;
} else { // Single image case.
const WebPMuxImage* const wpi = mux->images_;
WebPMuxError err = ValidateForSingleImage(mux);
if (err != WEBP_MUX_OK) return err;
if (MuxHasLosslessImages(mux->images_)) {
const WebPData* const vp8l_data = &mux->images_->img_->data_;
int has_alpha = 0;
if (!VP8LGetInfo(vp8l_data->bytes, vp8l_data->size, NULL, NULL,
&has_alpha)) {
return WEBP_MUX_BAD_DATA;
}
if (has_alpha) {
*flags = ALPHA_FLAG;
}
}
assert(wpi != NULL);
w = wpi->width_;
h = wpi->height_;
if (wpi->has_alpha_) f |= ALPHA_FLAG;
}
if (w * (uint64_t)h >= MAX_IMAGE_AREA) return WEBP_MUX_BAD_DATA;
if (width != NULL) *width = w;
if (height != NULL) *height = h;
if (flags != NULL) *flags = f;
return WEBP_MUX_OK;
}
WebPMuxError WebPMuxGetCanvasSize(const WebPMux* mux, int* width, int* height) {
if (mux == NULL || width == NULL || height == NULL) {
return WEBP_MUX_INVALID_ARGUMENT;
}
return MuxGetCanvasInfo(mux, width, height, NULL);
}
WebPMuxError WebPMuxGetFeatures(const WebPMux* mux, uint32_t* flags) {
if (mux == NULL || flags == NULL) return WEBP_MUX_INVALID_ARGUMENT;
return MuxGetCanvasInfo(mux, NULL, NULL, flags);
}
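For context, a sketch of how a client of the public API touched above might query the canvas (assumes 'mux' was created from a complete file; the flag tests use the standard WebPFeatureFlags values):

int w = 0, h = 0;
uint32_t flags = 0;
if (WebPMuxGetCanvasSize(mux, &w, &h) == WEBP_MUX_OK &&
    WebPMuxGetFeatures(mux, &flags) == WEBP_MUX_OK) {
  const int has_alpha = !!(flags & ALPHA_FLAG);          // VP8X or VP8L/ALPH.
  const int has_animation = !!(flags & ANIMATION_FLAG);  // VP8X only.
  (void)has_alpha;
  (void)has_animation;
}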
static uint8_t* EmitVP8XChunk(uint8_t* const dst, int width,
int height, uint32_t flags) {
const size_t vp8x_size = CHUNK_HEADER_SIZE + VP8X_CHUNK_SIZE;
@@ -322,15 +382,7 @@ static WebPMuxError SynthesizeBitstream(const WebPMuxImage* const wpi,
dst = MuxEmitRiffHeader(data, size);
if (need_vp8x) {
int w, h;
WebPMuxError err;
assert(wpi->img_ != NULL);
err = MuxGetImageWidthHeight(wpi->img_, &w, &h);
if (err != WEBP_MUX_OK) {
free(data);
return err;
}
dst = EmitVP8XChunk(dst, w, h, ALPHA_FLAG); // VP8X.
dst = EmitVP8XChunk(dst, wpi->width_, wpi->height_, ALPHA_FLAG); // VP8X.
dst = ChunkListEmit(wpi->alpha_, dst); // ALPH.
}
@@ -370,6 +422,8 @@ static WebPMuxError MuxGetImageInternal(const WebPMuxImage* const wpi,
info->x_offset = 0;
info->y_offset = 0;
info->duration = 1;
info->dispose_method = WEBP_MUX_DISPOSE_NONE;
info->blend_method = WEBP_MUX_BLEND;
// Extract data for related fields.
info->id = ChunkGetIdFromTag(wpi->img_->tag_);
return SynthesizeBitstream(wpi, &info->bitstream);
@@ -390,10 +444,17 @@ static WebPMuxError MuxGetFrameFragmentInternal(const WebPMuxImage* const wpi,
// Extract info.
frame->x_offset = 2 * GetLE24(frame_frgm_data->bytes + 0);
frame->y_offset = 2 * GetLE24(frame_frgm_data->bytes + 3);
frame->duration = is_frame ? GetLE24(frame_frgm_data->bytes + 12) : 1;
frame->dispose_method =
is_frame ? (WebPMuxAnimDispose)(frame_frgm_data->bytes[15] & 1)
: WEBP_MUX_DISPOSE_NONE;
if (is_frame) {
const uint8_t bits = frame_frgm_data->bytes[15];
frame->duration = GetLE24(frame_frgm_data->bytes + 12);
frame->dispose_method =
(bits & 1) ? WEBP_MUX_DISPOSE_BACKGROUND : WEBP_MUX_DISPOSE_NONE;
frame->blend_method = (bits & 2) ? WEBP_MUX_NO_BLEND : WEBP_MUX_BLEND;
} else { // Defaults for unused values.
frame->duration = 1;
frame->dispose_method = WEBP_MUX_DISPOSE_NONE;
frame->blend_method = WEBP_MUX_BLEND;
}
frame->id = ChunkGetIdFromTag(wpi->header_->tag_);
return SynthesizeBitstream(wpi, &frame->bitstream);
}
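Worked decoding of the flag byte read above (byte 15 of the ANMF payload):

//   bits == 0x00 -> WEBP_MUX_DISPOSE_NONE       + WEBP_MUX_BLEND
//   bits == 0x01 -> WEBP_MUX_DISPOSE_BACKGROUND + WEBP_MUX_BLEND
//   bits == 0x03 -> WEBP_MUX_DISPOSE_BACKGROUND + WEBP_MUX_NO_BLEND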
@@ -440,7 +501,7 @@ WebPMuxError WebPMuxGetAnimationParams(const WebPMux* mux,
static CHUNK_INDEX ChunkGetIndexFromId(WebPChunkId id) {
int i;
for (i = 0; kChunks[i].id != WEBP_CHUNK_NIL; ++i) {
if (id == kChunks[i].id) return i;
if (id == kChunks[i].id) return (CHUNK_INDEX)i;
}
return IDX_NIL;
}
@@ -468,12 +529,8 @@ WebPMuxError WebPMuxNumChunks(const WebPMux* mux,
*num_elements = MuxImageCount(mux->images_, id);
} else {
WebPChunk* const* chunk_list = MuxGetChunkListFromId(mux, id);
if (chunk_list == NULL) {
*num_elements = 0;
} else {
const CHUNK_INDEX idx = ChunkGetIndexFromId(id);
*num_elements = CountChunks(*chunk_list, kChunks[idx].tag);
}
const CHUNK_INDEX idx = ChunkGetIndexFromId(id);
*num_elements = CountChunks(*chunk_list, kChunks[idx].tag);
}
return WEBP_MUX_OK;
@@ -481,6 +538,3 @@ WebPMuxError WebPMuxNumChunks(const WebPMux* mux,
//------------------------------------------------------------------------------
#if defined(__cplusplus) || defined(c_plusplus)
} // extern "C"
#endif
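Finally, a small client-side example of the API exercised by the last hunk (assumes a parsed 'mux'):

int num_exif = 0;
if (WebPMuxNumChunks(mux, WEBP_CHUNK_EXIF, &num_exif) == WEBP_MUX_OK) {
  // Typically 0 or 1 for a well-formed file; unknown chunks can be counted the
  // same way with WEBP_CHUNK_UNKNOWN now that they are kept in the mux.
}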

Some files were not shown because too many files have changed in this diff.