/src/serenity/Userland/Libraries/LibGfx/ImageFormats/WebPLoader.cpp
Line | Count | Source |
1 | | /* |
2 | | * Copyright (c) 2023, Nico Weber <thakis@chromium.org> |
3 | | * |
4 | | * SPDX-License-Identifier: BSD-2-Clause |
5 | | */ |
6 | | |
7 | | #include <AK/Debug.h> |
8 | | #include <AK/Endian.h> |
9 | | #include <AK/Format.h> |
10 | | #include <AK/MemoryStream.h> |
11 | | #include <AK/Vector.h> |
12 | | #include <LibGfx/FourCC.h> |
13 | | #include <LibGfx/ImageFormats/WebPLoader.h> |
14 | | #include <LibGfx/ImageFormats/WebPLoaderLossless.h> |
15 | | #include <LibGfx/ImageFormats/WebPLoaderLossy.h> |
16 | | #include <LibGfx/ImageFormats/WebPShared.h> |
17 | | #include <LibGfx/Painter.h> |
18 | | #include <LibRIFF/ChunkID.h> |
19 | | #include <LibRIFF/RIFF.h> |
20 | | |
21 | | // Overview: https://developers.google.com/speed/webp/docs/compression |
22 | | // Container: https://developers.google.com/speed/webp/docs/riff_container |
23 | | |
24 | | namespace Gfx { |
25 | | |
namespace {

// Payload of a single still frame (or animation subframe after its ANMF header).
// "For a still image, the image data consists of a single frame, which is made up of:
//     An optional alpha subchunk.
//     A bitstream subchunk."
struct ImageData {
    // "This optional chunk contains encoded alpha data for this frame. A frame containing a 'VP8L' chunk SHOULD NOT contain this chunk."
    Optional<RIFF::Chunk> alpha_chunk; // 'ALPH'

    RIFF::Chunk image_data_chunk; // Either 'VP8 ' or 'VP8L'. For 'VP8L', alpha_chunk will not have a value.
};

}
38 | | |
struct WebPLoadingContext {
    // Decoding advances monotonically through these states; Error is terminal.
    enum State {
        NotDecoded = 0,
        Error,
        HeaderDecoded,               // RIFF/'WEBP' file header validated, data trimmed to stored size.
        FirstChunkRead,              // first_chunk holds 'VP8 ', 'VP8L', or 'VP8X'.
        FirstChunkDecoded,           // size is known (from VP8/VP8L/VP8X header).
        ChunksDecoded,               // For 'VP8X' files, all subchunks collected and validated.
        AnimationFrameChunksDecoded, // ANIM/ANMF chunk contents parsed.
        BitmapDecoded,               // bitmap holds the (current) decoded frame.
    };
    State state { State::NotDecoded };
    ReadonlyBytes data; // View of the input; trimmed to the header's stored file size. Not owned.

    // Bytes remaining after the chunks consumed so far; advanced by RIFF::Chunk::decode_and_advance().
    ReadonlyBytes chunks_cursor;

    Optional<IntSize> size;

    RefPtr<Gfx::Bitmap> bitmap; // For animations, the composited canvas for current_frame.

    // Either 'VP8 ' (simple lossy file), 'VP8L' (simple lossless file), or 'VP8X' (extended file).
    Optional<RIFF::Chunk> first_chunk;

    // Only valid if first_chunk->type == 'VP8X'.
    VP8XHeader vp8x_header;

    // If first_chunk is not a VP8X chunk, then only image_data.image_data_chunk is set and all the other Chunks are not set.
    // Once state is >= ChunksDecoded, for non-animated images, this will have a value, or decoding will have failed.
    Optional<ImageData> image_data;

    Optional<RIFF::Chunk> animation_header_chunk; // 'ANIM'
    Vector<RIFF::Chunk> animation_frame_chunks;   // 'ANMF'

    // These are set in state >= AnimationFrameChunksDecoded, if first_chunk.type == 'VP8X' && vp8x_header.has_animation.
    Optional<ANIMChunk> animation_header_chunk_data;
    Optional<Vector<ANMFChunk>> animation_frame_chunks_data;
    size_t current_frame { 0 }; // Index of the frame currently composited into `bitmap`.

    Optional<RIFF::Chunk> iccp_chunk; // 'ICCP'
    Optional<RIFF::Chunk> exif_chunk; // 'EXIF'
    Optional<RIFF::Chunk> xmp_chunk;  // 'XMP '
};
81 | | |
82 | | // https://developers.google.com/speed/webp/docs/riff_container#webp_file_header |
83 | | static ErrorOr<void> decode_webp_header(WebPLoadingContext& context) |
84 | 2.70k | { |
85 | 2.70k | if (context.state >= WebPLoadingContext::HeaderDecoded) |
86 | 0 | return {}; |
87 | | |
88 | 2.70k | FixedMemoryStream header_stream { context.data }; |
89 | 2.70k | auto header = TRY(header_stream.read_value<RIFF::FileHeader>()); |
90 | 2.70k | if (header.magic() != RIFF::riff_magic || header.subformat != "WEBP"sv) |
91 | 40 | return Error::from_string_literal("Invalid WebP header"); |
92 | | |
93 | | // "File Size: [...] The size of the file in bytes starting at offset 8. The maximum value of this field is 2^32 minus 10 bytes." |
94 | 2.66k | u32 const maximum_webp_file_size = 0xffff'ffff - 9; |
95 | 2.66k | if (header.file_size() > maximum_webp_file_size) |
96 | 1 | return Error::from_string_literal("WebP header file size over maximum"); |
97 | | |
98 | | // "The file size in the header is the total size of the chunks that follow plus 4 bytes for the 'WEBP' RIFF::ChunkID. |
99 | | // The file SHOULD NOT contain any data after the data specified by File Size. |
100 | | // Readers MAY parse such files, ignoring the trailing data." |
101 | 2.65k | if (context.data.size() - 8 < header.file_size()) |
102 | 43 | return Error::from_string_literal("WebP data too small for size in header"); |
103 | 2.61k | if (header.file_size() < 4) // Need at least 4 bytes for 'WEBP', else we'll trim to less than the header size below. |
104 | 8 | return Error::from_string_literal("WebP stored file size too small for header it's stored in"); |
105 | 2.60k | if (context.data.size() - 8 > header.file_size()) { |
106 | 484 | dbgln_if(WEBP_DEBUG, "WebP has {} bytes of data, but header needs only {}. Trimming.", context.data.size(), header.file_size() + 8); |
107 | 484 | context.data = context.data.trim(header.file_size() + 8); |
108 | 484 | } |
109 | | |
110 | 2.60k | context.state = WebPLoadingContext::HeaderDecoded; |
111 | 2.60k | return {}; |
112 | 2.61k | } |
113 | | |
// Decodes an 'ALPH' chunk and merges the resulting 8-bit alpha plane into `bitmap`'s alpha channel.
// https://developers.google.com/speed/webp/docs/riff_container#alpha
static ErrorOr<void> decode_webp_chunk_ALPH(RIFF::Chunk const& alph_chunk, Bitmap& bitmap)
{
    VERIFY(alph_chunk.id() == "ALPH"sv);

    if (alph_chunk.size() < 1)
        return Error::from_string_literal("WebPImageDecoderPlugin: ALPH chunk too small");

    // First byte: 2 reserved bits, then 2-bit preprocessing, 2-bit filtering method, 2-bit compression method.
    u8 flags = alph_chunk[0];
    u8 preprocessing = (flags >> 4) & 3;
    u8 filtering_method = (flags >> 2) & 3;
    u8 compression_method = flags & 3;

    dbgln_if(WEBP_DEBUG, "ALPH: preprocessing {} filtering_method {} compression_method {}", preprocessing, filtering_method, compression_method);

    ReadonlyBytes alpha_data = alph_chunk.data().slice(1);

    size_t pixel_count = bitmap.width() * bitmap.height();

    // Scratch plane holding one alpha byte per pixel, in scan order.
    auto alpha = TRY(ByteBuffer::create_uninitialized(pixel_count));

    if (compression_method == 0) {
        // "Raw data: consists of a byte sequence of length width * height, containing all the 8-bit transparency values in scan order."
        if (alpha_data.size() < pixel_count)
            return Error::from_string_literal("WebPImageDecoderPlugin: uncompressed ALPH data too small");
        memcpy(alpha.data(), alpha_data.data(), pixel_count);
    } else {
        // "Lossless format compression: the byte sequence is a compressed image-stream (as described in the WebP Lossless Bitstream Format)
        //  of implicit dimension width x height. That is, this image-stream does NOT contain any headers describing the image dimension.
        //  Once the image-stream is decoded into ARGB color values, following the process described in the lossless format specification,
        //  the transparency information must be extracted from the green channel of the ARGB quadruplet."
        VP8LHeader vp8l_header { static_cast<u16>(bitmap.width()), static_cast<u16>(bitmap.height()), /*is_alpha_used=*/false, alpha_data };
        auto lossless_bitmap = TRY(decode_webp_chunk_VP8L_contents(vp8l_header));

        if (pixel_count != static_cast<size_t>(lossless_bitmap->width() * lossless_bitmap->height()))
            return Error::from_string_literal("WebPImageDecoderPlugin: decompressed ALPH dimensions don't match VP8 dimensions");

        // Extract the alpha plane from the green channel (bits 8-15 of each pixel).
        for (size_t i = 0; i < pixel_count; ++i)
            alpha[i] = (lossless_bitmap->begin()[i] & 0xff00) >> 8;
    }

    // Undo the per-pixel prediction filter in place. The filters below rely on already-reconstructed
    // neighbors, so the loop order (top-to-bottom, left-to-right) matters.
    // "For each pixel, filtering is performed using the following calculations. Assume the alpha values surrounding the current X position are labeled as:
    //
    //  C | B |
    // ---+---+
    //  A | X |
    // [...]
    //
    // The final value is derived by adding the decompressed value X to the predictor and using modulo-256 arithmetic"
    switch (filtering_method) {
    case 0:
        // "Method 0: predictor = 0"
        // Nothing to do.
        break;

    case 1:
        // "Method 1: predictor = A"
        // "The top-left value at location (0, 0) uses 0 as predictor value. Otherwise,
        //  For horizontal or gradient filtering methods, the left-most pixels at location (0, y) are predicted using the location (0, y-1) just above."
        for (int y = 1; y < bitmap.height(); ++y)
            alpha[y * bitmap.width()] += alpha[(y - 1) * bitmap.width()];
        for (int y = 0; y < bitmap.height(); ++y) {
            for (int x = 1; x < bitmap.width(); ++x) {
                u8 A = alpha[y * bitmap.width() + (x - 1)];
                alpha[y * bitmap.width() + x] += A; // u8 addition wraps, giving the required modulo-256 arithmetic.
            }
        }
        break;

    case 2:
        // "Method 2: predictor = B"
        // "The top-left value at location (0, 0) uses 0 as predictor value. Otherwise,
        //  For vertical or gradient filtering methods, the top-most pixels at location (x, 0) are predicted using the location (x-1, 0) on the left."
        for (int x = 1; x < bitmap.width(); ++x)
            alpha[x] += alpha[x - 1];
        for (int y = 1; y < bitmap.height(); ++y) {
            for (int x = 0; x < bitmap.width(); ++x) {
                u8 B = alpha[(y - 1) * bitmap.width() + x];
                alpha[y * bitmap.width() + x] += B;
            }
        }
        break;

    case 3:
        // "Method 3: predictor = clip(A + B - C)"
        //  where clip(v) is equal to:
        //  * 0 if v < 0
        //  * 255 if v > 255
        //  * v otherwise"
        // "The top-left value at location (0, 0) uses 0 as predictor value. Otherwise,
        //  For horizontal or gradient filtering methods, the left-most pixels at location (0, y) are predicted using the location (0, y-1) just above.
        //  For vertical or gradient filtering methods, the top-most pixels at location (x, 0) are predicted using the location (x-1, 0) on the left."
        for (int x = 1; x < bitmap.width(); ++x)
            alpha[x] += alpha[x - 1];
        for (int y = 1; y < bitmap.height(); ++y)
            alpha[y * bitmap.width()] += alpha[(y - 1) * bitmap.width()];
        for (int y = 1; y < bitmap.height(); ++y) {
            for (int x = 1; x < bitmap.width(); ++x) {
                u8 A = alpha[y * bitmap.width() + (x - 1)];
                u8 B = alpha[(y - 1) * bitmap.width() + x];
                u8 C = alpha[(y - 1) * bitmap.width() + (x - 1)];
                // A + B - C is computed in int, so the clamp sees the un-wrapped value.
                alpha[y * bitmap.width() + x] += clamp(A + B - C, 0, 255);
            }
        }
        break;

    default:
        return Error::from_string_literal("WebPImageDecoderPlugin: uncompressed ALPH invalid filtering method");
    }

    // Write the reconstructed alpha plane into the bitmap, keeping the existing RGB bytes.
    for (size_t i = 0; i < pixel_count; ++i)
        bitmap.begin()[i] = alpha[i] << 24 | (bitmap.begin()[i] & 0xffffff);

    return {};
}
229 | | |
230 | | static ErrorOr<VP8XHeader> decode_webp_chunk_VP8X(RIFF::Chunk const& vp8x_chunk) |
231 | 624 | { |
232 | 624 | VERIFY(vp8x_chunk.id() == "VP8X"sv); |
233 | | |
234 | | // The VP8X chunk is documented at "Extended WebP file header:" at the end of |
235 | | // https://developers.google.com/speed/webp/docs/riff_container#extended_file_format |
236 | 624 | if (vp8x_chunk.size() < 10) |
237 | 10 | return Error::from_string_literal("WebPImageDecoderPlugin: VP8X chunk too small"); |
238 | | |
239 | | // 1 byte flags |
240 | | // "Reserved (Rsv): 2 bits MUST be 0. Readers MUST ignore this field. |
241 | | // ICC profile (I): 1 bit Set if the file contains an ICC profile. |
242 | | // Alpha (L): 1 bit Set if any of the frames of the image contain transparency information ("alpha"). |
243 | | // Exif metadata (E): 1 bit Set if the file contains Exif metadata. |
244 | | // XMP metadata (X): 1 bit Set if the file contains XMP metadata. |
245 | | // Animation (A): 1 bit Set if this is an animated image. Data in 'ANIM' and 'ANMF' chunks should be used to control the animation. |
246 | | // Reserved (R): 1 bit MUST be 0. Readers MUST ignore this field." |
247 | 614 | u8 flags = vp8x_chunk[0]; |
248 | 614 | bool has_icc = flags & 0x20; |
249 | 614 | bool has_alpha = flags & 0x10; |
250 | 614 | bool has_exif = flags & 0x8; |
251 | 614 | bool has_xmp = flags & 0x4; |
252 | 614 | bool has_animation = flags & 0x2; |
253 | | |
254 | | // 3 bytes reserved |
255 | | // 3 bytes width minus one |
256 | 614 | u32 width = (vp8x_chunk[4] | (vp8x_chunk[5] << 8) | (vp8x_chunk[6] << 16)) + 1; |
257 | | |
258 | | // 3 bytes height minus one |
259 | 614 | u32 height = (vp8x_chunk[7] | (vp8x_chunk[8] << 8) | (vp8x_chunk[9] << 16)) + 1; |
260 | | |
261 | 614 | dbgln_if(WEBP_DEBUG, "VP8X: flags {:#x} --{}{}{}{}{}{}, width {}, height {}", |
262 | 614 | flags, |
263 | 614 | has_icc ? " icc" : "", |
264 | 614 | has_alpha ? " alpha" : "", |
265 | 614 | has_exif ? " exif" : "", |
266 | 614 | has_xmp ? " xmp" : "", |
267 | 614 | has_animation ? " anim" : "", |
268 | 614 | (flags & 0x3e) == 0 ? " none" : "", |
269 | 614 | width, height); |
270 | | |
271 | 614 | return VP8XHeader { has_icc, has_alpha, has_exif, has_xmp, has_animation, width, height }; |
272 | 624 | } |
273 | | |
274 | | // https://developers.google.com/speed/webp/docs/riff_container#animation |
275 | | static ErrorOr<ANIMChunk> decode_webp_chunk_ANIM(RIFF::Chunk const& anim_chunk) |
276 | 253 | { |
277 | 253 | VERIFY(anim_chunk.id() == "ANIM"sv); |
278 | 253 | if (anim_chunk.size() < 6) |
279 | 3 | return Error::from_string_literal("WebPImageDecoderPlugin: ANIM chunk too small"); |
280 | | |
281 | 250 | u32 background_color = (u32)anim_chunk[0] | ((u32)anim_chunk[1] << 8) | ((u32)anim_chunk[2] << 16) | ((u32)anim_chunk[3] << 24); |
282 | 250 | u16 loop_count = anim_chunk[4] | (anim_chunk[5] << 8); |
283 | | |
284 | 250 | dbgln_if(WEBP_DEBUG, "ANIM: background_color {:x} loop_count {}", background_color, loop_count); |
285 | | |
286 | 250 | return ANIMChunk { background_color, loop_count }; |
287 | 253 | } |
288 | | |
289 | | // https://developers.google.com/speed/webp/docs/riff_container#animation |
290 | | static ErrorOr<ANMFChunk> decode_webp_chunk_ANMF(WebPLoadingContext& context, RIFF::Chunk const& anmf_chunk) |
291 | 274 | { |
292 | 274 | VERIFY(anmf_chunk.id() == "ANMF"sv); |
293 | 274 | if (anmf_chunk.size() < 16) |
294 | 2 | return Error::from_string_literal("WebPImageDecoderPlugin: ANMF chunk too small"); |
295 | | |
296 | | // "The X coordinate of the upper left corner of the frame is Frame X * 2." |
297 | 272 | u32 frame_x = ((u32)anmf_chunk[0] | ((u32)anmf_chunk[1] << 8) | ((u32)anmf_chunk[2] << 16)) * 2; |
298 | | |
299 | | // "The Y coordinate of the upper left corner of the frame is Frame Y * 2." |
300 | 272 | u32 frame_y = ((u32)anmf_chunk[3] | ((u32)anmf_chunk[4] << 8) | ((u32)anmf_chunk[5] << 16)) * 2; |
301 | | |
302 | | // "The frame width is 1 + Frame Width Minus One." |
303 | 272 | u32 frame_width = ((u32)anmf_chunk[6] | ((u32)anmf_chunk[7] << 8) | ((u32)anmf_chunk[8] << 16)) + 1; |
304 | | |
305 | | // "The frame height is 1 + Frame Height Minus One." |
306 | 272 | u32 frame_height = ((u32)anmf_chunk[9] | ((u32)anmf_chunk[10] << 8) | ((u32)anmf_chunk[11] << 16)) + 1; |
307 | | |
308 | | // "The time to wait before displaying the next frame, in 1 millisecond units. |
309 | | // Note the interpretation of frame duration of 0 (and often <= 10) is implementation defined. |
310 | | // Many tools and browsers assign a minimum duration similar to GIF." |
311 | 272 | u32 frame_duration = (u32)anmf_chunk[12] | ((u32)anmf_chunk[13] << 8) | ((u32)anmf_chunk[14] << 16); |
312 | | |
313 | 272 | u8 flags = anmf_chunk[15]; |
314 | 272 | auto blending_method = static_cast<ANMFChunkHeader::BlendingMethod>((flags >> 1) & 1); |
315 | 272 | auto disposal_method = static_cast<ANMFChunkHeader::DisposalMethod>(flags & 1); |
316 | | |
317 | 272 | dbgln_if(WEBP_DEBUG, "ANMF: frame_x {} frame_y {} frame_width {} frame_height {} frame_duration {} blending_method {} disposal_method {}", |
318 | 272 | frame_x, frame_y, frame_width, frame_height, frame_duration, (int)blending_method, (int)disposal_method); |
319 | | |
320 | | // https://developers.google.com/speed/webp/docs/riff_container#assembling_the_canvas_from_frames |
321 | | // "assert VP8X.canvasWidth >= frame_right |
322 | | // assert VP8X.canvasHeight >= frame_bottom" |
323 | 272 | VERIFY(context.first_chunk->id() == "VP8X"sv); |
324 | 272 | if (frame_x + frame_width > context.vp8x_header.width || frame_y + frame_height > context.vp8x_header.height) |
325 | 52 | return Error::from_string_literal("WebPImageDecoderPlugin: ANMF dimensions out of bounds"); |
326 | | |
327 | 220 | auto header = ANMFChunkHeader { frame_x, frame_y, frame_width, frame_height, frame_duration, blending_method, disposal_method }; |
328 | 220 | ReadonlyBytes frame_data = anmf_chunk.data().slice(16); |
329 | 220 | return ANMFChunk { header, frame_data }; |
330 | 272 | } |
331 | | |
332 | | static ErrorOr<ImageData> decode_webp_set_image_data(Optional<RIFF::Chunk> alpha, Optional<RIFF::Chunk> image_data) |
333 | 2.26k | { |
334 | 2.26k | if (!image_data.has_value()) |
335 | 26 | return Error::from_string_literal("WebPImageDecoderPlugin: missing image data"); |
336 | | |
337 | | // https://developers.google.com/speed/webp/docs/riff_container#alpha |
338 | | // "A frame containing a 'VP8L' chunk SHOULD NOT contain this chunk." |
339 | 2.24k | if (alpha.has_value() && image_data->id() == "VP8L"sv) { |
340 | 2 | dbgln_if(WEBP_DEBUG, "WebPImageDecoderPlugin: VP8L frames should not have ALPH chunks. Ignoring ALPH chunk."); |
341 | 2 | alpha.clear(); |
342 | 2 | } |
343 | | |
344 | 2.24k | return ImageData { move(alpha), image_data.value() }; |
345 | 2.26k | } |
346 | | |
// Collects and validates all subchunks of an extended ('VP8X') file.
// On success, context is in the ChunksDecoded state.
// https://developers.google.com/speed/webp/docs/riff_container#extended_file_format
static ErrorOr<void> decode_webp_extended(WebPLoadingContext& context, ReadonlyBytes chunks)
{
    VERIFY(context.first_chunk->id() == "VP8X"sv);

    Optional<RIFF::Chunk> alpha, image_data;

    // FIXME: This isn't quite to spec, which says
    // "All chunks SHOULD be placed in the same order as listed above.
    //  If a chunk appears in the wrong place, the file is invalid, but readers MAY parse the file, ignoring the chunks that are out of order."
    // Remembers only the first occurrence of a chunk type; duplicates are ignored.
    auto store = [](auto& field, RIFF::Chunk const& chunk) {
        if (!field.has_value())
            field = chunk;
    };
    while (!chunks.is_empty()) {
        auto chunk = TRY(RIFF::Chunk::decode_and_advance(chunks));

        if (chunk.id() == "ICCP"sv)
            store(context.iccp_chunk, chunk);
        else if (chunk.id() == "ALPH"sv)
            store(alpha, chunk);
        else if (chunk.id() == "ANIM"sv)
            store(context.animation_header_chunk, chunk);
        else if (chunk.id() == "ANMF"sv)
            TRY(context.animation_frame_chunks.try_append(chunk));
        else if (chunk.id() == "EXIF"sv)
            store(context.exif_chunk, chunk);
        else if (chunk.id() == "XMP "sv)
            store(context.xmp_chunk, chunk);
        else if (chunk.id() == "VP8 "sv || chunk.id() == "VP8L"sv)
            store(image_data, chunk);
    }

    // Validate chunks.

    // https://developers.google.com/speed/webp/docs/riff_container#animation
    // "ANIM Chunk: [...] This chunk MUST appear if the Animation flag in the VP8X chunk is set. If the Animation flag is not set and this chunk is present, it MUST be ignored."
    if (context.vp8x_header.has_animation && !context.animation_header_chunk.has_value())
        return Error::from_string_literal("WebPImageDecoderPlugin: Header claims animation, but no ANIM chunk");
    if (!context.vp8x_header.has_animation && context.animation_header_chunk.has_value()) {
        dbgln_if(WEBP_DEBUG, "WebPImageDecoderPlugin: Header claims no animation, but ANIM chunk present. Ignoring ANIM chunk.");
        context.animation_header_chunk.clear();
    }

    // "ANMF Chunk: [...] If the Animation flag is not set, then this chunk SHOULD NOT be present."
    if (!context.vp8x_header.has_animation && !context.animation_frame_chunks.is_empty()) {
        dbgln_if(WEBP_DEBUG, "WebPImageDecoderPlugin: Header claims no animation, but ANMF chunks present. Ignoring ANMF chunks.");
        context.animation_frame_chunks.clear();
    }

    // Image data is not optional -- but the spec doesn't explicitly say that an animated image must have more than 0 frames.
    // The spec also doesn't say that animated images must not contain a regular image data segment.
    if (!context.vp8x_header.has_animation || image_data.has_value())
        context.image_data = TRY(decode_webp_set_image_data(move(alpha), move(image_data)));

    // https://developers.google.com/speed/webp/docs/riff_container#color_profile
    // "This chunk MUST appear before the image data."
    // Chunk order is checked by comparing data pointers; all chunks are views into the same context.data buffer.
    if (context.iccp_chunk.has_value()
        && ((context.image_data.has_value()
                && (context.iccp_chunk->data().data() > context.image_data->image_data_chunk.data().data()
                    || (context.image_data->alpha_chunk.has_value() && context.iccp_chunk->data().data() > context.image_data->alpha_chunk->data().data())))
            || (!context.animation_frame_chunks.is_empty() && context.iccp_chunk->data().data() > context.animation_frame_chunks[0].data().data()))) {
        return Error::from_string_literal("WebPImageDecoderPlugin: ICCP chunk is after image data");
    }

    // The ICCP chunk's presence must agree with the VP8X header's ICC flag, in both directions.
    if (context.iccp_chunk.has_value() && !context.vp8x_header.has_icc)
        return Error::from_string_literal("WebPImageDecoderPlugin: ICCP chunk present, but VP8X header claims no ICC profile");
    if (!context.iccp_chunk.has_value() && context.vp8x_header.has_icc)
        return Error::from_string_literal("WebPImageDecoderPlugin: VP8X header claims ICC profile, but no ICCP chunk present");

    context.state = WebPLoadingContext::State::ChunksDecoded;
    return {};
}
420 | | |
421 | | static ErrorOr<void> read_webp_first_chunk(WebPLoadingContext& context) |
422 | 2.60k | { |
423 | 2.60k | if (context.state >= WebPLoadingContext::State::FirstChunkRead) |
424 | 0 | return {}; |
425 | | |
426 | 2.60k | context.chunks_cursor = context.data.slice(sizeof(RIFF::FileHeader)); |
427 | 2.60k | auto first_chunk = TRY(RIFF::Chunk::decode_and_advance(context.chunks_cursor)); |
428 | | |
429 | 2.52k | if (first_chunk.id() != "VP8 "sv && first_chunk.id() != "VP8L"sv && first_chunk.id() != "VP8X"sv) |
430 | 12 | return Error::from_string_literal("WebPImageDecoderPlugin: Invalid first chunk type"); |
431 | | |
432 | 2.51k | context.first_chunk = first_chunk; |
433 | 2.51k | context.state = WebPLoadingContext::State::FirstChunkRead; |
434 | | |
435 | 2.51k | if (first_chunk.id() == "VP8 "sv || first_chunk.id() == "VP8L"sv) |
436 | 1.88k | context.image_data = TRY(decode_webp_set_image_data(OptionalNone {}, first_chunk)); |
437 | | |
438 | 2.51k | return {}; |
439 | 2.51k | } |
440 | | |
441 | | static ErrorOr<void> decode_webp_first_chunk(WebPLoadingContext& context) |
442 | 2.60k | { |
443 | 2.60k | if (context.state >= WebPLoadingContext::State::FirstChunkDecoded) |
444 | 0 | return {}; |
445 | | |
446 | 2.60k | if (context.state < WebPLoadingContext::FirstChunkRead) |
447 | 2.60k | TRY(read_webp_first_chunk(context)); |
448 | | |
449 | 2.60k | if (context.first_chunk->id() == "VP8 "sv) { |
450 | 768 | auto vp8_header = TRY(decode_webp_chunk_VP8_header(context.first_chunk->data())); |
451 | 702 | context.size = IntSize { vp8_header.width, vp8_header.height }; |
452 | 702 | context.state = WebPLoadingContext::State::FirstChunkDecoded; |
453 | 702 | return {}; |
454 | 768 | } |
455 | 1.74k | if (context.first_chunk->id() == "VP8L"sv) { |
456 | 1.12k | auto vp8l_header = TRY(decode_webp_chunk_VP8L_header(context.first_chunk->data())); |
457 | 1.10k | context.size = IntSize { vp8l_header.width, vp8l_header.height }; |
458 | 1.10k | context.state = WebPLoadingContext::State::FirstChunkDecoded; |
459 | 1.10k | return {}; |
460 | 1.12k | } |
461 | 624 | VERIFY(context.first_chunk->id() == "VP8X"sv); |
462 | 624 | context.vp8x_header = TRY(decode_webp_chunk_VP8X(context.first_chunk.value())); |
463 | 614 | context.size = IntSize { context.vp8x_header.width, context.vp8x_header.height }; |
464 | 614 | context.state = WebPLoadingContext::State::FirstChunkDecoded; |
465 | 614 | return {}; |
466 | 624 | } |
467 | | |
468 | | static ErrorOr<void> decode_webp_chunks(WebPLoadingContext& context) |
469 | 2.41k | { |
470 | 2.41k | if (context.state >= WebPLoadingContext::State::ChunksDecoded) |
471 | 0 | return {}; |
472 | | |
473 | 2.41k | VERIFY(context.state >= WebPLoadingContext::FirstChunkDecoded); |
474 | | |
475 | 2.41k | if (context.first_chunk->id() == "VP8X"sv) |
476 | 614 | return decode_webp_extended(context, context.chunks_cursor); |
477 | | |
478 | 1.80k | context.state = WebPLoadingContext::State::ChunksDecoded; |
479 | 1.80k | return {}; |
480 | 2.41k | } |
481 | | |
482 | | static ErrorOr<void> decode_webp_animation_frame_chunks(WebPLoadingContext& context) |
483 | 253 | { |
484 | 253 | if (context.state >= WebPLoadingContext::State::AnimationFrameChunksDecoded) |
485 | 0 | return {}; |
486 | | |
487 | 253 | VERIFY(context.state == WebPLoadingContext::State::ChunksDecoded); |
488 | | |
489 | 253 | context.animation_header_chunk_data = TRY(decode_webp_chunk_ANIM(context.animation_header_chunk.value())); |
490 | | |
491 | 250 | Vector<ANMFChunk> decoded_chunks; |
492 | 250 | TRY(decoded_chunks.try_ensure_capacity(context.animation_frame_chunks.size())); |
493 | 250 | for (auto const& chunk : context.animation_frame_chunks) |
494 | 274 | TRY(decoded_chunks.try_append(TRY(decode_webp_chunk_ANMF(context, chunk)))); |
495 | 250 | context.animation_frame_chunks_data = move(decoded_chunks); |
496 | | |
497 | 196 | context.state = WebPLoadingContext::State::AnimationFrameChunksDecoded; |
498 | 196 | return {}; |
499 | 250 | } |
500 | | |
501 | | static ErrorOr<ImageData> decode_webp_animation_frame_image_data(ANMFChunk const& frame) |
502 | 158 | { |
503 | 158 | ReadonlyBytes chunks = frame.frame_data; |
504 | 158 | auto chunk = TRY(RIFF::Chunk::decode_and_advance(chunks)); |
505 | | |
506 | 151 | Optional<RIFF::Chunk> alpha, image_data; |
507 | 151 | if (chunk.id() == "ALPH"sv) { |
508 | 27 | alpha = chunk; |
509 | 27 | chunk = TRY(RIFF::Chunk::decode_and_advance(chunks)); |
510 | 26 | } |
511 | 151 | if (chunk.id() == "VP8 "sv || chunk.id() == "VP8L"sv) |
512 | 142 | image_data = chunk; |
513 | | |
514 | 150 | return decode_webp_set_image_data(move(alpha), move(image_data)); |
515 | 151 | } |
516 | | |
517 | | static ErrorOr<NonnullRefPtr<Bitmap>> decode_webp_image_data(WebPLoadingContext& context, ImageData const& image_data) |
518 | 2.13k | { |
519 | 2.13k | if (image_data.image_data_chunk.id() == "VP8L"sv) { |
520 | 1.25k | VERIFY(!image_data.alpha_chunk.has_value()); |
521 | 1.25k | auto vp8l_header = TRY(decode_webp_chunk_VP8L_header(image_data.image_data_chunk.data())); |
522 | | |
523 | | // Check that the VP8X header alpha flag matches the VP8L header alpha flag. |
524 | | // FIXME: For animated images, if VP8X has alpha then at least one frame should have alpha. But we currently don't check this for animations. |
525 | 1.24k | if (context.first_chunk->id() == "VP8X" && !context.animation_frame_chunks_data.has_value() && context.vp8x_header.has_alpha != vp8l_header.is_alpha_used) |
526 | 1 | return Error::from_string_literal("WebPImageDecoderPlugin: VP8X header alpha flag doesn't match VP8L header"); |
527 | | |
528 | 1.24k | return decode_webp_chunk_VP8L_contents(vp8l_header); |
529 | 1.24k | } |
530 | | |
531 | 885 | VERIFY(image_data.image_data_chunk.id() == "VP8 "sv); |
532 | 885 | auto vp8_header = TRY(decode_webp_chunk_VP8_header(image_data.image_data_chunk.data())); |
533 | 882 | auto bitmap = TRY(decode_webp_chunk_VP8_contents(vp8_header, image_data.alpha_chunk.has_value())); |
534 | | |
535 | 234 | if (image_data.alpha_chunk.has_value()) |
536 | 166 | TRY(decode_webp_chunk_ALPH(image_data.alpha_chunk.value(), *bitmap)); |
537 | | |
538 | 234 | return bitmap; |
539 | 234 | } |
540 | | |
// Composites animation frames onto the canvas bitmap up to and including frame_index,
// reusing the already-composited canvas when decoding forward from current_frame.
// https://developers.google.com/speed/webp/docs/riff_container#canvas_assembly_from_frames
static ErrorOr<ImageFrameDescriptor> decode_webp_animation_frame(WebPLoadingContext& context, size_t frame_index)
{
    if (frame_index >= context.animation_frame_chunks_data->size())
        return Error::from_string_literal("frame_index size too high");

    VERIFY(context.first_chunk->id() == "VP8X"sv);
    VERIFY(context.vp8x_header.has_animation);

    // The spec says
    // "canvas ← new image of size VP8X.canvasWidth x VP8X.canvasHeight with
    //  background color ANIM.background_color."
    // But:
    // * libwebp always fills with transparent black (#00000000)
    // * some images (e.g. images written by Aseprite) set the background color to fully opaque white
    // These images then end up with a nice transparent background in libwebp-based decoders (i.e. basically everywhere)
    // but show a silly opaque border in ours. So don't use context.animation_header_chunk_data->background_color here.
    Color clear_color(Color::Transparent);

    // Default: continue compositing from the frame after the one currently on the canvas.
    size_t start_frame = context.current_frame + 1;
    dbgln_if(WEBP_DEBUG, "start_frame {} context.current_frame {}", start_frame, context.current_frame);
    if (context.state < WebPLoadingContext::State::BitmapDecoded) {
        // No canvas yet: create it and composite from frame 0.
        start_frame = 0;
        auto format = context.vp8x_header.has_alpha ? BitmapFormat::BGRA8888 : BitmapFormat::BGRx8888;
        context.bitmap = TRY(Bitmap::create(format, { context.vp8x_header.width, context.vp8x_header.height }));
        if (clear_color != Color(Color::Transparent)) // Bitmaps start out transparent, so only fill if not transparent.
            context.bitmap->fill(clear_color);
    } else if (frame_index < context.current_frame) {
        // Seeking backwards: frames depend on earlier ones, so re-composite from frame 0.
        start_frame = 0;
    }

    Painter painter(*context.bitmap);

    for (size_t i = start_frame; i <= frame_index; ++i) {
        dbgln_if(WEBP_DEBUG, "drawing frame {} to produce frame {}", i, frame_index);

        auto const& frame = context.animation_frame_chunks_data.value()[i];
        auto const& frame_description = frame.header;

        // Apply the previous frame's disposal method before drawing this frame.
        if (i > 0) {
            auto const& previous_frame = context.animation_frame_chunks_data.value()[i - 1].header;
            if (previous_frame.disposal_method == ANMFChunkHeader::DisposalMethod::DisposeToBackgroundColor)
                painter.clear_rect({ previous_frame.frame_x, previous_frame.frame_y, previous_frame.frame_width, previous_frame.frame_height }, clear_color);
        }

        auto frame_image_data = TRY(decode_webp_animation_frame_image_data(frame));
        auto frame_bitmap = TRY(decode_webp_image_data(context, frame_image_data));
        if (static_cast<u32>(frame_bitmap->width()) != frame_description.frame_width || static_cast<u32>(frame_bitmap->height()) != frame_description.frame_height)
            return Error::from_string_literal("WebPImageDecoderPlugin: decoded frame bitmap size doesn't match frame description size");

        // FIXME: "Alpha-blending SHOULD be done in linear color space..."
        bool apply_alpha = frame_description.blending_method == ANMFChunkHeader::BlendingMethod::UseAlphaBlending;
        painter.blit({ frame_description.frame_x, frame_description.frame_y }, *frame_bitmap, { {}, frame_bitmap->size() }, /*opacity=*/1.0, apply_alpha);

        context.current_frame = i;
        context.state = WebPLoadingContext::State::BitmapDecoded;
    }

    return ImageFrameDescriptor { context.bitmap, static_cast<int>(context.animation_frame_chunks_data.value()[frame_index].header.frame_duration_in_milliseconds) };
}
601 | | |
WebPImageDecoderPlugin::WebPImageDecoderPlugin(ReadonlyBytes data, OwnPtr<WebPLoadingContext> context)
    : m_context(move(context))
{
    // The caller (create()) allocates the context before the plugin exists,
    // so the input bytes are wired into the context here.
    m_context->data = data;
}
607 | | |
// Defined out-of-line, presumably so OwnPtr<WebPLoadingContext>'s destructor is
// instantiated where WebPLoadingContext is a complete type — TODO confirm the
// header only forward-declares it.
WebPImageDecoderPlugin::~WebPImageDecoderPlugin() = default;
609 | | |
610 | | bool WebPImageDecoderPlugin::set_error(ErrorOr<void> const& error_or) |
611 | 319 | { |
612 | 319 | if (error_or.is_error()) { |
613 | 62 | dbgln("WebPLoadingContext error: {}", error_or.error()); |
614 | 62 | m_context->state = WebPLoadingContext::State::Error; |
615 | 62 | return true; |
616 | 62 | } |
617 | 257 | return false; |
618 | 319 | } |
619 | | |
IntSize WebPImageDecoderPlugin::size()
{
    // `size` is an Optional; presumably populated during header decoding, which
    // create() performs before handing out the plugin — TODO confirm value()
    // can never be reached while the Optional is empty.
    return m_context->size.value();
}
624 | | |
625 | | bool WebPImageDecoderPlugin::sniff(ReadonlyBytes data) |
626 | 0 | { |
627 | 0 | WebPLoadingContext context; |
628 | 0 | context.data = data; |
629 | | |
630 | | // Intentionally no set_error() call: We're just sniffing `data` passed to the function, not reading m_context->data. |
631 | 0 | return !decode_webp_header(context).is_error(); |
632 | 0 | } |
633 | | |
634 | | ErrorOr<NonnullOwnPtr<ImageDecoderPlugin>> WebPImageDecoderPlugin::create(ReadonlyBytes data) |
635 | 2.70k | { |
636 | 2.70k | auto context = TRY(try_make<WebPLoadingContext>()); |
637 | 2.70k | auto plugin = TRY(adopt_nonnull_own_or_enomem(new (nothrow) WebPImageDecoderPlugin(data, move(context)))); |
638 | 2.70k | TRY(decode_webp_header(*plugin->m_context)); |
639 | 2.60k | TRY(decode_webp_first_chunk(*plugin->m_context)); |
640 | 2.41k | return plugin; |
641 | 2.60k | } |
642 | | |
643 | | bool WebPImageDecoderPlugin::is_animated() |
644 | 4.66k | { |
645 | 4.66k | return m_context->first_chunk->id() == "VP8X"sv && m_context->vp8x_header.has_animation; |
646 | 4.66k | } |
647 | | |
648 | | size_t WebPImageDecoderPlugin::loop_count() |
649 | 0 | { |
650 | 0 | if (!is_animated()) |
651 | 0 | return 0; |
652 | | |
653 | 0 | if (m_context->state < WebPLoadingContext::State::ChunksDecoded) { |
654 | 0 | if (set_error(decode_webp_chunks(*m_context))) |
655 | 0 | return 0; |
656 | 0 | } |
657 | | |
658 | 0 | if (m_context->state < WebPLoadingContext::State::AnimationFrameChunksDecoded) { |
659 | 0 | if (set_error(decode_webp_animation_frame_chunks(*m_context))) |
660 | 0 | return 0; |
661 | 0 | } |
662 | | |
663 | 0 | return m_context->animation_header_chunk_data->loop_count; |
664 | 0 | } |
665 | | |
666 | | size_t WebPImageDecoderPlugin::frame_count() |
667 | 2.41k | { |
668 | 2.41k | if (!is_animated()) |
669 | 2.09k | return 1; |
670 | | |
671 | 319 | if (m_context->state < WebPLoadingContext::State::ChunksDecoded) { |
672 | 319 | if (set_error(decode_webp_chunks(*m_context))) |
673 | 62 | return 1; |
674 | 319 | } |
675 | | |
676 | 257 | return m_context->animation_frame_chunks.size(); |
677 | 319 | } |
678 | | |
size_t WebPImageDecoderPlugin::first_animated_frame_index()
{
    // Animated WebP files start animating at the very first frame.
    return 0;
}
683 | | |
684 | | ErrorOr<ImageFrameDescriptor> WebPImageDecoderPlugin::frame(size_t index, Optional<IntSize>) |
685 | 2.41k | { |
686 | 2.41k | if (index >= frame_count()) |
687 | 4 | return Error::from_string_literal("WebPImageDecoderPlugin: Invalid frame index"); |
688 | | |
689 | 2.41k | if (m_context->state == WebPLoadingContext::State::Error) |
690 | 62 | return Error::from_string_literal("WebPImageDecoderPlugin: Decoding failed"); |
691 | | |
692 | | // In a lambda so that only one check to set State::Error is needed, instead of one per TRY. |
693 | 2.35k | auto decode_frame = [this](size_t index) -> ErrorOr<ImageFrameDescriptor> { |
694 | 2.35k | if (m_context->state < WebPLoadingContext::State::ChunksDecoded) |
695 | 2.09k | TRY(decode_webp_chunks(*m_context)); |
696 | | |
697 | 2.35k | if (is_animated()) { |
698 | 253 | if (m_context->state < WebPLoadingContext::State::AnimationFrameChunksDecoded) |
699 | 253 | TRY(decode_webp_animation_frame_chunks(*m_context)); |
700 | 253 | return decode_webp_animation_frame(*m_context, index); |
701 | 253 | } |
702 | | |
703 | 1.99k | if (m_context->state < WebPLoadingContext::State::BitmapDecoded) { |
704 | 1.99k | auto bitmap = TRY(decode_webp_image_data(*m_context, m_context->image_data.value())); |
705 | | |
706 | | // Check that size in VP8X chunk matches dimensions in VP8 or VP8L chunk if both are present. |
707 | 596 | if (m_context->first_chunk->id() == "VP8X") { |
708 | 164 | if (static_cast<u32>(bitmap->width()) != m_context->vp8x_header.width || static_cast<u32>(bitmap->height()) != m_context->vp8x_header.height) |
709 | 160 | return Error::from_string_literal("WebPImageDecoderPlugin: VP8X and VP8/VP8L chunks store different dimensions"); |
710 | 164 | } |
711 | | |
712 | 436 | m_context->bitmap = move(bitmap); |
713 | 436 | m_context->state = WebPLoadingContext::State::BitmapDecoded; |
714 | 436 | } |
715 | | |
716 | 436 | VERIFY(m_context->bitmap); |
717 | 436 | return ImageFrameDescriptor { m_context->bitmap, 0 }; |
718 | 436 | }; |
719 | | |
720 | 2.35k | auto result = decode_frame(index); |
721 | 2.35k | if (result.is_error()) { |
722 | 1.83k | m_context->state = WebPLoadingContext::State::Error; |
723 | 1.83k | return result.release_error(); |
724 | 1.83k | } |
725 | 519 | return result.release_value(); |
726 | 2.35k | } |
727 | | |
728 | | ErrorOr<Optional<ReadonlyBytes>> WebPImageDecoderPlugin::icc_data() |
729 | 0 | { |
730 | 0 | if (auto result = decode_webp_chunks(*m_context); result.is_error()) { |
731 | 0 | m_context->state = WebPLoadingContext::State::Error; |
732 | 0 | return result.release_error(); |
733 | 0 | } |
734 | | |
735 | | // FIXME: "If this chunk is not present, sRGB SHOULD be assumed." |
736 | | |
737 | 0 | return m_context->iccp_chunk.map([](auto iccp_chunk) { return iccp_chunk.data(); }); |
738 | 0 | } |
739 | | |
740 | | } |