/rust/registry/src/index.crates.io-1949cf8c6b5b557f/avif-serialize-0.8.6/src/lib.rs
//! # AVIF image serializer (muxer)
//!
//! ## Usage
//!
//! 1. Compress pixels using an AV1 encoder, such as [rav1e](https://lib.rs/rav1e). [libaom](https://lib.rs/libaom-sys) works too.
//!
//! 2. Call `avif_serialize::serialize_to_vec(av1_data, None, width, height, 8)`
//!
//! See [cavif](https://github.com/kornelski/cavif-rs) for a complete implementation.
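//!
//! A minimal sketch of step 2, assuming the AV1 payload has already been produced by the encoder
//! (the input path, dimensions, and bit depth below are placeholders):
//!
//! ```no_run
//! // Read an already-encoded AV1 frame (e.g. produced by rav1e) from disk.
//! let av1_data = std::fs::read("color.av1").unwrap();
//! // Wrap it in an AVIF container: no alpha, 640x480, 8-bit.
//! let avif = avif_serialize::serialize_to_vec(&av1_data, None, 640, 480, 8);
//! std::fs::write("image.avif", avif).unwrap();
//! ```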

mod boxes;
pub mod constants;
mod writer;

use crate::boxes::*;
use arrayvec::ArrayVec;
use std::io;

/// Config for the serialization (allows setting advanced image properties).
///
/// See [`Aviffy::new`].
pub struct Aviffy {
    premultiplied_alpha: bool,
    colr: ColrBox,
    min_seq_profile: u8,
    chroma_subsampling: (bool, bool),
    monochrome: bool,
    width: u32,
    height: u32,
    bit_depth: u8,
    exif: Option<Vec<u8>>,
}

/// Makes an AVIF file given encoded AV1 data (create the data with [`rav1e`](https://lib.rs/rav1e)).
///
/// `color_av1_data` is already-encoded AV1 image data for the color channels (YUV, RGB, etc.).
/// [You can parse this information out of the AV1 payload with `avif-parse`](https://docs.rs/avif-parse/latest/avif_parse/struct.AV1Metadata.html).
///
/// The color image should have been encoded without chroma subsampling, AKA YUV444 (`Cs444` in `rav1e`).
/// AV1 handles full-res color so effortlessly that you should never need chroma subsampling again.
///
/// Optional `alpha_av1_data` is a monochrome image (`rav1e` calls it "YUV400"/`Cs400`) representing transparency.
/// Alpha adds a lot of header bloat, so don't specify it unless it's necessary.
///
/// `width`/`height` is the image size in pixels. It must, of course, match the size of the encoded image data.
/// `depth_bits` should be 8, 10 or 12, depending on how the image was encoded.
///
/// Color and alpha must have the same dimensions and depth.
///
/// Data is written (streamed) to `into_output`.
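///
/// A minimal sketch of a call, assuming the AV1 payload was produced elsewhere
/// (the file paths, dimensions, and bit depth are placeholders):
///
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// let color = std::fs::read("color.av1")?; // already-encoded AV1 color data
/// let out = std::fs::File::create("image.avif")?;
/// avif_serialize::serialize(out, &color, None, 640, 480, 8)?;
/// # Ok(())
/// # }
/// ```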
pub fn serialize<W: io::Write>(into_output: W, color_av1_data: &[u8], alpha_av1_data: Option<&[u8]>, width: u32, height: u32, depth_bits: u8) -> io::Result<()> {
    Aviffy::new()
        .set_width(width)
        .set_height(height)
        .set_bit_depth(depth_bits)
        .write_slice(into_output, color_av1_data, alpha_av1_data)
}

impl Aviffy {
    /// You will have to set image properties to match the AV1 bitstream.
    ///
    /// [You can get this information out of the AV1 payload with `avif-parse`](https://docs.rs/avif-parse/latest/avif_parse/struct.AV1Metadata.html).
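    ///
    /// A minimal builder sketch (the input path, dimensions, and bit depth are placeholders
    /// and must match the actual AV1 payload):
    ///
    /// ```no_run
    /// # fn main() -> std::io::Result<()> {
    /// let color = std::fs::read("color.av1")?;
    /// let mut out = Vec::new();
    /// avif_serialize::Aviffy::new()
    ///     .set_width(640)
    ///     .set_height(480)
    ///     .set_bit_depth(8)
    ///     .write_slice(&mut out, &color, None)?;
    /// # Ok(())
    /// # }
    /// ```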
    #[inline]
    #[must_use]
    pub fn new() -> Self {
        Self {
            premultiplied_alpha: false,
            min_seq_profile: 1,
            chroma_subsampling: (false, false),
            monochrome: false,
            width: 0,
            height: 0,
            bit_depth: 0,
            colr: ColrBox::default(),
            exif: None,
        }
    }

    /// If set, must match the AV1 color payload, and will result in a `colr` box being added to the AVIF.
    /// Defaults to BT.601, because that's what Safari assumes when `colr` is missing.
    /// Other browsers are smart enough to read this from the AV1 payload instead.
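    ///
    /// For example, if the payload was encoded with BT.709 matrix coefficients
    /// (a sketch; the value must match what the encoder actually used):
    ///
    /// ```
    /// let mut f = avif_serialize::Aviffy::new();
    /// f.set_matrix_coefficients(avif_serialize::constants::MatrixCoefficients::Bt709);
    /// ```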
    #[inline]
    pub fn set_matrix_coefficients(&mut self, matrix_coefficients: constants::MatrixCoefficients) -> &mut Self {
        self.colr.matrix_coefficients = matrix_coefficients;
        self
    }

    #[doc(hidden)]
    pub fn matrix_coefficients(&mut self, matrix_coefficients: constants::MatrixCoefficients) -> &mut Self {
        self.set_matrix_coefficients(matrix_coefficients)
    }

    /// If set, must match the AV1 color payload, and will result in a `colr` box being added to the AVIF.
    /// Defaults to sRGB.
    #[inline]
    pub fn set_transfer_characteristics(&mut self, transfer_characteristics: constants::TransferCharacteristics) -> &mut Self {
        self.colr.transfer_characteristics = transfer_characteristics;
        self
    }

    #[doc(hidden)]
    pub fn transfer_characteristics(&mut self, transfer_characteristics: constants::TransferCharacteristics) -> &mut Self {
        self.set_transfer_characteristics(transfer_characteristics)
    }

    /// If set, must match the AV1 color payload, and will result in a `colr` box being added to the AVIF.
    /// Defaults to sRGB/Rec.709.
    #[inline]
    pub fn set_color_primaries(&mut self, color_primaries: constants::ColorPrimaries) -> &mut Self {
        self.colr.color_primaries = color_primaries;
        self
    }

    #[doc(hidden)]
    pub fn color_primaries(&mut self, color_primaries: constants::ColorPrimaries) -> &mut Self {
        self.set_color_primaries(color_primaries)
    }

    /// If set, must match the AV1 color payload, and will result in a `colr` box being added to the AVIF.
    /// Defaults to full range.
    #[inline]
    pub fn set_full_color_range(&mut self, full_range: bool) -> &mut Self {
        self.colr.full_range_flag = full_range;
        self
    }

    #[doc(hidden)]
    pub fn full_color_range(&mut self, full_range: bool) -> &mut Self {
        self.set_full_color_range(full_range)
    }

    /// Makes an AVIF file given encoded AV1 data (create the data with [`rav1e`](https://lib.rs/rav1e)).
    ///
    /// `color_av1_data` is already-encoded AV1 image data for the color channels (YUV, RGB, etc.).
    /// The color image should have been encoded without chroma subsampling, AKA YUV444 (`Cs444` in `rav1e`).
    /// AV1 handles full-res color so effortlessly that you should never need chroma subsampling again.
    ///
    /// Optional `alpha_av1_data` is a monochrome image (`rav1e` calls it "YUV400"/`Cs400`) representing transparency.
    /// Alpha adds a lot of header bloat, so don't specify it unless it's necessary.
    ///
    /// `width`/`height` is the image size in pixels. It must, of course, match the size of the encoded image data.
    /// `depth_bits` should be 8, 10 or 12, depending on how the image was encoded in AV1.
    ///
    /// Color and alpha must have the same dimensions and depth.
    ///
    /// Data is written (streamed) to `into_output`.
    #[inline]
    pub fn write<W: io::Write>(&self, into_output: W, color_av1_data: &[u8], alpha_av1_data: Option<&[u8]>, width: u32, height: u32, depth_bits: u8) -> io::Result<()> {
        self.make_boxes(color_av1_data, alpha_av1_data, width, height, depth_bits)?.write(into_output)
    }

    /// See [`Self::write`]
    #[inline]
    pub fn write_slice<W: io::Write>(&self, into_output: W, color_av1_data: &[u8], alpha_av1_data: Option<&[u8]>) -> io::Result<()> {
        self.make_boxes(color_av1_data, alpha_av1_data, self.width, self.height, self.bit_depth)?.write(into_output)
    }

    fn make_boxes<'data>(&'data self, color_av1_data: &'data [u8], alpha_av1_data: Option<&'data [u8]>, width: u32, height: u32, depth_bits: u8) -> io::Result<AvifFile<'data>> {
        if ![8, 10, 12].contains(&depth_bits) {
            return Err(io::Error::new(io::ErrorKind::InvalidInput, "depth must be 8/10/12"));
        }

        let mut image_items = ArrayVec::new();
        let mut iloc_items = ArrayVec::new();
        let mut ipma_entries = ArrayVec::new();
        let mut irefs = ArrayVec::new();
        let mut ipco = IpcoBox::new();
        let color_image_id = 1;
        let alpha_image_id = 2;
        let exif_id = 3;
        const ESSENTIAL_BIT: u8 = 0x80;
        let color_depth_bits = depth_bits;
        let alpha_depth_bits = depth_bits; // Sadly, the spec requires these to match.

        image_items.push(InfeBox {
            id: color_image_id,
            typ: FourCC(*b"av01"),
            name: "",
        });

        let ispe_prop = ipco.push(IpcoProp::Ispe(IspeBox { width, height })).ok_or(io::ErrorKind::InvalidInput)?;

        // This is redundant, but Chrome wants it, and checks that it matches :(
        let av1c_color_prop = ipco.push(IpcoProp::Av1C(Av1CBox {
            seq_profile: self.min_seq_profile.max(if color_depth_bits >= 12 { 2 } else { 0 }),
            seq_level_idx_0: 31,
            seq_tier_0: false,
            high_bitdepth: color_depth_bits >= 10,
            twelve_bit: color_depth_bits >= 12,
            monochrome: self.monochrome,
            chroma_subsampling_x: self.chroma_subsampling.0,
            chroma_subsampling_y: self.chroma_subsampling.1,
            chroma_sample_position: 0,
        })).ok_or(io::ErrorKind::InvalidInput)?;

        // Useless bloat
        let pixi_3 = ipco.push(IpcoProp::Pixi(PixiBox {
            channels: 3,
            depth: color_depth_bits,
        })).ok_or(io::ErrorKind::InvalidInput)?;

        let mut ipma = IpmaEntry {
            item_id: color_image_id,
            prop_ids: from_array([ispe_prop, av1c_color_prop | ESSENTIAL_BIT, pixi_3]),
        };

        // Redundant info, already in AV1
        if self.colr != ColrBox::default() {
            let colr_color_prop = ipco.push(IpcoProp::Colr(self.colr)).ok_or(io::ErrorKind::InvalidInput)?;
            ipma.prop_ids.push(colr_color_prop);
        }
        ipma_entries.push(ipma);

        if let Some(exif_data) = self.exif.as_deref() {
            image_items.push(InfeBox {
                id: exif_id,
                typ: FourCC(*b"Exif"),
                name: "",
            });

            iloc_items.push(IlocItem {
                id: exif_id,
                extents: [IlocExtent { data: exif_data }],
            });

            irefs.push(IrefEntryBox {
                from_id: exif_id,
                to_id: color_image_id,
                typ: FourCC(*b"cdsc"),
            });
        }

        if let Some(alpha_data) = alpha_av1_data {
            image_items.push(InfeBox {
                id: alpha_image_id,
                typ: FourCC(*b"av01"),
                name: "",
            });

            irefs.push(IrefEntryBox {
                from_id: alpha_image_id,
                to_id: color_image_id,
                typ: FourCC(*b"auxl"),
            });

            if self.premultiplied_alpha {
                irefs.push(IrefEntryBox {
                    from_id: color_image_id,
                    to_id: alpha_image_id,
                    typ: FourCC(*b"prem"),
                });
            }

            let av1c_alpha_prop = ipco.push(boxes::IpcoProp::Av1C(Av1CBox {
                seq_profile: if alpha_depth_bits >= 12 { 2 } else { 0 },
                seq_level_idx_0: 31,
                seq_tier_0: false,
                high_bitdepth: alpha_depth_bits >= 10,
                twelve_bit: alpha_depth_bits >= 12,
                monochrome: true,
                chroma_subsampling_x: true,
                chroma_subsampling_y: true,
                chroma_sample_position: 0,
            })).ok_or(io::ErrorKind::InvalidInput)?;

            // So pointless
            let pixi_1 = ipco.push(IpcoProp::Pixi(PixiBox {
                channels: 1,
                depth: alpha_depth_bits,
            })).ok_or(io::ErrorKind::InvalidInput)?;

            // that's a silly way to add 1 bit of information, isn't it?
            let auxc_prop = ipco.push(IpcoProp::AuxC(AuxCBox {
                urn: "urn:mpeg:mpegB:cicp:systems:auxiliary:alpha",
            })).ok_or(io::ErrorKind::InvalidInput)?;

            ipma_entries.push(IpmaEntry {
                item_id: alpha_image_id,
                prop_ids: from_array([ispe_prop, av1c_alpha_prop | ESSENTIAL_BIT, auxc_prop, pixi_1]),
            });

            // Use interleaved color and alpha, with alpha first.
            // Makes it possible to display partial image.
            iloc_items.push(IlocItem {
                id: alpha_image_id,
                extents: [IlocExtent { data: alpha_data }],
            });
        }
        iloc_items.push(IlocItem {
            id: color_image_id,
            extents: [IlocExtent { data: color_av1_data }],
        });

        Ok(AvifFile {
            ftyp: FtypBox {
                major_brand: FourCC(*b"avif"),
                minor_version: 0,
                compatible_brands: [FourCC(*b"mif1"), FourCC(*b"miaf")].into(),
            },
            meta: MetaBox {
                hdlr: HdlrBox {},
                iinf: IinfBox { items: image_items },
                pitm: PitmBox(color_image_id),
                iloc: IlocBox {
                    absolute_offset_start: None,
                    items: iloc_items,
                },
                iprp: IprpBox {
                    ipco,
                    // It's not enough to define these properties,
                    // they must be assigned to the image
                    ipma: IpmaBox { entries: ipma_entries },
                },
                iref: IrefBox { entries: irefs },
            },
            // Here's the actual data. If HEIF wasn't such a kitchen sink, this
            // would have been the only data this file needs.
            mdat: MdatBox,
        })
    }

    /// Panics if the input arguments were invalid. Use [`Self::write`] to handle the errors.
    #[must_use]
    #[track_caller]
    pub fn to_vec(&self, color_av1_data: &[u8], alpha_av1_data: Option<&[u8]>, width: u32, height: u32, depth_bits: u8) -> Vec<u8> {
        let mut file = self.make_boxes(color_av1_data, alpha_av1_data, width, height, depth_bits).unwrap();
        let mut out = Vec::new();
        file.write_to_vec(&mut out).unwrap();
        out
    }

    /// `(false, false)` is 4:4:4
    /// `(true, true)` is 4:2:0
    ///
    /// `chroma_sample_position` is always 0. Don't use chroma subsampling with AVIF.
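    ///
    /// For example, if the color payload was encoded as 4:2:0 (a sketch; the flags must match
    /// the actual encoder settings):
    ///
    /// ```
    /// let mut f = avif_serialize::Aviffy::new();
    /// f.set_chroma_subsampling((true, true));
    /// ```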
    #[inline]
    pub fn set_chroma_subsampling(&mut self, subsampled_xy: (bool, bool)) -> &mut Self {
        self.chroma_subsampling = subsampled_xy;
        self
    }

    /// Set whether the image is monochrome (grayscale).
    /// This is used to set the `monochrome` flag in the AV1 sequence header.
    #[inline]
    pub fn set_monochrome(&mut self, monochrome: bool) -> &mut Self {
        self.monochrome = monochrome;
        self
    }

    /// Set exif metadata to be included in the AVIF file as a separate item.
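    ///
    /// A minimal sketch (the byte payload here is just a placeholder, not a valid Exif blob):
    ///
    /// ```
    /// let mut f = avif_serialize::Aviffy::new();
    /// f.set_exif(b"example exif payload".to_vec());
    /// ```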
    #[inline]
    pub fn set_exif(&mut self, exif: Vec<u8>) -> &mut Self {
        self.exif = Some(exif);
        self
    }

    /// Sets the minimum required AV1 sequence profile (`seq_profile`).
    ///
    /// A higher bit depth may increase the value actually written.
    #[inline]
    pub fn set_seq_profile(&mut self, seq_profile: u8) -> &mut Self {
        self.min_seq_profile = seq_profile;
        self
    }

    #[inline]
    pub fn set_width(&mut self, width: u32) -> &mut Self {
        self.width = width;
        self
    }

    #[inline]
    pub fn set_height(&mut self, height: u32) -> &mut Self {
        self.height = height;
        self
    }

    /// 8, 10 or 12.
    #[inline]
    pub fn set_bit_depth(&mut self, bit_depth: u8) -> &mut Self {
        self.bit_depth = bit_depth;
        self
    }

    /// Set whether the image's colorspace uses premultiplied alpha, i.e. RGB channels were multiplied by their alpha value,
    /// so that transparent areas are all black. Image decoders will be instructed to undo the premultiplication.
    ///
    /// Premultiplied alpha images usually compress better and tolerate heavier compression, but
    /// may not be supported correctly by less capable AVIF decoders.
    ///
    /// This just sets the configuration property. The pixel data must have already been processed before compression.
    /// If a decoder displays semitransparent colors too dark, it doesn't support premultiplied alpha.
    /// If a decoder displays semitransparent colors too bright, you didn't premultiply the colors before encoding.
    ///
    /// If you're not using premultiplied alpha, consider bleeding RGB colors into transparent areas,
    /// otherwise there may be unwanted outlines around the edges of transparency.
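    ///
    /// An illustrative sketch of the premultiplication step itself (done on the raw pixels before
    /// AV1 encoding, not by this crate), assuming an interleaved 8-bit RGBA buffer; the helper name
    /// is hypothetical:
    ///
    /// ```
    /// // Hypothetical helper: scale each color channel by its alpha value, in place.
    /// fn premultiply_rgba8(pixels: &mut [u8]) {
    ///     for px in pixels.chunks_exact_mut(4) {
    ///         let a = px[3] as u16;
    ///         for c in &mut px[..3] {
    ///             *c = ((*c as u16 * a + 127) / 255) as u8;
    ///         }
    ///     }
    /// }
    ///
    /// let mut buf = vec![255u8, 0, 0, 128]; // one red pixel at ~50% alpha
    /// premultiply_rgba8(&mut buf);
    /// assert_eq!(buf, [128, 0, 0, 128]);
    /// ```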
    #[inline]
    pub fn set_premultiplied_alpha(&mut self, is_premultiplied: bool) -> &mut Self {
        self.premultiplied_alpha = is_premultiplied;
        self
    }

    #[doc(hidden)]
    pub fn premultiplied_alpha(&mut self, is_premultiplied: bool) -> &mut Self {
        self.set_premultiplied_alpha(is_premultiplied)
    }
}
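
/// Copies a fixed-length array into an `ArrayVec` with a (possibly larger) capacity.
/// Used above to seed the per-item property-ID lists.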
#[inline(always)]
fn from_array<const L1: usize, const L2: usize, T: Copy>(array: [T; L1]) -> ArrayVec<T, L2> {
    assert!(L1 <= L2);
    let mut tmp = ArrayVec::new_const();
    let _ = tmp.try_extend_from_slice(&array);
    tmp
}

/// See [`serialize`] for description. This one makes a `Vec` instead of using `io::Write`.
#[must_use]
#[track_caller]
pub fn serialize_to_vec(color_av1_data: &[u8], alpha_av1_data: Option<&[u8]>, width: u32, height: u32, depth_bits: u8) -> Vec<u8> {
    Aviffy::new().to_vec(color_av1_data, alpha_av1_data, width, height, depth_bits)
}

#[test]
fn test_roundtrip_parse_mp4() {
    let test_img = b"av12356abc";
    let avif = serialize_to_vec(test_img, None, 10, 20, 8);

    let ctx = mp4parse::read_avif(&mut avif.as_slice(), mp4parse::ParseStrictness::Normal).unwrap();

    assert_eq!(&test_img[..], ctx.primary_item_coded_data().unwrap());
}

#[test]
fn test_roundtrip_parse_mp4_alpha() {
    let test_img = b"av12356abc";
    let test_a = b"alpha";
    let avif = serialize_to_vec(test_img, Some(test_a), 10, 20, 8);

    let ctx = mp4parse::read_avif(&mut avif.as_slice(), mp4parse::ParseStrictness::Normal).unwrap();

    assert_eq!(&test_img[..], ctx.primary_item_coded_data().unwrap());
    assert_eq!(&test_a[..], ctx.alpha_item_coded_data().unwrap());
}

#[test]
fn test_roundtrip_parse_exif() {
    let test_img = b"av12356abc";
    let test_a = b"alpha";
    let avif = Aviffy::new()
        .set_exif(b"lol".to_vec())
        .to_vec(test_img, Some(test_a), 10, 20, 8);

    let ctx = mp4parse::read_avif(&mut avif.as_slice(), mp4parse::ParseStrictness::Normal).unwrap();

    assert_eq!(&test_img[..], ctx.primary_item_coded_data().unwrap());
    assert_eq!(&test_a[..], ctx.alpha_item_coded_data().unwrap());
}

#[test]
fn test_roundtrip_parse_avif() {
    let test_img = [1, 2, 3, 4, 5, 6];
    let test_alpha = [77, 88, 99];
    let avif = serialize_to_vec(&test_img, Some(&test_alpha), 10, 20, 8);

    let ctx = avif_parse::read_avif(&mut avif.as_slice()).unwrap();

    assert_eq!(&test_img[..], ctx.primary_item.as_slice());
    assert_eq!(&test_alpha[..], ctx.alpha_item.as_deref().unwrap());
}

#[test]
fn test_roundtrip_parse_avif_colr() {
    let test_img = [1, 2, 3, 4, 5, 6];
    let test_alpha = [77, 88, 99];
    let avif = Aviffy::new()
        .matrix_coefficients(constants::MatrixCoefficients::Bt709)
        .to_vec(&test_img, Some(&test_alpha), 10, 20, 8);

    let ctx = avif_parse::read_avif(&mut avif.as_slice()).unwrap();

    assert_eq!(&test_img[..], ctx.primary_item.as_slice());
    assert_eq!(&test_alpha[..], ctx.alpha_item.as_deref().unwrap());
}

#[test]
fn premultiplied_flag() {
    let test_img = [1, 2, 3, 4];
    let test_alpha = [55, 66, 77, 88, 99];
    let avif = Aviffy::new().premultiplied_alpha(true).to_vec(&test_img, Some(&test_alpha), 5, 5, 8);

    let ctx = avif_parse::read_avif(&mut avif.as_slice()).unwrap();

    assert!(ctx.premultiplied_alpha);
    assert_eq!(&test_img[..], ctx.primary_item.as_slice());
    assert_eq!(&test_alpha[..], ctx.alpha_item.as_deref().unwrap());
}

#[test]
fn size_required() {
    assert!(Aviffy::new().set_bit_depth(10).write_slice(&mut vec![], &[], None).is_err());
}

#[test]
fn depth_required() {
    assert!(Aviffy::new().set_width(1).set_height(1).write_slice(&mut vec![], &[], None).is_err());
}