/rust/registry/src/index.crates.io-6f17d22bba15001f/avif-serialize-0.8.4/src/lib.rs
//! # AVIF image serializer (muxer)
//!
//! ## Usage
//!
//! 1. Compress pixels using an AV1 encoder, such as [rav1e](https://lib.rs/rav1e). [libaom](https://lib.rs/libaom-sys) works too.
//!
//! 2. Call `avif_serialize::serialize_to_vec(av1_data, None, width, height, 8)`
//!
//! See [cavif](https://github.com/kornelski/cavif-rs) for a complete implementation.
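//!
//! A minimal sketch of step 2, assuming `av1_data` already holds a complete AV1 keyframe from
//! your encoder (the empty slice below is just a placeholder) for an 8-bit, 640×480 image:
//!
//! ```no_run
//! let av1_data: &[u8] = &[/* encoded AV1 payload */];
//! // No alpha channel, 8 bits per channel.
//! let avif: Vec<u8> = avif_serialize::serialize_to_vec(av1_data, None, 640, 480, 8);
//! std::fs::write("out.avif", avif).unwrap();
//! ```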

mod boxes;
pub mod constants;
mod writer;

use crate::boxes::*;
use arrayvec::ArrayVec;
use std::io;

/// Config for the serialization (allows setting advanced image properties).
///
/// See [`Aviffy::new`].
pub struct Aviffy {
    premultiplied_alpha: bool,
    colr: ColrBox,
    min_seq_profile: u8,
    chroma_subsampling: (bool, bool),
    monochrome: bool,
    width: u32,
    height: u32,
    bit_depth: u8,
}

/// Makes an AVIF file given encoded AV1 data (create the data with [`rav1e`](https://lib.rs/rav1e)).
///
/// `color_av1_data` is already-encoded AV1 image data for the color channels (YUV, RGB, etc.).
/// [You can parse this information out of the AV1 payload with `avif-parse`](https://docs.rs/avif-parse/latest/avif_parse/struct.AV1Metadata.html).
///
/// The color image should have been encoded without chroma subsampling, AKA YUV444 (`Cs444` in `rav1e`).
/// AV1 handles full-res color so effortlessly that you should never need chroma subsampling again.
///
/// Optional `alpha_av1_data` is a monochrome image (`rav1e` calls it "YUV400"/`Cs400`) representing transparency.
/// Alpha adds a lot of header bloat, so don't specify it unless it's necessary.
///
/// `width`/`height` is the image size in pixels. It must, of course, match the size of the encoded image data.
/// `depth_bits` should be 8, 10 or 12, depending on how the image was encoded.
///
/// Color and alpha must have the same dimensions and depth.
///
/// Data is written (streamed) to `into_output`.
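///
/// A rough usage sketch, streaming into a file. `frame.av1` and `out.avif` are placeholder paths,
/// and the input is assumed to be an 8-bit, 640×480 AV1 frame without alpha:
///
/// ```no_run
/// use std::{fs::File, io::BufWriter};
///
/// let av1_data = std::fs::read("frame.av1").unwrap();
/// let out = BufWriter::new(File::create("out.avif").unwrap());
/// avif_serialize::serialize(out, &av1_data, None, 640, 480, 8).unwrap();
/// ```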
pub fn serialize<W: io::Write>(into_output: W, color_av1_data: &[u8], alpha_av1_data: Option<&[u8]>, width: u32, height: u32, depth_bits: u8) -> io::Result<()> {
    Aviffy::new()
        .set_width(width)
        .set_height(height)
        .set_bit_depth(depth_bits)
        .write_slice(into_output, color_av1_data, alpha_av1_data)
}

impl Aviffy {
    /// You will have to set image properties to match the AV1 bitstream.
    ///
    /// [You can get this information out of the AV1 payload with `avif-parse`](https://docs.rs/avif-parse/latest/avif_parse/struct.AV1Metadata.html).
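    ///
    /// A minimal builder sketch; the dimensions and bit depth below are assumptions and must
    /// match your actual AV1 bitstream (the empty slice is just a placeholder):
    ///
    /// ```no_run
    /// use avif_serialize::Aviffy;
    ///
    /// let av1_data: &[u8] = &[/* encoded AV1 payload */];
    /// let mut out = Vec::new();
    /// Aviffy::new()
    ///     .set_width(640)
    ///     .set_height(480)
    ///     .set_bit_depth(8)
    ///     .write_slice(&mut out, av1_data, None)
    ///     .unwrap();
    /// ```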
    #[inline]
    #[must_use]
    pub fn new() -> Self {
        Self {
            premultiplied_alpha: false,
            min_seq_profile: 1,
            chroma_subsampling: (false, false),
            monochrome: false,
            width: 0,
            height: 0,
            bit_depth: 0,
            colr: Default::default(),
        }
    }

    /// If set, must match the AV1 color payload, and will result in `colr` box added to AVIF.
    /// Defaults to BT.601, because that's what Safari assumes when `colr` is missing.
    /// Other browsers are smart enough to read this from the AV1 payload instead.
    #[inline]
    pub fn set_matrix_coefficients(&mut self, matrix_coefficients: constants::MatrixCoefficients) -> &mut Self {
        self.colr.matrix_coefficients = matrix_coefficients;
        self
    }

    #[doc(hidden)]
    pub fn matrix_coefficients(&mut self, matrix_coefficients: constants::MatrixCoefficients) -> &mut Self {
        self.set_matrix_coefficients(matrix_coefficients)
    }

    /// If set, must match the AV1 color payload, and will result in `colr` box added to AVIF.
    /// Defaults to sRGB.
    #[inline]
    pub fn set_transfer_characteristics(&mut self, transfer_characteristics: constants::TransferCharacteristics) -> &mut Self {
        self.colr.transfer_characteristics = transfer_characteristics;
        self
    }

    #[doc(hidden)]
    pub fn transfer_characteristics(&mut self, transfer_characteristics: constants::TransferCharacteristics) -> &mut Self {
        self.set_transfer_characteristics(transfer_characteristics)
    }

    /// If set, must match the AV1 color payload, and will result in `colr` box added to AVIF.
    /// Defaults to sRGB/Rec.709.
    #[inline]
    pub fn set_color_primaries(&mut self, color_primaries: constants::ColorPrimaries) -> &mut Self {
        self.colr.color_primaries = color_primaries;
        self
    }

    #[doc(hidden)]
    pub fn color_primaries(&mut self, color_primaries: constants::ColorPrimaries) -> &mut Self {
        self.set_color_primaries(color_primaries)
    }

    /// If set, must match the AV1 color payload, and will result in `colr` box added to AVIF.
    /// Defaults to full range.
    #[inline]
    pub fn set_full_color_range(&mut self, full_range: bool) -> &mut Self {
        self.colr.full_range_flag = full_range;
        self
    }

    #[doc(hidden)]
    pub fn full_color_range(&mut self, full_range: bool) -> &mut Self {
        self.set_full_color_range(full_range)
    }

    /// Makes an AVIF file given encoded AV1 data (create the data with [`rav1e`](https://lib.rs/rav1e)).
    ///
    /// `color_av1_data` is already-encoded AV1 image data for the color channels (YUV, RGB, etc.).
    /// The color image should have been encoded without chroma subsampling, AKA YUV444 (`Cs444` in `rav1e`).
    /// AV1 handles full-res color so effortlessly that you should never need chroma subsampling again.
    ///
    /// Optional `alpha_av1_data` is a monochrome image (`rav1e` calls it "YUV400"/`Cs400`) representing transparency.
    /// Alpha adds a lot of header bloat, so don't specify it unless it's necessary.
    ///
    /// `width`/`height` is the image size in pixels. It must, of course, match the size of the encoded image data.
    /// `depth_bits` should be 8, 10 or 12, depending on how the image has been encoded in AV1.
    ///
    /// Color and alpha must have the same dimensions and depth.
    ///
    /// Data is written (streamed) to `into_output`.
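    ///
    /// A rough sketch with an alpha channel. All inputs are placeholders; both payloads are
    /// assumed to be 8-bit and 640×480:
    ///
    /// ```no_run
    /// use avif_serialize::Aviffy;
    ///
    /// let color_av1: &[u8] = &[/* YUV444 AV1 payload */];
    /// let alpha_av1: &[u8] = &[/* monochrome AV1 payload */];
    /// let mut out = Vec::new();
    /// Aviffy::new()
    ///     .write(&mut out, color_av1, Some(alpha_av1), 640, 480, 8)
    ///     .unwrap();
    /// ```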
    #[inline]
    pub fn write<W: io::Write>(&self, into_output: W, color_av1_data: &[u8], alpha_av1_data: Option<&[u8]>, width: u32, height: u32, depth_bits: u8) -> io::Result<()> {
        self.make_boxes(color_av1_data, alpha_av1_data, width, height, depth_bits)?.write(into_output)
    }

    /// See [`Self::write`]
    #[inline]
    pub fn write_slice<W: io::Write>(&self, into_output: W, color_av1_data: &[u8], alpha_av1_data: Option<&[u8]>) -> io::Result<()> {
        self.make_boxes(color_av1_data, alpha_av1_data, self.width, self.height, self.bit_depth)?.write(into_output)
    }

    fn make_boxes<'data>(&self, color_av1_data: &'data [u8], alpha_av1_data: Option<&'data [u8]>, width: u32, height: u32, depth_bits: u8) -> io::Result<AvifFile<'data>> {
        if ![8, 10, 12].contains(&depth_bits) {
            return Err(io::Error::new(io::ErrorKind::InvalidInput, "depth must be 8/10/12"));
        }

        let mut image_items = ArrayVec::new();
        let mut iloc_items = ArrayVec::new();
        let mut ipma_entries = ArrayVec::new();
        let mut data_chunks = ArrayVec::new();
        let mut irefs = ArrayVec::new();
        let mut ipco = IpcoBox::new();
        let color_image_id = 1;
        let alpha_image_id = 2;
        const ESSENTIAL_BIT: u8 = 0x80;
        let color_depth_bits = depth_bits;
        let alpha_depth_bits = depth_bits; // Sadly, the spec requires these to match.

        image_items.push(InfeBox {
            id: color_image_id,
            typ: FourCC(*b"av01"),
            name: "",
        });

        let ispe_prop = ipco.push(IpcoProp::Ispe(IspeBox { width, height })).ok_or(io::ErrorKind::InvalidInput)?;

        // This is redundant, but Chrome wants it, and checks that it matches :(
        let av1c_color_prop = ipco.push(IpcoProp::Av1C(Av1CBox {
            seq_profile: self.min_seq_profile.max(if color_depth_bits >= 12 { 2 } else { 0 }),
            seq_level_idx_0: 31,
            seq_tier_0: false,
            high_bitdepth: color_depth_bits >= 10,
            twelve_bit: color_depth_bits >= 12,
            monochrome: self.monochrome,
            chroma_subsampling_x: self.chroma_subsampling.0,
            chroma_subsampling_y: self.chroma_subsampling.1,
            chroma_sample_position: 0,
        })).ok_or(io::ErrorKind::InvalidInput)?;

        // Useless bloat
        let pixi_3 = ipco.push(IpcoProp::Pixi(PixiBox {
            channels: 3,
            depth: color_depth_bits,
        })).ok_or(io::ErrorKind::InvalidInput)?;

        let mut ipma = IpmaEntry {
            item_id: color_image_id,
            prop_ids: from_array([ispe_prop, av1c_color_prop | ESSENTIAL_BIT, pixi_3])
        };

        // Redundant info, already in AV1
        if self.colr != Default::default() {
            let colr_color_prop = ipco.push(IpcoProp::Colr(self.colr)).ok_or(io::ErrorKind::InvalidInput)?;
            ipma.prop_ids.push(colr_color_prop);
        }
        ipma_entries.push(ipma);

        if let Some(alpha_data) = alpha_av1_data {
            image_items.push(InfeBox {
                id: alpha_image_id,
                typ: FourCC(*b"av01"),
                name: "",
            });

            irefs.push(IrefEntryBox {
                from_id: alpha_image_id,
                to_id: color_image_id,
                typ: FourCC(*b"auxl"),
            });

            if self.premultiplied_alpha {
                irefs.push(IrefEntryBox {
                    from_id: color_image_id,
                    to_id: alpha_image_id,
                    typ: FourCC(*b"prem"),
                });
            }

            let av1c_alpha_prop = ipco.push(boxes::IpcoProp::Av1C(Av1CBox {
                seq_profile: if alpha_depth_bits >= 12 { 2 } else { 0 },
                seq_level_idx_0: 31,
                seq_tier_0: false,
                high_bitdepth: alpha_depth_bits >= 10,
                twelve_bit: alpha_depth_bits >= 12,
                monochrome: true,
                chroma_subsampling_x: true,
                chroma_subsampling_y: true,
                chroma_sample_position: 0,
            })).ok_or(io::ErrorKind::InvalidInput)?;

            // So pointless
            let pixi_1 = ipco.push(IpcoProp::Pixi(PixiBox {
                channels: 1,
                depth: alpha_depth_bits,
            })).ok_or(io::ErrorKind::InvalidInput)?;

            // that's a silly way to add 1 bit of information, isn't it?
            let auxc_prop = ipco.push(IpcoProp::AuxC(AuxCBox {
                urn: "urn:mpeg:mpegB:cicp:systems:auxiliary:alpha",
            })).ok_or(io::ErrorKind::InvalidInput)?;

            ipma_entries.push(IpmaEntry {
                item_id: alpha_image_id,
                prop_ids: from_array([ispe_prop, av1c_alpha_prop | ESSENTIAL_BIT, auxc_prop, pixi_1]),
            });

            // Use interleaved color and alpha, with alpha first.
            // Makes it possible to display partial image.
            iloc_items.push(IlocItem {
                id: color_image_id,
                extents: [IlocExtent {
                    offset: IlocOffset::Relative(alpha_data.len()),
                    len: color_av1_data.len(),
                }]
                .into(),
            });
            iloc_items.push(IlocItem {
                id: alpha_image_id,
                extents: [IlocExtent {
                    offset: IlocOffset::Relative(0),
                    len: alpha_data.len(),
                }]
                .into(),
            });
            data_chunks.push(alpha_data);
        } else {
            iloc_items.push(IlocItem {
                id: color_image_id,
                extents: [IlocExtent {
                    offset: IlocOffset::Relative(0),
                    len: color_av1_data.len(),
                }]
                .into(),
            });
        }
        data_chunks.push(color_av1_data);
        Ok(AvifFile {
            ftyp: FtypBox {
                major_brand: FourCC(*b"avif"),
                minor_version: 0,
                compatible_brands: [FourCC(*b"mif1"), FourCC(*b"miaf")].into(),
            },
            meta: MetaBox {
                hdlr: HdlrBox {},
                iinf: IinfBox { items: image_items },
                pitm: PitmBox(color_image_id),
                iloc: IlocBox { items: iloc_items },
                iprp: IprpBox {
                    ipco,
                    // It's not enough to define these properties,
                    // they must be assigned to the image
                    ipma: IpmaBox {
                        entries: ipma_entries,
                    },
                },
                iref: IrefBox {
                    entries: irefs
                },
            },
            // Here's the actual data. If HEIF wasn't such a kitchen sink, this
            // would have been the only data this file needs.
            mdat: MdatBox { data_chunks },
        })
    }

    /// Panics if the input arguments were invalid. Use [`Self::write`] to handle the errors.
    #[must_use]
    #[track_caller]
    pub fn to_vec(&self, color_av1_data: &[u8], alpha_av1_data: Option<&[u8]>, width: u32, height: u32, depth_bits: u8) -> Vec<u8> {
        let mut file = self.make_boxes(color_av1_data, alpha_av1_data, width, height, depth_bits).unwrap();
        let mut out = Vec::new();
        file.write_to_vec(&mut out).unwrap();
        out
    }

    /// `(false, false)` is 4:4:4
    /// `(true, true)` is 4:2:0
    ///
    /// `chroma_sample_position` is always 0. Don't use chroma subsampling with AVIF.
    #[inline]
    pub fn set_chroma_subsampling(&mut self, subsampled_xy: (bool, bool)) -> &mut Self {
        self.chroma_subsampling = subsampled_xy;
        self
    }

    /// Set whether the image is monochrome (grayscale).
    /// This sets the `monochrome` flag in the `av1C` configuration box; it must match the AV1 payload.
    #[inline]
    pub fn set_monochrome(&mut self, monochrome: bool) -> &mut Self {
        self.monochrome = monochrome;
        self
    }

    /// Sets the minimum required AV1 sequence profile (`seq_profile`).
    ///
    /// A higher bit depth may raise the profile that is actually written (12-bit forces at least profile 2).
    #[inline]
    pub fn set_seq_profile(&mut self, seq_profile: u8) -> &mut Self {
        self.min_seq_profile = seq_profile;
        self
    }

    #[inline]
    pub fn set_width(&mut self, width: u32) -> &mut Self {
        self.width = width;
        self
    }

    #[inline]
    pub fn set_height(&mut self, height: u32) -> &mut Self {
        self.height = height;
        self
    }

    /// 8, 10 or 12.
    #[inline]
    pub fn set_bit_depth(&mut self, bit_depth: u8) -> &mut Self {
        self.bit_depth = bit_depth;
        self
    }

    /// Set whether the image uses premultiplied alpha, i.e. the RGB channels were multiplied by their alpha value,
    /// so that transparent areas are all black. Image decoders will be instructed to undo the premultiplication.
    ///
    /// Premultiplied-alpha images usually compress better and tolerate heavier compression, but
    /// may not be supported correctly by less capable AVIF decoders.
    ///
    /// This just sets the configuration property. The pixel data must have already been processed before compression.
    /// If a decoder displays semitransparent colors too dark, it doesn't support premultiplied alpha.
    /// If a decoder displays semitransparent colors too bright, you didn't premultiply the colors before encoding.
    ///
    /// If you're not using premultiplied alpha, consider bleeding RGB colors into transparent areas,
    /// otherwise there may be unwanted outlines around the edges of transparency.
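    ///
    /// A rough sketch of premultiplying a single 8-bit RGBA pixel before encoding (illustration
    /// only, with truncating integer math; real code should round properly and process whole buffers):
    ///
    /// ```
    /// let (r, g, b, a) = (200u16, 100u16, 50u16, 128u16);
    /// // Scale each color channel by alpha/255.
    /// let premul = |c: u16| (c * a / 255) as u8;
    /// assert_eq!((premul(r), premul(g), premul(b)), (100, 50, 25));
    /// ```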
    #[inline]
    pub fn set_premultiplied_alpha(&mut self, is_premultiplied: bool) -> &mut Self {
        self.premultiplied_alpha = is_premultiplied;
        self
    }

    #[doc(hidden)]
    pub fn premultiplied_alpha(&mut self, is_premultiplied: bool) -> &mut Self {
        self.set_premultiplied_alpha(is_premultiplied)
    }
}

#[inline(always)]
fn from_array<const L1: usize, const L2: usize, T: Copy>(array: [T; L1]) -> ArrayVec<T, L2> {
    assert!(L1 <= L2);
    let mut tmp = ArrayVec::new_const();
    let _ = tmp.try_extend_from_slice(&array);
    tmp
}

/// See [`serialize`] for description. This one makes a `Vec` instead of using `io::Write`.
#[must_use]
#[track_caller]
pub fn serialize_to_vec(color_av1_data: &[u8], alpha_av1_data: Option<&[u8]>, width: u32, height: u32, depth_bits: u8) -> Vec<u8> {
    Aviffy::new().to_vec(color_av1_data, alpha_av1_data, width, height, depth_bits)
}

#[test]
fn test_roundtrip_parse_mp4() {
    let test_img = b"av12356abc";
    let avif = serialize_to_vec(test_img, None, 10, 20, 8);

    let ctx = mp4parse::read_avif(&mut avif.as_slice(), mp4parse::ParseStrictness::Normal).unwrap();

    assert_eq!(&test_img[..], ctx.primary_item_coded_data().unwrap());
}

#[test]
fn test_roundtrip_parse_mp4_alpha() {
    let test_img = b"av12356abc";
    let test_a = b"alpha";
    let avif = serialize_to_vec(test_img, Some(test_a), 10, 20, 8);

    let ctx = mp4parse::read_avif(&mut avif.as_slice(), mp4parse::ParseStrictness::Normal).unwrap();

    assert_eq!(&test_img[..], ctx.primary_item_coded_data().unwrap());
    assert_eq!(&test_a[..], ctx.alpha_item_coded_data().unwrap());
}

#[test]
fn test_roundtrip_parse_avif() {
    let test_img = [1, 2, 3, 4, 5, 6];
    let test_alpha = [77, 88, 99];
    let avif = serialize_to_vec(&test_img, Some(&test_alpha), 10, 20, 8);

    let ctx = avif_parse::read_avif(&mut avif.as_slice()).unwrap();

    assert_eq!(&test_img[..], ctx.primary_item.as_slice());
    assert_eq!(&test_alpha[..], ctx.alpha_item.as_deref().unwrap());
}

#[test]
fn test_roundtrip_parse_avif_colr() {
    let test_img = [1, 2, 3, 4, 5, 6];
    let test_alpha = [77, 88, 99];
    let avif = Aviffy::new()
        .matrix_coefficients(constants::MatrixCoefficients::Bt709)
        .to_vec(&test_img, Some(&test_alpha), 10, 20, 8);

    let ctx = avif_parse::read_avif(&mut avif.as_slice()).unwrap();

    assert_eq!(&test_img[..], ctx.primary_item.as_slice());
    assert_eq!(&test_alpha[..], ctx.alpha_item.as_deref().unwrap());
}

#[test]
fn premultiplied_flag() {
    let test_img = [1, 2, 3, 4];
    let test_alpha = [55, 66, 77, 88, 99];
    let avif = Aviffy::new().premultiplied_alpha(true).to_vec(&test_img, Some(&test_alpha), 5, 5, 8);

    let ctx = avif_parse::read_avif(&mut avif.as_slice()).unwrap();

    assert!(ctx.premultiplied_alpha);
    assert_eq!(&test_img[..], ctx.primary_item.as_slice());
    assert_eq!(&test_alpha[..], ctx.alpha_item.as_deref().unwrap());
}

#[test]
fn size_required() {
    assert!(Aviffy::new().set_bit_depth(10).write_slice(&mut vec![], &[], None).is_err());
}

#[test]
fn depth_required() {
    assert!(Aviffy::new().set_width(1).set_height(1).write_slice(&mut vec![], &[], None).is_err());
}