/src/image/src/codecs/hdr/encoder.rs
use std::cmp::Ordering;
use std::io::{Result, Write};

use crate::codecs::hdr::{rgbe8, Rgbe8Pixel, SIGNATURE};
use crate::color::Rgb;
use crate::error::{ImageResult, UnsupportedError, UnsupportedErrorKind};
use crate::{ExtendedColorType, ImageEncoder, ImageError, ImageFormat};

/// Radiance HDR encoder
pub struct HdrEncoder<W: Write> {
    w: W,
}

impl<W: Write> ImageEncoder for HdrEncoder<W> {
    fn write_image(
        self,
        unaligned_bytes: &[u8],
        width: u32,
        height: u32,
        color_type: ExtendedColorType,
    ) -> ImageResult<()> {
        match color_type {
            ExtendedColorType::Rgb32F => {
                let bytes_per_pixel = color_type.bits_per_pixel() as usize / 8;
                let rgbe_pixels = unaligned_bytes
                    .chunks_exact(bytes_per_pixel)
                    .map(|bytes| to_rgbe8(Rgb::<f32>(bytemuck::pod_read_unaligned(bytes))));

                // the length will be checked inside encode_pixels
                self.encode_pixels(rgbe_pixels, width as usize, height as usize)
            }
            _ => Err(ImageError::Unsupported(
                UnsupportedError::from_format_and_kind(
                    ImageFormat::Hdr.into(),
                    UnsupportedErrorKind::Color(color_type),
                ),
            )),
        }
    }
}
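
// A minimal usage sketch of the `ImageEncoder` entry point (illustrative only,
// not part of the original test suite): it encodes an arbitrary 2x2 `Rgb32F`
// image into an in-memory buffer and checks that the output starts with the
// Radiance signature written by `encode_pixels`.
#[test]
fn write_image_usage_sketch() {
    // 2x2 image, three f32 components per pixel; the 0.5 value is arbitrary.
    let pixels = vec![0.5f32; 2 * 2 * 3];
    let bytes: &[u8] = bytemuck::cast_slice(&pixels);

    let mut buf = Vec::new();
    HdrEncoder::new(&mut buf)
        .write_image(bytes, 2, 2, ExtendedColorType::Rgb32F)
        .expect("encoding an Rgb32F image should succeed");

    assert!(buf.starts_with(SIGNATURE));
}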

impl<W: Write> HdrEncoder<W> {
    /// Creates a new encoder that writes to `w`.
    pub fn new(w: W) -> HdrEncoder<W> {
        HdrEncoder { w }
    }

    /// Encodes the image `rgb`, which has dimensions `width` and `height`.
    pub fn encode(self, rgb: &[Rgb<f32>], width: usize, height: usize) -> ImageResult<()> {
        self.encode_pixels(rgb.iter().map(|&rgb| to_rgbe8(rgb)), width, height)
    }

    /// Encodes the image `flattened_rgbe_pixels`, which has dimensions `width` and `height`.
    /// The iterator must yield at least `width * height` pixels in row-major order.
    fn encode_pixels(
        mut self,
        mut flattened_rgbe_pixels: impl ExactSizeIterator<Item = Rgbe8Pixel>,
        width: usize,
        height: usize,
    ) -> ImageResult<()> {
        assert!(
            flattened_rgbe_pixels.len() >= width * height,
            "not enough pixels provided"
        ); // bonus: this might elide some bounds checks

        let w = &mut self.w;
        w.write_all(SIGNATURE)?;
        w.write_all(b"\n")?;
        w.write_all(b"# Rust HDR encoder\n")?;
        w.write_all(b"FORMAT=32-bit_rle_rgbe\n\n")?;
        w.write_all(format!("-Y {height} +X {width}\n").as_bytes())?;

        if !(8..=32_768).contains(&width) {
            for pixel in flattened_rgbe_pixels {
                write_rgbe8(w, pixel)?;
            }
        } else {
            // new RLE marker contains scanline width
            let marker = rgbe8(2, 2, (width / 256) as u8, (width % 256) as u8);
            // buffers for encoded pixels
            let mut bufr = vec![0; width];
            let mut bufg = vec![0; width];
            let mut bufb = vec![0; width];
            let mut bufe = vec![0; width];
            let mut rle_buf = vec![0; width];
            for _scanline_index in 0..height {
                assert!(flattened_rgbe_pixels.len() >= width); // may reduce bounds checks

                for ((((r, g), b), e), pixel) in bufr
                    .iter_mut()
                    .zip(bufg.iter_mut())
                    .zip(bufb.iter_mut())
                    .zip(bufe.iter_mut())
                    .zip(&mut flattened_rgbe_pixels)
                {
                    *r = pixel.c[0];
                    *g = pixel.c[1];
                    *b = pixel.c[2];
                    *e = pixel.e;
                }

                write_rgbe8(w, marker)?; // New RLE encoding marker
                rle_buf.clear();
                rle_compress(&bufr[..], &mut rle_buf);
                w.write_all(&rle_buf[..])?;
                rle_buf.clear();
                rle_compress(&bufg[..], &mut rle_buf);
                w.write_all(&rle_buf[..])?;
                rle_buf.clear();
                rle_compress(&bufb[..], &mut rle_buf);
                w.write_all(&rle_buf[..])?;
                rle_buf.clear();
                rle_compress(&bufe[..], &mut rle_buf);
                w.write_all(&rle_buf[..])?;
            }
        }
        Ok(())
    }
}
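
// A minimal sketch of the inherent `encode` entry point (illustrative only):
// the pixel values are arbitrary. A 2-pixel-wide scanline falls outside the
// 8..=32_768 range, so the uncompressed path is taken and the output ends with
// the four raw RGBE pixels; with the `to_rgbe8` conversion below,
// `Rgb([0.25, 0.5, 1.0])` encodes to `[32, 64, 128, 129]`.
#[test]
fn encode_usage_sketch() {
    let pixels = vec![Rgb([0.25f32, 0.5, 1.0]); 4];
    let mut buf = Vec::new();
    HdrEncoder::new(&mut buf)
        .encode(&pixels, 2, 2)
        .expect("encoding a 2x2 image should succeed");
    assert!(buf.ends_with(&[32u8, 64, 128, 129]));
}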

// A chunk of a scanline component stream: either a run of one repeated byte
// value (`Run(value, length)`) or a stretch of non-repeating bytes
// (`Norun(start_index, length)`), with the index pointing into the input slice.
#[derive(Debug, PartialEq, Eq)]
enum RunOrNot {
    Run(u8, usize),
    Norun(usize, usize),
}

use self::RunOrNot::{Norun, Run};

// A run is encoded as (128 + length) followed by the byte, so it may span at
// most 127 bytes; a norun is encoded as its length (at most 128) followed by
// the raw bytes.
const RUN_MAX_LEN: usize = 127;
const NORUN_MAX_LEN: usize = 128;

struct RunIterator<'a> {
    data: &'a [u8],
    curidx: usize,
}

impl<'a> RunIterator<'a> {
    fn new(data: &'a [u8]) -> RunIterator<'a> {
        RunIterator { data, curidx: 0 }
    }
}

impl Iterator for RunIterator<'_> {
    type Item = RunOrNot;

    fn next(&mut self) -> Option<Self::Item> {
        if self.curidx == self.data.len() {
            None
        } else {
            let cv = self.data[self.curidx];
            let crun = self.data[self.curidx..]
                .iter()
                .take_while(|&&v| v == cv)
                .take(RUN_MAX_LEN)
                .count();
            let ret = if crun > 2 {
                Run(cv, crun)
            } else {
                Norun(self.curidx, crun)
            };
            self.curidx += crun;
            Some(ret)
        }
    }
}

struct NorunCombineIterator<'a> {
    runiter: RunIterator<'a>,
    prev: Option<RunOrNot>,
}

impl<'a> NorunCombineIterator<'a> {
    fn new(data: &'a [u8]) -> NorunCombineIterator<'a> {
        NorunCombineIterator {
            runiter: RunIterator::new(data),
            prev: None,
        }
    }
}

// Combines sequential noruns produced by RunIterator
impl Iterator for NorunCombineIterator<'_> {
    type Item = RunOrNot;

    fn next(&mut self) -> Option<Self::Item> {
        loop {
            match self.prev.take() {
                Some(Run(c, len)) => {
                    // Just return the stored run
                    return Some(Run(c, len));
                }
                Some(Norun(idx, len)) => {
                    // Let's see if we need to continue the norun
                    match self.runiter.next() {
                        Some(Norun(_, len1)) => {
                            // norun continues
                            let clen = len + len1; // combined length
                            match clen.cmp(&NORUN_MAX_LEN) {
                                Ordering::Equal => return Some(Norun(idx, clen)),
                                Ordering::Greater => {
                                    // combined norun exceeds the maximum length; store the extra part
                                    self.prev =
                                        Some(Norun(idx + NORUN_MAX_LEN, clen - NORUN_MAX_LEN));
                                    // then return a maximal norun
                                    return Some(Norun(idx, NORUN_MAX_LEN));
                                }
                                Ordering::Less => {
                                    // len + len1 < NORUN_MAX_LEN:
                                    // combine and continue the loop
                                    self.prev = Some(Norun(idx, len + len1));
                                }
                            }
                        }
                        Some(Run(c, len1)) => {
                            // Run encountered. Store it
                            self.prev = Some(Run(c, len1));
                            return Some(Norun(idx, len)); // and return the combined norun
                        }
                        None => {
                            // End of sequence
                            return Some(Norun(idx, len)); // return the combined norun
                        }
                    }
                } // End match self.prev.take() == Some(Norun(..))
                None => {
                    // No norun to combine
                    match self.runiter.next() {
                        Some(Norun(idx, len)) => {
                            // store for combining and continue the loop
                            self.prev = Some(Norun(idx, len));
                        }
                        Some(Run(c, len)) => {
                            // Some run. Just return it
                            return Some(Run(c, len));
                        }
                        None => {
                            // That's all, folks
                            return None;
                        }
                    }
                } // End match self.prev.take() == None
            } // End match
        } // End loop
    }
}

// Writes the RLE-compressed form of `data` into `rle`, replacing its previous contents
fn rle_compress(data: &[u8], rle: &mut Vec<u8>) {
    rle.clear();
    if data.is_empty() {
        rle.push(0); // Technically correct. It means "read the next 0 bytes".
        return;
    }
    // Task: split data into chunks of repeating (max 127) and non-repeating bytes (max 128)
    // Prepend a non-repeating chunk with its length
    // Replace a repeating byte with (run length + 128) followed by the byte
    for rnr in NorunCombineIterator::new(data) {
        match rnr {
            Run(c, len) => {
                assert!(len <= 127);
                rle.push(128u8 + len as u8);
                rle.push(c);
            }
            Norun(idx, len) => {
                assert!(len <= 128);
                rle.push(len as u8);
                rle.extend_from_slice(&data[idx..idx + len]);
            }
        }
    }
}
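
// A small worked example of the scheme above (illustrative only): the run of
// four 1s becomes the count byte 128 + 4 followed by the value, and the
// non-repeating tail [2, 3] becomes its length followed by the raw bytes.
#[test]
fn rle_compress_worked_example() {
    let mut rle = Vec::new();
    rle_compress(&[1, 1, 1, 1, 2, 3], &mut rle);
    assert_eq!(rle, vec![132u8, 1, 2, 2, 3]);
}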

fn write_rgbe8<W: Write>(w: &mut W, v: Rgbe8Pixel) -> Result<()> {
    w.write_all(&[v.c[0], v.c[1], v.c[2], v.e])
}

/// Converts `Rgb<f32>` into `Rgbe8Pixel`
pub(crate) fn to_rgbe8(pix: Rgb<f32>) -> Rgbe8Pixel {
    let pix = pix.0;
    let mx = f32::max(pix[0], f32::max(pix[1], pix[2]));
    if mx <= 0.0 {
        Rgbe8Pixel { c: [0, 0, 0], e: 0 }
    } else {
        // let (frac, exp) = mx.frexp(); // not available in stable std
        let exp = mx.log2().floor() as i32 + 1;
        let mul = f32::powi(2.0, exp);
        let mut conv = [0u8; 3];
        for (cv, &sv) in conv.iter_mut().zip(pix.iter()) {
            *cv = f32::trunc(sv / mul * 256.0) as u8;
        }
        Rgbe8Pixel {
            c: conv,
            e: (exp + 128) as u8,
        }
    }
}
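
// A short worked example of the conversion above (illustrative only): for
// `Rgb([1.0, 0.5, 0.25])` the largest component is 1.0, so the shared exponent
// is floor(log2(1.0)) + 1 = 1 (stored as 1 + 128 = 129) and each component is
// scaled by 256 / 2^1.
#[test]
fn to_rgbe8_worked_example() {
    assert_eq!(to_rgbe8(Rgb([1.0, 0.5, 0.25])), rgbe8(128, 64, 32, 129));
}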

#[test]
fn to_rgbe8_test() {
    use crate::codecs::hdr::rgbe8;
    let test_cases = vec![rgbe8(0, 0, 0, 0), rgbe8(1, 1, 128, 128)];
    for &pix in &test_cases {
        assert_eq!(pix, to_rgbe8(pix.to_hdr()));
    }
    for mc in 128..=255 {
        let pix = rgbe8(mc, mc, mc, 100);
        assert_eq!(pix, to_rgbe8(pix.to_hdr()));
        let pix = rgbe8(mc, 0, mc, 130);
        assert_eq!(pix, to_rgbe8(pix.to_hdr()));
        let pix = rgbe8(0, 0, mc, 140);
        assert_eq!(pix, to_rgbe8(pix.to_hdr()));
        let pix = rgbe8(1, 0, mc, 150);
        assert_eq!(pix, to_rgbe8(pix.to_hdr()));
        let pix = rgbe8(1, mc, 10, 128);
        assert_eq!(pix, to_rgbe8(pix.to_hdr()));
        for c in 0..255 {
            // Radiance HDR seems to predate IEEE 754.
            // The exponent can be -128 (represented as 0u8), so some colors cannot be represented in normalized f32.
            // Exclude the exponent value of -128 (0u8) from testing.
            let pix = rgbe8(1, mc, c, if c == 0 { 1 } else { c });
            assert_eq!(pix, to_rgbe8(pix.to_hdr()));
        }
    }
    fn relative_dist(a: Rgb<f32>, b: Rgb<f32>) -> f32 {
        // maximal difference divided by maximal value
        let max_diff =
            a.0.iter()
                .zip(b.0.iter())
                .fold(0.0, |diff, (&a, &b)| f32::max(diff, (a - b).abs()));
        let max_val =
            a.0.iter()
                .chain(b.0.iter())
                .fold(0.0, |maxv, &a| f32::max(maxv, a));
        if max_val == 0.0 {
            0.0
        } else {
            max_diff / max_val
        }
    }
    let test_values = vec![
        0.000_001, 0.000_02, 0.000_3, 0.004, 0.05, 0.6, 7.0, 80.0, 900.0, 1_000.0, 20_000.0,
        300_000.0,
    ];
    for &r in &test_values {
        for &g in &test_values {
            for &b in &test_values {
                let c1 = Rgb([r, g, b]);
                let c2 = to_rgbe8(c1).to_hdr();
                let rel_dist = relative_dist(c1, c2);
                // The maximal value is normalized to the range 128..256, so we have 1/128 precision
                assert!(
                    rel_dist <= 1.0 / 128.0,
                    "Relative distance ({rel_dist}) exceeds 1/128 for {c1:?} and {c2:?}"
                );
            }
        }
    }
}

#[test]
fn runiterator_test() {
    let data = [];
    let mut run_iter = RunIterator::new(&data[..]);
    assert_eq!(run_iter.next(), None);
    let data = [5];
    let mut run_iter = RunIterator::new(&data[..]);
    assert_eq!(run_iter.next(), Some(Norun(0, 1)));
    assert_eq!(run_iter.next(), None);
    let data = [1, 1];
    let mut run_iter = RunIterator::new(&data[..]);
    assert_eq!(run_iter.next(), Some(Norun(0, 2)));
    assert_eq!(run_iter.next(), None);
    let data = [0, 0, 0];
    let mut run_iter = RunIterator::new(&data[..]);
    assert_eq!(run_iter.next(), Some(Run(0u8, 3)));
    assert_eq!(run_iter.next(), None);
    let data = [0, 0, 1, 1];
    let mut run_iter = RunIterator::new(&data[..]);
    assert_eq!(run_iter.next(), Some(Norun(0, 2)));
    assert_eq!(run_iter.next(), Some(Norun(2, 2)));
    assert_eq!(run_iter.next(), None);
    let data = [0, 0, 0, 1, 1];
    let mut run_iter = RunIterator::new(&data[..]);
    assert_eq!(run_iter.next(), Some(Run(0u8, 3)));
    assert_eq!(run_iter.next(), Some(Norun(3, 2)));
    assert_eq!(run_iter.next(), None);
    let data = [1, 2, 2, 2];
    let mut run_iter = RunIterator::new(&data[..]);
    assert_eq!(run_iter.next(), Some(Norun(0, 1)));
    assert_eq!(run_iter.next(), Some(Run(2u8, 3)));
    assert_eq!(run_iter.next(), None);
    let data = [1, 1, 2, 2, 2];
    let mut run_iter = RunIterator::new(&data[..]);
    assert_eq!(run_iter.next(), Some(Norun(0, 2)));
    assert_eq!(run_iter.next(), Some(Run(2u8, 3)));
    assert_eq!(run_iter.next(), None);
    let data = [2; 128];
    let mut run_iter = RunIterator::new(&data[..]);
    assert_eq!(run_iter.next(), Some(Run(2u8, 127)));
    assert_eq!(run_iter.next(), Some(Norun(127, 1)));
    assert_eq!(run_iter.next(), None);
    let data = [2; 129];
    let mut run_iter = RunIterator::new(&data[..]);
    assert_eq!(run_iter.next(), Some(Run(2u8, 127)));
    assert_eq!(run_iter.next(), Some(Norun(127, 2)));
    assert_eq!(run_iter.next(), None);
    let data = [2; 130];
    let mut run_iter = RunIterator::new(&data[..]);
    assert_eq!(run_iter.next(), Some(Run(2u8, 127)));
    assert_eq!(run_iter.next(), Some(Run(2u8, 3)));
    assert_eq!(run_iter.next(), None);
}

#[test]
fn noruncombine_test() {
    fn a<T>(mut v: Vec<T>, mut other: Vec<T>) -> Vec<T> {
        v.append(&mut other);
        v
    }

    let v = [];
    let mut rsi = NorunCombineIterator::new(&v[..]);
    assert_eq!(rsi.next(), None);

    let v = [1];
    let mut rsi = NorunCombineIterator::new(&v[..]);
    assert_eq!(rsi.next(), Some(Norun(0, 1)));
    assert_eq!(rsi.next(), None);

    let v = [2, 2];
    let mut rsi = NorunCombineIterator::new(&v[..]);
    assert_eq!(rsi.next(), Some(Norun(0, 2)));
    assert_eq!(rsi.next(), None);

    let v = [3, 3, 3];
    let mut rsi = NorunCombineIterator::new(&v[..]);
    assert_eq!(rsi.next(), Some(Run(3, 3)));
    assert_eq!(rsi.next(), None);

    let v = [4, 4, 3, 3, 3];
    let mut rsi = NorunCombineIterator::new(&v[..]);
    assert_eq!(rsi.next(), Some(Norun(0, 2)));
    assert_eq!(rsi.next(), Some(Run(3, 3)));
    assert_eq!(rsi.next(), None);

    let v = vec![40; 400];
    let mut rsi = NorunCombineIterator::new(&v[..]);
    assert_eq!(rsi.next(), Some(Run(40, 127)));
    assert_eq!(rsi.next(), Some(Run(40, 127)));
    assert_eq!(rsi.next(), Some(Run(40, 127)));
    assert_eq!(rsi.next(), Some(Run(40, 19)));
    assert_eq!(rsi.next(), None);

    let v = a(a(vec![5; 3], vec![6; 129]), vec![7, 3, 7, 10, 255]);
    let mut rsi = NorunCombineIterator::new(&v[..]);
    assert_eq!(rsi.next(), Some(Run(5, 3)));
    assert_eq!(rsi.next(), Some(Run(6, 127)));
    assert_eq!(rsi.next(), Some(Norun(130, 7)));
    assert_eq!(rsi.next(), None);

    let v = a(a(vec![5; 2], vec![6; 129]), vec![7, 3, 7, 7, 255]);
    let mut rsi = NorunCombineIterator::new(&v[..]);
    assert_eq!(rsi.next(), Some(Norun(0, 2)));
    assert_eq!(rsi.next(), Some(Run(6, 127)));
    assert_eq!(rsi.next(), Some(Norun(129, 7)));
    assert_eq!(rsi.next(), None);

    let v: Vec<_> = std::iter::repeat(())
        .flat_map(|()| 0..2)
        .take(257)
        .collect();
    let mut rsi = NorunCombineIterator::new(&v[..]);
    assert_eq!(rsi.next(), Some(Norun(0, 128)));
    assert_eq!(rsi.next(), Some(Norun(128, 128)));
    assert_eq!(rsi.next(), Some(Norun(256, 1)));
    assert_eq!(rsi.next(), None);
}