Coverage Report

Created: 2026-01-19 07:25

/rust/registry/src/index.crates.io-1949cf8c6b5b557f/pic-scale-safe-0.1.6/src/alpha.rs
Every execution count in this file is 0 — the file is fully uncovered — so the source is shown below without the per-line count column.
/*
 * Copyright (c) Radzivon Bartoshyk, 10/2024. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted provided that the following conditions are met:
 *
 * 1.  Redistributions of source code must retain the above copyright notice, this
 *     list of conditions and the following disclaimer.
 *
 * 2.  Redistributions in binary form must reproduce the above copyright notice,
 *     this list of conditions and the following disclaimer in the documentation
 *     and/or other materials provided with the distribution.
 *
 * 3.  Neither the name of the copyright holder nor the names of its
 *     contributors may be used to endorse or promote products derived from
 *     this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/// Computes `round(v / 255)` via the shift-add reciprocal trick; exact for
/// `v <= 255 * 255`, with the result clamped to the `u8` range.
#[inline]
fn div_by_255(v: u16) -> u8 {
    ((((v + 0x80) >> 8) + v + 0x80) >> 8).min(255) as u8
}
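
A quick sanity sketch (added here, not part of alpha.rs) of the rounding at the half-way points; `div_by_255` is private, so imagine these inside a `#[cfg(test)]` module in this file:

    assert_eq!(div_by_255(127), 0);         // 127/255 ≈ 0.498 rounds down
    assert_eq!(div_by_255(128), 1);         // 128/255 ≈ 0.502 rounds up
    assert_eq!(div_by_255(255 * 200), 200); // exact multiples pass through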

const fn make_unpremultiplication_table() -> [u8; 65536] {
    let mut alpha = 0usize;
    let mut buf = [0u8; 65536];
    while alpha < 256 {
        let mut pixel = 0usize;
        while pixel < 256 {
            if alpha == 0 {
                buf[alpha * 255 + pixel] = 0;
            } else {
                // round(pixel * 255 / alpha), clamped to the u8 range.
                let value = (pixel * 255 + alpha / 2) / alpha;
                buf[alpha * 255 + pixel] = if value > 255 { 255 } else { value as u8 };
            }
            pixel += 1;
        }
        alpha += 1;
    }
    buf
}

/// Keyed by `alpha * 255 + premultiplied_value`. Adjacent alpha bands overlap
/// by one slot, but a premultiplied channel never exceeds its alpha, so the
/// overwritten slots are never looked up.
pub(crate) static UNPREMULTIPLICATION_TABLE: [u8; 65536] = make_unpremultiplication_table();
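
A worked example (added, crate-internal since the table is `pub(crate)`): with alpha = 128, a channel value of 100 premultiplies to `div_by_255(100 * 128) = 50`, and the table maps it back:

    let recovered = UNPREMULTIPLICATION_TABLE[128 * 255 + 50];
    assert_eq!(recovered, 100); // (50 * 255 + 64) / 128 = 100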

/// Associates (premultiplies) alpha in place.
///
/// Note that for scaling, alpha must be *associated*.
///
/// # Arguments
///
/// * `in_place`: RGBA8 slice to premultiply
pub fn premultiply_rgba8(in_place: &mut [u8]) {
    // These loops do not auto-vectorise if written naively, so the body is
    // kept branch-free; the redundant alpha write keeps all four lanes uniform.
    for chunk in in_place.chunks_exact_mut(4) {
        let a = chunk[3] as u16;
        chunk[0] = div_by_255(chunk[0] as u16 * a);
        chunk[1] = div_by_255(chunk[1] as u16 * a);
        chunk[2] = div_by_255(chunk[2] as u16 * a);
        chunk[3] = div_by_255(255 * a); // == a
    }
}
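
A minimal usage sketch (added; it assumes the function is re-exported from the crate root, which may not match the real module layout):

    use pic_scale_safe::premultiply_rgba8;

    let mut px = [200u8, 100, 50, 128]; // RGBA, alpha ≈ 50%
    premultiply_rgba8(&mut px);
    assert_eq!(px, [100, 50, 25, 128]); // each channel scaled by 128/255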

/// Associates (premultiplies) alpha into a newly allocated vector.
///
/// Faster than premultiplying in place if you would otherwise copy first.
/// Note that for scaling, alpha must be *associated*.
///
/// # Arguments
///
/// * `source`: Source slice with RGBA8 data
pub fn premultiplied_rgba8(source: &[u8]) -> Vec<u8> {
    let mut target = vec![0u8; source.len()];
    // Kept branch-free and uniform so the loop auto-vectorises.
    for (dst, src) in target.chunks_exact_mut(4).zip(source.chunks_exact(4)) {
        let a = src[3] as u16;
        dst[0] = div_by_255(src[0] as u16 * a);
        dst[1] = div_by_255(src[1] as u16 * a);
        dst[2] = div_by_255(src[2] as u16 * a);
        dst[3] = div_by_255(255 * a);
    }
    target
}

/// Unpremultiplies (disassociates) alpha in place.
///
/// Note that for scaling, alpha must be *associated*.
///
/// # Arguments
///
/// * `in_place`: RGBA8 slice to work on
pub fn unpremultiply_rgba8(in_place: &mut [u8]) {
    for chunk in in_place.chunks_exact_mut(4) {
        let a = chunk[3];
        let z = a as u16 * 255; // start of this alpha's band in the table
        chunk[0] = UNPREMULTIPLICATION_TABLE[(z + chunk[0] as u16) as usize];
        chunk[1] = UNPREMULTIPLICATION_TABLE[(z + chunk[1] as u16) as usize];
        chunk[2] = UNPREMULTIPLICATION_TABLE[(z + chunk[2] as u16) as usize];
    }
}
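
A round-trip sketch (added): at full opacity the table lookup restores the original channels exactly; at partial alpha the 8-bit quantization makes the round trip lossy:

    let original = [200u8, 100, 50, 255];
    let mut px = original;
    premultiply_rgba8(&mut px);
    unpremultiply_rgba8(&mut px);
    assert_eq!(px, original); // exact only because alpha == 255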

/// Associates (premultiplies) alpha in place.
///
/// Note that for scaling, alpha must be *associated*.
///
/// # Arguments
///
/// * `in_place`: LA8 slice to premultiply
pub fn premultiply_la8(in_place: &mut [u8]) {
    // Kept branch-free and uniform so the loop auto-vectorises.
    for chunk in in_place.chunks_exact_mut(2) {
        let a = chunk[1] as u16;
        chunk[0] = div_by_255(chunk[0] as u16 * a);
        chunk[1] = div_by_255(255 * a); // == a
    }
}

/// Associates (premultiplies) alpha into a newly allocated vector.
///
/// Faster than premultiplying in place if you would otherwise copy first.
/// Note that for scaling, alpha must be *associated*.
///
/// # Arguments
///
/// * `source`: Source slice with LA8 data
pub fn premultiplied_la8(source: &[u8]) -> Vec<u8> {
    let mut target = vec![0u8; source.len()];
    // Kept branch-free and uniform so the loop auto-vectorises.
    for (dst, src) in target.chunks_exact_mut(2).zip(source.chunks_exact(2)) {
        let a = src[1] as u16;
        dst[0] = div_by_255(src[0] as u16 * a);
        dst[1] = div_by_255(255 * a);
    }
    target
}

/// Unpremultiplies (disassociates) alpha in place.
///
/// Note that for scaling, alpha must be *associated*.
///
/// # Arguments
///
/// * `in_place`: LA8 slice to work on
pub fn unpremultiply_la8(in_place: &mut [u8]) {
    // Kept branch-free so the loop can vectorise where possible.
    for chunk in in_place.chunks_exact_mut(2) {
        let a = chunk[1];
        let z = a as u16 * 255;
        chunk[0] = UNPREMULTIPLICATION_TABLE[(z + chunk[0] as u16) as usize];
    }
}

/// Computes `round(v / (2^n - 1))`. The result is expected to fit in `u16`.
#[inline(always)]
fn div_by_2pn_m1(v: u32, n: u32) -> u16 {
    debug_assert!(n > 0 && n <= 16);
    let round = 1 << (n - 1);
    let v = v + round;
    (((v >> n) + v) >> n) as u16
}

#[inline]
fn div_by_1023(v: u32) -> u16 {
    div_by_2pn_m1(v, 10)
}

#[inline]
fn div_by_4095(v: u32) -> u16 {
    div_by_2pn_m1(v, 12)
}

#[inline]
fn div_by_65535(v: u32) -> u16 {
    div_by_2pn_m1(v, 16)
}
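
Spot checks (added; again as if inside a `#[cfg(test)]` module, since these helpers are private) of the generic rounding helper at the 10-bit half-way points:

    assert_eq!(div_by_1023(511), 0);          // 511/1023 ≈ 0.4995 rounds down
    assert_eq!(div_by_1023(512), 1);          // 512/1023 ≈ 0.5005 rounds up
    assert_eq!(div_by_1023(1023 * 700), 700); // exact multiples pass through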

/// Associates (premultiplies) alpha in place.
///
/// Note that for scaling, alpha must be *associated*.
///
/// # Arguments
///
/// * `in_place`: RGBA16 slice to premultiply
/// * `bit_depth`: Bit depth of the image (1..=16)
pub fn premultiply_rgba16(in_place: &mut [u16], bit_depth: u32) {
    // Bit depths 10, 12 and 16 get dedicated loops so the divisor is a
    // constant; keeping each body branch-free lets it auto-vectorise.
    assert!(bit_depth > 0 && bit_depth <= 16);
    let max_colors = (1 << bit_depth) - 1;
    if bit_depth == 10 {
        for chunk in in_place.chunks_exact_mut(4) {
            let a = chunk[3] as u32;
            chunk[0] = div_by_1023(chunk[0] as u32 * a);
            chunk[1] = div_by_1023(chunk[1] as u32 * a);
            chunk[2] = div_by_1023(chunk[2] as u32 * a);
            chunk[3] = div_by_1023(1023 * a);
        }
    } else if bit_depth == 12 {
        for chunk in in_place.chunks_exact_mut(4) {
            let a = chunk[3] as u32;
            chunk[0] = div_by_4095(chunk[0] as u32 * a);
            chunk[1] = div_by_4095(chunk[1] as u32 * a);
            chunk[2] = div_by_4095(chunk[2] as u32 * a);
            chunk[3] = div_by_4095(4095 * a);
        }
    } else if bit_depth == 16 {
        for chunk in in_place.chunks_exact_mut(4) {
            let a = chunk[3] as u32;
            chunk[0] = div_by_65535(chunk[0] as u32 * a);
            chunk[1] = div_by_65535(chunk[1] as u32 * a);
            chunk[2] = div_by_65535(chunk[2] as u32 * a);
            chunk[3] = div_by_65535(65535 * a);
        }
    } else {
        for chunk in in_place.chunks_exact_mut(4) {
            let a = chunk[3] as u32;
            chunk[0] = div_by_2pn_m1(chunk[0] as u32 * a, bit_depth);
            chunk[1] = div_by_2pn_m1(chunk[1] as u32 * a, bit_depth);
            chunk[2] = div_by_2pn_m1(chunk[2] as u32 * a, bit_depth);
            chunk[3] = div_by_2pn_m1(max_colors * a, bit_depth);
        }
    }
}
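
A 10-bit usage sketch (added; same crate-root export assumption as above):

    let mut px = [900u16, 500, 100, 512]; // 10-bit RGBA, alpha ≈ 50%
    premultiply_rgba16(&mut px, 10);
    assert_eq!(px, [450, 250, 50, 512]); // channels scaled by 512/1023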

/// Associates (premultiplies) alpha into a newly allocated vector.
///
/// Faster than premultiplying in place if you would otherwise copy first.
/// Note that for scaling, alpha must be *associated*.
///
/// # Arguments
///
/// * `source`: Source slice with RGBA16 data
/// * `bit_depth`: Bit depth of the image (1..=16)
pub fn premultiplied_rgba16(source: &[u16], bit_depth: u32) -> Vec<u16> {
    let mut target = vec![0u16; source.len()];
    // Bit depths 10, 12 and 16 get dedicated loops so the divisor is a
    // constant; keeping each body branch-free lets it auto-vectorise.
    assert!(bit_depth > 0 && bit_depth <= 16);
    let max_colors = (1 << bit_depth) - 1;
    if bit_depth == 10 {
        for (dst, src) in target.chunks_exact_mut(4).zip(source.chunks_exact(4)) {
            let a = src[3] as u32;
            dst[0] = div_by_1023(src[0] as u32 * a);
            dst[1] = div_by_1023(src[1] as u32 * a);
            dst[2] = div_by_1023(src[2] as u32 * a);
            dst[3] = div_by_1023(1023 * a);
        }
    } else if bit_depth == 12 {
        for (dst, src) in target.chunks_exact_mut(4).zip(source.chunks_exact(4)) {
            let a = src[3] as u32;
            dst[0] = div_by_4095(src[0] as u32 * a);
            dst[1] = div_by_4095(src[1] as u32 * a);
            dst[2] = div_by_4095(src[2] as u32 * a);
            dst[3] = div_by_4095(4095 * a);
        }
    } else if bit_depth == 16 {
        for (dst, src) in target.chunks_exact_mut(4).zip(source.chunks_exact(4)) {
            let a = src[3] as u32;
            dst[0] = div_by_65535(src[0] as u32 * a);
            dst[1] = div_by_65535(src[1] as u32 * a);
            dst[2] = div_by_65535(src[2] as u32 * a);
            dst[3] = div_by_65535(65535 * a);
        }
    } else {
        for (dst, src) in target.chunks_exact_mut(4).zip(source.chunks_exact(4)) {
            let a = src[3] as u32;
            dst[0] = div_by_2pn_m1(src[0] as u32 * a, bit_depth);
            dst[1] = div_by_2pn_m1(src[1] as u32 * a, bit_depth);
            dst[2] = div_by_2pn_m1(src[2] as u32 * a, bit_depth);
            dst[3] = div_by_2pn_m1(max_colors * a, bit_depth);
        }
    }
    target
}

/// Associates (premultiplies) alpha in place for images up to 16 bits per channel.
///
/// Note that for scaling, alpha must be *associated*.
///
/// # Arguments
///
/// * `in_place`: LA16 slice to premultiply
/// * `bit_depth`: Bit depth of the image (1..=16)
pub fn premultiply_la16(in_place: &mut [u16], bit_depth: u32) {
    // Bit depths 10, 12 and 16 get dedicated loops so the divisor is a
    // constant; keeping each body branch-free lets it auto-vectorise.
    assert!(bit_depth > 0 && bit_depth <= 16);
    let max_colors = (1 << bit_depth) - 1;
    if bit_depth == 10 {
        for chunk in in_place.chunks_exact_mut(2) {
            let a = chunk[1] as u32;
            chunk[0] = div_by_1023(chunk[0] as u32 * a);
            chunk[1] = div_by_1023(1023 * a);
        }
    } else if bit_depth == 12 {
        for chunk in in_place.chunks_exact_mut(2) {
            let a = chunk[1] as u32;
            chunk[0] = div_by_4095(chunk[0] as u32 * a);
            chunk[1] = div_by_4095(4095 * a);
        }
    } else if bit_depth == 16 {
        for chunk in in_place.chunks_exact_mut(2) {
            let a = chunk[1] as u32;
            chunk[0] = div_by_65535(chunk[0] as u32 * a);
            chunk[1] = div_by_65535(65535 * a);
        }
    } else {
        for chunk in in_place.chunks_exact_mut(2) {
            let a = chunk[1] as u32;
            chunk[0] = div_by_2pn_m1(chunk[0] as u32 * a, bit_depth);
            chunk[1] = div_by_2pn_m1(max_colors * a, bit_depth);
        }
    }
}

/// Associates (premultiplies) alpha for images up to 16 bits per channel into a newly allocated vector.
///
/// Faster than premultiplying in place if you would otherwise copy first.
/// Note that for scaling, alpha must be *associated*.
///
/// # Arguments
///
/// * `source`: Source slice with LA16 data
/// * `bit_depth`: Bit depth of the image (1..=16)
pub fn premultiplied_la16(source: &[u16], bit_depth: u32) -> Vec<u16> {
    let mut target = vec![0u16; source.len()];
    // Bit depths 10, 12 and 16 get dedicated loops so the divisor is a
    // constant; keeping each body branch-free lets it auto-vectorise.
    assert!(bit_depth > 0 && bit_depth <= 16);
    let max_colors = (1 << bit_depth) - 1;
    if bit_depth == 10 {
        for (dst, src) in target.chunks_exact_mut(2).zip(source.chunks_exact(2)) {
            let a = src[1] as u32;
            dst[0] = div_by_1023(src[0] as u32 * a);
            dst[1] = div_by_1023(1023 * a);
        }
    } else if bit_depth == 12 {
        for (dst, src) in target.chunks_exact_mut(2).zip(source.chunks_exact(2)) {
            let a = src[1] as u32;
            dst[0] = div_by_4095(src[0] as u32 * a);
            dst[1] = div_by_4095(4095 * a);
        }
    } else if bit_depth == 16 {
        for (dst, src) in target.chunks_exact_mut(2).zip(source.chunks_exact(2)) {
            let a = src[1] as u32;
            dst[0] = div_by_65535(src[0] as u32 * a);
            dst[1] = div_by_65535(65535 * a);
        }
    } else {
        for (dst, src) in target.chunks_exact_mut(2).zip(source.chunks_exact(2)) {
            let a = src[1] as u32;
            dst[0] = div_by_2pn_m1(src[0] as u32 * a, bit_depth);
            dst[1] = div_by_2pn_m1(max_colors * a, bit_depth);
        }
    }
    target
}

/// Unpremultiplies (disassociates) alpha in place for images up to 16 bits per channel.
///
/// Note that for scaling, alpha must be *associated*.
///
/// # Arguments
///
/// * `in_place`: LA16 slice to work on
/// * `bit_depth`: Bit depth of the image (1..=16)
pub fn unpremultiply_la16(in_place: &mut [u16], bit_depth: u32) {
    assert!(bit_depth > 0 && bit_depth <= 16);
    let max_colors = (1 << bit_depth) - 1;
    for chunk in in_place.chunks_exact_mut(2) {
        let a = chunk[1] as u32;
        if a != 0 {
            // One reciprocal per pixel instead of a division per channel.
            let a_recip = max_colors as f32 / a as f32;
            chunk[0] = (chunk[0] as f32 * a_recip) as u16;
        }
    }
}

/// Unpremultiplies (disassociates) alpha in place for images up to 16 bits per channel.
///
/// Note that for scaling, alpha must be *associated*.
///
/// # Arguments
///
/// * `in_place`: RGBA16 slice to work on
/// * `bit_depth`: Bit depth of the image (1..=16)
pub fn unpremultiply_rgba16(in_place: &mut [u16], bit_depth: u32) {
    assert!(bit_depth > 0 && bit_depth <= 16);
    let max_colors = (1 << bit_depth) - 1;
    for chunk in in_place.chunks_exact_mut(4) {
        let a = chunk[3] as u32;
        if a != 0 {
            // One reciprocal per pixel instead of a division per channel.
            let a_recip = max_colors as f32 / a as f32;
            chunk[0] = (chunk[0] as f32 * a_recip) as u16;
            chunk[1] = (chunk[1] as f32 * a_recip) as u16;
            chunk[2] = (chunk[2] as f32 * a_recip) as u16;
        }
    }
}
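
A sketch (added) showing that the 16-bit unpremultiply truncates rather than rounds, so a low-alpha round trip drifts slightly:

    let mut px = [450u16, 250, 50, 512]; // premultiplied 10-bit RGBA
    unpremultiply_rgba16(&mut px, 10);
    assert_eq!(px, [899, 499, 99, 512]); // 450 * 1023/512 = 899.12 → 899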

/// Associates (premultiplies) alpha in place.
///
/// Note that for scaling, alpha must be *associated*.
///
/// # Arguments
///
/// * `in_place`: RGBA f32 slice to premultiply
pub fn premultiply_rgba_f32(in_place: &mut [f32]) {
    // Kept branch-free and uniform so the loop auto-vectorises.
    for chunk in in_place.chunks_exact_mut(4) {
        let a = chunk[3];
        chunk[0] *= a;
        chunk[1] *= a;
        chunk[2] *= a;
        chunk[3] = a; // no-op write keeps all four lanes uniform
    }
}
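
Usage sketch (added; an alpha of 0.5 keeps every f32 product exact):

    let mut px = [0.8f32, 0.4, 0.2, 0.5];
    premultiply_rgba_f32(&mut px);
    assert_eq!(px, [0.4, 0.2, 0.1, 0.5]);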

/// Associates (premultiplies) alpha in place.
///
/// Note that for scaling, alpha must be *associated*.
///
/// # Arguments
///
/// * `in_place`: Luma-alpha f32 slice to premultiply
pub fn premultiply_luma_alpha_f32(in_place: &mut [f32]) {
    // Kept branch-free and uniform so the loop auto-vectorises.
    for chunk in in_place.chunks_exact_mut(2) {
        let a = chunk[1];
        chunk[0] *= a;
        chunk[1] = a; // no-op write keeps the loop body uniform
    }
}

/// Associates (premultiplies) alpha into a newly allocated vector.
///
/// Faster than premultiplying in place if you would otherwise copy first.
/// Note that for scaling, alpha must be *associated*.
///
/// # Arguments
///
/// * `source`: Source slice with luma-alpha f32 data
pub fn premultiplied_luma_alpha_f32(source: &[f32]) -> Vec<f32> {
    let mut target = vec![0.; source.len()];
    // Kept branch-free and uniform so the loop auto-vectorises.
    for (dst, src) in target.chunks_exact_mut(2).zip(source.chunks_exact(2)) {
        let a = src[1];
        dst[0] = src[0] * a;
        dst[1] = a;
    }
    target
}

/// Associates (premultiplies) alpha into a newly allocated vector.
///
/// Faster than premultiplying in place if you would otherwise copy first.
/// Note that for scaling, alpha must be *associated*.
///
/// # Arguments
///
/// * `source`: Source slice with RGBA f32 data
pub fn premultiplied_rgba_f32(source: &[f32]) -> Vec<f32> {
    let mut target = vec![0.; source.len()];
    // Kept branch-free and uniform so the loop auto-vectorises.
    for (dst, src) in target.chunks_exact_mut(4).zip(source.chunks_exact(4)) {
        let a = src[3];
        dst[0] = src[0] * a;
        dst[1] = src[1] * a;
        dst[2] = src[2] * a;
        dst[3] = a;
    }
    target
}

/// Unpremultiplies (disassociates) alpha in place.
///
/// Note that for scaling, alpha must be *associated*.
///
/// # Arguments
///
/// * `in_place`: RGBA f32 slice to work on
pub fn unpremultiply_rgba_f32(in_place: &mut [f32]) {
    for chunk in in_place.chunks_exact_mut(4) {
        let a = chunk[3];
        if a != 0. {
            // One reciprocal per pixel instead of a division per channel.
            let a_recip = 1. / a;
            chunk[0] *= a_recip;
            chunk[1] *= a_recip;
            chunk[2] *= a_recip;
            chunk[3] = a;
        }
    }
}
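
A round-trip sketch (added) inverting the earlier premultiply example; the reciprocal of an exact power of two keeps the f32 math exact:

    let mut px = [0.4f32, 0.2, 0.1, 0.5];
    unpremultiply_rgba_f32(&mut px);
    assert_eq!(px, [0.8, 0.4, 0.2, 0.5]);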

/// Unpremultiplies (disassociates) alpha in place.
///
/// Note that for scaling, alpha must be *associated*.
///
/// # Arguments
///
/// * `in_place`: Luma-alpha f32 slice to work on
pub fn unpremultiply_luma_alpha_f32(in_place: &mut [f32]) {
    for chunk in in_place.chunks_exact_mut(2) {
        let a = chunk[1];
        if a != 0. {
            let a_recip = 1. / a;
            chunk[0] *= a_recip;
            chunk[1] = a; // no-op write keeps the loop body uniform
        }
    }
}