Coverage Report

Created: 2025-10-13 06:56

/src/libsndfile/src/ALAC/matrix_enc.c
Every executable line in this file has an execution count of 0 ; the file is entirely uncovered.

/*
 * Copyright (c) 2011 Apple Inc. All rights reserved.
 * Copyright (C) 2012-2014 Erik de Castro Lopo <erikd@mega-nerd.com>
 *
 * @APPLE_APACHE_LICENSE_HEADER_START@
 *
 * Licensed under the Apache License, Version 2.0 (the "License") ;
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @APPLE_APACHE_LICENSE_HEADER_END@
 */

/*
  File:   matrix_enc.c

  Contains: ALAC mixing/matrixing encode routines.

  Copyright:  (c) 2004-2011 Apple, Inc.
*/

#include "config.h"

#include "matrixlib.h"
#include "ALACAudioTypes.h"

/*
    There is no plain middle-side option ; instead there are various mixing
    modes including middle-side, each lossless, as embodied in the mix ()
    and unmix () functions.  These functions exploit a generalized middle-side
    transformation:

    u := [(rL + (m-r)R)/m] ;
    v := L - R ;

    where [ ] denotes integer floor.  The (lossless) inverse is

    L = u + v - [rv/m] ;
    R = L - v ;
*/
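
The identity above can be checked directly. Below is a minimal round-trip sketch (hypothetical helper names, not part of this file) with m = 1 << mixbits and r = mixres, matching the arithmetic of the mix routines below ; like those routines, it relies on arithmetic right-shift of signed values to compute the integer floor.

#include <assert.h>
#include <stdint.h>

/* Hypothetical sketch : mix one L/R pair into u/v, then invert it. */
static void
mix_pair (int32_t L, int32_t R, int32_t mixbits, int32_t mixres, int32_t * u, int32_t * v)
{
  int32_t   m2 = (1 << mixbits) - mixres ;

  *u = (mixres * L + m2 * R) >> mixbits ;   /* [(rL + (m-r)R)/m] */
  *v = L - R ;
}

static void
unmix_pair (int32_t u, int32_t v, int32_t mixbits, int32_t mixres, int32_t * L, int32_t * R)
{
  *L = u + v - ((mixres * v) >> mixbits) ;  /* u + v - [rv/m] */
  *R = *L - v ;
}

int
main (void)
{
  int32_t   u, v, L2, R2 ;

  mix_pair (1234, -987, 2, 1, &u, &v) ;
  unmix_pair (u, v, 2, 1, &L2, &R2) ;
  assert (L2 == 1234 && R2 == -987) ;       /* lossless round trip */
  return 0 ;
}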

// 16-bit routines

void
mix16 (const int32_t * in, uint32_t stride, int32_t * u, int32_t * v, int32_t numSamples, int32_t mixbits, int32_t mixres)
{
  int32_t   j ;

  if (mixres != 0)
  {
    int32_t   mod = 1 << mixbits ;
    int32_t   m2 ;

    /* matrixed stereo */
    m2 = mod - mixres ;
    for (j = 0 ; j < numSamples ; j++)
    {
      int32_t   l, r ;

      l = in [0] >> 16 ;
      r = in [1] >> 16 ;
      in += stride ;
      u [j] = (mixres * l + m2 * r) >> mixbits ;
      v [j] = l - r ;
    }
  }
  else
  {
    /* Conventional separated stereo. */
    for (j = 0 ; j < numSamples ; j++)
    {
      u [j] = in [0] >> 16 ;
      v [j] = in [1] >> 16 ;
      in += stride ;
    }
  }
}
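
A hypothetical call site (buffer names and parameter values assumed, not taken from this file) : the input samples are expected left-justified in 32-bit words, which is why mix16 shifts each channel down by 16 before mixing.

#include <stdint.h>

#include "matrixlib.h"    /* declares mix16 */

#define FRAMES 4096

/* Assumed buffers : interleaved L/R pairs, so stride == 2. */
static int32_t   interleaved [2 * FRAMES] ;  /* 16-bit samples in the top 16 bits */
static int32_t   u [FRAMES], v [FRAMES] ;

/* The mixbits/mixres values are illustrative ; mixres == 0 would select
   the plain separated-stereo path instead. */
void
encode_block (void)
{
  mix16 (interleaved, 2, u, v, FRAMES, 2, 1) ;
}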

// 20-bit routines
// - the 20 bits of data are left-justified in 3 bytes of storage but right-aligned for input/output predictor buffers

void
mix20 (const int32_t * in, uint32_t stride, int32_t * u, int32_t * v, int32_t numSamples, int32_t mixbits, int32_t mixres)
{
  int32_t   l, r ;
  int32_t   j ;

  if (mixres != 0)
  {
    /* matrixed stereo */
    int32_t   mod = 1 << mixbits ;
    int32_t   m2 = mod - mixres ;

    for (j = 0 ; j < numSamples ; j++)
    {
      l = in [0] >> 12 ;
      r = in [1] >> 12 ;
      in += stride ;

      u [j] = (mixres * l + m2 * r) >> mixbits ;
      v [j] = l - r ;
    }
  }
  else
  {
    /* Conventional separated stereo. */
    for (j = 0 ; j < numSamples ; j++)
    {
      u [j] = in [0] >> 12 ;
      v [j] = in [1] >> 12 ;
      in += stride ;
    }
  }
}
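
The shift count of 12 follows from the layout described in the comment above ; a small sketch (assumed layout, and assuming arithmetic right-shift of signed values, as the routines themselves do) :

#include <stdint.h>

/* A 20-bit sample left-justified in a 32-bit word occupies bits 31..12,
   so >> 12 right-aligns it for the predictor buffers. */
int
right_align_20bit (void)
{
  int32_t   word = (int32_t) 0xFFFFF000 ;   /* sample value -1, left-justified */
  int32_t   sample = word >> 12 ;           /* arithmetic shift -> -1 */

  return sample == -1 ;
}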

// 24-bit routines
// - the 24 bits of data are right-justified in the input/output predictor buffers

void
mix24 (const int32_t * in, uint32_t stride, int32_t * u, int32_t * v, int32_t numSamples,
      int32_t mixbits, int32_t mixres, uint16_t * shiftUV, int32_t bytesShifted)
{
  int32_t   l, r ;
  int32_t   shift = bytesShifted * 8 ;
  uint32_t  mask = (1ul << shift) - 1 ;
  int32_t   j, k ;

  if (mixres != 0)
  {
    /* matrixed stereo */
    int32_t   mod = 1 << mixbits ;
    int32_t   m2 = mod - mixres ;

    if (bytesShifted != 0)
    {
      for (j = 0, k = 0 ; j < numSamples ; j++, k += 2)
      {
        l = in [0] >> 8 ;
        r = in [1] >> 8 ;
        in += stride ;

        shiftUV [k + 0] = (uint16_t) (l & mask) ;
        shiftUV [k + 1] = (uint16_t) (r & mask) ;

        l >>= shift ;
        r >>= shift ;

        u [j] = (mixres * l + m2 * r) >> mixbits ;
        v [j] = l - r ;
      }
    }
    else
    {
      for (j = 0 ; j < numSamples ; j++)
      {
        l = in [0] >> 8 ;
        r = in [1] >> 8 ;
        in += stride ;

        u [j] = (mixres * l + m2 * r) >> mixbits ;
        v [j] = l - r ;
      }
    }
  }
  else
  {
    /* Conventional separated stereo. */
    if (bytesShifted != 0)
    {
      for (j = 0, k = 0 ; j < numSamples ; j++, k += 2)
      {
        l = in [0] >> 8 ;
        r = in [1] >> 8 ;
        in += stride ;

        shiftUV [k + 0] = (uint16_t) (l & mask) ;
        shiftUV [k + 1] = (uint16_t) (r & mask) ;

        l >>= shift ;
        r >>= shift ;

        u [j] = l ;
        v [j] = r ;
      }
    }
    else
    {
      for (j = 0 ; j < numSamples ; j++)
      {
        l = in [0] >> 8 ;
        r = in [1] >> 8 ;
        in += stride ;

        u [j] = l ;
        v [j] = r ;
      }
    }
  }
}
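
When bytesShifted is non-zero, mix24 stashes the low bytes of each sample in shiftUV before mixing, so the decoder can re-attach them losslessly after unmixing. A sketch of that split and rejoin (values assumed) :

#include <stdint.h>

int
shift_round_trip (void)
{
  int32_t   sample = 0x123456 ;                 /* 24-bit sample, right-aligned */
  int32_t   bytesShifted = 1 ;
  int32_t   shift = bytesShifted * 8 ;
  uint32_t  mask = (1ul << shift) - 1 ;

  uint16_t  low = (uint16_t) (sample & mask) ;  /* 0x56, saved in shiftUV */
  int32_t   high = sample >> shift ;            /* 0x1234, mixed/predicted */

  /* The decoder re-attaches the saved low bits after unmixing. */
  return ((high << shift) | low) == sample ;
}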

// 32-bit routines
// - note that these really expect the internal data width to be < 32 but the arrays are 32-bit
// - otherwise, the calculations might overflow into the 33rd bit and be lost
// - therefore, these routines deal with the specified "unused lower" bytes in the "shift" buffers

void
mix32 (const int32_t * in, uint32_t stride, int32_t * u, int32_t * v, int32_t numSamples,
      int32_t mixbits, int32_t mixres, uint16_t * shiftUV, int32_t bytesShifted)
{
  int32_t   shift = bytesShifted * 8 ;
  uint32_t  mask = (1ul << shift) - 1 ;
  int32_t   l, r ;
  int32_t   j, k ;

  if (mixres != 0)
  {
    int32_t   mod = 1 << mixbits ;
    int32_t   m2 ;

    //Assert (bytesShifted != 0) ;

    /* matrixed stereo with shift */
    m2 = mod - mixres ;
    for (j = 0, k = 0 ; j < numSamples ; j++, k += 2)
    {
      l = in [0] ;
      r = in [1] ;
      in += stride ;

      shiftUV [k + 0] = (uint16_t) (l & mask) ;
      shiftUV [k + 1] = (uint16_t) (r & mask) ;

      l >>= shift ;
      r >>= shift ;

      u [j] = (mixres * l + m2 * r) >> mixbits ;
      v [j] = l - r ;
    }
  }
  else
  {
    if (bytesShifted == 0)
    {
      /* de-interleaving w/o shift */
      for (j = 0 ; j < numSamples ; j++)
      {
        u [j] = in [0] ;
        v [j] = in [1] ;
        in += stride ;
      }
    }
    else
    {
      /* de-interleaving with shift */
      for (j = 0, k = 0 ; j < numSamples ; j++, k += 2)
      {
        l = in [0] ;
        r = in [1] ;
        in += stride ;

        shiftUV [k + 0] = (uint16_t) (l & mask) ;
        shiftUV [k + 1] = (uint16_t) (r & mask) ;

        l >>= shift ;
        r >>= shift ;

        u [j] = l ;
        v [j] = r ;
      }
    }
  }
}
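
The overflow caveat in the 32-bit comment can be made concrete : the matrixed sum scales a sample by up to 1 << mixbits, so full-width 32-bit samples would overflow int32_t ; shifting out bytesShifted * 8 low bits first provides the headroom, which is why the matrixed path effectively requires bytesShifted != 0 (see the commented-out Assert). An illustration with assumed values :

#include <stdint.h>

int
needs_headroom (void)
{
  int32_t   mixbits = 2, mixres = 1 ;
  int32_t   m2 = (1 << mixbits) - mixres ;  /* 3 */

  /* Worst case of mixres * l + m2 * r with full-scale inputs :
     (mixres + m2) * INT32_MAX == 4 * INT32_MAX, far beyond int32_t.
     Pre-shifting l and r right by bytesShifted * 8 bits avoids this. */
  int64_t   worst = ((int64_t) mixres + m2) * INT32_MAX ;

  return worst > INT32_MAX ;
}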