Coverage Report

Created: 2018-09-25 14:53

/src/mozilla-central/dom/media/webaudio/AnalyserNode.cpp
All instrumented lines in this file report a count of 0 (the file is uncovered).
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim:set ts=2 sw=2 sts=2 et cindent: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "mozilla/dom/AnalyserNode.h"
#include "mozilla/dom/AnalyserNodeBinding.h"
#include "AudioNodeEngine.h"
#include "AudioNodeStream.h"
#include "mozilla/Mutex.h"
#include "mozilla/PodOperations.h"

namespace mozilla {

static const uint32_t MAX_FFT_SIZE = 32768;
static const size_t CHUNK_COUNT = MAX_FFT_SIZE >> WEBAUDIO_BLOCK_SIZE_BITS;
static_assert(MAX_FFT_SIZE == CHUNK_COUNT * WEBAUDIO_BLOCK_SIZE,
              "MAX_FFT_SIZE must be a multiple of WEBAUDIO_BLOCK_SIZE");
static_assert((CHUNK_COUNT & (CHUNK_COUNT - 1)) == 0,
              "CHUNK_COUNT must be power of 2 for remainder behavior");

namespace dom {

class AnalyserNodeEngine final : public AudioNodeEngine
{
  class TransferBuffer final : public Runnable
  {
  public:
    TransferBuffer(AudioNodeStream* aStream, const AudioChunk& aChunk)
      : Runnable("dom::AnalyserNodeEngine::TransferBuffer")
      , mStream(aStream)
      , mChunk(aChunk)
    {
    }

    NS_IMETHOD Run() override
    {
      RefPtr<AnalyserNode> node =
        static_cast<AnalyserNode*>(mStream->Engine()->NodeMainThread());
      if (node) {
        node->AppendChunk(mChunk);
      }
      return NS_OK;
    }

  private:
    RefPtr<AudioNodeStream> mStream;
    AudioChunk mChunk;
  };

public:
  explicit AnalyserNodeEngine(AnalyserNode* aNode)
    : AudioNodeEngine(aNode)
  {
    MOZ_ASSERT(NS_IsMainThread());
  }

  virtual void ProcessBlock(AudioNodeStream* aStream,
                            GraphTime aFrom,
                            const AudioBlock& aInput,
                            AudioBlock* aOutput,
                            bool* aFinished) override
  {
    *aOutput = aInput;

    if (aInput.IsNull()) {
      // If AnalyserNode::mChunks has only null chunks, then there is no need
      // to send further null chunks.
      if (mChunksToProcess == 0) {
        return;
      }

      --mChunksToProcess;
      if (mChunksToProcess == 0) {
        aStream->ScheduleCheckForInactive();
      }

    } else {
      // This many null chunks will be required to empty AnalyserNode::mChunks.
      mChunksToProcess = CHUNK_COUNT;
    }

    RefPtr<TransferBuffer> transfer =
      new TransferBuffer(aStream, aInput.AsAudioChunk());
    mAbstractMainThread->Dispatch(transfer.forget());
  }

  virtual bool IsActive() const override
  {
    return mChunksToProcess != 0;
  }

  virtual size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override
  {
    return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf);
  }

  uint32_t mChunksToProcess = 0;
};

/* static */ already_AddRefed<AnalyserNode>
AnalyserNode::Create(AudioContext& aAudioContext,
                     const AnalyserOptions& aOptions,
                     ErrorResult& aRv)
{
  if (aAudioContext.CheckClosed(aRv)) {
    return nullptr;
  }

  RefPtr<AnalyserNode> analyserNode = new AnalyserNode(&aAudioContext);

  analyserNode->Initialize(aOptions, aRv);
  if (NS_WARN_IF(aRv.Failed())) {
    return nullptr;
  }

  analyserNode->SetFftSize(aOptions.mFftSize, aRv);
  if (NS_WARN_IF(aRv.Failed())) {
    return nullptr;
  }

  analyserNode->SetMinAndMaxDecibels(aOptions.mMinDecibels,
                                     aOptions.mMaxDecibels,
                                     aRv);
  if (NS_WARN_IF(aRv.Failed())) {
    return nullptr;
  }

  analyserNode->SetSmoothingTimeConstant(aOptions.mSmoothingTimeConstant, aRv);
  if (NS_WARN_IF(aRv.Failed())) {
    return nullptr;
  }

  return analyserNode.forget();
}

AnalyserNode::AnalyserNode(AudioContext* aContext)
  : AudioNode(aContext,
              1,
              ChannelCountMode::Max,
              ChannelInterpretation::Speakers)
  , mAnalysisBlock(2048)
  , mMinDecibels(-100.)
  , mMaxDecibels(-30.)
  , mSmoothingTimeConstant(.8)
{
  mStream = AudioNodeStream::Create(aContext,
                                    new AnalyserNodeEngine(this),
                                    AudioNodeStream::NO_STREAM_FLAGS,
                                    aContext->Graph());

  // Enough chunks must be recorded to handle the case of fftSize being
  // increased to maximum immediately before getFloatTimeDomainData() is
  // called, for example.
  Unused << mChunks.SetLength(CHUNK_COUNT, fallible);

  AllocateBuffer();
}

size_t
AnalyserNode::SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const
{
  size_t amount = AudioNode::SizeOfExcludingThis(aMallocSizeOf);
  amount += mAnalysisBlock.SizeOfExcludingThis(aMallocSizeOf);
  amount += mChunks.ShallowSizeOfExcludingThis(aMallocSizeOf);
  amount += mOutputBuffer.ShallowSizeOfExcludingThis(aMallocSizeOf);
  return amount;
}

size_t
AnalyserNode::SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const
{
  return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf);
}

JSObject*
AnalyserNode::WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto)
{
  return AnalyserNode_Binding::Wrap(aCx, this, aGivenProto);
}
182
183
void
184
AnalyserNode::SetFftSize(uint32_t aValue, ErrorResult& aRv)
185
0
{
186
0
  // Disallow values that are not a power of 2 and outside the [32,32768] range
187
0
  if (aValue < 32 ||
188
0
      aValue > MAX_FFT_SIZE ||
189
0
      (aValue & (aValue - 1)) != 0) {
190
0
    aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR);
191
0
    return;
192
0
  }
193
0
  if (FftSize() != aValue) {
194
0
    mAnalysisBlock.SetFFTSize(aValue);
195
0
    AllocateBuffer();
196
0
  }
197
0
}

void
AnalyserNode::SetMinDecibels(double aValue, ErrorResult& aRv)
{
  if (aValue >= mMaxDecibels) {
    aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR);
    return;
  }
  mMinDecibels = aValue;
}

void
AnalyserNode::SetMaxDecibels(double aValue, ErrorResult& aRv)
{
  if (aValue <= mMinDecibels) {
    aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR);
    return;
  }
  mMaxDecibels = aValue;
}

void
AnalyserNode::SetMinAndMaxDecibels(double aMinValue, double aMaxValue, ErrorResult& aRv)
{
  if (aMinValue >= aMaxValue) {
    aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR);
    return;
  }
  mMinDecibels = aMinValue;
  mMaxDecibels = aMaxValue;
}

void
AnalyserNode::SetSmoothingTimeConstant(double aValue, ErrorResult& aRv)
{
  if (aValue < 0 || aValue > 1) {
    aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR);
    return;
  }
  mSmoothingTimeConstant = aValue;
}

void
AnalyserNode::GetFloatFrequencyData(const Float32Array& aArray)
{
  if (!FFTAnalysis()) {
    // Might fail to allocate memory
    return;
  }

  aArray.ComputeLengthAndData();

  float* buffer = aArray.Data();
  size_t length = std::min(size_t(aArray.Length()), mOutputBuffer.Length());

  for (size_t i = 0; i < length; ++i) {
    buffer[i] =
      WebAudioUtils::ConvertLinearToDecibels(mOutputBuffer[i],
                                             -std::numeric_limits<float>::infinity());
  }
}

void
AnalyserNode::GetByteFrequencyData(const Uint8Array& aArray)
{
  if (!FFTAnalysis()) {
    // Might fail to allocate memory
    return;
  }

  const double rangeScaleFactor = 1.0 / (mMaxDecibels - mMinDecibels);

  aArray.ComputeLengthAndData();

  unsigned char* buffer = aArray.Data();
  size_t length = std::min(size_t(aArray.Length()), mOutputBuffer.Length());

  for (size_t i = 0; i < length; ++i) {
    const double decibels = WebAudioUtils::ConvertLinearToDecibels(mOutputBuffer[i], mMinDecibels);
    // scale down the value to the range of [0, UCHAR_MAX]
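    // i.e. clamp(UCHAR_MAX * (decibels - mMinDecibels) / (mMaxDecibels - mMinDecibels)),
    // so that mMinDecibels maps to 0 and mMaxDecibels maps to UCHAR_MAX.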
    const double scaled = std::max(0.0, std::min(double(UCHAR_MAX),
                                                 UCHAR_MAX * (decibels - mMinDecibels) * rangeScaleFactor));
    buffer[i] = static_cast<unsigned char>(scaled);
  }
}

void
AnalyserNode::GetFloatTimeDomainData(const Float32Array& aArray)
{
  aArray.ComputeLengthAndData();

  float* buffer = aArray.Data();
  size_t length = std::min(aArray.Length(), FftSize());

  GetTimeDomainData(buffer, length);
}

void
AnalyserNode::GetByteTimeDomainData(const Uint8Array& aArray)
{
  aArray.ComputeLengthAndData();

  size_t length = std::min(aArray.Length(), FftSize());

  AlignedTArray<float> tmpBuffer;
  if (!tmpBuffer.SetLength(length, fallible)) {
    return;
  }

  GetTimeDomainData(tmpBuffer.Elements(), length);

  unsigned char* buffer = aArray.Data();
  for (size_t i = 0; i < length; ++i) {
    const float value = tmpBuffer[i];
    // scale the value to the range of [0, UCHAR_MAX]
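    // Nominal [-1, 1] samples map linearly onto [0, 255]: a sample of 0.0
    // becomes 128, and out-of-range samples are clamped.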
    const float scaled = std::max(0.0f, std::min(float(UCHAR_MAX),
                                                 128.0f * (value + 1.0f)));
    buffer[i] = static_cast<unsigned char>(scaled);
  }
}

bool
AnalyserNode::FFTAnalysis()
{
  AlignedTArray<float> tmpBuffer;
  size_t fftSize = FftSize();
  if (!tmpBuffer.SetLength(fftSize, fallible)) {
    return false;
  }

  float* inputBuffer = tmpBuffer.Elements();
  GetTimeDomainData(inputBuffer, fftSize);
  ApplyBlackmanWindow(inputBuffer, fftSize);
  mAnalysisBlock.PerformFFT(inputBuffer);

  // Normalize so that an input sine wave at 0dBfs registers as 0dBfs (undo FFT scaling factor).
  const double magnitudeScale = 1.0 / fftSize;

  for (uint32_t i = 0; i < mOutputBuffer.Length(); ++i) {
    double scalarMagnitude = NS_hypot(mAnalysisBlock.RealData(i),
                                      mAnalysisBlock.ImagData(i)) *
                             magnitudeScale;
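    // Exponential time-averaging of the magnitude, weighted by
    // mSmoothingTimeConstant (the AnalyserNode smoothingTimeConstant).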
    mOutputBuffer[i] = mSmoothingTimeConstant * mOutputBuffer[i] +
                       (1.0 - mSmoothingTimeConstant) * scalarMagnitude;
  }

  return true;
}

void
AnalyserNode::ApplyBlackmanWindow(float* aBuffer, uint32_t aSize)
{
  double alpha = 0.16;
  double a0 = 0.5 * (1.0 - alpha);
  double a1 = 0.5;
  double a2 = 0.5 * alpha;
0
355
0
  for (uint32_t i = 0; i < aSize; ++i) {
356
0
    double x = double(i) / aSize;
357
0
    double window = a0 - a1 * cos(2 * M_PI * x) + a2 * cos(4 * M_PI * x);
358
0
    aBuffer[i] *= window;
359
0
  }
360
0
}

bool
AnalyserNode::AllocateBuffer()
{
  bool result = true;
  if (mOutputBuffer.Length() != FrequencyBinCount()) {
    if (!mOutputBuffer.SetLength(FrequencyBinCount(), fallible)) {
      return false;
    }
    memset(mOutputBuffer.Elements(), 0, sizeof(float) * FrequencyBinCount());
  }
  return result;
}

void
AnalyserNode::AppendChunk(const AudioChunk& aChunk)
{
  if (mChunks.Length() == 0) {
    return;
  }

  ++mCurrentChunk;
  mChunks[mCurrentChunk & (CHUNK_COUNT - 1)] = aChunk;
}

// Reads into aData the oldest aLength samples of the fftSize most recent
// samples.
void
AnalyserNode::GetTimeDomainData(float* aData, size_t aLength)
{
  size_t fftSize = FftSize();
  MOZ_ASSERT(aLength <= fftSize);

  if (mChunks.Length() == 0) {
    PodZero(aData, aLength);
    return;
  }

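  // readChunk is the ring-buffer slot holding the oldest of the fftSize most
  // recent samples; readIndex is that sample's offset within its
  // WEBAUDIO_BLOCK_SIZE-sample block, which is non-zero only when fftSize is
  // smaller than WEBAUDIO_BLOCK_SIZE.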
  size_t readChunk =
    mCurrentChunk - ((fftSize - 1) >> WEBAUDIO_BLOCK_SIZE_BITS);
  size_t readIndex = (0 - fftSize) & (WEBAUDIO_BLOCK_SIZE - 1);
  MOZ_ASSERT(readIndex == 0 || readIndex + fftSize == WEBAUDIO_BLOCK_SIZE);

  for (size_t writeIndex = 0; writeIndex < aLength; ) {
    const AudioChunk& chunk = mChunks[readChunk & (CHUNK_COUNT - 1)];
    const size_t channelCount = chunk.ChannelCount();
    size_t copyLength =
      std::min<size_t>(aLength - writeIndex, WEBAUDIO_BLOCK_SIZE);
    float* dataOut = &aData[writeIndex];

    if (channelCount == 0) {
      PodZero(dataOut, copyLength);
    } else {
      float scale = chunk.mVolume / channelCount;
      { // channel 0
        auto channelData =
          static_cast<const float*>(chunk.mChannelData[0]) + readIndex;
        AudioBufferCopyWithScale(channelData, scale, dataOut, copyLength);
      }
      for (uint32_t i = 1; i < channelCount; ++i) {
        auto channelData =
          static_cast<const float*>(chunk.mChannelData[i]) + readIndex;
        AudioBufferAddWithScale(channelData, scale, dataOut, copyLength);
      }
    }

    readChunk++;
    writeIndex += copyLength;
  }
}

} // namespace dom
} // namespace mozilla