Coverage Report

Created: 2018-09-25 14:53

/src/mozilla-central/dom/media/webaudio/ConvolverNode.cpp
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim:set ts=2 sw=2 sts=2 et cindent: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "ConvolverNode.h"
#include "mozilla/dom/ConvolverNodeBinding.h"
#include "nsAutoPtr.h"
#include "AlignmentUtils.h"
#include "AudioNodeEngine.h"
#include "AudioNodeStream.h"
#include "blink/Reverb.h"
#include "PlayingRefChangeHandler.h"

namespace mozilla {
namespace dom {

NS_IMPL_CYCLE_COLLECTION_INHERITED(ConvolverNode, AudioNode, mBuffer)

NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION(ConvolverNode)
NS_INTERFACE_MAP_END_INHERITING(AudioNode)

NS_IMPL_ADDREF_INHERITED(ConvolverNode, AudioNode)
NS_IMPL_RELEASE_INHERITED(ConvolverNode, AudioNode)

class ConvolverNodeEngine final : public AudioNodeEngine
{
  typedef PlayingRefChangeHandler PlayingRefChanged;
public:
  ConvolverNodeEngine(AudioNode* aNode, bool aNormalize)
    : AudioNodeEngine(aNode)
    , mUseBackgroundThreads(!aNode->Context()->IsOffline())
    , mNormalize(aNormalize)
  {
  }

  // Indicates how the right output channel is generated.
  enum class RightConvolverMode {
    // A right convolver is always used when there is more than one impulse
    // response channel.
    Always,
    // With a single response channel, the mode may be either Direct or
    // Difference.  The decision on which to use is made when stereo input is
    // received.  Once the right convolver is in use, convolver state is
    // suitable only for the selected mode, and so the mode cannot change
    // until the right convolver contains only silent history.
    //
    // With Direct mode, each convolver processes a corresponding channel.
    // This mode is selected when input is initially stereo or
    // channelInterpretation is "discrete" at the time of starting the right
    // convolver when input changes from non-silent mono to stereo.
    Direct,
    // Difference mode is selected if channelInterpretation is "speakers" at
    // the time of starting the right convolver when the input changes from
    // mono to stereo.
    //
    // When non-silent input is initially mono, with a single response
    // channel, the right output channel is not produced until input becomes
    // stereo.  Only a single convolver is used for mono processing.  When
    // stereo input arrives after mono input, output must be as if the mono
    // signal remaining in the left convolver is up-mixed, but the right
    // convolver has not been initialized with the history of the mono input.
    // Copying the state of the left convolver into the right convolver is not
    // desirable, because there is considerable state to copy, and the
    // different convolvers are intended to process out of phase, which means
    // that state from one convolver would not directly map to state in
    // another convolver.
    //
    // Instead the distributive property of convolution is used to generate
    // the right output channel using information in the left output channel.
    // Using l and r to denote the left and right channel input signals, g the
    // impulse response, and * convolution, the convolution of the right
    // channel can be given by
    //
    //   r * g = (l + (r - l)) * g
    //         = l * g + (r - l) * g
    //
    // The left convolver continues to process the left channel l to produce
    // l * g.  The right convolver processes the difference of input channel
    // signals r - l to produce (r - l) * g.  The outputs of the two
    // convolvers are added to generate the right channel output r * g.
    //
    // The benefit of doing this is that the history of the r - l input for a
    // "speakers" up-mixed mono signal is zero, and so an empty convolver
    // already has exactly the right history for mixing the previous mono
    // signal with the new stereo signal.
    Difference
  };
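
  // A minimal illustrative sketch (hypothetical helper; it is not called
  // anywhere and is not part of the engine's processing path): it demonstrates
  // the distributive-property identity behind RightConvolverMode::Difference
  // using a naive direct convolution.  Summing the left convolver output
  // l * g and the difference convolver output (r - l) * g reproduces the
  // directly convolved right channel r * g.
  static void NaiveDifferenceModeSketch(const float* aLeft,
                                        const float* aRight,
                                        size_t aFrameCount,
                                        const float* aResponse,
                                        size_t aResponseLength,
                                        float* aRightOut)
  {
    for (size_t n = 0; n < aFrameCount; ++n) {
      float leftOutput = 0.0f;       // (l * g)[n]
      float differenceOutput = 0.0f; // ((r - l) * g)[n]
      for (size_t k = 0; k <= n && k < aResponseLength; ++k) {
        leftOutput += aResponse[k] * aLeft[n - k];
        differenceOutput += aResponse[k] * (aRight[n - k] - aLeft[n - k]);
      }
      // r * g = (l + (r - l)) * g = l * g + (r - l) * g
      aRightOut[n] = leftOutput + differenceOutput;
    }
  }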

  enum Parameters {
    SAMPLE_RATE,
    NORMALIZE
  };
  void SetInt32Parameter(uint32_t aIndex, int32_t aParam) override
  {
    switch (aIndex) {
    case NORMALIZE:
      mNormalize = !!aParam;
      break;
    default:
      NS_ERROR("Bad ConvolverNodeEngine Int32Parameter");
    }
  }
  void SetDoubleParameter(uint32_t aIndex, double aParam) override
  {
    switch (aIndex) {
    case SAMPLE_RATE:
      mSampleRate = aParam;
      // The buffer is passed after the sample rate.
      // mReverb will be set using this sample rate when the buffer is received.
      break;
    default:
      NS_ERROR("Bad ConvolverNodeEngine DoubleParameter");
    }
  }
  void SetBuffer(AudioChunk&& aBuffer) override
  {
    // Note about empirical tuning (this is copied from Blink)
    // The maximum FFT size affects reverb performance and accuracy.
    // If the reverb is single-threaded and processes entirely in the real-time audio thread,
    // it's important not to make this too high.  In this case 8192 is a good value.
    // But, the Reverb object is multi-threaded, so we want this as high as possible without losing too much accuracy.
    // Very large FFTs will have worse phase errors. Given these constraints 32768 is a good compromise.
    const size_t MaxFFTSize = 32768;

    // Reset.
    mRemainingLeftOutput = INT32_MIN;
    mRemainingRightOutput = 0;
    mRemainingRightHistory = 0;

    if (aBuffer.IsNull() || !mSampleRate) {
      mReverb = nullptr;
      return;
    }

    // Assume for now that convolution of channel difference is not required.
    // Direct may change to Difference during processing.
    mRightConvolverMode =
      aBuffer.ChannelCount() == 1 ? RightConvolverMode::Direct
      : RightConvolverMode::Always;

    mReverb = new WebCore::Reverb(aBuffer, MaxFFTSize, mUseBackgroundThreads,
                                  mNormalize, mSampleRate);
  }

  void AllocateReverbInput(const AudioBlock& aInput,
                           uint32_t aTotalChannelCount)
  {
    uint32_t inputChannelCount = aInput.ChannelCount();
    MOZ_ASSERT(inputChannelCount <= aTotalChannelCount);
    mReverbInput.AllocateChannels(aTotalChannelCount);
    // Pre-multiply the input's volume
    for (uint32_t i = 0; i < inputChannelCount; ++i) {
      const float* src = static_cast<const float*>(aInput.mChannelData[i]);
      float* dest = mReverbInput.ChannelFloatsForWrite(i);
      AudioBlockCopyChannelWithScale(src, aInput.mVolume, dest);
    }
    // Fill remaining channels with silence
    for (uint32_t i = inputChannelCount; i < aTotalChannelCount; ++i) {
      float* dest = mReverbInput.ChannelFloatsForWrite(i);
      std::fill_n(dest, WEBAUDIO_BLOCK_SIZE, 0.0f);
    }
  }

  void ProcessBlock(AudioNodeStream* aStream,
                    GraphTime aFrom,
                    const AudioBlock& aInput,
                    AudioBlock* aOutput,
                    bool* aFinished) override;

  bool IsActive() const override
  {
    return mRemainingLeftOutput != INT32_MIN;
  }

  size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const override
  {
    size_t amount = AudioNodeEngine::SizeOfExcludingThis(aMallocSizeOf);

    amount += mReverbInput.SizeOfExcludingThis(aMallocSizeOf, false);

    if (mReverb) {
      amount += mReverb->sizeOfIncludingThis(aMallocSizeOf);
    }

    return amount;
  }

  size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override
  {
    return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf);
  }

private:
  // Keeping mReverbInput across process calls avoids unnecessary reallocation.
  AudioBlock mReverbInput;
  nsAutoPtr<WebCore::Reverb> mReverb;
  // Tracks samples of the tail remaining to be output.  INT32_MIN is a
  // special value to indicate that the end of any previous tail has been
  // handled.
  int32_t mRemainingLeftOutput = INT32_MIN;
  // mRemainingRightOutput and mRemainingRightHistory are only used when
  // mRightConvolverMode != Always.  There is no special handling required at
  // the end of tail times and so INT32_MIN is not used.
  // mRemainingRightOutput tracks how much longer this node needs to continue
  // to produce a right output channel.
  int32_t mRemainingRightOutput = 0;
  // mRemainingRightHistory tracks how much silent input would be required to
  // drain the right convolver, which may sometimes be longer than the period
  // for which a right output channel is required.
  int32_t mRemainingRightHistory = 0;
  float mSampleRate = 0.0f;
  RightConvolverMode mRightConvolverMode = RightConvolverMode::Always;
  bool mUseBackgroundThreads;
  bool mNormalize;
};

static void
AddScaledLeftToRight(AudioBlock* aBlock, float aScale)
{
  const float* left = static_cast<const float*>(aBlock->mChannelData[0]);
  float* right = aBlock->ChannelFloatsForWrite(1);
  AudioBlockAddChannelWithScale(left, aScale, right);
}

void
ConvolverNodeEngine::ProcessBlock(AudioNodeStream* aStream,
                                  GraphTime aFrom,
                                  const AudioBlock& aInput,
                                  AudioBlock* aOutput,
                                  bool* aFinished)
{
  if (!mReverb) {
    aOutput->SetNull(WEBAUDIO_BLOCK_SIZE);
    return;
  }

  uint32_t inputChannelCount = aInput.ChannelCount();
  if (aInput.IsNull()) {
    if (mRemainingLeftOutput > 0) {
      mRemainingLeftOutput -= WEBAUDIO_BLOCK_SIZE;
      AllocateReverbInput(aInput, 1); // floats for silence
    } else {
      if (mRemainingLeftOutput != INT32_MIN) {
        mRemainingLeftOutput = INT32_MIN;
        MOZ_ASSERT(mRemainingRightOutput <= 0);
        MOZ_ASSERT(mRemainingRightHistory <= 0);
        aStream->ScheduleCheckForInactive();
        RefPtr<PlayingRefChanged> refchanged =
          new PlayingRefChanged(aStream, PlayingRefChanged::RELEASE);
        aStream->Graph()->
          DispatchToMainThreadAfterStreamStateUpdate(refchanged.forget());
      }
      aOutput->SetNull(WEBAUDIO_BLOCK_SIZE);
      return;
    }
  } else {
    if (mRemainingLeftOutput <= 0) {
      RefPtr<PlayingRefChanged> refchanged =
        new PlayingRefChanged(aStream, PlayingRefChanged::ADDREF);
      aStream->Graph()->
        DispatchToMainThreadAfterStreamStateUpdate(refchanged.forget());
    }

    // Use mVolume as a flag to detect whether AllocateReverbInput() gets
    // called.
    mReverbInput.mVolume = 0.0f;

    // Special handling of input channel count changes is used when there is
    // only a single impulse response channel.  See RightConvolverMode.
    if (mRightConvolverMode != RightConvolverMode::Always) {
      ChannelInterpretation channelInterpretation =
        aStream->GetChannelInterpretation();
      if (inputChannelCount == 2) {
        if (mRemainingRightHistory <= 0) {
          // Will start the second convolver.  Choose to convolve the right
          // channel directly if there is no left tail to up-mix or up-mixing
          // is "discrete".
          mRightConvolverMode =
            (mRemainingLeftOutput <= 0 ||
             channelInterpretation == ChannelInterpretation::Discrete) ?
            RightConvolverMode::Direct : RightConvolverMode::Difference;
        }
        // The extra WEBAUDIO_BLOCK_SIZE is subtracted below.
        mRemainingRightOutput =
          mReverb->impulseResponseLength() + WEBAUDIO_BLOCK_SIZE;
        mRemainingRightHistory = mRemainingRightOutput;
        if (mRightConvolverMode == RightConvolverMode::Difference) {
          AllocateReverbInput(aInput, 2);
          // Subtract left from right.
          AddScaledLeftToRight(&mReverbInput, -1.0f);
        }
      } else if (mRemainingRightHistory > 0) {
        // There is one channel of input, but a second convolver also
        // requires input.  Up-mix appropriately for the second convolver.
        if ((mRightConvolverMode == RightConvolverMode::Difference) ^
            (channelInterpretation == ChannelInterpretation::Discrete)) {
          MOZ_ASSERT(
            (mRightConvolverMode == RightConvolverMode::Difference &&
             channelInterpretation == ChannelInterpretation::Speakers) ||
            (mRightConvolverMode == RightConvolverMode::Direct &&
             channelInterpretation == ChannelInterpretation::Discrete));
          // The state is one of the following combinations:
          // 1) Difference and speakers.
          //    Up-mixing gives r = l.
          //    The input to the second convolver is r - l.
          // 2) Direct and discrete.
          //    Up-mixing gives r = 0.
          //    The input to the second convolver is r.
          //
          // In each case the input for the second convolver is silence, which
          // will drain the convolver.
          AllocateReverbInput(aInput, 2);
        } else {
          if (channelInterpretation == ChannelInterpretation::Discrete) {
            MOZ_ASSERT(mRightConvolverMode == RightConvolverMode::Difference);
            // channelInterpretation has changed since the second convolver
            // was added.  "discrete" up-mixing of input would produce a
            // silent right channel r = 0, but the second convolver needs
            // r - l for RightConvolverMode::Difference.
            AllocateReverbInput(aInput, 2);
            AddScaledLeftToRight(&mReverbInput, -1.0f);
          } else {
            MOZ_ASSERT(channelInterpretation ==
                       ChannelInterpretation::Speakers);
            MOZ_ASSERT(mRightConvolverMode == RightConvolverMode::Direct);
            // The Reverb will essentially up-mix the single input channel by
            // feeding it into both convolvers.
          }
          // The second convolver does not have silent input, and so it will
          // not drain.  It will need to continue processing up-mixed input
          // because the next input block may be stereo, which would be mixed
          // with the signal remaining in the convolvers.
          // The extra WEBAUDIO_BLOCK_SIZE is subtracted below.
          mRemainingRightHistory =
            mReverb->impulseResponseLength() + WEBAUDIO_BLOCK_SIZE;
        }
      }
    }

    if (mReverbInput.mVolume == 0.0f) { // not yet set
      if (aInput.mVolume != 1.0f) {
        AllocateReverbInput(aInput, inputChannelCount); // pre-multiply
      } else {
        mReverbInput = aInput;
      }
    }

    mRemainingLeftOutput = mReverb->impulseResponseLength();
    MOZ_ASSERT(mRemainingLeftOutput > 0);
  }

  // "The ConvolverNode produces a mono output only in the single case where
  // there is a single input channel and a single-channel buffer."
  uint32_t outputChannelCount = 2;
  uint32_t reverbOutputChannelCount = 2;
  if (mRightConvolverMode != RightConvolverMode::Always) {
    // When the input changes from stereo to mono, the output continues to be
    // stereo for the length of the tail time, during which the two channels
    // may differ.
    if (mRemainingRightOutput > 0) {
      MOZ_ASSERT(mRemainingRightHistory > 0);
      mRemainingRightOutput -= WEBAUDIO_BLOCK_SIZE;
    } else {
      outputChannelCount = 1;
    }
    // The second convolver keeps processing until it drains.
    if (mRemainingRightHistory > 0) {
      mRemainingRightHistory -= WEBAUDIO_BLOCK_SIZE;
    } else {
      reverbOutputChannelCount = 1;
    }
  }

  // If there are two convolvers, then they each need an output buffer, even
  // if the second convolver is only processing to keep history of up-mixed
  // input.
  aOutput->AllocateChannels(reverbOutputChannelCount);

  mReverb->process(&mReverbInput, aOutput);

  if (mRightConvolverMode == RightConvolverMode::Difference &&
      outputChannelCount == 2) {
    // Add left to right.
    AddScaledLeftToRight(aOutput, 1.0f);
  } else {
    // Trim if outputChannelCount < reverbOutputChannelCount
    aOutput->mChannelData.TruncateLength(outputChannelCount);
  }
}

ConvolverNode::ConvolverNode(AudioContext* aContext)
  : AudioNode(aContext,
              2,
              ChannelCountMode::Clamped_max,
              ChannelInterpretation::Speakers)
  , mNormalize(true)
{
  ConvolverNodeEngine* engine = new ConvolverNodeEngine(this, mNormalize);
  mStream = AudioNodeStream::Create(aContext, engine,
                                    AudioNodeStream::NO_STREAM_FLAGS,
                                    aContext->Graph());
}

/* static */ already_AddRefed<ConvolverNode>
ConvolverNode::Create(JSContext* aCx, AudioContext& aAudioContext,
                      const ConvolverOptions& aOptions,
                      ErrorResult& aRv)
{
  if (aAudioContext.CheckClosed(aRv)) {
    return nullptr;
  }

  RefPtr<ConvolverNode> audioNode = new ConvolverNode(&aAudioContext);

  audioNode->Initialize(aOptions, aRv);
  if (NS_WARN_IF(aRv.Failed())) {
    return nullptr;
  }

  // This must be done before setting the buffer.
  audioNode->SetNormalize(!aOptions.mDisableNormalization);

  if (aOptions.mBuffer.WasPassed()) {
    MOZ_ASSERT(aCx);
    audioNode->SetBuffer(aCx, aOptions.mBuffer.Value(), aRv);
    if (NS_WARN_IF(aRv.Failed())) {
      return nullptr;
    }
  }

  return audioNode.forget();
}

size_t
ConvolverNode::SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const
{
  size_t amount = AudioNode::SizeOfExcludingThis(aMallocSizeOf);
  if (mBuffer) {
    // NB: mBuffer might be shared with the associated engine; by convention
    //     the AudioNode reports it.
    amount += mBuffer->SizeOfIncludingThis(aMallocSizeOf);
  }
  return amount;
}

size_t
ConvolverNode::SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const
{
  return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf);
}

JSObject*
ConvolverNode::WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto)
{
  return ConvolverNode_Binding::Wrap(aCx, this, aGivenProto);
}

void
ConvolverNode::SetBuffer(JSContext* aCx, AudioBuffer* aBuffer, ErrorResult& aRv)
{
  if (aBuffer) {
    switch (aBuffer->NumberOfChannels()) {
    case 1:
    case 2:
    case 4:
      // Supported number of channels
      break;
    default:
      aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR);
      return;
    }
  }

  // Send the buffer to the stream
  AudioNodeStream* ns = mStream;
  MOZ_ASSERT(ns, "Why don't we have a stream here?");
  if (aBuffer) {
    AudioChunk data = aBuffer->GetThreadSharedChannelsForRate(aCx);
    if (data.mBufferFormat == AUDIO_FORMAT_S16) {
      // Reverb expects data in float format.
      // Convert on the main thread so as to minimize allocations on the audio
      // thread.
      // Reverb will dispose of the buffer once initialized, so convert here
      // and leave the smaller arrays in the AudioBuffer.
      // There is currently no value in providing 16/32-byte aligned data
      // because PadAndMakeScaledDFT() will copy the data (without SIMD
      // instructions) to aligned arrays for the FFT.
      RefPtr<SharedBuffer> floatBuffer =
        SharedBuffer::Create(sizeof(float) *
                             data.mDuration * data.ChannelCount());
      if (!floatBuffer) {
        aRv.Throw(NS_ERROR_OUT_OF_MEMORY);
        return;
      }
      auto floatData = static_cast<float*>(floatBuffer->Data());
      for (size_t i = 0; i < data.ChannelCount(); ++i) {
        ConvertAudioSamples(data.ChannelData<int16_t>()[i],
                            floatData, data.mDuration);
        data.mChannelData[i] = floatData;
        floatData += data.mDuration;
      }
      data.mBuffer = std::move(floatBuffer);
      data.mBufferFormat = AUDIO_FORMAT_FLOAT32;
    }
    SendDoubleParameterToStream(ConvolverNodeEngine::SAMPLE_RATE,
                                aBuffer->SampleRate());
    ns->SetBuffer(std::move(data));
  } else {
    ns->SetBuffer(AudioChunk());
  }

  mBuffer = aBuffer;
}
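
// A minimal illustrative sketch (hypothetical helper, for illustration only;
// it is not called anywhere): ConvertAudioSamples() used above is defined
// elsewhere in the tree.  Conceptually it rescales signed 16-bit PCM into the
// [-1, 1] float range that Reverb expects, roughly as follows.
static void
SketchConvertInt16SamplesToFloat(const int16_t* aSource, float* aDest,
                                 size_t aCount)
{
  for (size_t i = 0; i < aCount; ++i) {
    // Dividing by 32768 maps the full int16 range onto [-1.0, 1.0).
    aDest[i] = aSource[i] / 32768.0f;
  }
}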

void
ConvolverNode::SetNormalize(bool aNormalize)
{
  mNormalize = aNormalize;
  SendInt32ParameterToStream(ConvolverNodeEngine::NORMALIZE, aNormalize);
}

} // namespace dom
} // namespace mozilla