Coverage Report

Created: 2018-09-25 14:53

/src/mozilla-central/dom/media/webaudio/ScriptProcessorNode.cpp
Line
Count
Source (jump to first uncovered line)
1
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
2
/* vim:set ts=2 sw=2 sts=2 et cindent: */
3
/* This Source Code Form is subject to the terms of the Mozilla Public
4
 * License, v. 2.0. If a copy of the MPL was not distributed with this
5
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6
7
#include "ScriptProcessorNode.h"
8
#include "mozilla/dom/ScriptProcessorNodeBinding.h"
9
#include "AudioBuffer.h"
10
#include "AudioDestinationNode.h"
11
#include "AudioNodeEngine.h"
12
#include "AudioNodeStream.h"
13
#include "AudioProcessingEvent.h"
14
#include "WebAudioUtils.h"
15
#include "mozilla/dom/ScriptSettings.h"
16
#include "mozilla/Mutex.h"
17
#include "mozilla/PodOperations.h"
18
#include "nsAutoPtr.h"
19
#include <deque>
20
21
namespace mozilla {
22
namespace dom {
23
24
// The maximum latency, in seconds, that we can live with before dropping
25
// buffers.
26
static const float MAX_LATENCY_S = 0.5;
27
28
// This class manages a queue of output buffers shared between
29
// the main thread and the Media Stream Graph thread.
30
class SharedBuffers final
31
{
32
private:
33
  class OutputQueue final
34
  {
35
  public:
36
    explicit OutputQueue(const char* aName)
37
      : mMutex(aName)
38
0
    {}
39
40
    size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const
41
0
    {
42
0
      mMutex.AssertCurrentThreadOwns();
43
0
44
0
      size_t amount = 0;
45
0
      for (size_t i = 0; i < mBufferList.size(); i++) {
46
0
        amount += mBufferList[i].SizeOfExcludingThis(aMallocSizeOf, false);
47
0
      }
48
0
49
0
      return amount;
50
0
    }
51
52
0
    Mutex& Lock() const { return const_cast<OutputQueue*>(this)->mMutex; }
53
54
    size_t ReadyToConsume() const
55
0
    {
56
0
      // Accessed on both main thread and media graph thread.
57
0
      mMutex.AssertCurrentThreadOwns();
58
0
      return mBufferList.size();
59
0
    }
60
61
    // Produce one buffer
62
    AudioChunk& Produce()
63
0
    {
64
0
      mMutex.AssertCurrentThreadOwns();
65
0
      MOZ_ASSERT(NS_IsMainThread());
66
0
      mBufferList.push_back(AudioChunk());
67
0
      return mBufferList.back();
68
0
    }
69
70
    // Consumes one buffer.
71
    AudioChunk Consume()
72
0
    {
73
0
      mMutex.AssertCurrentThreadOwns();
74
0
      MOZ_ASSERT(!NS_IsMainThread());
75
0
      MOZ_ASSERT(ReadyToConsume() > 0);
76
0
      AudioChunk front = mBufferList.front();
77
0
      mBufferList.pop_front();
78
0
      return front;
79
0
    }
80
81
    // Empties the buffer queue.
82
    void Clear()
83
0
    {
84
0
      mMutex.AssertCurrentThreadOwns();
85
0
      mBufferList.clear();
86
0
    }
87
88
  private:
89
    typedef std::deque<AudioChunk> BufferList;
90
91
    // Synchronizes access to mBufferList.  Note that it's the responsibility
92
    // of the callers to perform the required locking, and we assert that every
93
    // time we access mBufferList.
94
    Mutex mMutex;
95
    // The list representing the queue.
96
    BufferList mBufferList;
97
  };
98
99
public:
100
  explicit SharedBuffers(float aSampleRate)
101
    : mOutputQueue("SharedBuffers::outputQueue")
102
    , mDelaySoFar(STREAM_TIME_MAX)
103
    , mSampleRate(aSampleRate)
104
    , mLatency(0.0)
105
    , mDroppingBuffers(false)
106
0
  {
107
0
  }
108
109
  size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const
110
0
  {
111
0
    size_t amount = aMallocSizeOf(this);
112
0
113
0
    {
114
0
      MutexAutoLock lock(mOutputQueue.Lock());
115
0
      amount += mOutputQueue.SizeOfExcludingThis(aMallocSizeOf);
116
0
    }
117
0
118
0
    return amount;
119
0
  }
120
121
  // main thread
122
123
  // NotifyNodeIsConnected() may be called even when the state has not
124
  // changed.
125
  void NotifyNodeIsConnected(bool aIsConnected)
126
0
  {
127
0
    MOZ_ASSERT(NS_IsMainThread());
128
0
    if (!aIsConnected) {
129
0
      // Reset main thread state for FinishProducingOutputBuffer().
130
0
      mLatency = 0.0f;
131
0
      mLastEventTime = TimeStamp();
132
0
      mDroppingBuffers = false;
133
0
      // Don't flush the output buffer here because the graph thread may be
134
0
      // using it now.  The graph thread will flush when it knows it is
135
0
      // disconnected.
136
0
    }
137
0
    mNodeIsConnected = aIsConnected;
138
0
  }
139
140
  void FinishProducingOutputBuffer(const AudioChunk& aBuffer)
141
0
  {
142
0
    MOZ_ASSERT(NS_IsMainThread());
143
0
144
0
    if (!mNodeIsConnected) {
145
0
      // The output buffer is not used, and mLastEventTime will not be
146
0
      // initialized until the node is re-connected.
147
0
      return;
148
0
    }
149
0
150
0
    TimeStamp now = TimeStamp::Now();
151
0
152
0
    if (mLastEventTime.IsNull()) {
153
0
      mLastEventTime = now;
154
0
    } else {
155
0
      // When main thread blocking has built up enough so
156
0
      // |mLatency > MAX_LATENCY_S|, frame dropping starts. It continues until
157
0
      // the output buffer is completely empty, at which point the accumulated
158
0
      // latency is also reset to 0.
159
0
      // It could happen that the output queue becomes empty before the input
160
0
      // node has fully caught up. In this case there will be events where
161
0
      // |(now - mLastEventTime)| is very short, making mLatency negative.
162
0
      // As this happens and the size of |mLatency| becomes greater than
163
0
      // MAX_LATENCY_S, frame dropping starts again to maintain an as short
164
0
      // output queue as possible.
165
0
      float latency = (now - mLastEventTime).ToSeconds();
166
0
      float bufferDuration = aBuffer.mDuration / mSampleRate;
167
0
      mLatency += latency - bufferDuration;
168
0
      mLastEventTime = now;
169
0
      if (fabs(mLatency) > MAX_LATENCY_S) {
170
0
        mDroppingBuffers = true;
171
0
      }
172
0
    }
173
0
174
0
    MutexAutoLock lock(mOutputQueue.Lock());
175
0
    if (mDroppingBuffers) {
176
0
      if (mOutputQueue.ReadyToConsume()) {
177
0
        return;
178
0
      }
179
0
      mDroppingBuffers = false;
180
0
      mLatency = 0;
181
0
    }
182
0
183
0
    for (uint32_t offset = 0; offset < aBuffer.mDuration;
184
0
         offset += WEBAUDIO_BLOCK_SIZE) {
185
0
      AudioChunk& chunk = mOutputQueue.Produce();
186
0
      chunk = aBuffer;
187
0
      chunk.SliceTo(offset, offset + WEBAUDIO_BLOCK_SIZE);
188
0
    }
189
0
  }
190
191
  // graph thread
192
193
  AudioChunk GetOutputBuffer()
194
0
  {
195
0
    MOZ_ASSERT(!NS_IsMainThread());
196
0
    AudioChunk buffer;
197
0
198
0
    {
199
0
      MutexAutoLock lock(mOutputQueue.Lock());
200
0
      if (mOutputQueue.ReadyToConsume() > 0) {
201
0
        if (mDelaySoFar == STREAM_TIME_MAX) {
202
0
          mDelaySoFar = 0;
203
0
        }
204
0
        buffer = mOutputQueue.Consume();
205
0
      } else {
206
0
        // If we're out of buffers to consume, just output silence
207
0
        buffer.SetNull(WEBAUDIO_BLOCK_SIZE);
208
0
        if (mDelaySoFar != STREAM_TIME_MAX) {
209
0
          // Remember the delay that we just hit
210
0
          mDelaySoFar += WEBAUDIO_BLOCK_SIZE;
211
0
        }
212
0
      }
213
0
    }
214
0
215
0
    return buffer;
216
0
  }
217
218
  StreamTime DelaySoFar() const
219
0
  {
220
0
    MOZ_ASSERT(!NS_IsMainThread());
221
0
    return mDelaySoFar == STREAM_TIME_MAX ? 0 : mDelaySoFar;
222
0
  }
223
224
  void Flush()
225
0
  {
226
0
    MOZ_ASSERT(!NS_IsMainThread());
227
0
    mDelaySoFar = STREAM_TIME_MAX;
228
0
    {
229
0
      MutexAutoLock lock(mOutputQueue.Lock());
230
0
      mOutputQueue.Clear();
231
0
    }
232
0
  }
233
234
private:
235
  OutputQueue mOutputQueue;
236
  // How much delay we've seen so far.  This measures the amount of delay
237
  // caused by the main thread lagging behind in producing output buffers.
238
  // STREAM_TIME_MAX means that we have not received our first buffer yet.
239
  // Graph thread only.
240
  StreamTime mDelaySoFar;
241
  // The samplerate of the context.
242
  const float mSampleRate;
243
  // The remaining members are main thread only.
244
  // This is the latency caused by the buffering. If this grows too high, we
245
  // will drop buffers until it is acceptable.
246
  float mLatency;
247
  // This is the time at which we last produced a buffer, to detect if the main
248
  // thread has been blocked.
249
  TimeStamp mLastEventTime;
250
  // True if we should be dropping buffers.
251
  bool mDroppingBuffers;
252
  // True iff the AudioNode has at least one input or output connected.
253
  bool mNodeIsConnected;
254
};
255
256
// Graph-thread engine backing ScriptProcessorNode.  It accumulates incoming
// audio into mInputBuffer; once mBufferSize frames have been gathered it
// posts a Command runnable to the main thread, which fires onaudioprocess
// and pushes the script's output into mSharedBuffers for later playback.
class ScriptProcessorNodeEngine final : public AudioNodeEngine
{
public:
  ScriptProcessorNodeEngine(ScriptProcessorNode* aNode,
                            AudioDestinationNode* aDestination,
                            uint32_t aBufferSize,
                            uint32_t aNumberOfInputChannels)
    : AudioNodeEngine(aNode)
    , mDestination(aDestination->Stream())
    , mSharedBuffers(new SharedBuffers(mDestination->SampleRate()))
    , mBufferSize(aBufferSize)
    , mInputChannelCount(aNumberOfInputChannels)
    , mInputWriteIndex(0)
  {
  }

  // Accessor for the queue of output buffers shared with the main thread.
  SharedBuffers* GetSharedBuffers() const
  {
    return mSharedBuffers;
  }

  // Parameter indices accepted by SetInt32Parameter().
  enum {
    IS_CONNECTED,
  };

  // Receives Int32 parameter updates sent from the main thread (see
  // ScriptProcessorNode::UpdateConnectedStatus()).
  void SetInt32Parameter(uint32_t aIndex, int32_t aParam) override
  {
    switch (aIndex) {
      case IS_CONNECTED:
        mIsConnected = aParam;
        break;
      default:
        NS_ERROR("Bad Int32Parameter");
    } // End index switch.
  }

  void ProcessBlock(AudioNodeStream* aStream,
                    GraphTime aFrom,
                    const AudioBlock& aInput,
                    AudioBlock* aOutput,
                    bool* aFinished) override
  {
    // This node is not connected to anything. Per spec, we don't fire the
    // onaudioprocess event. We also want to clear out the input and output
    // buffer queue, and output a null buffer.
    if (!mIsConnected) {
      aOutput->SetNull(WEBAUDIO_BLOCK_SIZE);
      mSharedBuffers->Flush();
      mInputWriteIndex = 0;
      return;
    }

    // The input buffer is allocated lazily when non-null input is received.
    if (!aInput.IsNull() && !mInputBuffer) {
      mInputBuffer = ThreadSharedFloatArrayBufferList::
        Create(mInputChannelCount, mBufferSize, fallible);
      if (mInputBuffer && mInputWriteIndex) {
        // Zero leading for null chunks that were skipped.
        for (uint32_t i = 0; i < mInputChannelCount; ++i) {
          float* channelData = mInputBuffer->GetDataForWrite(i);
          PodZero(channelData, mInputWriteIndex);
        }
      }
    }

    // First, record our input buffer, if its allocation succeeded.
    // (inputChannelCount stays 0 on allocation failure, skipping the copy.)
    uint32_t inputChannelCount = mInputBuffer ? mInputBuffer->GetChannels() : 0;
    for (uint32_t i = 0; i < inputChannelCount; ++i) {
      float* writeData = mInputBuffer->GetDataForWrite(i) + mInputWriteIndex;
      if (aInput.IsNull()) {
        // Null input chunks contribute silence.
        PodZero(writeData, aInput.GetDuration());
      } else {
        MOZ_ASSERT(aInput.GetDuration() == WEBAUDIO_BLOCK_SIZE, "sanity check");
        MOZ_ASSERT(aInput.ChannelCount() == inputChannelCount);
        AudioBlockCopyChannelWithScale(static_cast<const float*>(aInput.mChannelData[i]),
                                       aInput.mVolume, writeData);
      }
    }
    mInputWriteIndex += aInput.GetDuration();

    // Now, see if we have data to output
    // Note that we need to do this before sending the buffer to the main
    // thread so that our delay time is updated.
    *aOutput = mSharedBuffers->GetOutputBuffer();

    if (mInputWriteIndex >= mBufferSize) {
      SendBuffersToMainThread(aStream, aFrom);
      mInputWriteIndex -= mBufferSize;
    }
  }

  bool IsActive() const override
  {
    // Could return false when !mIsConnected after all output chunks produced
    // by main thread events calling
    // SharedBuffers::FinishProducingOutputBuffer() have been processed.
    return true;
  }

  size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const override
  {
    // Not owned:
    // - mDestination (probably)
    size_t amount = AudioNodeEngine::SizeOfExcludingThis(aMallocSizeOf);
    amount += mSharedBuffers->SizeOfIncludingThis(aMallocSizeOf);
    if (mInputBuffer) {
      amount += mInputBuffer->SizeOfIncludingThis(aMallocSizeOf);
    }

    return amount;
  }

  size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override
  {
    return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf);
  }

private:
  // Called on the graph thread when a full mBufferSize worth of input has
  // been accumulated.  Ownership of mInputBuffer transfers to the dispatched
  // runnable (via forget()), so the engine lazily allocates a fresh buffer
  // for the next batch.
  void SendBuffersToMainThread(AudioNodeStream* aStream, GraphTime aFrom)
  {
    MOZ_ASSERT(!NS_IsMainThread());

    // we now have a full input buffer ready to be sent to the main thread.
    StreamTime playbackTick = mDestination->GraphTimeToStreamTime(aFrom);
    // Add the duration of the current sample
    playbackTick += WEBAUDIO_BLOCK_SIZE;
    // Add the delay caused by the main thread
    playbackTick += mSharedBuffers->DelaySoFar();
    // Compute the playback time in the coordinate system of the destination
    double playbackTime = mDestination->StreamTimeToSeconds(playbackTick);

    // Main-thread runnable: fires onaudioprocess (if listened for) and hands
    // the script's output back to the shared buffer queue.
    class Command final : public Runnable
    {
    public:
      Command(AudioNodeStream* aStream,
              already_AddRefed<ThreadSharedFloatArrayBufferList> aInputBuffer,
              double aPlaybackTime)
        : mozilla::Runnable("Command")
        , mStream(aStream)
        , mInputBuffer(aInputBuffer)
        , mPlaybackTime(aPlaybackTime)
      {
      }

      NS_IMETHOD Run() override
      {

        auto engine =
          static_cast<ScriptProcessorNodeEngine*>(mStream->Engine());
        AudioChunk output;
        output.SetNull(engine->mBufferSize);
        {
          // NodeMainThread() may return null if the DOM node has already
          // been destroyed; in that case there is nothing to dispatch.
          auto node = static_cast<ScriptProcessorNode*>
            (engine->NodeMainThread());
          if (!node) {
            return NS_OK;
          }

          if (node->HasListenersFor(nsGkAtoms::onaudioprocess)) {
            DispatchAudioProcessEvent(node, &output);
          }
          // The node may have been destroyed during event dispatch.
        }

        // Append it to our output buffer queue
        engine->GetSharedBuffers()->FinishProducingOutputBuffer(output);

        return NS_OK;
      }

      // Sets up |output| iff buffers are set in event handlers.
      void DispatchAudioProcessEvent(ScriptProcessorNode* aNode,
                                     AudioChunk* aOutput)
      {
        AudioContext* context = aNode->Context();
        if (!context) {
          return;
        }

        AutoJSAPI jsapi;
        if (NS_WARN_IF(!jsapi.Init(aNode->GetOwner()))) {
          return;
        }
        JSContext* cx = jsapi.cx();
        uint32_t inputChannelCount = aNode->ChannelCount();

        // Create the input buffer
        RefPtr<AudioBuffer> inputBuffer;
        if (mInputBuffer) {
          ErrorResult rv;
          inputBuffer =
            AudioBuffer::Create(context->GetOwner(), inputChannelCount,
                                aNode->BufferSize(), context->SampleRate(),
                                mInputBuffer.forget(), rv);
          if (rv.Failed()) {
            rv.SuppressException();
            return;
          }
        }

        // Ask content to produce data in the output buffer
        // Note that we always avoid creating the output buffer here, and we try to
        // avoid creating the input buffer as well.  The AudioProcessingEvent class
        // knows how to lazily create them if needed once the script tries to access
        // them.  Otherwise, we may be able to get away without creating them!
        RefPtr<AudioProcessingEvent> event =
          new AudioProcessingEvent(aNode, nullptr, nullptr);
        event->InitEvent(inputBuffer, inputChannelCount, mPlaybackTime);
        aNode->DispatchTrustedEvent(event);

        // Steal the output buffers if they have been set.
        // Don't create a buffer if it hasn't been used to return output;
        // FinishProducingOutputBuffer() will optimize output = null.
        // GetThreadSharedChannelsForRate() may also return null after OOM.
        if (event->HasOutputBuffer()) {
          ErrorResult rv;
          AudioBuffer* buffer = event->GetOutputBuffer(rv);
          // HasOutputBuffer() returning true means that GetOutputBuffer()
          // will not fail.
          MOZ_ASSERT(!rv.Failed());
          *aOutput = buffer->GetThreadSharedChannelsForRate(cx);
          MOZ_ASSERT(aOutput->IsNull() ||
                     aOutput->mBufferFormat == AUDIO_FORMAT_FLOAT32,
                     "AudioBuffers initialized from JS have float data");
        }
      }
    private:
      RefPtr<AudioNodeStream> mStream;
      RefPtr<ThreadSharedFloatArrayBufferList> mInputBuffer;
      double mPlaybackTime;
    };

    RefPtr<Command> command = new Command(aStream, mInputBuffer.forget(),
                                          playbackTime);
    mAbstractMainThread->Dispatch(command.forget());
  }

  friend class ScriptProcessorNode;

  // Destination node's stream, used to convert graph time to playback time.
  RefPtr<AudioNodeStream> mDestination;
  nsAutoPtr<SharedBuffers> mSharedBuffers;
  // Lazily-allocated accumulation buffer for incoming audio; handed off to
  // the main thread each time mBufferSize frames have been collected.
  RefPtr<ThreadSharedFloatArrayBufferList> mInputBuffer;
  const uint32_t mBufferSize;
  const uint32_t mInputChannelCount;
  // The write index into the current input buffer
  uint32_t mInputWriteIndex;
  // Graph-thread mirror of the node's connected state, updated through the
  // IS_CONNECTED parameter.
  bool mIsConnected = false;
};
504
505
ScriptProcessorNode::ScriptProcessorNode(AudioContext* aContext,
                                         uint32_t aBufferSize,
                                         uint32_t aNumberOfInputChannels,
                                         uint32_t aNumberOfOutputChannels)
  : AudioNode(aContext,
              aNumberOfInputChannels,
              mozilla::dom::ChannelCountMode::Explicit,
              mozilla::dom::ChannelInterpretation::Speakers)
  // A zero buffer size means the web developer left the choice to us;
  // default to 4096 frames in that case.
  , mBufferSize(aBufferSize ? aBufferSize : 4096)
  , mNumberOfOutputChannels(aNumberOfOutputChannels)
{
  MOZ_ASSERT(BufferSize() % WEBAUDIO_BLOCK_SIZE == 0, "Invalid buffer size");
  // Build the graph-thread engine and hand it to the stream we create.
  auto* nodeEngine = new ScriptProcessorNodeEngine(this,
                                                   aContext->Destination(),
                                                   BufferSize(),
                                                   aNumberOfInputChannels);
  mStream = AudioNodeStream::Create(aContext, nodeEngine,
                                    AudioNodeStream::NO_STREAM_FLAGS,
                                    aContext->Graph());
}
528
529
// Out-of-line destructor; no explicit cleanup is needed here.
ScriptProcessorNode::~ScriptProcessorNode()
{
}
532
533
size_t
ScriptProcessorNode::SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const
{
  // Everything this node reports is accounted for by the base class.
  return AudioNode::SizeOfExcludingThis(aMallocSizeOf);
}
539
540
size_t
ScriptProcessorNode::SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const
{
  // Shallow size of this object plus everything measured by
  // SizeOfExcludingThis().
  size_t amount = aMallocSizeOf(this);
  amount += SizeOfExcludingThis(aMallocSizeOf);
  return amount;
}
545
546
void
547
ScriptProcessorNode::EventListenerAdded(nsAtom* aType)
548
0
{
549
0
  AudioNode::EventListenerAdded(aType);
550
0
  if (aType == nsGkAtoms::onaudioprocess) {
551
0
    UpdateConnectedStatus();
552
0
  }
553
0
}
554
555
void
556
ScriptProcessorNode::EventListenerRemoved(nsAtom* aType)
557
0
{
558
0
  AudioNode::EventListenerRemoved(aType);
559
0
  if (aType == nsGkAtoms::onaudioprocess) {
560
0
    UpdateConnectedStatus();
561
0
  }
562
0
}
563
564
JSObject*
ScriptProcessorNode::WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto)
{
  // Create the JS reflector for this node via the WebIDL binding layer.
  return ScriptProcessorNode_Binding::Wrap(aCx, this, aGivenProto);
}
569
570
void
571
ScriptProcessorNode::UpdateConnectedStatus()
572
0
{
573
0
  bool isConnected =
574
0
    mHasPhantomInput || !(OutputNodes().IsEmpty() && OutputParams().IsEmpty() &&
575
0
                          InputNodes().IsEmpty());
576
0
577
0
  // Events are queued even when there is no listener because a listener
578
0
  // may be added while events are in the queue.
579
0
  SendInt32ParameterToStream(ScriptProcessorNodeEngine::IS_CONNECTED,
580
0
                             isConnected);
581
0
582
0
  if (isConnected && HasListenersFor(nsGkAtoms::onaudioprocess)) {
583
0
    MarkActive();
584
0
  } else {
585
0
    MarkInactive();
586
0
  }
587
0
588
0
  auto engine = static_cast<ScriptProcessorNodeEngine*>(mStream->Engine());
589
0
  engine->GetSharedBuffers()->NotifyNodeIsConnected(isConnected);
590
0
}
591
592
} // namespace dom
593
} // namespace mozilla
594