Coverage Report

Created: 2018-09-25 14:53

/work/obj-fuzz/dist/include/AudioNodeStream.h
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-*/
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this file,
 * You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef MOZILLA_AUDIONODESTREAM_H_
#define MOZILLA_AUDIONODESTREAM_H_

#include "MediaStreamGraph.h"
#include "mozilla/dom/AudioNodeBinding.h"
#include "nsAutoPtr.h"
#include "AlignedTArray.h"
#include "AudioBlock.h"
#include "AudioSegment.h"

namespace mozilla {

namespace dom {
struct ThreeDPoint;
struct AudioTimelineEvent;
class AudioContext;
} // namespace dom

class AbstractThread;
class ThreadSharedFloatArrayBufferList;
class AudioNodeEngine;

typedef AlignedAutoTArray<float, GUESS_AUDIO_CHANNELS*WEBAUDIO_BLOCK_SIZE, 16> DownmixBufferType;

/**
 * An AudioNodeStream produces one audio track with ID AUDIO_TRACK.
 * The start time of the AudioTrack is aligned to the start time of the
 * AudioContext's destination node stream, plus some multiple of BLOCK_SIZE
 * samples.
 *
 * An AudioNodeStream has an AudioNodeEngine plugged into it that does the
 * actual audio processing. AudioNodeStream contains the glue code that
 * integrates audio processing with the MediaStreamGraph.
 */
class AudioNodeStream : public ProcessedMediaStream
{
  typedef dom::ChannelCountMode ChannelCountMode;
  typedef dom::ChannelInterpretation ChannelInterpretation;

public:
  typedef mozilla::dom::AudioContext AudioContext;

  enum { AUDIO_TRACK = 1 };

  typedef AutoTArray<AudioBlock, 1> OutputChunks;

  // Flags re main thread updates and stream output.
  typedef unsigned Flags;
  enum : Flags {
    NO_STREAM_FLAGS = 0U,
    NEED_MAIN_THREAD_FINISHED = 1U << 0,
    NEED_MAIN_THREAD_CURRENT_TIME = 1U << 1,
    // Internal AudioNodeStreams can only pass their output to another
    // AudioNode, whereas external AudioNodeStreams can pass their output
    // to other ProcessedMediaStreams or hardware audio output.
    EXTERNAL_OUTPUT = 1U << 2,
  };
  /**
   * Create a stream that will process audio for an AudioNode.
   * Takes ownership of aEngine.
   * aGraph is required and equals the graph of aCtx in most cases. An exception
   * is AudioDestinationNode where the context's graph hasn't been set up yet.
   */
  static already_AddRefed<AudioNodeStream>
  Create(AudioContext* aCtx, AudioNodeEngine* aEngine, Flags aKind,
         MediaStreamGraph* aGraph);
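
  // Illustrative usage sketch: a node implementation would typically hand a
  // freshly allocated engine to Create(), which takes ownership of it.  The
  // engine class (MyNodeEngine) and the variables below are hypothetical, not
  // declarations from this header.
  //
  //   RefPtr<AudioNodeStream> stream =
  //     AudioNodeStream::Create(aContext,
  //                             new MyNodeEngine(aNode),
  //                             AudioNodeStream::NO_STREAM_FLAGS,
  //                             aContext->Graph());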

protected:
  /**
   * Transfers ownership of aEngine to the new AudioNodeStream.
   */
  AudioNodeStream(AudioNodeEngine* aEngine,
                  Flags aFlags,
                  TrackRate aSampleRate);

  ~AudioNodeStream();

public:
  // Control API
  /**
   * Sets a parameter that's a time relative to some stream's played time.
   * This time is converted to a time relative to this stream when it's set.
   */
  void SetStreamTimeParameter(uint32_t aIndex, AudioContext* aContext,
                              double aStreamTime);
  void SetDoubleParameter(uint32_t aIndex, double aValue);
  void SetInt32Parameter(uint32_t aIndex, int32_t aValue);
  void SetThreeDPointParameter(uint32_t aIndex, const dom::ThreeDPoint& aValue);
  void SetBuffer(AudioChunk&& aBuffer);
  // This sends a single event to the timeline on the MSG thread side.
  void SendTimelineEvent(uint32_t aIndex, const dom::AudioTimelineEvent& aEvent);
  // This consumes the contents of aData.  aData will be emptied after this returns.
  void SetRawArrayData(nsTArray<float>& aData);
  void SetChannelMixingParameters(uint32_t aNumberOfChannels,
                                  ChannelCountMode aChannelCountMode,
                                  ChannelInterpretation aChannelInterpretation);
  void SetPassThrough(bool aPassThrough);
  ChannelInterpretation GetChannelInterpretation()
  {
    return mChannelInterpretation;
  }
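
  // Illustrative sketch: a node forwarding its Web Audio channel configuration
  // to this stream might do roughly the following (the node accessors used
  // here are assumptions for illustration, not part of this header):
  //
  //   stream->SetChannelMixingParameters(node->ChannelCount(),
  //                                      node->ChannelCountModeValue(),
  //                                      node->ChannelInterpretationValue());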

  void SetAudioParamHelperStream()
  {
    MOZ_ASSERT(!mAudioParamStream, "Can only do this once");
    mAudioParamStream = true;
  }

  /*
   * Resume stream after updating its concept of current time by aAdvance.
   * Main thread.  Used only from AudioDestinationNode when resuming a stream
   * suspended to save running the MediaStreamGraph when there are no other
   * nodes in the AudioContext.
   */
  void AdvanceAndResume(StreamTime aAdvance);

  AudioNodeStream* AsAudioNodeStream() override { return this; }
  void AddInput(MediaInputPort* aPort) override;
  void RemoveInput(MediaInputPort* aPort) override;

  // Graph thread only
  void SetStreamTimeParameterImpl(uint32_t aIndex, MediaStream* aRelativeToStream,
                                  double aStreamTime);
  void SetChannelMixingParametersImpl(uint32_t aNumberOfChannels,
                                      ChannelCountMode aChannelCountMode,
                                      ChannelInterpretation aChannelInterpretation);
  void ProcessInput(GraphTime aFrom, GraphTime aTo, uint32_t aFlags) override;
  /**
   * Produce the next block of output, before input is provided.
   * ProcessInput() will be called later, and it then should not change
   * the output.  This is used only for DelayNodeEngine in a feedback loop.
   */
  void ProduceOutputBeforeInput(GraphTime aFrom);
  bool IsAudioParamStream() const
  {
    return mAudioParamStream;
  }

  const OutputChunks& LastChunks() const
  {
    return mLastChunks;
  }
  bool MainThreadNeedsUpdates() const override
  {
    return ((mFlags & NEED_MAIN_THREAD_FINISHED) && mFinished) ||
      (mFlags & NEED_MAIN_THREAD_CURRENT_TIME);
  }

  // Any thread
  AudioNodeEngine* Engine() { return mEngine; }          // count: 0 (uncovered)
  TrackRate SampleRate() const { return mSampleRate; }   // count: 0 (uncovered)

  size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const override;
  size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override;

  void SizeOfAudioNodesIncludingThis(MallocSizeOf aMallocSizeOf,
                                     AudioNodeSizes& aUsage) const;

  /*
   * SetActive() is called when either an active input is added or the engine
   * for a source node transitions from inactive to active.  This is not
   * called from engines for processing nodes because they only become active
   * when there are active input streams, in which case this stream is already
   * active.
   */
  void SetActive();
  /*
   * ScheduleCheckForInactive() is called during stream processing when the
   * engine transitions from active to inactive, or the stream finishes.  It
   * schedules a call to CheckForInactive() after stream processing.
   */
  void ScheduleCheckForInactive();

protected:
  class AdvanceAndResumeMessage;
  class CheckForInactiveMessage;

  void DestroyImpl() override;

  /*
   * CheckForInactive() is called when the engine transitions from active to
   * inactive, or an active input is removed, or the stream finishes.  If the
   * stream is now inactive, then mInputChunks will be cleared and mLastChunks
   * will be set to null.  ProcessBlock() will not be called on the engine
   * again until SetActive() is called.
   */
  void CheckForInactive();
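
  // Sketch of the active/inactive lifecycle described in the comments above
  // (the names are the methods declared in this class):
  //
  //   SetActive();                 // an active input was added, or a source
  //                                // engine became active
  //   ...                          // engine's ProcessBlock() runs each iteration
  //   ScheduleCheckForInactive();  // engine went inactive or stream finished
  //   CheckForInactive();          // runs after processing; clears mInputChunks
  //                                // and nulls mLastChunks if now inactive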

  void AdvanceOutputSegment();
  void FinishOutput();
  void AccumulateInputChunk(uint32_t aInputIndex, const AudioBlock& aChunk,
                            AudioBlock* aBlock,
                            DownmixBufferType* aDownmixBuffer);
  void UpMixDownMixChunk(const AudioBlock* aChunk, uint32_t aOutputChannelCount,
                         nsTArray<const float*>& aOutputChannels,
                         DownmixBufferType& aDownmixBuffer);

  uint32_t ComputedNumberOfChannels(uint32_t aInputChannelCount);
  void ObtainInputBlock(AudioBlock& aTmpChunk, uint32_t aPortIndex);
  void IncrementActiveInputCount();
  void DecrementActiveInputCount();

  // The engine that will generate output for this node.
  nsAutoPtr<AudioNodeEngine> mEngine;
  // The mixed input blocks are kept from iteration to iteration to avoid
  // reallocating channel data arrays and any buffers for mixing.
  OutputChunks mInputChunks;
  // The last block produced by this node.
  OutputChunks mLastChunks;
  // The stream's sampling rate
  const TrackRate mSampleRate;
  // Whether this is an internal or external stream
  const Flags mFlags;
  // The number of input streams that may provide non-silent input.
  uint32_t mActiveInputCount = 0;
  // The number of input channels that this stream requires. 0 means don't care.
  uint32_t mNumberOfInputChannels;
  // The mixing modes
  ChannelCountMode mChannelCountMode;
  ChannelInterpretation mChannelInterpretation;
  // Streams are considered active if the stream has not finished and either
  // the engine is active or there are active input streams.
  bool mIsActive;
  // Whether the stream should be marked as finished as soon
  // as the current time range has been computed block by block.
  bool mMarkAsFinishedAfterThisBlock;
  // Whether the stream is an AudioParamHelper stream.
  bool mAudioParamStream;
  // Whether the stream just passes its input through.
  bool mPassThrough;
};

} // namespace mozilla

#endif /* MOZILLA_AUDIONODESTREAM_H_ */