Coverage Report

Created: 2018-09-25 14:53

/src/mozilla-central/dom/media/webaudio/AudioNodeExternalInputStream.cpp
Line execution counts: every instrumented line in this file has a count of 0 (no line was executed; the file is entirely uncovered).

Source:
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-*/
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this file,
 * You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "AlignedTArray.h"
#include "AlignmentUtils.h"
#include "AudioNodeEngine.h"
#include "AudioNodeExternalInputStream.h"
#include "AudioChannelFormat.h"
#include "mozilla/dom/MediaStreamAudioSourceNode.h"

using namespace mozilla::dom;

namespace mozilla {

AudioNodeExternalInputStream::AudioNodeExternalInputStream(
  AudioNodeEngine* aEngine,
  TrackRate aSampleRate)
  : AudioNodeStream(aEngine, NO_STREAM_FLAGS, aSampleRate)
{
  MOZ_COUNT_CTOR(AudioNodeExternalInputStream);
}

AudioNodeExternalInputStream::~AudioNodeExternalInputStream()
{
  MOZ_COUNT_DTOR(AudioNodeExternalInputStream);
}

/* static */ already_AddRefed<AudioNodeExternalInputStream>
AudioNodeExternalInputStream::Create(MediaStreamGraph* aGraph,
                                     AudioNodeEngine* aEngine)
{
  AudioContext* ctx = aEngine->NodeMainThread()->Context();
  MOZ_ASSERT(NS_IsMainThread());
  MOZ_ASSERT(aGraph->GraphRate() == ctx->SampleRate());

  RefPtr<AudioNodeExternalInputStream> stream =
    new AudioNodeExternalInputStream(aEngine, aGraph->GraphRate());
  stream->mSuspendedCount += ctx->ShouldSuspendNewStream();
  aGraph->AddStream(stream);
  return stream.forget();
}

/**
 * Copies the data in aInput to aOffsetInBlock within aBlock.
 * aBlock must have been allocated with AllocateInputBlock and have a channel
 * count that's a superset of the channels in aInput.
 */
template <typename T>
static void
CopyChunkToBlock(AudioChunk& aInput, AudioBlock *aBlock,
                 uint32_t aOffsetInBlock)
{
  uint32_t blockChannels = aBlock->ChannelCount();
  AutoTArray<const T*,2> channels;
  if (aInput.IsNull()) {
    channels.SetLength(blockChannels);
    PodZero(channels.Elements(), blockChannels);
  } else {
    const nsTArray<const T*>& inputChannels = aInput.ChannelData<T>();
    channels.SetLength(inputChannels.Length());
    PodCopy(channels.Elements(), inputChannels.Elements(), channels.Length());
    if (channels.Length() != blockChannels) {
      // We only need to upmix here because aBlock's channel count has been
      // chosen to be a superset of the channel count of every chunk.
      AudioChannelsUpMix(&channels, blockChannels, static_cast<T*>(nullptr));
    }
  }

  for (uint32_t c = 0; c < blockChannels; ++c) {
    float* outputData = aBlock->ChannelFloatsForWrite(c) + aOffsetInBlock;
    if (channels[c]) {
      ConvertAudioSamplesWithScale(channels[c], outputData, aInput.GetDuration(), aInput.mVolume);
    } else {
      PodZero(outputData, aInput.GetDuration());
    }
  }
}
Unexecuted instantiation: Unified_cpp_dom_media_webaudio0.cpp:void mozilla::CopyChunkToBlock<short>(mozilla::AudioChunk&, mozilla::AudioBlock*, unsigned int)
Unexecuted instantiation: Unified_cpp_dom_media_webaudio0.cpp:void mozilla::CopyChunkToBlock<float>(mozilla::AudioChunk&, mozilla::AudioBlock*, unsigned int)
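
The comment inside CopyChunkToBlock relies on the block's channel count being a superset of every chunk's channel count, so per-chunk copying only ever has to upmix (add silent channels), never downmix. A minimal standalone sketch of that rule, in plain C++ with illustrative names rather than the Gecko AudioChunk/AudioBlock types:

// Illustrative sketch only; plain C++, not the Gecko AudioChunk/AudioBlock API.
#include <algorithm>
#include <cstdio>
#include <vector>

int main() {
  // Channel counts of three hypothetical chunks that feed one output block.
  std::vector<unsigned> chunkChannels = {1, 2, 6};

  // The block is allocated up front with a superset (here: the maximum) of
  // every chunk's channel count...
  unsigned blockChannels = 0;
  for (unsigned channels : chunkChannels) {
    blockChannels = std::max(blockChannels, channels);
  }

  // ...so copying a chunk into the block only ever has to add silent
  // channels (upmix); no chunk can have more channels than the block.
  for (unsigned channels : chunkChannels) {
    std::printf("chunk with %u channel(s): pad %u silent channel(s)\n",
                channels, blockChannels - channels);
  }
  return 0;
}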

/**
 * Converts the data in aSegment to a single chunk aBlock. aSegment must have
 * duration WEBAUDIO_BLOCK_SIZE. aFallbackChannelCount is a superset of the
 * channels in every chunk of aSegment. aBlock must be float format or null.
 */
static void ConvertSegmentToAudioBlock(AudioSegment* aSegment,
                                       AudioBlock* aBlock,
                                       int32_t aFallbackChannelCount)
{
  NS_ASSERTION(aSegment->GetDuration() == WEBAUDIO_BLOCK_SIZE, "Bad segment duration");

  {
    AudioSegment::ChunkIterator ci(*aSegment);
    NS_ASSERTION(!ci.IsEnded(), "Should be at least one chunk!");
    if (ci->GetDuration() == WEBAUDIO_BLOCK_SIZE &&
        (ci->IsNull() || ci->mBufferFormat == AUDIO_FORMAT_FLOAT32)) {

      bool aligned = true;
      for (size_t i = 0; i < ci->mChannelData.Length(); ++i) {
        if (!IS_ALIGNED16(ci->mChannelData[i])) {
            aligned = false;
            break;
        }
      }

      // Return this chunk directly to avoid copying data.
      if (aligned) {
        *aBlock = *ci;
        return;
      }
    }
  }

  aBlock->AllocateChannels(aFallbackChannelCount);

  uint32_t duration = 0;
  for (AudioSegment::ChunkIterator ci(*aSegment); !ci.IsEnded(); ci.Next()) {
    switch (ci->mBufferFormat) {
      case AUDIO_FORMAT_S16: {
        CopyChunkToBlock<int16_t>(*ci, aBlock, duration);
        break;
      }
      case AUDIO_FORMAT_FLOAT32: {
        CopyChunkToBlock<float>(*ci, aBlock, duration);
        break;
      }
      case AUDIO_FORMAT_SILENCE: {
        // The actual type of the sample does not matter here, but we still need
        // to send some audio to the graph.
        CopyChunkToBlock<float>(*ci, aBlock, duration);
        break;
      }
    }
    duration += ci->GetDuration();
  }
}
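
ConvertSegmentToAudioBlock takes a zero-copy fast path when the segment consists of a single block-length, float-format (or null), 16-byte-aligned chunk; anything else falls back to allocation and per-chunk copying. A minimal standalone sketch of that check, assuming a simplified ChunkInfo type and a CanShareWithoutCopy helper (both hypothetical, not the Gecko API):

// Illustrative sketch only; ChunkInfo and CanShareWithoutCopy are hypothetical
// stand-ins, not the Gecko AudioChunk API.
#include <cstddef>
#include <cstdint>
#include <cstdio>

constexpr size_t kBlockSize = 128;  // plays the role of WEBAUDIO_BLOCK_SIZE

struct ChunkInfo {
  size_t duration;     // frames in the chunk
  bool isFloat32;      // sample format already matches the output block
  const void* data;    // first channel's buffer (assume all channels alike)
};

// A chunk can be shared without copying only if it already covers the whole
// block, is in float format, and its buffers are 16-byte aligned.
static bool CanShareWithoutCopy(const ChunkInfo& chunk) {
  const bool aligned = (reinterpret_cast<uintptr_t>(chunk.data) & 15) == 0;
  return chunk.duration == kBlockSize && chunk.isFloat32 && aligned;
}

int main() {
  alignas(16) static float samples[kBlockSize] = {};
  ChunkInfo chunk{kBlockSize, true, samples};
  std::printf("zero-copy fast path: %s\n",
              CanShareWithoutCopy(chunk) ? "taken" : "falls back to copy");
  return 0;
}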

void
AudioNodeExternalInputStream::ProcessInput(GraphTime aFrom, GraphTime aTo,
                                           uint32_t aFlags)
{
  // According to spec, number of outputs is always 1.
  MOZ_ASSERT(mLastChunks.Length() == 1);

  // GC stuff can result in our input stream being destroyed before this stream.
  // Handle that.
  if (!IsEnabled() || mInputs.IsEmpty() || mPassThrough) {
    mLastChunks[0].SetNull(WEBAUDIO_BLOCK_SIZE);
    return;
  }

  MOZ_ASSERT(mInputs.Length() == 1);

  MediaStream* source = mInputs[0]->GetSource();
  AutoTArray<AudioSegment,1> audioSegments;
  uint32_t inputChannels = 0;
  for (StreamTracks::TrackIter tracks(source->mTracks);
       !tracks.IsEnded(); tracks.Next()) {
    const StreamTracks::Track& inputTrack = *tracks;
    if (!mInputs[0]->PassTrackThrough(tracks->GetID())) {
      continue;
    }

    if (inputTrack.GetSegment()->GetType() == MediaSegment::VIDEO) {
      MOZ_ASSERT(false, "AudioNodeExternalInputStream shouldn't have video tracks");
      continue;
    }

    const AudioSegment& inputSegment =
        *static_cast<AudioSegment*>(inputTrack.GetSegment());
    if (inputSegment.IsNull()) {
      continue;
    }

    AudioSegment& segment = *audioSegments.AppendElement();
    GraphTime next;
    for (GraphTime t = aFrom; t < aTo; t = next) {
      MediaInputPort::InputInterval interval = mInputs[0]->GetNextInputInterval(t);
      interval.mEnd = std::min(interval.mEnd, aTo);
      if (interval.mStart >= interval.mEnd)
        break;
      next = interval.mEnd;

      // We know this stream does not block during the processing interval ---
      // we're not finished, we don't underrun, and we're not suspended.
      StreamTime outputStart = GraphTimeToStreamTime(interval.mStart);
      StreamTime outputEnd = GraphTimeToStreamTime(interval.mEnd);
      StreamTime ticks = outputEnd - outputStart;

      if (interval.mInputIsBlocked) {
        segment.AppendNullData(ticks);
      } else {
        // The input stream is not blocked in this interval, so no need to call
        // GraphTimeToStreamTimeWithBlocking.
        StreamTime inputStart =
          std::min(inputSegment.GetDuration(),
                   source->GraphTimeToStreamTime(interval.mStart));
        StreamTime inputEnd =
          std::min(inputSegment.GetDuration(),
                   source->GraphTimeToStreamTime(interval.mEnd));

        segment.AppendSlice(inputSegment, inputStart, inputEnd);
        // Pad if we're looking past the end of the track
        segment.AppendNullData(ticks - (inputEnd - inputStart));
      }
    }

    for (AudioSegment::ChunkIterator iter(segment); !iter.IsEnded(); iter.Next()) {
      inputChannels = GetAudioChannelsSuperset(inputChannels, iter->ChannelCount());
    }
  }

  uint32_t accumulateIndex = 0;
  if (inputChannels) {
    DownmixBufferType downmixBuffer;
    ASSERT_ALIGNED16(downmixBuffer.Elements());
    for (uint32_t i = 0; i < audioSegments.Length(); ++i) {
      AudioBlock tmpChunk;
      ConvertSegmentToAudioBlock(&audioSegments[i], &tmpChunk, inputChannels);
      if (!tmpChunk.IsNull()) {
        if (accumulateIndex == 0) {
          mLastChunks[0].AllocateChannels(inputChannels);
        }
        AccumulateInputChunk(accumulateIndex, tmpChunk, &mLastChunks[0], &downmixBuffer);
        accumulateIndex++;
      }
    }
  }
  if (accumulateIndex == 0) {
    mLastChunks[0].SetNull(WEBAUDIO_BLOCK_SIZE);
  }
}

bool
AudioNodeExternalInputStream::IsEnabled()
{
  return ((MediaStreamAudioSourceNodeEngine*)Engine())->IsEnabled();
}

} // namespace mozilla
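
For reference, ProcessInput() renders each eligible input track's segment to a block-sized buffer and mixes the non-null blocks into the single output chunk, leaving the output null when nothing accumulated. A minimal standalone sketch of that accumulation step, using plain arrays and simple summation in place of AudioBlock and AccumulateInputChunk (which also handles channel up/downmixing); all names here are illustrative:

// Illustrative sketch only; plain arrays stand in for AudioBlock, and the
// summation loop stands in for AccumulateInputChunk.
#include <array>
#include <cstddef>
#include <cstdio>
#include <vector>

constexpr size_t kBlockSize = 128;  // plays the role of WEBAUDIO_BLOCK_SIZE
using Block = std::array<float, kBlockSize>;

int main() {
  // Two hypothetical mono tracks already rendered to block-sized buffers.
  std::vector<Block> trackBlocks(2);
  trackBlocks[0].fill(0.25f);
  trackBlocks[1].fill(0.50f);

  Block output{};          // zero-initialized: silence
  size_t accumulated = 0;  // counts tracks that contributed audio
  for (const Block& block : trackBlocks) {
    for (size_t i = 0; i < kBlockSize; ++i) {
      output[i] += block[i];  // mix by summation
    }
    ++accumulated;
  }

  if (accumulated == 0) {
    // Nothing contributed: the output stays silent, the analogue of
    // mLastChunks[0].SetNull(WEBAUDIO_BLOCK_SIZE) above.
    std::printf("no contributing tracks: output stays silent\n");
  } else {
    std::printf("mixed %zu tracks, first sample: %.2f\n", accumulated, output[0]);
  }
  return 0;
}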