/src/mozilla-central/dom/media/webaudio/AudioBufferSourceNode.cpp
Line | Count | Source (jump to first uncovered line) |
1 | | /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ |
2 | | /* vim:set ts=2 sw=2 sts=2 et cindent: */ |
3 | | /* This Source Code Form is subject to the terms of the Mozilla Public |
4 | | * License, v. 2.0. If a copy of the MPL was not distributed with this |
5 | | * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ |
6 | | |
7 | | #include "AudioBufferSourceNode.h" |
8 | | #include "nsDebug.h" |
9 | | #include "mozilla/dom/AudioBufferSourceNodeBinding.h" |
10 | | #include "mozilla/dom/AudioParam.h" |
11 | | #include "mozilla/FloatingPoint.h" |
12 | | #include "nsContentUtils.h" |
13 | | #include "nsMathUtils.h" |
14 | | #include "AlignmentUtils.h" |
15 | | #include "AudioNodeEngine.h" |
16 | | #include "AudioNodeStream.h" |
17 | | #include "AudioDestinationNode.h" |
18 | | #include "AudioParamTimeline.h" |
19 | | #include <limits> |
20 | | #include <algorithm> |
21 | | |
22 | | namespace mozilla { |
23 | | namespace dom { |
24 | | |
// Cycle-collection: traverse/unlink this node's strong edges (the
// AudioBuffer plus the playbackRate and detune AudioParams) in addition to
// everything AudioScheduledSourceNode already collects.
NS_IMPL_CYCLE_COLLECTION_INHERITED(AudioBufferSourceNode,
                                   AudioScheduledSourceNode, mBuffer,
                                   mPlaybackRate, mDetune)

NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION(AudioBufferSourceNode)
NS_INTERFACE_MAP_END_INHERITING(AudioScheduledSourceNode)

// AddRef/Release are forwarded to the AudioScheduledSourceNode base class.
NS_IMPL_ADDREF_INHERITED(AudioBufferSourceNode, AudioScheduledSourceNode)
NS_IMPL_RELEASE_INHERITED(AudioBufferSourceNode, AudioScheduledSourceNode)
34 | | |
35 | | /** |
36 | | * Media-thread playback engine for AudioBufferSourceNode. |
37 | | * Nothing is played until a non-null buffer has been set (via |
38 | | * AudioNodeStream::SetBuffer) and a non-zero mBufferEnd has been set (via |
39 | | * AudioNodeStream::SetInt32Parameter). |
40 | | */ |
class AudioBufferSourceNodeEngine final : public AudioNodeEngine
{
public:
  AudioBufferSourceNodeEngine(AudioNode* aNode,
                              AudioDestinationNode* aDestination) :
    AudioNodeEngine(aNode),
    mStart(0.0), mBeginProcessing(0),
    mStop(STREAM_TIME_MAX),
    mResampler(nullptr), mRemainingResamplerTail(0),
    mBufferEnd(0),
    mLoopStart(0), mLoopEnd(0),
    mBufferPosition(0), mBufferSampleRate(0),
    // mResamplerOutRate is initialized in UpdateResampler().
    mChannels(0),
    mDopplerShift(1.0f),
    mDestination(aDestination->Stream()),
    mPlaybackRateTimeline(1.0f),
    mDetuneTimeline(0.0f),
    mLoop(false)
  {}

  // Free the speex resampler, if one was ever created by UpdateResampler().
  ~AudioBufferSourceNodeEngine()
  {
    if (mResampler) {
      speex_resampler_destroy(mResampler);
    }
  }

  // Record the stream this engine runs on.  Non-owning: per the comment on
  // mSource below, the stream deletes the engine in its destructor.
  void SetSourceStream(AudioNodeStream* aSource)
  {
    mSource = aSource;
  }

  // Insert a playbackRate/detune automation event into the matching
  // timeline, after converting its times from seconds to ticks of the
  // destination stream.
  void RecvTimelineEvent(uint32_t aIndex,
                         dom::AudioTimelineEvent& aEvent) override
  {
    MOZ_ASSERT(mDestination);
    WebAudioUtils::ConvertAudioTimelineEventToTicks(aEvent,
                                                    mDestination);

    switch (aIndex) {
      case AudioBufferSourceNode::PLAYBACKRATE:
        mPlaybackRateTimeline.InsertEvent<int64_t>(aEvent);
        break;
      case AudioBufferSourceNode::DETUNE:
        mDetuneTimeline.InsertEvent<int64_t>(aEvent);
        break;
      default:
        NS_ERROR("Bad AudioBufferSourceNodeEngine TimelineParameter");
    }
  }

  // The only StreamTime parameter is the scheduled stop() time.
  void SetStreamTimeParameter(uint32_t aIndex, StreamTime aParam) override
  {
    switch (aIndex) {
      case AudioBufferSourceNode::STOP: mStop = aParam; break;
      default:
        NS_ERROR("Bad AudioBufferSourceNodeEngine StreamTimeParameter");
    }
  }

  void SetDoubleParameter(uint32_t aIndex, double aParam) override
  {
    switch (aIndex) {
      case AudioBufferSourceNode::START:
        MOZ_ASSERT(!mStart, "Another START?");
        // Convert the start time (seconds) to destination-stream ticks,
        // keeping the fractional position between ticks in mStart.
        mStart = aParam * mDestination->SampleRate();
        // Round to nearest
        mBeginProcessing = mStart + 0.5;
        break;
      case AudioBufferSourceNode::DOPPLERSHIFT:
        // Reject non-positive or NaN shifts; fall back to 1.0 (no shift).
        mDopplerShift = (aParam <= 0 || mozilla::IsNaN(aParam)) ? 1.0 : aParam;
        break;
      default:
        NS_ERROR("Bad AudioBufferSourceNodeEngine double parameter.");
    };
  }

  void SetInt32Parameter(uint32_t aIndex, int32_t aParam) override
  {
    switch (aIndex) {
      case AudioBufferSourceNode::SAMPLE_RATE:
        // A non-zero sample rate is what marks the engine as active; see
        // IsActive() and the early return in ProcessBlock().
        MOZ_ASSERT(aParam > 0);
        mBufferSampleRate = aParam;
        mSource->SetActive();
        break;
      case AudioBufferSourceNode::BUFFERSTART:
        MOZ_ASSERT(aParam >= 0);
        // Only accept a start offset if playback has not yet advanced
        // mBufferPosition past zero.
        if (mBufferPosition == 0) {
          mBufferPosition = aParam;
        }
        break;
      case AudioBufferSourceNode::BUFFEREND:
        MOZ_ASSERT(aParam >= 0);
        mBufferEnd = aParam;
        break;
      case AudioBufferSourceNode::LOOP: mLoop = !!aParam; break;
      case AudioBufferSourceNode::LOOPSTART:
        MOZ_ASSERT(aParam >= 0);
        mLoopStart = aParam;
        break;
      case AudioBufferSourceNode::LOOPEND:
        MOZ_ASSERT(aParam >= 0);
        mLoopEnd = aParam;
        break;
      default:
        NS_ERROR("Bad AudioBufferSourceNodeEngine Int32Parameter");
    }
  }

  // Adopt the (possibly shared) channel data to play from.
  void SetBuffer(AudioChunk&& aBuffer) override
  {
    mBuffer = aBuffer;
  }

  // True once the resampler has consumed input.  -STREAM_TIME_MAX is the
  // sentinel stored into mBeginProcessing by
  // CopyFromInputBufferWithResampling() on first use.
  bool BegunResampling()
  {
    return mBeginProcessing == -STREAM_TIME_MAX;
  }

  // (Re)configure the resampler for a new output rate and/or channel count.
  // Destroys and rebuilds the resampler when the channel count changes, and
  // recomputes mBeginProcessing to account for the resampler's filter
  // latency when resampling has not yet begun.
  void UpdateResampler(int32_t aOutRate, uint32_t aChannels)
  {
    if (mResampler &&
        (aChannels != mChannels ||
         // If the resampler has begun, then it will have moved
         // mBufferPosition to after the samples it has read, but it hasn't
         // output its buffered samples.  Keep using the resampler, even if
         // the rates now match, so that this latent segment is output.
         (aOutRate == mBufferSampleRate && !BegunResampling()))) {
      speex_resampler_destroy(mResampler);
      mResampler = nullptr;
      mRemainingResamplerTail = 0;
      mBeginProcessing = mStart + 0.5;
    }

    if (aChannels == 0 ||
        (aOutRate == mBufferSampleRate && !mResampler)) {
      // No resampling needed (or possible); record the rate and bail.
      mResamplerOutRate = aOutRate;
      return;
    }

    if (!mResampler) {
      mChannels = aChannels;
      mResampler = speex_resampler_init(mChannels, mBufferSampleRate, aOutRate,
                                        SPEEX_RESAMPLER_QUALITY_MIN,
                                        nullptr);
    } else {
      if (mResamplerOutRate == aOutRate) {
        return;
      }
      if (speex_resampler_set_rate(mResampler, mBufferSampleRate, aOutRate) != RESAMPLER_ERR_SUCCESS) {
        NS_ASSERTION(false, "speex_resampler_set_rate failed");
        return;
      }
    }

    mResamplerOutRate = aOutRate;

    if (!BegunResampling()) {
      // Low pass filter effects from the resampler mean that samples before
      // the start time are influenced by resampling the buffer.  The input
      // latency indicates half the filter width.
      int64_t inputLatency = speex_resampler_get_input_latency(mResampler);
      uint32_t ratioNum, ratioDen;
      speex_resampler_get_ratio(mResampler, &ratioNum, &ratioDen);
      // The output subsample resolution supported in aligning the resampler
      // is ratioNum.  First round the start time to the nearest subsample.
      int64_t subsample = mStart * ratioNum + 0.5;
      // Now include the leading effects of the filter, and round *up* to the
      // next whole tick, because there is no effect on samples outside the
      // filter width.
      mBeginProcessing =
        (subsample - inputLatency * ratioDen + ratioNum - 1) / ratioNum;
    }
  }

  // Borrow a full buffer of size WEBAUDIO_BLOCK_SIZE from the source buffer
  // at offset aSourceOffset. This avoids copying memory.
  void BorrowFromInputBuffer(AudioBlock* aOutput,
                             uint32_t aChannels)
  {
    aOutput->SetBuffer(mBuffer.mBuffer);
    aOutput->mChannelData.SetLength(aChannels);
    for (uint32_t i = 0; i < aChannels; ++i) {
      aOutput->mChannelData[i] =
        mBuffer.ChannelData<float>()[i] + mBufferPosition;
    }
    aOutput->mVolume = mBuffer.mVolume;
    aOutput->mBufferFormat = AUDIO_FORMAT_FLOAT32;
  }

  // Copy aNumberOfFrames frames from the source buffer at offset aSourceOffset
  // and put it at offset aBufferOffset in the destination buffer.
  // T is the source sample format (float or int16_t); output is always
  // converted to float.
  template <typename T> void
  CopyFromInputBuffer(AudioBlock* aOutput,
                      uint32_t aChannels,
                      uintptr_t aOffsetWithinBlock,
                      uint32_t aNumberOfFrames)
  {
    MOZ_ASSERT(mBuffer.mVolume == 1.0f);
    for (uint32_t i = 0; i < aChannels; ++i) {
      float* baseChannelData = aOutput->ChannelFloatsForWrite(i);
      ConvertAudioSamples(mBuffer.ChannelData<T>()[i] + mBufferPosition,
                          baseChannelData + aOffsetWithinBlock,
                          aNumberOfFrames);
    }
  }

  // Resamples input data to an output buffer, according to |mBufferSampleRate| and
  // the playbackRate/detune.
  // The number of frames consumed/produced depends on the amount of space
  // remaining in both the input and output buffer, and the playback rate (that
  // is, the ratio between the output samplerate and the input samplerate).
  void CopyFromInputBufferWithResampling(AudioBlock* aOutput,
                                         uint32_t aChannels,
                                         uint32_t* aOffsetWithinBlock,
                                         uint32_t aAvailableInOutput,
                                         StreamTime* aCurrentPosition,
                                         uint32_t aBufferMax)
  {
    if (*aOffsetWithinBlock == 0) {
      aOutput->AllocateChannels(aChannels);
    }
    SpeexResamplerState* resampler = mResampler;
    MOZ_ASSERT(aChannels > 0);

    if (mBufferPosition < aBufferMax) {
      // Still have buffer data to feed through the resampler.
      uint32_t availableInInputBuffer = aBufferMax - mBufferPosition;
      uint32_t ratioNum, ratioDen;
      speex_resampler_get_ratio(resampler, &ratioNum, &ratioDen);
      // Limit the number of input samples copied and possibly
      // format-converted for resampling by estimating how many will be used.
      // This may be a little small if still filling the resampler with
      // initial data, but we'll get called again and it will work out.
      uint32_t inputLimit = aAvailableInOutput * ratioNum / ratioDen + 10;
      if (!BegunResampling()) {
        // First time the resampler is used.
        uint32_t inputLatency = speex_resampler_get_input_latency(resampler);
        inputLimit += inputLatency;
        // If starting after mStart, then play from the beginning of the
        // buffer, but correct for input latency.  If starting before mStart,
        // then align the resampler so that the time corresponding to the
        // first input sample is mStart.
        int64_t skipFracNum = static_cast<int64_t>(inputLatency) * ratioDen;
        double leadTicks = mStart - *aCurrentPosition;
        if (leadTicks > 0.0) {
          // Round to nearest output subsample supported by the resampler at
          // these rates.
          int64_t leadSubsamples = leadTicks * ratioNum + 0.5;
          MOZ_ASSERT(leadSubsamples <= skipFracNum,
                     "mBeginProcessing is wrong?");
          skipFracNum -= leadSubsamples;
        }
        speex_resampler_set_skip_frac_num(resampler,
                                  std::min<int64_t>(skipFracNum, UINT32_MAX));

        // Mark the resampler as started; see BegunResampling().
        mBeginProcessing = -STREAM_TIME_MAX;
      }
      inputLimit = std::min(inputLimit, availableInInputBuffer);

      MOZ_ASSERT(mBuffer.mVolume == 1.0f);
      // Feed every channel through the resampler; shared position state is
      // only updated after the last channel, because speex consumes the same
      // number of frames for each channel.
      for (uint32_t i = 0; true; ) {
        uint32_t inSamples = inputLimit;

        uint32_t outSamples = aAvailableInOutput;
        float* outputData =
          aOutput->ChannelFloatsForWrite(i) + *aOffsetWithinBlock;

        if (mBuffer.mBufferFormat == AUDIO_FORMAT_FLOAT32) {
          const float* inputData =
            mBuffer.ChannelData<float>()[i] + mBufferPosition;
          WebAudioUtils::SpeexResamplerProcess(resampler, i,
                                               inputData, &inSamples,
                                               outputData, &outSamples);
        } else {
          MOZ_ASSERT(mBuffer.mBufferFormat == AUDIO_FORMAT_S16);
          const int16_t* inputData =
            mBuffer.ChannelData<int16_t>()[i] + mBufferPosition;
          WebAudioUtils::SpeexResamplerProcess(resampler, i,
                                               inputData, &inSamples,
                                               outputData, &outSamples);
        }
        if (++i == aChannels) {
          mBufferPosition += inSamples;
          MOZ_ASSERT(mBufferPosition <= mBufferEnd || mLoop);
          *aOffsetWithinBlock += outSamples;
          *aCurrentPosition += outSamples;
          if (inSamples == availableInInputBuffer && !mLoop) {
            // We'll feed in enough zeros to empty out the resampler's memory.
            // This handles the output latency as well as capturing the low
            // pass effects of the resample filter.
            mRemainingResamplerTail =
              2 * speex_resampler_get_input_latency(resampler) - 1;
          }
          return;
        }
      }
    } else {
      // Input exhausted: drain the resampler's remaining tail with zeros.
      for (uint32_t i = 0; true; ) {
        uint32_t inSamples = mRemainingResamplerTail;
        uint32_t outSamples = aAvailableInOutput;
        float* outputData =
          aOutput->ChannelFloatsForWrite(i) + *aOffsetWithinBlock;

        // AudioDataValue* for aIn selects the function that does not try to
        // copy and format-convert input data.
        WebAudioUtils::SpeexResamplerProcess(resampler, i,
                         static_cast<AudioDataValue*>(nullptr), &inSamples,
                         outputData, &outSamples);
        if (++i == aChannels) {
          MOZ_ASSERT(inSamples <= mRemainingResamplerTail);
          mRemainingResamplerTail -= inSamples;
          *aOffsetWithinBlock += outSamples;
          *aCurrentPosition += outSamples;
          break;
        }
      }
    }
  }

  /**
   * Fill aOutput with as many zero frames as we can, and advance
   * aOffsetWithinBlock and aCurrentPosition based on how many frames we write.
   * This will never advance aOffsetWithinBlock past WEBAUDIO_BLOCK_SIZE or
   * aCurrentPosition past aMaxPos.  This function knows when it needs to
   * allocate the output buffer, and also optimizes the case where it can avoid
   * memory allocations.
   */
  void FillWithZeroes(AudioBlock* aOutput,
                      uint32_t aChannels,
                      uint32_t* aOffsetWithinBlock,
                      StreamTime* aCurrentPosition,
                      StreamTime aMaxPos)
  {
    MOZ_ASSERT(*aCurrentPosition < aMaxPos);
    uint32_t numFrames =
      std::min<StreamTime>(WEBAUDIO_BLOCK_SIZE - *aOffsetWithinBlock,
                           aMaxPos - *aCurrentPosition);
    if (numFrames == WEBAUDIO_BLOCK_SIZE || !aChannels) {
      // A whole block (or no channels at all): a null block needs no
      // allocation.
      aOutput->SetNull(numFrames);
    } else {
      if (*aOffsetWithinBlock == 0) {
        aOutput->AllocateChannels(aChannels);
      }
      WriteZeroesToAudioBlock(aOutput, *aOffsetWithinBlock, numFrames);
    }
    *aOffsetWithinBlock += numFrames;
    *aCurrentPosition += numFrames;
  }

  /**
   * Copy as many frames as possible from the source buffer to aOutput, and
   * advance aOffsetWithinBlock and aCurrentPosition based on how many frames
   * we write.  This will never advance aOffsetWithinBlock past
   * WEBAUDIO_BLOCK_SIZE, or aCurrentPosition past mStop.  It takes data from
   * the buffer at aBufferOffset, and never takes more data than aBufferMax.
   * This function knows when it needs to allocate the output buffer, and also
   * optimizes the case where it can avoid memory allocations.
   */
  void CopyFromBuffer(AudioBlock* aOutput,
                      uint32_t aChannels,
                      uint32_t* aOffsetWithinBlock,
                      StreamTime* aCurrentPosition,
                      uint32_t aBufferMax)
  {
    MOZ_ASSERT(*aCurrentPosition < mStop);
    uint32_t availableInOutput =
      std::min<StreamTime>(WEBAUDIO_BLOCK_SIZE - *aOffsetWithinBlock,
                           mStop - *aCurrentPosition);
    if (mResampler) {
      CopyFromInputBufferWithResampling(aOutput, aChannels,
                                        aOffsetWithinBlock, availableInOutput,
                                        aCurrentPosition, aBufferMax);
      return;
    }

    if (aChannels == 0) {
      aOutput->SetNull(WEBAUDIO_BLOCK_SIZE);
      // There is no attempt here to limit advance so that mBufferPosition is
      // limited to aBufferMax.  The only observable affect of skipping the
      // check would be in the precise timing of the ended event if the loop
      // attribute is reset after playback has looped.
      *aOffsetWithinBlock += availableInOutput;
      *aCurrentPosition += availableInOutput;
      // Rounding at the start and end of the period means that fractional
      // increments essentially accumulate if outRate remains constant.  If
      // outRate is varying, then accumulation happens on average but not
      // precisely.
      TrackTicks start = *aCurrentPosition *
        mBufferSampleRate / mResamplerOutRate;
      TrackTicks end = (*aCurrentPosition + availableInOutput) *
        mBufferSampleRate / mResamplerOutRate;
      mBufferPosition += end - start;
      return;
    }

    uint32_t numFrames = std::min(aBufferMax - mBufferPosition,
                                  availableInOutput);

    // Borrowing (zero-copy) is only possible for a whole block of float
    // samples whose channel pointers are 16-byte aligned.
    bool shouldBorrow = false;
    if (numFrames == WEBAUDIO_BLOCK_SIZE &&
        mBuffer.mBufferFormat == AUDIO_FORMAT_FLOAT32) {
      shouldBorrow = true;
      for (uint32_t i = 0; i < aChannels; ++i) {
        if (!IS_ALIGNED16(mBuffer.ChannelData<float>()[i] + mBufferPosition)) {
          shouldBorrow = false;
          break;
        }
      }
    }
    MOZ_ASSERT(mBufferPosition < aBufferMax);
    if (shouldBorrow) {
      BorrowFromInputBuffer(aOutput, aChannels);
    } else {
      if (*aOffsetWithinBlock == 0) {
        aOutput->AllocateChannels(aChannels);
      }
      if (mBuffer.mBufferFormat == AUDIO_FORMAT_FLOAT32) {
        CopyFromInputBuffer<float>(aOutput, aChannels,
                                   *aOffsetWithinBlock, numFrames);
      } else {
        MOZ_ASSERT(mBuffer.mBufferFormat == AUDIO_FORMAT_S16);
        CopyFromInputBuffer<int16_t>(aOutput, aChannels,
                                     *aOffsetWithinBlock, numFrames);
      }
    }
    *aOffsetWithinBlock += numFrames;
    *aCurrentPosition += numFrames;
    mBufferPosition += numFrames;
  }

  // Combine playbackRate, detune (in cents) and doppler shift into the
  // resampler's output rate.  Returns mBufferSampleRate when the computed
  // rate truncates to zero.
  int32_t ComputeFinalOutSampleRate(float aPlaybackRate, float aDetune)
  {
    float computedPlaybackRate = aPlaybackRate * pow(2, aDetune / 1200.f);
    // Make sure the playback rate and the doppler shift are something
    // our resampler can work with.
    int32_t rate = WebAudioUtils::
      TruncateFloatToInt<int32_t>(mSource->SampleRate() /
                                  (computedPlaybackRate * mDopplerShift));
    return rate ? rate : mBufferSampleRate;
  }

  // Sample the playbackRate/detune timelines at aStreamPosition, sanitize
  // the values, and reconfigure the resampler accordingly.
  void UpdateSampleRateIfNeeded(uint32_t aChannels, StreamTime aStreamPosition)
  {
    float playbackRate;
    float detune;

    if (mPlaybackRateTimeline.HasSimpleValue()) {
      playbackRate = mPlaybackRateTimeline.GetValue();
    } else {
      playbackRate = mPlaybackRateTimeline.GetValueAtTime(aStreamPosition);
    }
    if (mDetuneTimeline.HasSimpleValue()) {
      detune = mDetuneTimeline.GetValue();
    } else {
      detune = mDetuneTimeline.GetValueAtTime(aStreamPosition);
    }
    // Non-positive or NaN rates are replaced by the default rate.
    if (playbackRate <= 0 || mozilla::IsNaN(playbackRate)) {
      playbackRate = 1.0f;
    }

    // Clamp detune to one octave in either direction.
    detune = std::min(std::max(-1200.f, detune), 1200.f);

    int32_t outRate = ComputeFinalOutSampleRate(playbackRate, detune);
    UpdateResampler(outRate, aChannels);
  }

  // Produce one WEBAUDIO_BLOCK_SIZE block: silence before the start time and
  // after the stop time, buffer (or resampler) data in between, looping if
  // enabled.  Sets *aFinished once playback can never produce sound again.
  void ProcessBlock(AudioNodeStream* aStream,
                    GraphTime aFrom,
                    const AudioBlock& aInput,
                    AudioBlock* aOutput,
                    bool* aFinished) override
  {
    if (mBufferSampleRate == 0) {
      // start() has not yet been called or no buffer has yet been set
      aOutput->SetNull(WEBAUDIO_BLOCK_SIZE);
      return;
    }

    StreamTime streamPosition = mDestination->GraphTimeToStreamTime(aFrom);
    uint32_t channels = mBuffer.ChannelCount();

    UpdateSampleRateIfNeeded(channels, streamPosition);

    uint32_t written = 0;
    while (written < WEBAUDIO_BLOCK_SIZE) {
      if (mStop != STREAM_TIME_MAX &&
          streamPosition >= mStop) {
        FillWithZeroes(aOutput, channels, &written, &streamPosition, STREAM_TIME_MAX);
        continue;
      }
      if (streamPosition < mBeginProcessing) {
        FillWithZeroes(aOutput, channels, &written, &streamPosition,
                       mBeginProcessing);
        continue;
      }
      if (mLoop) {
        // mLoopEnd can become less than mBufferPosition when a LOOPEND engine
        // parameter is received after "loopend" is changed on the node or a
        // new buffer with lower samplerate is set.
        if (mBufferPosition >= mLoopEnd) {
          mBufferPosition = mLoopStart;
        }
        CopyFromBuffer(aOutput, channels, &written, &streamPosition, mLoopEnd);
      } else {
        if (mBufferPosition < mBufferEnd || mRemainingResamplerTail) {
          CopyFromBuffer(aOutput, channels, &written, &streamPosition, mBufferEnd);
        } else {
          FillWithZeroes(aOutput, channels, &written, &streamPosition, STREAM_TIME_MAX);
        }
      }
    }

    // We've finished if we've gone past mStop, or if we're past mDuration when
    // looping is disabled.
    if (streamPosition >= mStop ||
        (!mLoop && mBufferPosition >= mBufferEnd && !mRemainingResamplerTail)) {
      *aFinished = true;
    }
  }

  bool IsActive() const override
  {
    // Whether buffer has been set and start() has been called.
    return mBufferSampleRate != 0;
  }

  size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const override
  {
    // Not owned:
    // - mBuffer - shared w/ AudioNode
    // - mPlaybackRateTimeline - shared w/ AudioNode
    // - mDetuneTimeline - shared w/ AudioNode

    size_t amount = AudioNodeEngine::SizeOfExcludingThis(aMallocSizeOf);

    // NB: We need to modify speex if we want the full memory picture, internal
    //     fields that need measuring noted below.
    // - mResampler->mem
    // - mResampler->sinc_table
    // - mResampler->last_sample
    // - mResampler->magic_samples
    // - mResampler->samp_frac_num
    amount += aMallocSizeOf(mResampler);

    return amount;
  }

  size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override
  {
    return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf);
  }

  // Scheduled start time in destination-stream ticks,
  double mStart; // including the fractional position between ticks
  // Low pass filter effects from the resampler mean that samples before the
  // start time are influenced by resampling the buffer.  mBeginProcessing
  // includes the extent of this filter.  The special value of -STREAM_TIME_MAX
  // indicates that the resampler has begun processing.
  StreamTime mBeginProcessing;
  StreamTime mStop;
  AudioChunk mBuffer;
  SpeexResamplerState* mResampler;
  // mRemainingResamplerTail, like mBufferPosition, and
  // mBufferEnd, is measured in input buffer samples.
  uint32_t mRemainingResamplerTail;
  uint32_t mBufferEnd;
  uint32_t mLoopStart;
  uint32_t mLoopEnd;
  uint32_t mBufferPosition;
  int32_t mBufferSampleRate;
  int32_t mResamplerOutRate;
  uint32_t mChannels;
  float mDopplerShift;
  RefPtr<AudioNodeStream> mDestination;

  // mSource deletes the engine in its destructor.
  AudioNodeStream* MOZ_NON_OWNING_REF mSource;
  AudioParamTimeline mPlaybackRateTimeline;
  AudioParamTimeline mDetuneTimeline;
  bool mLoop;
};
618 | | |
619 | | AudioBufferSourceNode::AudioBufferSourceNode(AudioContext* aContext) |
620 | | : AudioScheduledSourceNode(aContext, |
621 | | 2, |
622 | | ChannelCountMode::Max, |
623 | | ChannelInterpretation::Speakers) |
624 | | , mLoopStart(0.0) |
625 | | , mLoopEnd(0.0) |
626 | | // mOffset and mDuration are initialized in Start(). |
627 | | , mPlaybackRate(new AudioParam(this, PLAYBACKRATE, "playbackRate", 1.0f)) |
628 | | , mDetune(new AudioParam(this, DETUNE, "detune", 0.0f)) |
629 | | , mLoop(false) |
630 | | , mStartCalled(false) |
631 | 0 | { |
632 | 0 | AudioBufferSourceNodeEngine* engine = new AudioBufferSourceNodeEngine(this, aContext->Destination()); |
633 | 0 | mStream = AudioNodeStream::Create(aContext, engine, |
634 | 0 | AudioNodeStream::NEED_MAIN_THREAD_FINISHED, |
635 | 0 | aContext->Graph()); |
636 | 0 | engine->SetSourceStream(mStream); |
637 | 0 | mStream->AddMainThreadListener(this); |
638 | 0 | } |
639 | | |
640 | | /* static */ already_AddRefed<AudioBufferSourceNode> |
641 | | AudioBufferSourceNode::Create(JSContext* aCx, AudioContext& aAudioContext, |
642 | | const AudioBufferSourceOptions& aOptions, |
643 | | ErrorResult& aRv) |
644 | 0 | { |
645 | 0 | if (aAudioContext.CheckClosed(aRv)) { |
646 | 0 | return nullptr; |
647 | 0 | } |
648 | 0 | |
649 | 0 | RefPtr<AudioBufferSourceNode> audioNode = new AudioBufferSourceNode(&aAudioContext); |
650 | 0 |
|
651 | 0 | if (aOptions.mBuffer.WasPassed()) { |
652 | 0 | MOZ_ASSERT(aCx); |
653 | 0 | audioNode->SetBuffer(aCx, aOptions.mBuffer.Value()); |
654 | 0 | } |
655 | 0 |
|
656 | 0 | audioNode->Detune()->SetValue(aOptions.mDetune); |
657 | 0 | audioNode->SetLoop(aOptions.mLoop); |
658 | 0 | audioNode->SetLoopEnd(aOptions.mLoopEnd); |
659 | 0 | audioNode->SetLoopStart(aOptions.mLoopStart); |
660 | 0 | audioNode->PlaybackRate()->SetValue(aOptions.mPlaybackRate); |
661 | 0 |
|
662 | 0 | return audioNode.forget(); |
663 | 0 | } |
664 | | void |
665 | | AudioBufferSourceNode::DestroyMediaStream() |
666 | 0 | { |
667 | 0 | bool hadStream = mStream; |
668 | 0 | if (hadStream) { |
669 | 0 | mStream->RemoveMainThreadListener(this); |
670 | 0 | } |
671 | 0 | AudioNode::DestroyMediaStream(); |
672 | 0 | } |
673 | | |
674 | | size_t |
675 | | AudioBufferSourceNode::SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const |
676 | 0 | { |
677 | 0 | size_t amount = AudioNode::SizeOfExcludingThis(aMallocSizeOf); |
678 | 0 |
|
679 | 0 | /* mBuffer can be shared and is accounted for separately. */ |
680 | 0 |
|
681 | 0 | amount += mPlaybackRate->SizeOfIncludingThis(aMallocSizeOf); |
682 | 0 | amount += mDetune->SizeOfIncludingThis(aMallocSizeOf); |
683 | 0 | return amount; |
684 | 0 | } |
685 | | |
// Memory-reporter hook: this object plus everything SizeOfExcludingThis
// counts.
size_t
AudioBufferSourceNode::SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const
{
  return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf);
}
691 | | |
// Create the JS reflector for this node via the generated WebIDL binding.
JSObject*
AudioBufferSourceNode::WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto)
{
  return AudioBufferSourceNode_Binding::Wrap(aCx, this, aGivenProto);
}
697 | | |
// Implements AudioBufferSourceNode.start(when, offset, duration).
// Validates the arguments (throwing RangeError / InvalidStateError per the
// Web Audio spec), records offset/duration for later use, and forwards the
// scheduling parameters to the media-thread engine.  start() may be called
// at most once per node.
void
AudioBufferSourceNode::Start(double aWhen, double aOffset,
                             const Optional<double>& aDuration, ErrorResult& aRv)
{
  if (!WebAudioUtils::IsTimeValid(aWhen)) {
    aRv.ThrowRangeError<MSG_VALUE_OUT_OF_RANGE>(NS_LITERAL_STRING("start time"));
    return;
  }
  if (aOffset < 0) {
    aRv.ThrowRangeError<MSG_VALUE_OUT_OF_RANGE>(NS_LITERAL_STRING("offset"));
    return;
  }
  if (aDuration.WasPassed() && !WebAudioUtils::IsTimeValid(aDuration.Value())) {
    aRv.ThrowRangeError<MSG_VALUE_OUT_OF_RANGE>(NS_LITERAL_STRING("duration"));
    return;
  }

  // Per spec, a second start() call is an InvalidStateError.
  if (mStartCalled) {
    aRv.Throw(NS_ERROR_DOM_INVALID_STATE_ERR);
    return;
  }
  mStartCalled = true;

  AudioNodeStream* ns = mStream;
  if (!ns) {
    // Nothing to play, or we're already dead for some reason
    return;
  }

  // Remember our arguments so that we can use them when we get a new buffer.
  // std::numeric_limits<double>::min() is the "no duration passed" sentinel
  // checked in SendOffsetAndDurationParametersToStream().
  mOffset = aOffset;
  mDuration = aDuration.WasPassed() ? aDuration.Value()
                                    : std::numeric_limits<double>::min();

  WEB_AUDIO_API_LOG("%f: %s %u Start(%f, %g, %g)", Context()->CurrentTime(),
                    NodeType(), Id(), aWhen, aOffset, mDuration);

  // We can't send these parameters without a buffer because we don't know the
  // buffer's sample rate or length.
  if (mBuffer) {
    SendOffsetAndDurationParametersToStream(ns);
  }

  // Don't set parameter unnecessarily
  if (aWhen > 0.0) {
    ns->SetDoubleParameter(START, aWhen);
  }
}
746 | | |
// Convenience overload: start(when) with no offset and no duration.
void
AudioBufferSourceNode::Start(double aWhen, ErrorResult& aRv)
{
  Start(aWhen, 0 /* offset */, Optional<double>(), aRv);
}
752 | | |
// Push the current buffer (or its absence) to the media-thread engine.
// Called when the buffer attribute changes.  With a buffer present, also
// re-sends offset/duration if start() has already run; with no buffer,
// clears the engine's data and marks the node inactive.
void
AudioBufferSourceNode::SendBufferParameterToStream(JSContext* aCx)
{
  AudioNodeStream* ns = mStream;
  if (!ns) {
    return;
  }

  if (mBuffer) {
    AudioChunk data = mBuffer->GetThreadSharedChannelsForRate(aCx);
    ns->SetBuffer(std::move(data));

    if (mStartCalled) {
      SendOffsetAndDurationParametersToStream(ns);
    }
  } else {
    // BUFFEREND of 0 plus an empty chunk silences the engine entirely.
    ns->SetInt32Parameter(BUFFEREND, 0);
    ns->SetBuffer(AudioChunk());

    MarkInactive();
  }
}
775 | | |
776 | | void |
777 | | AudioBufferSourceNode::SendOffsetAndDurationParametersToStream(AudioNodeStream* aStream) |
778 | 0 | { |
779 | 0 | NS_ASSERTION(mBuffer && mStartCalled, |
780 | 0 | "Only call this when we have a buffer and start() has been called"); |
781 | 0 |
|
782 | 0 | float rate = mBuffer->SampleRate(); |
783 | 0 | aStream->SetInt32Parameter(SAMPLE_RATE, rate); |
784 | 0 |
|
785 | 0 | int32_t bufferEnd = mBuffer->Length(); |
786 | 0 | int32_t offsetSamples = std::max(0, NS_lround(mOffset * rate)); |
787 | 0 |
|
788 | 0 | // Don't set parameter unnecessarily |
789 | 0 | if (offsetSamples > 0) { |
790 | 0 | aStream->SetInt32Parameter(BUFFERSTART, offsetSamples); |
791 | 0 | } |
792 | 0 |
|
793 | 0 | if (mDuration != std::numeric_limits<double>::min()) { |
794 | 0 | MOZ_ASSERT(mDuration >= 0.0); // provided by Start() |
795 | 0 | MOZ_ASSERT(rate >= 0.0f); // provided by AudioBuffer::Create() |
796 | 0 | static_assert(std::numeric_limits<double>::digits >= |
797 | 0 | std::numeric_limits<decltype(bufferEnd)>::digits, |
798 | 0 | "bufferEnd should be represented exactly by double"); |
799 | 0 | // + 0.5 rounds mDuration to nearest sample when assigned to bufferEnd. |
800 | 0 | bufferEnd = std::min<double>(bufferEnd, |
801 | 0 | offsetSamples + mDuration * rate + 0.5); |
802 | 0 | } |
803 | 0 | aStream->SetInt32Parameter(BUFFEREND, bufferEnd); |
804 | 0 |
|
805 | 0 | MarkActive(); |
806 | 0 | } |
807 | | |
808 | | void |
809 | | AudioBufferSourceNode::Stop(double aWhen, ErrorResult& aRv) |
810 | 0 | { |
811 | 0 | if (!WebAudioUtils::IsTimeValid(aWhen)) { |
812 | 0 | aRv.ThrowRangeError<MSG_VALUE_OUT_OF_RANGE>(NS_LITERAL_STRING("stop time")); |
813 | 0 | return; |
814 | 0 | } |
815 | 0 |
|
816 | 0 | if (!mStartCalled) { |
817 | 0 | aRv.Throw(NS_ERROR_DOM_INVALID_STATE_ERR); |
818 | 0 | return; |
819 | 0 | } |
820 | 0 | |
821 | 0 | WEB_AUDIO_API_LOG("%f: %s %u Stop(%f)", Context()->CurrentTime(), |
822 | 0 | NodeType(), Id(), aWhen); |
823 | 0 |
|
824 | 0 | AudioNodeStream* ns = mStream; |
825 | 0 | if (!ns || !Context()) { |
826 | 0 | // We've already stopped and had our stream shut down |
827 | 0 | return; |
828 | 0 | } |
829 | 0 | |
830 | 0 | ns->SetStreamTimeParameter(STOP, Context(), std::max(0.0, aWhen)); |
831 | 0 | } |
832 | | |
/**
 * Called on the main thread once the media stream has finished playing.
 *
 * Dispatches the "ended" DOM event asynchronously (re-queuing itself via
 * AddScriptRunner if scripts cannot safely run yet), tears down the media
 * stream afterwards, and finally drops the node's self-keeping "playing"
 * reference via MarkInactive().
 */
void
AudioBufferSourceNode::NotifyMainThreadStreamFinished()
{
  MOZ_ASSERT(mStream->IsFinished());

  class EndedEventDispatcher final : public Runnable
  {
  public:
    explicit EndedEventDispatcher(AudioBufferSourceNode* aNode)
      : mozilla::Runnable("EndedEventDispatcher")
      , mNode(aNode)
    {
    }
    NS_IMETHOD Run() override
    {
      // If it's not safe to run scripts right now, schedule this to run later
      if (!nsContentUtils::IsSafeToRunScript()) {
        nsContentUtils::AddScriptRunner(this);
        return NS_OK;
      }

      mNode->DispatchTrustedEvent(NS_LITERAL_STRING("ended"));
      // Release stream resources.
      mNode->DestroyMediaStream();
      return NS_OK;
    }
  private:
    // Strong reference: keeps the node alive until the event has fired,
    // even after MarkInactive() below drops the playing reference.
    RefPtr<AudioBufferSourceNode> mNode;
  };

  Context()->Dispatch(do_AddRef(new EndedEventDispatcher(this)));

  // Drop the playing reference
  // Warning: The below line might delete this.
  MarkInactive();
}
869 | | |
/**
 * Forwards a doppler-shift factor (computed by a connected PannerNode) to
 * the media-thread engine as the DOPPLERSHIFT double parameter.
 *
 * @param aDopplerShift The shift factor to apply to playback.
 */
void
AudioBufferSourceNode::SendDopplerShiftToStream(double aDopplerShift)
{
  MOZ_ASSERT(mStream, "Should have disconnected panner if no stream");
  SendDoubleParameterToStream(DOPPLERSHIFT, aDopplerShift);
}
876 | | |
877 | | void |
878 | | AudioBufferSourceNode::SendLoopParametersToStream() |
879 | 0 | { |
880 | 0 | if (!mStream) { |
881 | 0 | return; |
882 | 0 | } |
883 | 0 | // Don't compute and set the loop parameters unnecessarily |
884 | 0 | if (mLoop && mBuffer) { |
885 | 0 | float rate = mBuffer->SampleRate(); |
886 | 0 | double length = (double(mBuffer->Length()) / mBuffer->SampleRate()); |
887 | 0 | double actualLoopStart, actualLoopEnd; |
888 | 0 | if (mLoopStart >= 0.0 && mLoopEnd > 0.0 && |
889 | 0 | mLoopStart < mLoopEnd) { |
890 | 0 | MOZ_ASSERT(mLoopStart != 0.0 || mLoopEnd != 0.0); |
891 | 0 | actualLoopStart = (mLoopStart > length) ? 0.0 : mLoopStart; |
892 | 0 | actualLoopEnd = std::min(mLoopEnd, length); |
893 | 0 | } else { |
894 | 0 | actualLoopStart = 0.0; |
895 | 0 | actualLoopEnd = length; |
896 | 0 | } |
897 | 0 | int32_t loopStartTicks = NS_lround(actualLoopStart * rate); |
898 | 0 | int32_t loopEndTicks = NS_lround(actualLoopEnd * rate); |
899 | 0 | if (loopStartTicks < loopEndTicks) { |
900 | 0 | SendInt32ParameterToStream(LOOPSTART, loopStartTicks); |
901 | 0 | SendInt32ParameterToStream(LOOPEND, loopEndTicks); |
902 | 0 | SendInt32ParameterToStream(LOOP, 1); |
903 | 0 | } else { |
904 | 0 | // Be explicit about looping not happening if the offsets make |
905 | 0 | // looping impossible. |
906 | 0 | SendInt32ParameterToStream(LOOP, 0); |
907 | 0 | } |
908 | 0 | } else { |
909 | 0 | SendInt32ParameterToStream(LOOP, 0); |
910 | 0 | } |
911 | 0 | } |
912 | | |
913 | | } // namespace dom |
914 | | } // namespace mozilla |