/src/mozilla-central/dom/media/webaudio/PannerNode.cpp
Line | Count | Source (jump to first uncovered line) |
1 | | /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ |
2 | | /* vim:set ts=2 sw=2 sts=2 et cindent: */ |
3 | | /* This Source Code Form is subject to the terms of the Mozilla Public |
4 | | * License, v. 2.0. If a copy of the MPL was not distributed with this |
5 | | * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ |
6 | | |
7 | | #include "PannerNode.h" |
8 | | #include "AlignmentUtils.h" |
9 | | #include "AudioDestinationNode.h" |
10 | | #include "AudioNodeEngine.h" |
11 | | #include "AudioNodeStream.h" |
12 | | #include "AudioListener.h" |
13 | | #include "PanningUtils.h" |
14 | | #include "AudioBufferSourceNode.h" |
15 | | #include "PlayingRefChangeHandler.h" |
16 | | #include "blink/HRTFPanner.h" |
17 | | #include "blink/HRTFDatabaseLoader.h" |
18 | | #include "nsAutoPtr.h" |
19 | | |
20 | | using WebCore::HRTFDatabaseLoader; |
21 | | using WebCore::HRTFPanner; |
22 | | |
23 | | namespace mozilla { |
24 | | namespace dom { |
25 | | |
26 | | using namespace std; |
27 | | |
// Cycle-collection glue: in addition to what AudioNode already traverses,
// unlink/traverse the six AudioParam members so JS-visible reference cycles
// through the position/orientation params can be collected.
NS_IMPL_CYCLE_COLLECTION_CLASS(PannerNode)
NS_IMPL_CYCLE_COLLECTION_UNLINK_BEGIN_INHERITED(PannerNode, AudioNode)
NS_IMPL_CYCLE_COLLECTION_UNLINK(mPositionX, mPositionY, mPositionZ, mOrientationX, mOrientationY, mOrientationZ)
NS_IMPL_CYCLE_COLLECTION_UNLINK_END
NS_IMPL_CYCLE_COLLECTION_TRAVERSE_BEGIN_INHERITED(PannerNode, AudioNode)
NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mPositionX, mPositionY, mPositionZ, mOrientationX, mOrientationY, mOrientationZ)
NS_IMPL_CYCLE_COLLECTION_TRAVERSE_END

NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION(PannerNode)
NS_INTERFACE_MAP_END_INHERITING(AudioNode)

// Refcounting is forwarded to the AudioNode base class.
NS_IMPL_ADDREF_INHERITED(PannerNode, AudioNode)
NS_IMPL_RELEASE_INHERITED(PannerNode, AudioNode)
41 | | |
// Rendering-thread engine backing PannerNode.  It shadows the node's
// panning/distance parameters, samples the position/orientation AudioParam
// timelines, and implements both the equalpower and HRTF panning models.
// Parameter updates arrive via MediaStreamGraph messages (the Set*Parameter
// and RecvTimelineEvent overrides below).
class PannerNodeEngine final : public AudioNodeEngine
{
public:
  explicit PannerNodeEngine(AudioNode* aNode,
                            AudioDestinationNode* aDestination,
                            AudioListenerEngine* aListenerEngine)
    : AudioNodeEngine(aNode)
    , mDestination(aDestination->Stream())
    , mListenerEngine(aListenerEngine)
    // Please keep these default values consistent with PannerNode::PannerNode
    // below.
    , mPanningModelFunction(&PannerNodeEngine::EqualPowerPanningFunction)
    , mDistanceModelFunction(&PannerNodeEngine::InverseGainFunction)
    , mPositionX(0.)
    , mPositionY(0.)
    , mPositionZ(0.)
    , mOrientationX(1.)
    , mOrientationY(0.)
    , mOrientationZ(0.)
    , mRefDistance(1.)
    , mMaxDistance(10000.)
    , mRolloffFactor(1.)
    , mConeInnerAngle(360.)
    , mConeOuterAngle(360.)
    , mConeOuterGain(0.)
    // INT_MIN is the sentinel meaning "no HRTF tail-time self-reference is
    // currently held"; see ProcessBlock below.
    , mLeftOverData(INT_MIN)
  {
  }

  // Routes an AudioParam automation event (after conversion to graph ticks)
  // to the timeline matching aIndex.
  void RecvTimelineEvent(uint32_t aIndex, AudioTimelineEvent& aEvent) override
  {
    MOZ_ASSERT(mDestination);
    WebAudioUtils::ConvertAudioTimelineEventToTicks(aEvent,
                                                    mDestination);
    switch (aIndex) {
    case PannerNode::POSITIONX:
      mPositionX.InsertEvent<int64_t>(aEvent);
      break;
    case PannerNode::POSITIONY:
      mPositionY.InsertEvent<int64_t>(aEvent);
      break;
    case PannerNode::POSITIONZ:
      mPositionZ.InsertEvent<int64_t>(aEvent);
      break;
    case PannerNode::ORIENTATIONX:
      mOrientationX.InsertEvent<int64_t>(aEvent);
      break;
    case PannerNode::ORIENTATIONY:
      mOrientationY.InsertEvent<int64_t>(aEvent);
      break;
    case PannerNode::ORIENTATIONZ:
      mOrientationZ.InsertEvent<int64_t>(aEvent);
      break;
    default:
      NS_ERROR("Bad PannerNode TimelineParameter");
    }
  }

  // Lazily creates the HRTF panner.  Must be called on the main thread
  // (asserted) because the HRTF database loader has to be fetched there;
  // idempotent if the panner already exists.
  void CreateHRTFPanner()
  {
    MOZ_ASSERT(NS_IsMainThread());
    if (mHRTFPanner) {
      return;
    }
    // HRTFDatabaseLoader needs to be fetched on the main thread.
    RefPtr<HRTFDatabaseLoader> loader =
      HRTFDatabaseLoader::createAndLoadAsynchronouslyIfNecessary(NodeMainThread()->Context()->SampleRate());
    mHRTFPanner = new HRTFPanner(NodeMainThread()->Context()->SampleRate(), loader.forget());
  }

  // Switches the panning or distance model by swapping the corresponding
  // member-function pointer.
  void SetInt32Parameter(uint32_t aIndex, int32_t aParam) override
  {
    switch (aIndex) {
    case PannerNode::PANNING_MODEL:
      switch (PanningModelType(aParam)) {
        case PanningModelType::Equalpower:
          mPanningModelFunction = &PannerNodeEngine::EqualPowerPanningFunction;
          break;
        case PanningModelType::HRTF:
          mPanningModelFunction = &PannerNodeEngine::HRTFPanningFunction;
          break;
        default:
          MOZ_ASSERT_UNREACHABLE("We should never see alternate names here");
          break;
      }
      break;
    case PannerNode::DISTANCE_MODEL:
      switch (DistanceModelType(aParam)) {
        case DistanceModelType::Inverse:
          mDistanceModelFunction = &PannerNodeEngine::InverseGainFunction;
          break;
        case DistanceModelType::Linear:
          mDistanceModelFunction = &PannerNodeEngine::LinearGainFunction;
          break;
        case DistanceModelType::Exponential:
          mDistanceModelFunction = &PannerNodeEngine::ExponentialGainFunction;
          break;
        default:
          MOZ_ASSERT_UNREACHABLE("We should never see alternate names here");
          break;
      }
      break;
    default:
      NS_ERROR("Bad PannerNodeEngine Int32Parameter");
    }
  }
  // Applies a whole-vector position/orientation update by overwriting the
  // three per-axis timelines with simple values.
  void SetThreeDPointParameter(uint32_t aIndex, const ThreeDPoint& aParam) override
  {
    switch (aIndex) {
    case PannerNode::POSITION:
      mPositionX.SetValue(aParam.x);
      mPositionY.SetValue(aParam.y);
      mPositionZ.SetValue(aParam.z);
      break;
    case PannerNode::ORIENTATION:
      mOrientationX.SetValue(aParam.x);
      mOrientationY.SetValue(aParam.y);
      mOrientationZ.SetValue(aParam.z);
      break;
    default:
      NS_ERROR("Bad PannerNodeEngine ThreeDPointParameter");
    }
  }
  // Mirrors the scalar distance/cone attributes set on the DOM node.
  void SetDoubleParameter(uint32_t aIndex, double aParam) override
  {
    switch (aIndex) {
    case PannerNode::REF_DISTANCE: mRefDistance = aParam; break;
    case PannerNode::MAX_DISTANCE: mMaxDistance = aParam; break;
    case PannerNode::ROLLOFF_FACTOR: mRolloffFactor = aParam; break;
    case PannerNode::CONE_INNER_ANGLE: mConeInnerAngle = aParam; break;
    case PannerNode::CONE_OUTER_ANGLE: mConeOuterAngle = aParam; break;
    case PannerNode::CONE_OUTER_GAIN: mConeOuterGain = aParam; break;
    default:
      NS_ERROR("Bad PannerNodeEngine DoubleParameter");
    }
  }

  // Processes one block.  Manages the HRTF tail: while the input is null,
  // mLeftOverData counts down the remaining tail frames; when the tail is
  // exhausted (or the model changed away from HRTF) the main-thread
  // self-reference taken below is released and the stream can go inactive.
  void ProcessBlock(AudioNodeStream* aStream,
                    GraphTime aFrom,
                    const AudioBlock& aInput,
                    AudioBlock* aOutput,
                    bool *aFinished) override
  {
    if (aInput.IsNull()) {
      // mLeftOverData != INT_MIN means that the panning model was HRTF and a
      // tail-time reference was added.  Even if the model is now equalpower,
      // the reference will need to be removed.
      if (mLeftOverData > 0 &&
          mPanningModelFunction == &PannerNodeEngine::HRTFPanningFunction) {
        mLeftOverData -= WEBAUDIO_BLOCK_SIZE;
      } else {
        if (mLeftOverData != INT_MIN) {
          mLeftOverData = INT_MIN;
          aStream->ScheduleCheckForInactive();
          mHRTFPanner->reset();

          // Drop the self-reference taken when the tail started; the release
          // must happen on the main thread, after stream state has updated.
          RefPtr<PlayingRefChangeHandler> refchanged =
            new PlayingRefChangeHandler(aStream, PlayingRefChangeHandler::RELEASE);
          aStream->Graph()->DispatchToMainThreadAfterStreamStateUpdate(
            refchanged.forget());
        }
        aOutput->SetNull(WEBAUDIO_BLOCK_SIZE);
        return;
      }
    } else if (mPanningModelFunction == &PannerNodeEngine::HRTFPanningFunction) {
      if (mLeftOverData == INT_MIN) {
        // Entering HRTF processing: keep the stream alive for the tail.
        RefPtr<PlayingRefChangeHandler> refchanged =
          new PlayingRefChangeHandler(aStream, PlayingRefChangeHandler::ADDREF);
        aStream->Graph()->DispatchToMainThreadAfterStreamStateUpdate(
          refchanged.forget());
      }
      mLeftOverData = mHRTFPanner->maxTailFrames();
    }

    StreamTime tick = mDestination->GraphTimeToStreamTime(aFrom);
    (this->*mPanningModelFunction)(aInput, aOutput, tick);
  }

  // Active while an HRTF tail-time reference is held (see ProcessBlock).
  bool IsActive() const override
  {
    return mLeftOverData != INT_MIN;
  }

  void ComputeAzimuthAndElevation(const ThreeDPoint& position, float& aAzimuth, float& aElevation);
  float ComputeConeGain(const ThreeDPoint& position, const ThreeDPoint& orientation);
  // Compute how much the distance contributes to the gain reduction.
  double ComputeDistanceGain(const ThreeDPoint& position);

  void EqualPowerPanningFunction(const AudioBlock& aInput, AudioBlock* aOutput, StreamTime tick);
  void HRTFPanningFunction(const AudioBlock& aInput, AudioBlock* aOutput, StreamTime tick);

  float LinearGainFunction(double aDistance);
  float InverseGainFunction(double aDistance);
  float ExponentialGainFunction(double aDistance);

  ThreeDPoint ConvertAudioParamTimelineTo3DP(AudioParamTimeline& aX, AudioParamTimeline& aY, AudioParamTimeline& aZ, StreamTime& tick);

  size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const override
  {
    size_t amount = AudioNodeEngine::SizeOfExcludingThis(aMallocSizeOf);
    if (mHRTFPanner) {
      amount += mHRTFPanner->sizeOfIncludingThis(aMallocSizeOf);
    }

    return amount;
  }

  size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override
  {
    return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf);
  }

  RefPtr<AudioNodeStream> mDestination;
  // This member is set on the main thread, but is not accessed on the rendering
  // thread until mPanningModelFunction has changed, and this happens strictly
  // later, via a MediaStreamGraph ControlMessage.
  nsAutoPtr<HRTFPanner> mHRTFPanner;
  // Rendering-thread view of the shared AudioListener state.
  RefPtr<AudioListenerEngine> mListenerEngine;
  typedef void (PannerNodeEngine::*PanningModelFunction)(const AudioBlock& aInput, AudioBlock* aOutput, StreamTime tick);
  PanningModelFunction mPanningModelFunction;
  typedef float (PannerNodeEngine::*DistanceModelFunction)(double aDistance);
  DistanceModelFunction mDistanceModelFunction;
  // Per-axis automation timelines for the source position and orientation.
  AudioParamTimeline mPositionX;
  AudioParamTimeline mPositionY;
  AudioParamTimeline mPositionZ;
  AudioParamTimeline mOrientationX;
  AudioParamTimeline mOrientationY;
  AudioParamTimeline mOrientationZ;
  double mRefDistance;
  double mMaxDistance;
  double mRolloffFactor;
  double mConeInnerAngle;
  double mConeOuterAngle;
  double mConeOuterGain;
  // Remaining HRTF tail frames, or INT_MIN when no tail reference is held.
  int mLeftOverData;
};
278 | | |
// Constructs a PannerNode with the spec defaults: stereo clamped-max output,
// equalpower panning, inverse distance model, source at the origin facing +x,
// and an omnidirectional (360/360 degree) cone.
PannerNode::PannerNode(AudioContext* aContext)
  : AudioNode(aContext,
              2,
              ChannelCountMode::Clamped_max,
              ChannelInterpretation::Speakers)
  // Please keep these default values consistent with PannerNodeEngine::PannerNodeEngine above.
  , mPanningModel(PanningModelType::Equalpower)
  , mDistanceModel(DistanceModelType::Inverse)
  , mPositionX(new AudioParam(this, PannerNode::POSITIONX, this->NodeType(), 0.f))
  , mPositionY(new AudioParam(this, PannerNode::POSITIONY, this->NodeType(), 0.f))
  , mPositionZ(new AudioParam(this, PannerNode::POSITIONZ, this->NodeType(), 0.f))
  , mOrientationX(new AudioParam(this, PannerNode::ORIENTATIONX, this->NodeType(), 1.0f))
  , mOrientationY(new AudioParam(this, PannerNode::ORIENTATIONY, this->NodeType(), 0.f))
  , mOrientationZ(new AudioParam(this, PannerNode::ORIENTATIONZ, this->NodeType(), 0.f))
  , mRefDistance(1.)
  , mMaxDistance(10000.)
  , mRolloffFactor(1.)
  , mConeInnerAngle(360.)
  , mConeOuterAngle(360.)
  , mConeOuterGain(0.)
{
  // Create the rendering-thread counterpart; the engine reads the listener
  // state through the listener engine handed over here.
  mStream = AudioNodeStream::Create(
    aContext,
    new PannerNodeEngine(
      this, aContext->Destination(), aContext->Listener()->Engine()),
    AudioNodeStream::NO_STREAM_FLAGS,
    aContext->Graph());
}
307 | | |
308 | | /* static */ already_AddRefed<PannerNode> |
309 | | PannerNode::Create(AudioContext& aAudioContext, |
310 | | const PannerOptions& aOptions, |
311 | | ErrorResult& aRv) |
312 | 0 | { |
313 | 0 | if (aAudioContext.CheckClosed(aRv)) { |
314 | 0 | return nullptr; |
315 | 0 | } |
316 | 0 | |
317 | 0 | RefPtr<PannerNode> audioNode = new PannerNode(&aAudioContext); |
318 | 0 |
|
319 | 0 | audioNode->Initialize(aOptions, aRv); |
320 | 0 | if (NS_WARN_IF(aRv.Failed())) { |
321 | 0 | return nullptr; |
322 | 0 | } |
323 | 0 | |
324 | 0 | audioNode->SetPanningModel(aOptions.mPanningModel); |
325 | 0 | audioNode->SetDistanceModel(aOptions.mDistanceModel); |
326 | 0 | audioNode->SetPosition(aOptions.mPositionX, aOptions.mPositionY, |
327 | 0 | aOptions.mPositionZ); |
328 | 0 | audioNode->SetOrientation(aOptions.mOrientationX, aOptions.mOrientationY, |
329 | 0 | aOptions.mOrientationZ); |
330 | 0 | audioNode->SetRefDistance(aOptions.mRefDistance, aRv); |
331 | 0 | if (NS_WARN_IF(aRv.Failed())) { |
332 | 0 | return nullptr; |
333 | 0 | } |
334 | 0 | audioNode->SetMaxDistance(aOptions.mMaxDistance, aRv); |
335 | 0 | if (NS_WARN_IF(aRv.Failed())) { |
336 | 0 | return nullptr; |
337 | 0 | } |
338 | 0 | audioNode->SetRolloffFactor(aOptions.mRolloffFactor, aRv); |
339 | 0 | if (NS_WARN_IF(aRv.Failed())) { |
340 | 0 | return nullptr; |
341 | 0 | } |
342 | 0 | audioNode->SetConeInnerAngle(aOptions.mConeInnerAngle); |
343 | 0 | audioNode->SetConeOuterAngle(aOptions.mConeOuterAngle); |
344 | 0 | audioNode->SetConeOuterGain(aOptions.mConeOuterGain, aRv); |
345 | 0 | if (NS_WARN_IF(aRv.Failed())) { |
346 | 0 | return nullptr; |
347 | 0 | } |
348 | 0 | |
349 | 0 | return audioNode.forget(); |
350 | 0 | } |
351 | | |
352 | | void PannerNode::SetPanningModel(PanningModelType aPanningModel) |
353 | 0 | { |
354 | 0 | mPanningModel = aPanningModel; |
355 | 0 | if (mPanningModel == PanningModelType::HRTF) { |
356 | 0 | // We can set the engine's `mHRTFPanner` member here from the main thread, |
357 | 0 | // because the engine will not touch it from the MediaStreamGraph |
358 | 0 | // thread until the PANNING_MODEL message sent below is received. |
359 | 0 | static_cast<PannerNodeEngine*>(mStream->Engine())->CreateHRTFPanner(); |
360 | 0 | } |
361 | 0 | SendInt32ParameterToStream(PANNING_MODEL, int32_t(mPanningModel)); |
362 | 0 | } |
363 | | |
364 | | size_t |
365 | | PannerNode::SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const |
366 | 0 | { |
367 | 0 | return AudioNode::SizeOfExcludingThis(aMallocSizeOf); |
368 | 0 | } |
369 | | |
370 | | size_t |
371 | | PannerNode::SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const |
372 | 0 | { |
373 | 0 | return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf); |
374 | 0 | } |
375 | | |
// Creates the JS reflector for this node via the generated WebIDL binding.
JSObject*
PannerNode::WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto)
{
  return PannerNode_Binding::Wrap(aCx, this, aGivenProto);
}
381 | | |
382 | | // Those three functions are described in the spec. |
383 | | float |
384 | | PannerNodeEngine::LinearGainFunction(double aDistance) |
385 | 0 | { |
386 | 0 | return 1 - mRolloffFactor * (std::max(std::min(aDistance, mMaxDistance), mRefDistance) - mRefDistance) / (mMaxDistance - mRefDistance); |
387 | 0 | } |
388 | | |
389 | | float |
390 | | PannerNodeEngine::InverseGainFunction(double aDistance) |
391 | 0 | { |
392 | 0 | return mRefDistance / (mRefDistance + mRolloffFactor * (std::max(aDistance, mRefDistance) - mRefDistance)); |
393 | 0 | } |
394 | | |
395 | | float |
396 | | PannerNodeEngine::ExponentialGainFunction(double aDistance) |
397 | 0 | { |
398 | 0 | return pow(std::max(aDistance, mRefDistance) / mRefDistance, -mRolloffFactor); |
399 | 0 | } |
400 | | |
401 | | void |
402 | | PannerNodeEngine::HRTFPanningFunction(const AudioBlock& aInput, |
403 | | AudioBlock* aOutput, |
404 | | StreamTime tick) |
405 | 0 | { |
406 | 0 | // The output of this node is always stereo, no matter what the inputs are. |
407 | 0 | aOutput->AllocateChannels(2); |
408 | 0 |
|
409 | 0 | float azimuth, elevation; |
410 | 0 |
|
411 | 0 | ThreeDPoint position = ConvertAudioParamTimelineTo3DP(mPositionX, mPositionY, mPositionZ, tick); |
412 | 0 | ThreeDPoint orientation = ConvertAudioParamTimelineTo3DP(mOrientationX, mOrientationY, mOrientationZ, tick); |
413 | 0 | if (!orientation.IsZero()) { |
414 | 0 | orientation.Normalize(); |
415 | 0 | } |
416 | 0 | ComputeAzimuthAndElevation(position, azimuth, elevation); |
417 | 0 |
|
418 | 0 | AudioBlock input = aInput; |
419 | 0 | // Gain is applied before the delay and convolution of the HRTF. |
420 | 0 | input.mVolume *= ComputeConeGain(position, orientation) * ComputeDistanceGain(position); |
421 | 0 |
|
422 | 0 | mHRTFPanner->pan(azimuth, elevation, &input, aOutput); |
423 | 0 | } |
424 | | |
425 | | ThreeDPoint |
426 | | PannerNodeEngine::ConvertAudioParamTimelineTo3DP(AudioParamTimeline& aX, AudioParamTimeline& aY, AudioParamTimeline& aZ, StreamTime &tick) |
427 | 0 | { |
428 | 0 | return ThreeDPoint(aX.GetValueAtTime(tick), |
429 | 0 | aY.GetValueAtTime(tick), |
430 | 0 | aZ.GetValueAtTime(tick)); |
431 | 0 | } |
432 | | |
433 | | void |
434 | | PannerNodeEngine::EqualPowerPanningFunction(const AudioBlock& aInput, |
435 | | AudioBlock* aOutput, |
436 | | StreamTime tick) |
437 | 0 | { |
438 | 0 | float azimuth, elevation, gainL, gainR, normalizedAzimuth, distanceGain, coneGain; |
439 | 0 | int inputChannels = aInput.ChannelCount(); |
440 | 0 |
|
441 | 0 | // Optimize the case where the position and orientation is constant for this |
442 | 0 | // processing block: we can just apply a constant gain on the left and right |
443 | 0 | // channel |
444 | 0 | if (mPositionX.HasSimpleValue() && |
445 | 0 | mPositionY.HasSimpleValue() && |
446 | 0 | mPositionZ.HasSimpleValue() && |
447 | 0 | mOrientationX.HasSimpleValue() && |
448 | 0 | mOrientationY.HasSimpleValue() && |
449 | 0 | mOrientationZ.HasSimpleValue()) { |
450 | 0 |
|
451 | 0 | ThreeDPoint position = ConvertAudioParamTimelineTo3DP(mPositionX, mPositionY, mPositionZ, tick); |
452 | 0 | ThreeDPoint orientation = ConvertAudioParamTimelineTo3DP(mOrientationX, mOrientationY, mOrientationZ, tick); |
453 | 0 | if (!orientation.IsZero()) { |
454 | 0 | orientation.Normalize(); |
455 | 0 | } |
456 | 0 |
|
457 | 0 | // For a stereo source, when both the listener and the panner are in |
458 | 0 | // the same spot, and no cone gain is specified, this node is noop. |
459 | 0 | if (inputChannels == 2 && mListenerEngine->Position() == position && |
460 | 0 | mConeInnerAngle == 360 && mConeOuterAngle == 360) { |
461 | 0 | *aOutput = aInput; |
462 | 0 | return; |
463 | 0 | } |
464 | 0 | |
465 | 0 | // The output of this node is always stereo, no matter what the inputs are. |
466 | 0 | aOutput->AllocateChannels(2); |
467 | 0 |
|
468 | 0 | ComputeAzimuthAndElevation(position, azimuth, elevation); |
469 | 0 | coneGain = ComputeConeGain(position, orientation); |
470 | 0 |
|
471 | 0 | // The following algorithm is described in the spec. |
472 | 0 | // Clamp azimuth in the [-90, 90] range. |
473 | 0 | azimuth = min(180.f, max(-180.f, azimuth)); |
474 | 0 |
|
475 | 0 | // Wrap around |
476 | 0 | if (azimuth < -90.f) { |
477 | 0 | azimuth = -180.f - azimuth; |
478 | 0 | } else if (azimuth > 90) { |
479 | 0 | azimuth = 180.f - azimuth; |
480 | 0 | } |
481 | 0 |
|
482 | 0 | // Normalize the value in the [0, 1] range. |
483 | 0 | if (inputChannels == 1) { |
484 | 0 | normalizedAzimuth = (azimuth + 90.f) / 180.f; |
485 | 0 | } else { |
486 | 0 | if (azimuth <= 0) { |
487 | 0 | normalizedAzimuth = (azimuth + 90.f) / 90.f; |
488 | 0 | } else { |
489 | 0 | normalizedAzimuth = azimuth / 90.f; |
490 | 0 | } |
491 | 0 | } |
492 | 0 |
|
493 | 0 | distanceGain = ComputeDistanceGain(position); |
494 | 0 |
|
495 | 0 | // Actually compute the left and right gain. |
496 | 0 | gainL = cos(0.5 * M_PI * normalizedAzimuth); |
497 | 0 | gainR = sin(0.5 * M_PI * normalizedAzimuth); |
498 | 0 |
|
499 | 0 | // Compute the output. |
500 | 0 | ApplyStereoPanning(aInput, aOutput, gainL, gainR, azimuth <= 0); |
501 | 0 |
|
502 | 0 | aOutput->mVolume = aInput.mVolume * distanceGain * coneGain; |
503 | 0 | } else { |
504 | 0 | float positionX[WEBAUDIO_BLOCK_SIZE]; |
505 | 0 | float positionY[WEBAUDIO_BLOCK_SIZE]; |
506 | 0 | float positionZ[WEBAUDIO_BLOCK_SIZE]; |
507 | 0 | float orientationX[WEBAUDIO_BLOCK_SIZE]; |
508 | 0 | float orientationY[WEBAUDIO_BLOCK_SIZE]; |
509 | 0 | float orientationZ[WEBAUDIO_BLOCK_SIZE]; |
510 | 0 |
|
511 | 0 | // The output of this node is always stereo, no matter what the inputs are. |
512 | 0 | aOutput->AllocateChannels(2); |
513 | 0 |
|
514 | 0 | if (!mPositionX.HasSimpleValue()) { |
515 | 0 | mPositionX.GetValuesAtTime(tick, positionX, WEBAUDIO_BLOCK_SIZE); |
516 | 0 | } else { |
517 | 0 | positionX[0] = mPositionX.GetValueAtTime(tick); |
518 | 0 | } |
519 | 0 | if (!mPositionY.HasSimpleValue()) { |
520 | 0 | mPositionY.GetValuesAtTime(tick, positionY, WEBAUDIO_BLOCK_SIZE); |
521 | 0 | } else { |
522 | 0 | positionY[0] = mPositionY.GetValueAtTime(tick); |
523 | 0 | } |
524 | 0 | if (!mPositionZ.HasSimpleValue()) { |
525 | 0 | mPositionZ.GetValuesAtTime(tick, positionZ, WEBAUDIO_BLOCK_SIZE); |
526 | 0 | } else { |
527 | 0 | positionZ[0] = mPositionZ.GetValueAtTime(tick); |
528 | 0 | } |
529 | 0 | if (!mOrientationX.HasSimpleValue()) { |
530 | 0 | mOrientationX.GetValuesAtTime(tick, orientationX, WEBAUDIO_BLOCK_SIZE); |
531 | 0 | } else { |
532 | 0 | orientationX[0] = mOrientationX.GetValueAtTime(tick); |
533 | 0 | } |
534 | 0 | if (!mOrientationY.HasSimpleValue()) { |
535 | 0 | mOrientationY.GetValuesAtTime(tick, orientationY, WEBAUDIO_BLOCK_SIZE); |
536 | 0 | } else { |
537 | 0 | orientationY[0] = mOrientationY.GetValueAtTime(tick); |
538 | 0 | } |
539 | 0 | if (!mOrientationZ.HasSimpleValue()) { |
540 | 0 | mOrientationZ.GetValuesAtTime(tick, orientationZ, WEBAUDIO_BLOCK_SIZE); |
541 | 0 | } else { |
542 | 0 | orientationZ[0] = mOrientationZ.GetValueAtTime(tick); |
543 | 0 | } |
544 | 0 |
|
545 | 0 | float buffer[3*WEBAUDIO_BLOCK_SIZE + 4]; |
546 | 0 | bool onLeft[WEBAUDIO_BLOCK_SIZE]; |
547 | 0 |
|
548 | 0 | float* alignedPanningL = ALIGNED16(buffer); |
549 | 0 | float* alignedPanningR = alignedPanningL + WEBAUDIO_BLOCK_SIZE; |
550 | 0 | float* alignedGain = alignedPanningR + WEBAUDIO_BLOCK_SIZE; |
551 | 0 | ASSERT_ALIGNED16(alignedPanningL); |
552 | 0 | ASSERT_ALIGNED16(alignedPanningR); |
553 | 0 | ASSERT_ALIGNED16(alignedGain); |
554 | 0 |
|
555 | 0 | for (size_t counter = 0; counter < WEBAUDIO_BLOCK_SIZE; ++counter) { |
556 | 0 | ThreeDPoint position(mPositionX.HasSimpleValue() ? positionX[0] : positionX[counter], |
557 | 0 | mPositionY.HasSimpleValue() ? positionY[0] : positionY[counter], |
558 | 0 | mPositionZ.HasSimpleValue() ? positionZ[0] : positionZ[counter]); |
559 | 0 | ThreeDPoint orientation(mOrientationX.HasSimpleValue() ? orientationX[0] : orientationX[counter], |
560 | 0 | mOrientationY.HasSimpleValue() ? orientationY[0] : orientationY[counter], |
561 | 0 | mOrientationZ.HasSimpleValue() ? orientationZ[0] : orientationZ[counter]); |
562 | 0 | if (!orientation.IsZero()) { |
563 | 0 | orientation.Normalize(); |
564 | 0 | } |
565 | 0 |
|
566 | 0 | ComputeAzimuthAndElevation(position, azimuth, elevation); |
567 | 0 | coneGain = ComputeConeGain(position, orientation); |
568 | 0 |
|
569 | 0 | // The following algorithm is described in the spec. |
570 | 0 | // Clamp azimuth in the [-90, 90] range. |
571 | 0 | azimuth = min(180.f, max(-180.f, azimuth)); |
572 | 0 |
|
573 | 0 | // Wrap around |
574 | 0 | if (azimuth < -90.f) { |
575 | 0 | azimuth = -180.f - azimuth; |
576 | 0 | } else if (azimuth > 90) { |
577 | 0 | azimuth = 180.f - azimuth; |
578 | 0 | } |
579 | 0 |
|
580 | 0 | // Normalize the value in the [0, 1] range. |
581 | 0 | if (inputChannels == 1) { |
582 | 0 | normalizedAzimuth = (azimuth + 90.f) / 180.f; |
583 | 0 | } else { |
584 | 0 | if (azimuth <= 0) { |
585 | 0 | normalizedAzimuth = (azimuth + 90.f) / 90.f; |
586 | 0 | } else { |
587 | 0 | normalizedAzimuth = azimuth / 90.f; |
588 | 0 | } |
589 | 0 | } |
590 | 0 |
|
591 | 0 | distanceGain = ComputeDistanceGain(position); |
592 | 0 |
|
593 | 0 | // Actually compute the left and right gain. |
594 | 0 | float gainL = cos(0.5 * M_PI * normalizedAzimuth); |
595 | 0 | float gainR = sin(0.5 * M_PI * normalizedAzimuth); |
596 | 0 |
|
597 | 0 |
|
598 | 0 | alignedPanningL[counter] = gainL; |
599 | 0 | alignedPanningR[counter] = gainR; |
600 | 0 | alignedGain[counter] = aInput.mVolume * distanceGain * coneGain; |
601 | 0 | onLeft[counter] = azimuth <= 0; |
602 | 0 | } |
603 | 0 |
|
604 | 0 | // Apply the panning to the output buffer |
605 | 0 | ApplyStereoPanning(aInput, aOutput, alignedPanningL, alignedPanningR, onLeft); |
606 | 0 |
|
607 | 0 | // Apply the input volume, cone and distance gain to the output buffer. |
608 | 0 | float* outputL = aOutput->ChannelFloatsForWrite(0); |
609 | 0 | float* outputR = aOutput->ChannelFloatsForWrite(1); |
610 | 0 | AudioBlockInPlaceScale(outputL, alignedGain); |
611 | 0 | AudioBlockInPlaceScale(outputR, alignedGain); |
612 | 0 | } |
613 | 0 | } |
614 | | |
// This algorithm is specified in the webaudio spec.
// Computes the source's azimuth (degrees, relative to the listener's front
// vector) and elevation (degrees above the listener's horizontal plane)
// from the listener-engine state.  Both outputs are 0 when the source sits
// exactly at the listener's position.
void
PannerNodeEngine::ComputeAzimuthAndElevation(const ThreeDPoint& position, float& aAzimuth, float& aElevation)
{
  ThreeDPoint sourceListener = position - mListenerEngine->Position();
  if (sourceListener.IsZero()) {
    aAzimuth = 0.0;
    aElevation = 0.0;
    return;
  }

  sourceListener.Normalize();

  // Project the source-listener vector on the x-z plane.
  const ThreeDPoint& listenerFront = mListenerEngine->FrontVector();
  const ThreeDPoint& listenerRight = mListenerEngine->RightVector();
  ThreeDPoint up = listenerRight.CrossProduct(listenerFront);

  // Elevation: angle between the source direction and the listener's
  // horizontal plane, folded into [-90, 90].
  double upProjection = sourceListener.DotProduct(up);
  aElevation = 90 - 180 * acos(upProjection) / M_PI;

  if (aElevation > 90) {
    aElevation = 180 - aElevation;
  } else if (aElevation < -90) {
    aElevation = -180 - aElevation;
  }

  ThreeDPoint projectedSource = sourceListener - up * upProjection;
  if (projectedSource.IsZero()) {
    // source - listener direction is up or down.
    aAzimuth = 0.0;
    return;
  }
  projectedSource.Normalize();

  // Actually compute the angle, and convert to degrees
  double projection = projectedSource.DotProduct(listenerRight);
  aAzimuth = 180 * acos(projection) / M_PI;

  // Compute whether the source is in front or behind the listener.
  double frontBack = projectedSource.DotProduct(listenerFront);
  if (frontBack < 0) {
    aAzimuth = 360 - aAzimuth;
  }
  // Rotate the azimuth so it is relative to the listener front vector instead
  // of the right vector.
  if ((aAzimuth >= 0) && (aAzimuth <= 270)) {
    aAzimuth = 90 - aAzimuth;
  } else {
    aAzimuth = 450 - aAzimuth;
  }
}
667 | | |
668 | | // This algorithm is described in the WebAudio spec. |
669 | | float |
670 | | PannerNodeEngine::ComputeConeGain(const ThreeDPoint& position, |
671 | | const ThreeDPoint& orientation) |
672 | 0 | { |
673 | 0 | // Omnidirectional source |
674 | 0 | if (orientation.IsZero() || ((mConeInnerAngle == 360) && (mConeOuterAngle == 360))) { |
675 | 0 | return 1; |
676 | 0 | } |
677 | 0 | |
678 | 0 | // Normalized source-listener vector |
679 | 0 | ThreeDPoint sourceToListener = mListenerEngine->Position() - position; |
680 | 0 | sourceToListener.Normalize(); |
681 | 0 |
|
682 | 0 | // Angle between the source orientation vector and the source-listener vector |
683 | 0 | double dotProduct = sourceToListener.DotProduct(orientation); |
684 | 0 | double angle = 180 * acos(dotProduct) / M_PI; |
685 | 0 | double absAngle = fabs(angle); |
686 | 0 |
|
687 | 0 | // Divide by 2 here since API is entire angle (not half-angle) |
688 | 0 | double absInnerAngle = fabs(mConeInnerAngle) / 2; |
689 | 0 | double absOuterAngle = fabs(mConeOuterAngle) / 2; |
690 | 0 | double gain = 1; |
691 | 0 |
|
692 | 0 | if (absAngle <= absInnerAngle) { |
693 | 0 | // No attenuation |
694 | 0 | gain = 1; |
695 | 0 | } else if (absAngle >= absOuterAngle) { |
696 | 0 | // Max attenuation |
697 | 0 | gain = mConeOuterGain; |
698 | 0 | } else { |
699 | 0 | // Between inner and outer cones |
700 | 0 | // inner -> outer, x goes from 0 -> 1 |
701 | 0 | double x = (absAngle - absInnerAngle) / (absOuterAngle - absInnerAngle); |
702 | 0 | gain = (1 - x) + mConeOuterGain * x; |
703 | 0 | } |
704 | 0 |
|
705 | 0 | return gain; |
706 | 0 | } |
707 | | |
708 | | double |
709 | | PannerNodeEngine::ComputeDistanceGain(const ThreeDPoint& position) |
710 | 0 | { |
711 | 0 | ThreeDPoint distanceVec = position - mListenerEngine->Position(); |
712 | 0 | float distance = sqrt(distanceVec.DotProduct(distanceVec)); |
713 | 0 | return std::max(0.0f, (this->*mDistanceModelFunction)(distance)); |
714 | 0 | } |
715 | | |
716 | | } // namespace dom |
717 | | } // namespace mozilla |