/src/mozilla-central/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
Line | Count | Source |
1 | | /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-*/ |
2 | | /* This Source Code Form is subject to the terms of the Mozilla Public |
3 | | * License, v. 2.0. If a copy of the MPL was not distributed with this file, |
4 | | * You can obtain one at http://mozilla.org/MPL/2.0/. */ |
5 | | |
6 | | #include "MediaEngineWebRTC.h" |
7 | | |
8 | | #include <stdio.h> |
9 | | #include <algorithm> |
10 | | |
11 | | #include "AllocationHandle.h" |
12 | | #include "AudioConverter.h" |
13 | | #include "MediaManager.h" |
14 | | #include "MediaStreamGraphImpl.h" |
15 | | #include "MediaTrackConstraints.h" |
16 | | #include "mozilla/Assertions.h" |
17 | | #include "mozilla/ErrorNames.h" |
18 | | #include "mtransport/runnable_utils.h" |
19 | | #include "nsAutoPtr.h" |
20 | | #include "Tracing.h" |
21 | | |
22 | | // scoped_ptr.h uses FF |
23 | | #ifdef FF |
24 | | #undef FF |
25 | | #endif |
26 | | #include "webrtc/voice_engine/voice_engine_defines.h" |
27 | | #include "webrtc/modules/audio_processing/include/audio_processing.h" |
28 | | #include "webrtc/common_audio/include/audio_util.h" |
29 | | |
30 | | using namespace webrtc; |
31 | | |
32 | | // These are restrictions from the webrtc.org code |
33 | 0 | #define MAX_CHANNELS 2 |
34 | | #define MAX_SAMPLING_FREQ 48000 // Hz - multiple of 100 |
35 | | |
36 | | #define MAX_AEC_FIFO_DEPTH 200 // ms - multiple of 10 |
37 | | static_assert(!(MAX_AEC_FIFO_DEPTH % 10), "Invalid MAX_AEC_FIFO_DEPTH"); |
38 | | |
39 | | namespace mozilla { |
40 | | |
41 | | #ifdef LOG |
42 | | #undef LOG |
43 | | #endif |
44 | | |
45 | | LogModule* GetMediaManagerLog(); |
46 | 0 | #define LOG(msg) MOZ_LOG(GetMediaManagerLog(), mozilla::LogLevel::Debug, msg) |
47 | 0 | #define LOG_FRAMES(msg) MOZ_LOG(GetMediaManagerLog(), mozilla::LogLevel::Verbose, msg) |
48 | | |
49 | 0 | LogModule* AudioLogModule() { |
50 | 0 | static mozilla::LazyLogModule log("AudioLatency"); |
51 | 0 | return static_cast<LogModule*>(log); |
52 | 0 | } |
53 | | |
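 | | // The listener below is a thin forwarder: the MSG invokes these callbacks |
 | | // on the graph driver thread, and each one is relayed to the microphone |
 | | // source until Disconnect() drops the reference. |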
54 | | void |
55 | | WebRTCAudioDataListener::NotifyOutputData(MediaStreamGraphImpl* aGraph, |
56 | | AudioDataValue* aBuffer, |
57 | | size_t aFrames, |
58 | | TrackRate aRate, |
59 | | uint32_t aChannels) |
60 | 0 | { |
61 | 0 | MOZ_ASSERT(aGraph->CurrentDriver()->OnThread()); |
62 | 0 | if (mAudioSource) { |
63 | 0 | mAudioSource->NotifyOutputData(aGraph, aBuffer, aFrames, aRate, aChannels); |
64 | 0 | } |
65 | 0 | } |
66 | | |
67 | | void |
68 | | WebRTCAudioDataListener::NotifyInputData(MediaStreamGraphImpl* aGraph, |
69 | | const AudioDataValue* aBuffer, |
70 | | size_t aFrames, |
71 | | TrackRate aRate, |
72 | | uint32_t aChannels) |
73 | 0 | { |
74 | 0 | MOZ_ASSERT(aGraph->CurrentDriver()->OnThread()); |
75 | 0 | if (mAudioSource) { |
76 | 0 | mAudioSource->NotifyInputData(aGraph, aBuffer, aFrames, aRate, aChannels); |
77 | 0 | } |
78 | 0 | } |
79 | | |
80 | | void |
81 | | WebRTCAudioDataListener::DeviceChanged(MediaStreamGraphImpl* aGraph) |
82 | 0 | { |
83 | 0 | MOZ_ASSERT(aGraph->CurrentDriver()->OnThread()); |
84 | 0 | if (mAudioSource) { |
85 | 0 | mAudioSource->DeviceChanged(aGraph); |
86 | 0 | } |
87 | 0 | } |
88 | | |
89 | | uint32_t |
90 | | WebRTCAudioDataListener::RequestedInputChannelCount(MediaStreamGraphImpl* aGraph) |
91 | 0 | { |
92 | 0 | MOZ_ASSERT(aGraph->CurrentDriver()->OnThread()); |
93 | 0 | if (mAudioSource) { |
94 | 0 | return mAudioSource->RequestedInputChannelCount(aGraph); |
95 | 0 | } |
96 | 0 | return 0; |
97 | 0 | } |
98 | | |
99 | | void |
100 | | WebRTCAudioDataListener::Disconnect(MediaStreamGraphImpl* aGraph) |
101 | 0 | { |
102 | 0 | MOZ_ASSERT(aGraph->CurrentDriver()->OnThread()); |
103 | 0 | if (mAudioSource) { |
104 | 0 | mAudioSource->Disconnect(aGraph); |
105 | 0 | mAudioSource = nullptr; |
106 | 0 | } |
107 | 0 | } |
108 | | |
109 | | /** |
110 | | * WebRTC Microphone MediaEngineSource. |
111 | | */ |
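 | | // Threading overview: Allocate/Deallocate/Start/Stop/Reconfigure assert the |
 | | // owning (media manager) thread, while the Notify*Data callbacks assert the |
 | | // graph driver thread; mMutex guards the state shared between them. |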
112 | | |
113 | | MediaEngineWebRTCMicrophoneSource::Allocation::Allocation( |
114 | | const RefPtr<AllocationHandle>& aHandle) |
115 | | : mHandle(aHandle) |
116 | 0 | {} |
117 | | |
118 | 0 | MediaEngineWebRTCMicrophoneSource::Allocation::~Allocation() = default; |
119 | | |
120 | | MediaEngineWebRTCMicrophoneSource::MediaEngineWebRTCMicrophoneSource( |
121 | | RefPtr<AudioDeviceInfo> aInfo, |
122 | | const nsString& aDeviceName, |
123 | | const nsCString& aDeviceUUID, |
124 | | uint32_t aMaxChannelCount, |
125 | | bool aDelayAgnostic, |
126 | | bool aExtendedFilter) |
127 | | : mDeviceInfo(std::move(aInfo)) |
128 | | , mAudioProcessing(AudioProcessing::Create()) |
129 | | , mMutex("WebRTCMic::Mutex") |
130 | | , mDelayAgnostic(aDelayAgnostic) |
131 | | , mExtendedFilter(aExtendedFilter) |
132 | | , mStarted(false) |
133 | | , mDeviceName(aDeviceName) |
134 | | , mDeviceUUID(aDeviceUUID) |
135 | | , mSettings( |
136 | | new nsMainThreadPtrHolder<media::Refcountable<dom::MediaTrackSettings>>( |
137 | | "MediaEngineWebRTCMicrophoneSource::mSettings", |
138 | | new media::Refcountable<dom::MediaTrackSettings>(), |
139 | | // Non-strict means it won't assert main thread for us. |
140 | | // It would be great if it did but we're already on the media thread. |
141 | | /* aStrict = */ false)) |
142 | | , mRequestedInputChannelCount(aMaxChannelCount) |
143 | | , mTotalFrames(0) |
144 | | , mLastLogFrames(0) |
145 | | , mSkipProcessing(false) |
146 | | , mInputDownmixBuffer(MAX_SAMPLING_FREQ * MAX_CHANNELS / 100) |
147 | 0 | { |
148 | 0 | #ifndef ANDROID |
149 | 0 | MOZ_ASSERT(mDeviceInfo->DeviceID()); |
150 | 0 | #endif |
151 | 0 |
152 | 0 | // We'll init lazily as needed |
153 | 0 | mSettings->mEchoCancellation.Construct(0); |
154 | 0 | mSettings->mAutoGainControl.Construct(0); |
155 | 0 | mSettings->mNoiseSuppression.Construct(0); |
156 | 0 | mSettings->mChannelCount.Construct(0); |
157 | 0 |
158 | 0 | mState = kReleased; |
159 | 0 | } |
160 | | |
161 | | nsString |
162 | | MediaEngineWebRTCMicrophoneSource::GetName() const |
163 | 0 | { |
164 | 0 | return mDeviceName; |
165 | 0 | } |
166 | | |
167 | | nsCString |
168 | | MediaEngineWebRTCMicrophoneSource::GetUUID() const |
169 | 0 | { |
170 | 0 | return mDeviceUUID; |
171 | 0 | } |
172 | | |
173 | | // GetBestFitnessDistance returns the best distance the capture device can offer |
174 | | // as a whole, given an accumulated number of ConstraintSets. |
175 | | // Ideal values are considered in the first ConstraintSet only. |
176 | | // Plain values are treated as Ideal in the first ConstraintSet. |
177 | | // Plain values are treated as Exact in subsequent ConstraintSets. |
178 | | // Infinity = UINT32_MAX, i.e., the device cannot satisfy the accumulated ConstraintSets. |
179 | | // A finite result may be used to calculate this device's ranking as a choice. |
180 | | |
181 | | uint32_t MediaEngineWebRTCMicrophoneSource::GetBestFitnessDistance( |
182 | | const nsTArray<const NormalizedConstraintSet*>& aConstraintSets, |
183 | | const nsString& aDeviceId) const |
184 | 0 | { |
185 | 0 | uint32_t distance = 0; |
186 | 0 |
187 | 0 | for (const auto* cs : aConstraintSets) { |
188 | 0 | distance = MediaConstraintsHelper::GetMinimumFitnessDistance(*cs, aDeviceId); |
189 | 0 | break; // distance is read from first entry only |
190 | 0 | } |
191 | 0 | return distance; |
192 | 0 | } |
193 | | |
194 | | nsresult |
195 | | MediaEngineWebRTCMicrophoneSource::ReevaluateAllocation( |
196 | | const RefPtr<AllocationHandle>& aHandle, |
197 | | const NormalizedConstraints* aConstraintsUpdate, |
198 | | const MediaEnginePrefs& aPrefs, |
199 | | const nsString& aDeviceId, |
200 | | const char** aOutBadConstraint) |
201 | 0 | { |
202 | 0 | AssertIsOnOwningThread(); |
203 | 0 |
204 | 0 | // aHandle and/or aConstraintsUpdate may be nullptr (see below) |
205 | 0 |
206 | 0 | AutoTArray<const NormalizedConstraints*, 10> allConstraints; |
207 | 0 | for (const Allocation& registered : mAllocations) { |
208 | 0 | if (aConstraintsUpdate && registered.mHandle == aHandle) { |
209 | 0 | continue; // Don't count old constraints |
210 | 0 | } |
211 | 0 | allConstraints.AppendElement(®istered.mHandle->mConstraints); |
212 | 0 | } |
213 | 0 | if (aConstraintsUpdate) { |
214 | 0 | allConstraints.AppendElement(aConstraintsUpdate); |
215 | 0 | } else if (aHandle) { |
216 | 0 | // In the case of AddShareOfSingleSource, the handle isn't registered yet. |
217 | 0 | allConstraints.AppendElement(&aHandle->mConstraints); |
218 | 0 | } |
219 | 0 |
220 | 0 | NormalizedConstraints netConstraints(allConstraints); |
221 | 0 | if (netConstraints.mBadConstraint) { |
222 | 0 | *aOutBadConstraint = netConstraints.mBadConstraint; |
223 | 0 | return NS_ERROR_FAILURE; |
224 | 0 | } |
225 | 0 | |
226 | 0 | nsresult rv = UpdateSingleSource(aHandle, |
227 | 0 | netConstraints, |
228 | 0 | aPrefs, |
229 | 0 | aDeviceId, |
230 | 0 | aOutBadConstraint); |
231 | 0 | if (NS_FAILED(rv)) { |
232 | 0 | return rv; |
233 | 0 | } |
234 | 0 | if (aHandle && aConstraintsUpdate) { |
235 | 0 | aHandle->mConstraints = *aConstraintsUpdate; |
236 | 0 | } |
237 | 0 | return NS_OK; |
238 | 0 | } |
239 | | |
240 | | nsresult |
241 | | MediaEngineWebRTCMicrophoneSource::Reconfigure(const RefPtr<AllocationHandle>& aHandle, |
242 | | const dom::MediaTrackConstraints& aConstraints, |
243 | | const MediaEnginePrefs& aPrefs, |
244 | | const nsString& aDeviceId, |
245 | | const char** aOutBadConstraint) |
246 | 0 | { |
247 | 0 | AssertIsOnOwningThread(); |
248 | 0 | MOZ_ASSERT(aHandle); |
249 | 0 |
250 | 0 | LOG(("Mic source %p allocation %p Reconfigure()", this, aHandle.get())); |
251 | 0 |
252 | 0 | NormalizedConstraints constraints(aConstraints); |
253 | 0 | nsresult rv = ReevaluateAllocation(aHandle, &constraints, aPrefs, aDeviceId, |
254 | 0 | aOutBadConstraint); |
255 | 0 | if (NS_FAILED(rv)) { |
256 | 0 | if (aOutBadConstraint) { |
257 | 0 | return NS_ERROR_INVALID_ARG; |
258 | 0 | } |
259 | 0 | |
260 | 0 | nsAutoCString name; |
261 | 0 | GetErrorName(rv, name); |
262 | 0 | LOG(("Mic source %p Reconfigure() failed unexpectedly. rv=%s", |
263 | 0 | this, name.Data())); |
264 | 0 | Stop(aHandle); |
265 | 0 | return NS_ERROR_UNEXPECTED; |
266 | 0 | } |
267 | 0 |
268 | 0 | size_t i = mAllocations.IndexOf(aHandle, 0, AllocationHandleComparator()); |
269 | 0 | MOZ_DIAGNOSTIC_ASSERT(i != mAllocations.NoIndex); |
270 | 0 | ApplySettings(mNetPrefs, mAllocations[i].mStream->GraphImpl()); |
271 | 0 |
272 | 0 | return NS_OK; |
273 | 0 | } |
274 | | |
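 | | // Bitwise comparison of the two prefs structs. This assumes MediaEnginePrefs |
 | | // is trivially copyable with no uninitialized padding; otherwise memcmp() |
 | | // could report two logically equal configurations as different. |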
275 | | bool operator == (const MediaEnginePrefs& a, const MediaEnginePrefs& b) |
276 | 0 | { |
277 | 0 | return !memcmp(&a, &b, sizeof(MediaEnginePrefs)); |
278 | 0 | } |
279 | | |
280 | | // This does an early return in case of error. |
281 | 0 | #define HANDLE_APM_ERROR(fn) \ |
282 | 0 | do { \ |
283 | 0 | int rv = fn; \ |
284 | 0 | if (rv != AudioProcessing::kNoError) { \ |
285 | 0 | MOZ_ASSERT_UNREACHABLE("APM error in " #fn); \ |
286 | 0 | return; \ |
287 | 0 | } \ |
288 | 0 | } while (0) |
289 | | |
290 | | void MediaEngineWebRTCMicrophoneSource::UpdateAECSettingsIfNeeded(bool aEnable, EcModes aMode) |
291 | 0 | { |
292 | 0 | AssertIsOnOwningThread(); |
293 | 0 |
294 | 0 | using webrtc::EcModes; |
295 | 0 |
296 | 0 | EchoCancellation::SuppressionLevel level; |
297 | 0 |
298 | 0 | switch(aMode) { |
299 | 0 | case EcModes::kEcUnchanged: |
300 | 0 | level = mAudioProcessing->echo_cancellation()->suppression_level(); |
301 | 0 | break; |
302 | 0 | case EcModes::kEcConference: |
303 | 0 | level = EchoCancellation::kHighSuppression; |
304 | 0 | break; |
305 | 0 | case EcModes::kEcDefault: |
306 | 0 | level = EchoCancellation::kModerateSuppression; |
307 | 0 | break; |
308 | 0 | case EcModes::kEcAec: |
309 | 0 | level = EchoCancellation::kModerateSuppression; |
310 | 0 | break; |
311 | 0 | case EcModes::kEcAecm: |
312 | 0 | // No suppression level to set for the mobile echo canceller |
313 | 0 | break; |
314 | 0 | default: |
315 | 0 | MOZ_LOG(GetMediaManagerLog(), LogLevel::Error, ("Bad EcMode value")); |
316 | 0 | MOZ_ASSERT_UNREACHABLE("Bad pref set in all.js or in about:config" |
317 | 0 | " for the echo cancelation mode."); |
318 | 0 | // fall back to something sensible in release |
319 | 0 | level = EchoCancellation::kModerateSuppression; |
320 | 0 | break; |
321 | 0 | } |
322 | 0 |
323 | 0 | // AECm and AEC are mutually exclusive. |
324 | 0 | if (aMode == EcModes::kEcAecm) { |
325 | 0 | HANDLE_APM_ERROR(mAudioProcessing->echo_cancellation()->Enable(false)); |
326 | 0 | HANDLE_APM_ERROR(mAudioProcessing->echo_control_mobile()->Enable(aEnable)); |
327 | 0 | } else { |
328 | 0 | HANDLE_APM_ERROR(mAudioProcessing->echo_control_mobile()->Enable(false)); |
329 | 0 | HANDLE_APM_ERROR(mAudioProcessing->echo_cancellation()->Enable(aEnable)); |
330 | 0 | HANDLE_APM_ERROR(mAudioProcessing->echo_cancellation()->set_suppression_level(level)); |
331 | 0 | } |
332 | 0 | } |
333 | | |
334 | | void |
335 | | MediaEngineWebRTCMicrophoneSource::UpdateAGCSettingsIfNeeded(bool aEnable, AgcModes aMode) |
336 | 0 | { |
337 | 0 | AssertIsOnOwningThread(); |
338 | 0 |
339 | | #if defined(WEBRTC_IOS) || defined(ATA) || defined(WEBRTC_ANDROID) |
340 | | if (aMode == kAgcAdaptiveAnalog) { |
341 | | MOZ_LOG(GetMediaManagerLog(), |
342 | | LogLevel::Error, |
343 | | ("Invalid AGC mode kAgcAdaptiveAnalog on mobile")); |
344 | | MOZ_ASSERT_UNREACHABLE("Bad pref set in all.js or in about:config" |
345 | | " for the auto gain, on mobile."); |
346 | | aMode = kAgcDefault; |
347 | | } |
348 | | #endif |
349 | | GainControl::Mode mode = kDefaultAgcMode; |
350 | 0 |
351 | 0 | switch (aMode) { |
352 | 0 | case AgcModes::kAgcDefault: |
353 | 0 | mode = kDefaultAgcMode; |
354 | 0 | break; |
355 | 0 | case AgcModes::kAgcUnchanged: |
356 | 0 | mode = mAudioProcessing->gain_control()->mode(); |
357 | 0 | break; |
358 | 0 | case AgcModes::kAgcFixedDigital: |
359 | 0 | mode = GainControl::Mode::kFixedDigital; |
360 | 0 | break; |
361 | 0 | case AgcModes::kAgcAdaptiveAnalog: |
362 | 0 | mode = GainControl::Mode::kAdaptiveAnalog; |
363 | 0 | break; |
364 | 0 | case AgcModes::kAgcAdaptiveDigital: |
365 | 0 | mode = GainControl::Mode::kAdaptiveDigital; |
366 | 0 | break; |
367 | 0 | default: |
368 | 0 | MOZ_ASSERT_UNREACHABLE("Bad pref set in all.js or in about:config" |
369 | 0 | " for the auto gain."); |
370 | 0 | // This is a good fallback, it works regardless of the platform. |
371 | 0 | mode = GainControl::Mode::kAdaptiveDigital; |
372 | 0 | break; |
373 | 0 | } |
374 | 0 |
375 | 0 | HANDLE_APM_ERROR(mAudioProcessing->gain_control()->set_mode(mode)); |
376 | 0 | HANDLE_APM_ERROR(mAudioProcessing->gain_control()->Enable(aEnable)); |
377 | 0 | } |
378 | | |
379 | | void |
380 | | MediaEngineWebRTCMicrophoneSource::UpdateNSSettingsIfNeeded(bool aEnable, NsModes aMode) |
381 | 0 | { |
382 | 0 | AssertIsOnOwningThread(); |
383 | 0 |
384 | 0 | NoiseSuppression::Level nsLevel; |
385 | 0 |
386 | 0 | switch (aMode) { |
387 | 0 | case NsModes::kNsDefault: |
388 | 0 | nsLevel = kDefaultNsMode; |
389 | 0 | break; |
390 | 0 | case NsModes::kNsUnchanged: |
391 | 0 | nsLevel = mAudioProcessing->noise_suppression()->level(); |
392 | 0 | break; |
393 | 0 | case NsModes::kNsConference: |
394 | 0 | nsLevel = NoiseSuppression::kHigh; |
395 | 0 | break; |
396 | 0 | case NsModes::kNsLowSuppression: |
397 | 0 | nsLevel = NoiseSuppression::kLow; |
398 | 0 | break; |
399 | 0 | case NsModes::kNsModerateSuppression: |
400 | 0 | nsLevel = NoiseSuppression::kModerate; |
401 | 0 | break; |
402 | 0 | case NsModes::kNsHighSuppression: |
403 | 0 | nsLevel = NoiseSuppression::kHigh; |
404 | 0 | break; |
405 | 0 | case NsModes::kNsVeryHighSuppression: |
406 | 0 | nsLevel = NoiseSuppression::kVeryHigh; |
407 | 0 | break; |
408 | 0 | default: |
409 | 0 | MOZ_ASSERT_UNREACHABLE("Bad pref set in all.js or in about:config" |
410 | 0 | " for the noise suppression."); |
411 | 0 | // Pick something sensible as a fallback in release. |
412 | 0 | nsLevel = NoiseSuppression::kModerate; |
413 | 0 | } |
414 | 0 | HANDLE_APM_ERROR(mAudioProcessing->noise_suppression()->set_level(nsLevel)); |
415 | 0 | HANDLE_APM_ERROR(mAudioProcessing->noise_suppression()->Enable(aEnable)); |
416 | 0 | } |
417 | | |
418 | | #undef HANDLE_APM_ERROR |
419 | | |
420 | | nsresult |
421 | | MediaEngineWebRTCMicrophoneSource::UpdateSingleSource( |
422 | | const RefPtr<const AllocationHandle>& aHandle, |
423 | | const NormalizedConstraints& aNetConstraints, |
424 | | const MediaEnginePrefs& aPrefs, |
425 | | const nsString& aDeviceId, |
426 | | const char** aOutBadConstraint) |
427 | 0 | { |
428 | 0 | AssertIsOnOwningThread(); |
429 | 0 |
430 | 0 | FlattenedConstraints c(aNetConstraints); |
431 | 0 |
432 | 0 | MediaEnginePrefs prefs = aPrefs; |
433 | 0 | prefs.mAecOn = c.mEchoCancellation.Get(prefs.mAecOn); |
434 | 0 | prefs.mAgcOn = c.mAutoGainControl.Get(prefs.mAgcOn); |
435 | 0 | prefs.mNoiseOn = c.mNoiseSuppression.Get(prefs.mNoiseOn); |
436 | 0 |
437 | 0 | // Determine an actual channel count to use for this source. Three factors at |
438 | 0 | // play here: the device capabilities, the constraints passed in by content, |
439 | 0 | // and a pref that can force things (for testing) |
440 | 0 | int32_t maxChannels = mDeviceInfo->MaxChannels(); |
441 | 0 |
442 | 0 | // First, check channelCount violation wrt constraints. This fails in case of |
443 | 0 | // error. |
444 | 0 | if (c.mChannelCount.mMin > maxChannels) { |
445 | 0 | *aOutBadConstraint = "channelCount"; |
446 | 0 | return NS_ERROR_FAILURE; |
447 | 0 | } |
448 | 0 | // A pref can force the channel count to use. If the pref has a value of zero |
449 | 0 | // or lower, it has no effect. |
450 | 0 | if (prefs.mChannels <= 0) { |
451 | 0 | prefs.mChannels = maxChannels; |
452 | 0 | } |
453 | 0 |
454 | 0 | // Get the number of channels asked for by content, and clamp it between the |
455 | 0 | // pref and the maximum number of channels that the device supports. |
456 | 0 | prefs.mChannels = c.mChannelCount.Get(std::min(prefs.mChannels, |
457 | 0 | maxChannels)); |
458 | 0 | prefs.mChannels = std::max(1, std::min(prefs.mChannels, maxChannels)); |
459 | 0 |
460 | 0 | LOG(("Audio config: aec: %d, agc: %d, noise: %d, channels: %d", |
461 | 0 | prefs.mAecOn ? prefs.mAec : -1, |
462 | 0 | prefs.mAgcOn ? prefs.mAgc : -1, |
463 | 0 | prefs.mNoiseOn ? prefs.mNoise : -1, |
464 | 0 | prefs.mChannels)); |
465 | 0 |
466 | 0 | switch (mState) { |
467 | 0 | case kReleased: |
468 | 0 | MOZ_ASSERT(aHandle); |
469 | 0 | { |
470 | 0 | MutexAutoLock lock(mMutex); |
471 | 0 | mState = kAllocated; |
472 | 0 | } |
473 | 0 | LOG(("Audio device %s allocated", NS_ConvertUTF16toUTF8(mDeviceInfo->Name()).get())); |
474 | 0 | break; |
475 | 0 |
476 | 0 | case kStarted: |
477 | 0 | case kStopped: |
478 | 0 | if (prefs == mNetPrefs) { |
479 | 0 | LOG(("UpdateSingleSource: new prefs for %s are the same as the current prefs, returning.", |
480 | 0 | NS_ConvertUTF16toUTF8(mDeviceName).get())); |
481 | 0 | return NS_OK; |
482 | 0 | } |
483 | 0 | break; |
484 | 0 |
485 | 0 | default: |
486 | 0 | LOG(("Audio device %s in ignored state %d", NS_ConvertUTF16toUTF8(mDeviceInfo->Name()).get(), MediaEngineSourceState(mState))); |
487 | 0 | break; |
488 | 0 | } |
489 | 0 |
490 | 0 | if (mState != kReleased) { |
491 | 0 | UpdateAGCSettingsIfNeeded(prefs.mAgcOn, static_cast<AgcModes>(prefs.mAgc)); |
492 | 0 | UpdateNSSettingsIfNeeded(prefs.mNoiseOn, static_cast<NsModes>(prefs.mNoise)); |
493 | 0 | UpdateAECSettingsIfNeeded(prefs.mAecOn, static_cast<EcModes>(prefs.mAec)); |
494 | 0 |
495 | 0 | webrtc::Config config; |
496 | 0 | config.Set<webrtc::ExtendedFilter>(new webrtc::ExtendedFilter(mExtendedFilter)); |
497 | 0 | config.Set<webrtc::DelayAgnostic>(new webrtc::DelayAgnostic(mDelayAgnostic)); |
498 | 0 | mAudioProcessing->SetExtraOptions(config); |
499 | 0 | } |
500 | 0 | mNetPrefs = prefs; |
501 | 0 | return NS_OK; |
502 | 0 | } |
505 | | |
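 | | // Pass-through state: when true, NotifyInputData() bypasses the APM and |
 | | // inserts the raw microphone data directly into the graph. The flag is read |
 | | // and written on the graph thread. |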
506 | | bool |
507 | | MediaEngineWebRTCMicrophoneSource::PassThrough(MediaStreamGraphImpl* aGraph) const |
508 | 0 | { |
509 | 0 | MOZ_ASSERT(aGraph->CurrentDriver()->OnThread()); |
510 | 0 | return mSkipProcessing; |
511 | 0 | } |
512 | | void |
513 | | MediaEngineWebRTCMicrophoneSource::SetPassThrough(bool aPassThrough) |
514 | 0 | { |
515 | 0 | { |
516 | 0 | MutexAutoLock lock(mMutex); |
517 | 0 | if (mAllocations.IsEmpty()) { |
518 | 0 | // This can happen, for now, because we mix mutable shared state with |
519 | 0 | // linearization via a message queue. This is temporary. |
520 | 0 | return; |
521 | 0 | } |
522 | 0 | |
523 | 0 | // mStream is always valid because it's set right before ::Start is called. |
524 | 0 | // SetPassThrough cannot be called before that, because it's running on the |
525 | 0 | // graph thread, and this cannot happen before the source has been started. |
526 | 0 | MOZ_ASSERT(mAllocations.Length() == 1 && |
527 | 0 | mAllocations[0].mStream && |
528 | 0 | mAllocations[0].mStream->GraphImpl()->CurrentDriver()->OnThread(), |
529 | 0 | "Wrong calling pattern, don't call this before ::SetTrack."); |
530 | 0 | } |
531 | 0 | mSkipProcessing = aPassThrough; |
532 | 0 | } |
533 | | |
534 | | uint32_t |
535 | | MediaEngineWebRTCMicrophoneSource::GetRequestedInputChannelCount(MediaStreamGraphImpl* aGraphImpl) |
536 | 0 | { |
537 | 0 | MOZ_ASSERT(aGraphImpl->CurrentDriver()->OnThread(), |
538 | 0 | "Wrong calling pattern, don't call this before ::SetTrack."); |
539 | 0 |
540 | 0 | if (mState == kReleased) { |
541 | 0 | // This source has been released, and is waiting for collection. Simply |
542 | 0 | // return 0, this source won't contribute to the channel count decision. |
543 | 0 | // Again, this is temporary. |
544 | 0 | return 0; |
545 | 0 | } |
546 | 0 | |
547 | 0 | return mRequestedInputChannelCount; |
548 | 0 | } |
549 | | |
550 | | void |
551 | | MediaEngineWebRTCMicrophoneSource::SetRequestedInputChannelCount( |
552 | | uint32_t aRequestedInputChannelCount) |
553 | 0 | { |
554 | 0 | MutexAutoLock lock(mMutex); |
555 | 0 |
556 | 0 | MOZ_ASSERT(mAllocations.Length() <= 1); |
557 | 0 |
558 | 0 | if (mAllocations.IsEmpty()) { |
559 | 0 | return; |
560 | 0 | } |
561 | 0 | MOZ_ASSERT(mAllocations.Length() == 1 && |
562 | 0 | mAllocations[0].mStream && |
563 | 0 | mAllocations[0].mStream->GraphImpl()->CurrentDriver()->OnThread(), |
564 | 0 | "Wrong calling pattern, don't call this before ::SetTrack."); |
565 | 0 | mRequestedInputChannelCount = aRequestedInputChannelCount; |
566 | 0 | mAllocations[0].mStream->GraphImpl()->ReevaluateInputDevice(); |
567 | 0 | } |
568 | | |
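 | | // ApplySettings mirrors the chosen prefs into mSettings on the main thread, |
 | | // then queues a graph ControlMessage that updates the pass-through flag and |
 | | // the requested input channel count on the graph thread. |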
569 | | void |
570 | | MediaEngineWebRTCMicrophoneSource::ApplySettings(const MediaEnginePrefs& aPrefs, |
571 | | RefPtr<MediaStreamGraphImpl> aGraph) |
572 | 0 | { |
573 | 0 | AssertIsOnOwningThread(); |
574 | 0 | MOZ_DIAGNOSTIC_ASSERT(aGraph); |
575 | | #ifdef DEBUG |
576 | | { |
577 | | MutexAutoLock lock(mMutex); |
578 | | MOZ_ASSERT(mAllocations.Length() <= 1); |
579 | | } |
580 | | #endif |
581 | |
582 | 0 | RefPtr<MediaEngineWebRTCMicrophoneSource> that = this; |
583 | 0 | NS_DispatchToMainThread(media::NewRunnableFrom([that, graph = std::move(aGraph), aPrefs]() mutable { |
584 | 0 | that->mSettings->mEchoCancellation.Value() = aPrefs.mAecOn; |
585 | 0 | that->mSettings->mAutoGainControl.Value() = aPrefs.mAgcOn; |
586 | 0 | that->mSettings->mNoiseSuppression.Value() = aPrefs.mNoiseOn; |
587 | 0 | that->mSettings->mChannelCount.Value() = aPrefs.mChannels; |
588 | 0 |
589 | 0 | class Message : public ControlMessage { |
590 | 0 | public: |
591 | 0 | Message(MediaEngineWebRTCMicrophoneSource* aSource, |
592 | 0 | bool aPassThrough, |
593 | 0 | uint32_t aRequestedInputChannelCount) |
594 | 0 | : ControlMessage(nullptr) |
595 | 0 | , mMicrophoneSource(aSource) |
596 | 0 | , mPassThrough(aPassThrough) |
597 | 0 | , mRequestedInputChannelCount(aRequestedInputChannelCount) |
598 | 0 | {} |
599 | 0 |
600 | 0 | void Run() override |
601 | 0 | { |
602 | 0 | mMicrophoneSource->SetPassThrough(mPassThrough); |
603 | 0 | mMicrophoneSource->SetRequestedInputChannelCount(mRequestedInputChannelCount); |
604 | 0 | } |
605 | 0 |
606 | 0 | protected: |
607 | 0 | RefPtr<MediaEngineWebRTCMicrophoneSource> mMicrophoneSource; |
608 | 0 | bool mPassThrough; |
609 | 0 | uint32_t mRequestedInputChannelCount; |
610 | 0 | }; |
611 | 0 |
612 | 0 | bool passThrough = !(aPrefs.mAecOn || aPrefs.mAgcOn || aPrefs.mNoiseOn); |
613 | 0 | if (graph) { |
614 | 0 | graph->AppendMessage(MakeUnique<Message>(that, |
615 | 0 | passThrough, |
616 | 0 | aPrefs.mChannels)); |
617 | 0 | } |
618 | 0 |
619 | 0 | return NS_OK; |
620 | 0 | })); |
621 | 0 | } |
622 | | |
623 | | nsresult |
624 | | MediaEngineWebRTCMicrophoneSource::Allocate(const dom::MediaTrackConstraints &aConstraints, |
625 | | const MediaEnginePrefs& aPrefs, |
626 | | const nsString& aDeviceId, |
627 | | const ipc::PrincipalInfo& aPrincipalInfo, |
628 | | AllocationHandle** aOutHandle, |
629 | | const char** aOutBadConstraint) |
630 | 0 | { |
631 | 0 | AssertIsOnOwningThread(); |
632 | 0 | MOZ_ASSERT(aOutHandle); |
633 | 0 | auto handle = MakeRefPtr<AllocationHandle>(aConstraints, aPrincipalInfo, |
634 | 0 | aDeviceId); |
635 | 0 |
636 | | #ifdef DEBUG |
637 | | { |
638 | | MutexAutoLock lock(mMutex); |
639 | | MOZ_ASSERT(mAllocations.Length() <= 1); |
640 | | } |
641 | | #endif |
642 | 0 | LOG(("Mic source %p allocation %p Allocate()", this, handle.get())); |
643 | 0 |
644 | 0 | nsresult rv = ReevaluateAllocation(handle, nullptr, aPrefs, aDeviceId, |
645 | 0 | aOutBadConstraint); |
646 | 0 | if (NS_FAILED(rv)) { |
647 | 0 | return rv; |
648 | 0 | } |
649 | 0 | |
650 | 0 | { |
651 | 0 | MutexAutoLock lock(mMutex); |
652 | 0 | MOZ_ASSERT(mAllocations.IsEmpty(), "Only allocate once."); |
653 | 0 | mAllocations.AppendElement(Allocation(handle)); |
654 | 0 | } |
655 | 0 |
656 | 0 | handle.forget(aOutHandle); |
657 | 0 | return NS_OK; |
658 | 0 | } |
659 | | |
660 | | nsresult |
661 | | MediaEngineWebRTCMicrophoneSource::Deallocate(const RefPtr<const AllocationHandle>& aHandle) |
662 | 0 | { |
663 | 0 | AssertIsOnOwningThread(); |
664 | 0 |
665 | 0 | MOZ_ASSERT(mState == kStopped); |
666 | 0 |
667 | 0 | size_t i = mAllocations.IndexOf(aHandle, 0, AllocationHandleComparator()); |
668 | 0 | MOZ_DIAGNOSTIC_ASSERT(i != mAllocations.NoIndex); |
669 | 0 | MOZ_DIAGNOSTIC_ASSERT(!mAllocations[i].mEnabled, |
670 | 0 | "Source should be stopped for the track before removing"); |
671 | 0 |
672 | 0 | if (mAllocations[i].mStream && IsTrackIDExplicit(mAllocations[i].mTrackID)) { |
673 | 0 | mAllocations[i].mStream->EndTrack(mAllocations[i].mTrackID); |
674 | 0 | } |
675 | 0 |
676 | 0 | { |
677 | 0 | MutexAutoLock lock(mMutex); |
678 | 0 | MOZ_ASSERT(mAllocations.Length() == 1, "Only allocate once."); |
679 | 0 | mAllocations.RemoveElementAt(i); |
680 | 0 | } |
681 | 0 |
682 | 0 | if (mAllocations.IsEmpty()) { |
683 | 0 | // If empty, no callbacks to deliver data should be occurring |
684 | 0 | MOZ_ASSERT(mState != kReleased, "Source not allocated"); |
685 | 0 | MOZ_ASSERT(mState != kStarted, "Source not stopped"); |
686 | 0 |
687 | 0 | MutexAutoLock lock(mMutex); |
688 | 0 | mState = kReleased; |
689 | 0 | LOG(("Audio device %s deallocated", NS_ConvertUTF16toUTF8(mDeviceName).get())); |
690 | 0 | } else { |
691 | 0 | LOG(("Audio device %s deallocated but still in use", NS_ConvertUTF16toUTF8(mDeviceName).get())); |
692 | 0 | } |
693 | 0 | return NS_OK; |
694 | 0 | } |
695 | | |
696 | | nsresult |
697 | | MediaEngineWebRTCMicrophoneSource::SetTrack(const RefPtr<const AllocationHandle>& aHandle, |
698 | | const RefPtr<SourceMediaStream>& aStream, |
699 | | TrackID aTrackID, |
700 | | const PrincipalHandle& aPrincipal) |
701 | 0 | { |
702 | 0 | AssertIsOnOwningThread(); |
703 | 0 | MOZ_ASSERT(aStream); |
704 | 0 | MOZ_ASSERT(IsTrackIDExplicit(aTrackID)); |
705 | 0 |
706 | 0 | if (mAllocations.Length() == 1 && |
707 | 0 | mAllocations[0].mStream && |
708 | 0 | mAllocations[0].mStream->Graph() != aStream->Graph()) { |
709 | 0 | return NS_ERROR_NOT_AVAILABLE; |
710 | 0 | } |
711 | 0 | |
712 | 0 | MOZ_ASSERT(mAllocations.Length() == 1, "Only allocate once."); |
713 | 0 |
714 | 0 | size_t i = mAllocations.IndexOf(aHandle, 0, AllocationHandleComparator()); |
715 | 0 | MOZ_DIAGNOSTIC_ASSERT(i != mAllocations.NoIndex); |
716 | 0 | MOZ_ASSERT(!mAllocations[i].mStream); |
717 | 0 | MOZ_ASSERT(mAllocations[i].mTrackID == TRACK_NONE); |
718 | 0 | MOZ_ASSERT(mAllocations[i].mPrincipal == PRINCIPAL_HANDLE_NONE); |
719 | 0 | { |
720 | 0 | MutexAutoLock lock(mMutex); |
721 | 0 | mAllocations[i].mStream = aStream; |
722 | 0 | mAllocations[i].mTrackID = aTrackID; |
723 | 0 | mAllocations[i].mPrincipal = aPrincipal; |
724 | 0 | } |
725 | 0 |
726 | 0 | AudioSegment* segment = new AudioSegment(); |
727 | 0 |
728 | 0 | aStream->AddAudioTrack(aTrackID, |
729 | 0 | aStream->GraphRate(), |
730 | 0 | 0, |
731 | 0 | segment, |
732 | 0 | SourceMediaStream::ADDTRACK_QUEUED); |
733 | 0 |
734 | 0 | // XXX Make this based on the pref. |
735 | 0 | aStream->RegisterForAudioMixing(); |
736 | 0 |
737 | 0 | LOG(("Stream %p registered for microphone capture", aStream.get())); |
738 | 0 | return NS_OK; |
739 | 0 | } |
740 | | |
741 | | nsresult |
742 | | MediaEngineWebRTCMicrophoneSource::Start(const RefPtr<const AllocationHandle>& aHandle) |
743 | 0 | { |
744 | 0 | AssertIsOnOwningThread(); |
745 | 0 | if (mState == kStarted) { |
746 | 0 | return NS_OK; |
747 | 0 | } |
748 | 0 | |
749 | 0 | MOZ_ASSERT(mState == kAllocated || mState == kStopped); |
750 | 0 |
751 | 0 | size_t i = mAllocations.IndexOf(aHandle, 0, AllocationHandleComparator()); |
752 | 0 | MOZ_DIAGNOSTIC_ASSERT(i != mAllocations.NoIndex, |
753 | 0 | "Can't start track that hasn't been added"); |
754 | 0 | Allocation& allocation = mAllocations[i]; |
755 | 0 |
756 | 0 | CubebUtils::AudioDeviceID deviceID = mDeviceInfo->DeviceID(); |
757 | 0 | if (allocation.mStream->GraphImpl()->InputDeviceID() && |
758 | 0 | allocation.mStream->GraphImpl()->InputDeviceID() != deviceID) { |
759 | 0 | // For now, we only allow opening a single audio input device per document, |
760 | 0 | // because we can only have one MSG per document. |
761 | 0 | return NS_ERROR_FAILURE; |
762 | 0 | } |
763 | 0 | |
764 | 0 | MOZ_ASSERT(!allocation.mEnabled, "Source already started"); |
765 | 0 | { |
766 | 0 | // This spans setting both the enabled state and mState. |
767 | 0 | MutexAutoLock lock(mMutex); |
768 | 0 | allocation.mEnabled = true; |
769 | 0 |
770 | | #ifdef DEBUG |
771 | | // Ensure that callback-tracking state is reset when callbacks start coming. |
772 | | allocation.mLastCallbackAppendTime = 0; |
773 | | #endif |
774 | | allocation.mLiveFramesAppended = false; |
775 | 0 | allocation.mLiveSilenceAppended = false; |
776 | 0 |
777 | 0 | if (!mListener) { |
778 | 0 | mListener = new WebRTCAudioDataListener(this); |
779 | 0 | } |
780 | 0 |
781 | 0 | // Make sure logger starts before capture |
782 | 0 | AsyncLatencyLogger::Get(true); |
783 | 0 |
784 | 0 | allocation.mStream->OpenAudioInput(deviceID, mListener); |
785 | 0 |
786 | 0 | MOZ_ASSERT(mState != kReleased); |
787 | 0 | mState = kStarted; |
788 | 0 | } |
789 | 0 |
790 | 0 | ApplySettings(mNetPrefs, allocation.mStream->GraphImpl()); |
791 | 0 |
792 | 0 | return NS_OK; |
793 | 0 | } |
794 | | |
795 | | nsresult |
796 | | MediaEngineWebRTCMicrophoneSource::Stop(const RefPtr<const AllocationHandle>& aHandle) |
797 | 0 | { |
798 | 0 | MOZ_ASSERT(mAllocations.Length() <= 1); |
799 | 0 | AssertIsOnOwningThread(); |
800 | 0 |
801 | 0 | LOG(("Mic source %p allocation %p Stop()", this, aHandle.get())); |
802 | 0 |
803 | 0 | size_t i = mAllocations.IndexOf(aHandle, 0, AllocationHandleComparator()); |
804 | 0 | MOZ_DIAGNOSTIC_ASSERT(i != mAllocations.NoIndex, |
805 | 0 | "Cannot stop track that we don't know about"); |
806 | 0 | Allocation& allocation = mAllocations[i]; |
807 | 0 | MOZ_ASSERT(allocation.mStream, "SetTrack must have been called before ::Stop"); |
808 | 0 |
809 | 0 | if (!allocation.mEnabled) { |
810 | 0 | // Already stopped - this is allowed |
811 | 0 | return NS_OK; |
812 | 0 | } |
813 | 0 | |
814 | 0 | { |
815 | 0 | // This spans setting both the enabled state and mState. |
816 | 0 | MutexAutoLock lock(mMutex); |
817 | 0 | allocation.mEnabled = false; |
818 | 0 |
819 | 0 | CubebUtils::AudioDeviceID deviceID = mDeviceInfo->DeviceID(); |
820 | 0 | Maybe<CubebUtils::AudioDeviceID> id = Some(deviceID); |
821 | 0 | allocation.mStream->CloseAudioInput(id, mListener); |
822 | 0 | mListener = nullptr; |
823 | 0 |
824 | 0 | if (HasEnabledTrack()) { |
825 | 0 | // Another track is keeping us from stopping |
826 | 0 | return NS_OK; |
827 | 0 | } |
828 | 0 | |
829 | 0 | MOZ_ASSERT(mState == kStarted, "Should be started when stopping"); |
830 | 0 | mState = kStopped; |
831 | 0 | } |
832 | 0 |
833 | 0 | return NS_OK; |
834 | 0 | } |
835 | | |
836 | | void |
837 | | MediaEngineWebRTCMicrophoneSource::GetSettings(dom::MediaTrackSettings& aOutSettings) const |
838 | 0 | { |
839 | 0 | MOZ_ASSERT(NS_IsMainThread()); |
840 | 0 | aOutSettings = *mSettings; |
841 | 0 | } |
842 | | |
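 | | // Pull is invoked when a consumer needs more data on the track. Real frames |
 | | // arrive through the audio callbacks; Pull only pads the track with silence |
 | | // so it never underruns while waiting for those callbacks. |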
843 | | void |
844 | | MediaEngineWebRTCMicrophoneSource::Pull(const RefPtr<const AllocationHandle>& aHandle, |
845 | | const RefPtr<SourceMediaStream>& aStream, |
846 | | TrackID aTrackID, |
847 | | StreamTime aDesiredTime, |
848 | | const PrincipalHandle& aPrincipalHandle) |
849 | 0 | { |
850 | 0 | TRACE_AUDIO_CALLBACK_COMMENT("SourceMediaStream %p track %i", |
851 | 0 | aStream.get(), aTrackID); |
852 | 0 | StreamTime delta; |
853 | 0 |
854 | 0 | { |
855 | 0 | MutexAutoLock lock(mMutex); |
856 | 0 | size_t i = mAllocations.IndexOf(aHandle, 0, AllocationHandleComparator()); |
857 | 0 | if (i == mAllocations.NoIndex) { |
858 | 0 | // This handle must have been deallocated. That's fine, and its track |
859 | 0 | // will already be ended. No need to do anything. |
860 | 0 | return; |
861 | 0 | } |
862 | 0 | |
863 | 0 | // We don't want to call GetEndOfAppendedData() at the declaration above, in |
864 | 0 | // case the allocation was removed and the track is non-existent; an assert would fail. |
865 | 0 | delta = aDesiredTime - aStream->GetEndOfAppendedData(aTrackID); |
866 | 0 |
867 | 0 | if (delta < 0) { |
868 | 0 | LOG_FRAMES(("Not appending silence for allocation %p; %" PRId64 " frames already buffered", |
869 | 0 | mAllocations[i].mHandle.get(), -delta)); |
870 | 0 | return; |
871 | 0 | } |
872 | 0 |
873 | 0 | if (!mAllocations[i].mLiveFramesAppended || |
874 | 0 | !mAllocations[i].mLiveSilenceAppended) { |
875 | 0 | // These are the iterations after starting or resuming audio capture. |
876 | 0 | // Make sure there's at least one extra block buffered until audio |
877 | 0 | // callbacks come in. We also allow appending silence one time after |
878 | 0 | // audio callbacks have started, to cover the case where audio callbacks |
879 | 0 | // start appending data immediately and there is no extra data buffered. |
880 | 0 | delta += WEBAUDIO_BLOCK_SIZE; |
881 | 0 |
882 | 0 | // If we're supposed to be packetizing but there's no packetizer yet, |
883 | 0 | // there must not have been any live frames appended yet. |
884 | 0 | // If there were live frames appended and we haven't appended the |
885 | 0 | // right amount of silence, we'll have to append silence once more, |
886 | 0 | // failing the other assert below. |
887 | 0 | MOZ_ASSERT_IF(!PassThrough(aStream->GraphImpl()) && !mPacketizerInput, |
888 | 0 | !mAllocations[i].mLiveFramesAppended); |
889 | 0 |
890 | 0 | if (!PassThrough(aStream->GraphImpl()) && mPacketizerInput) { |
891 | 0 | // Processing is active and is processed in chunks of 10ms through the |
892 | 0 | // input packetizer. We allow for 10ms of silence on the track to |
893 | 0 | // accommodate the buffering worst-case. |
894 | 0 | delta += mPacketizerInput->PacketSize(); |
895 | 0 | } |
896 | 0 | } |
897 | 0 |
898 | 0 | LOG_FRAMES(("Pulling %" PRId64 " frames of silence for allocation %p", |
899 | 0 | delta, mAllocations[i].mHandle.get())); |
900 | 0 |
901 | 0 | // This assertion fails when we append silence here in the same iteration |
902 | 0 | // as there were real audio samples already appended by the audio callback. |
903 | 0 | // Note that this is exempted until live samples and a subsequent chunk of |
904 | 0 | // silence have been appended to the track. This will cover cases like: |
905 | 0 | // - After Start(), there is silence (maybe multiple times) appended before |
906 | 0 | // the first audio callback. |
907 | 0 | // - After Start(), there is real data (maybe multiple times) appended |
908 | 0 | // before the first graph iteration. |
909 | 0 | // And other combinations of order of audio sample sources. |
910 | 0 | MOZ_ASSERT_IF( |
911 | 0 | mAllocations[i].mEnabled && |
912 | 0 | mAllocations[i].mLiveFramesAppended && |
913 | 0 | mAllocations[i].mLiveSilenceAppended, |
914 | 0 | aStream->GraphImpl()->IterationEnd() > |
915 | 0 | mAllocations[i].mLastCallbackAppendTime); |
916 | 0 |
917 | 0 | if (mAllocations[i].mLiveFramesAppended) { |
918 | 0 | mAllocations[i].mLiveSilenceAppended = true; |
919 | 0 | } |
920 | 0 | } |
921 | 0 |
922 | 0 | AudioSegment audio; |
923 | 0 | audio.AppendNullData(delta); |
924 | 0 | aStream->AppendToTrack(aTrackID, &audio); |
925 | 0 | } |
926 | | |
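 | | // NotifyOutputData receives the speaker (far-end) audio. It is repacketized |
 | | // into 10ms chunks, deinterleaved, downmixed to at most MAX_CHANNELS, and |
 | | // fed to the APM's reverse stream so the AEC can cancel it from the input. |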
927 | | void |
928 | | MediaEngineWebRTCMicrophoneSource::NotifyOutputData(MediaStreamGraphImpl* aGraph, |
929 | | AudioDataValue* aBuffer, |
930 | | size_t aFrames, |
931 | | TrackRate aRate, |
932 | | uint32_t aChannels) |
933 | 0 | { |
934 | 0 | MOZ_ASSERT(aGraph->CurrentDriver()->OnThread()); |
935 | 0 |
936 | 0 | if (!mPacketizerOutput || |
937 | 0 | mPacketizerOutput->PacketSize() != aRate/100u || |
938 | 0 | mPacketizerOutput->Channels() != aChannels) { |
939 | 0 | // It's OK to drop the audio still in the packetizer here: if the rate or |
940 | 0 | // channel count changed, the device has changed as well. |
941 | 0 | mPacketizerOutput = |
942 | 0 | new AudioPacketizer<AudioDataValue, float>(aRate/100, aChannels); |
943 | 0 | } |
944 | 0 |
945 | 0 | mPacketizerOutput->Input(aBuffer, aFrames); |
946 | 0 |
947 | 0 | while (mPacketizerOutput->PacketsAvailable()) { |
948 | 0 | uint32_t samplesPerPacket = mPacketizerOutput->PacketSize() * |
949 | 0 | mPacketizerOutput->Channels(); |
950 | 0 | if (mOutputBuffer.Length() < samplesPerPacket) { |
951 | 0 | mOutputBuffer.SetLength(samplesPerPacket); |
952 | 0 | } |
953 | 0 | if (mDeinterleavedBuffer.Length() < samplesPerPacket) { |
954 | 0 | mDeinterleavedBuffer.SetLength(samplesPerPacket); |
955 | 0 | } |
956 | 0 | float* packet = mOutputBuffer.Data(); |
957 | 0 | mPacketizerOutput->Output(packet); |
958 | 0 |
959 | 0 | AutoTArray<float*, MAX_CHANNELS> deinterleavedPacketDataChannelPointers; |
960 | 0 | float* interleavedFarend = nullptr; |
961 | 0 | uint32_t channelCountFarend = 0; |
962 | 0 | uint32_t framesPerPacketFarend = 0; |
963 | 0 |
964 | 0 | // Downmix from aChannels to MAX_CHANNELS if needed. We always have floats |
965 | 0 | // here; the packetizer performed the conversion. |
966 | 0 | if (aChannels > MAX_CHANNELS) { |
967 | 0 | AudioConverter converter(AudioConfig(aChannels, 0, AudioConfig::FORMAT_FLT), |
968 | 0 | AudioConfig(MAX_CHANNELS, 0, AudioConfig::FORMAT_FLT)); |
969 | 0 | framesPerPacketFarend = mPacketizerOutput->PacketSize(); |
970 | 0 | framesPerPacketFarend = |
971 | 0 | converter.Process(mInputDownmixBuffer, |
972 | 0 | packet, |
973 | 0 | framesPerPacketFarend); |
974 | 0 | interleavedFarend = mInputDownmixBuffer.Data(); |
975 | 0 | channelCountFarend = MAX_CHANNELS; |
976 | 0 | deinterleavedPacketDataChannelPointers.SetLength(MAX_CHANNELS); |
977 | 0 | } else { |
978 | 0 | interleavedFarend = packet; |
979 | 0 | channelCountFarend = aChannels; |
980 | 0 | framesPerPacketFarend = mPacketizerOutput->PacketSize(); |
981 | 0 | deinterleavedPacketDataChannelPointers.SetLength(aChannels); |
982 | 0 | } |
983 | 0 |
984 | 0 | MOZ_ASSERT(interleavedFarend && |
985 | 0 | (channelCountFarend == 1 || channelCountFarend == 2) && |
986 | 0 | framesPerPacketFarend); |
987 | 0 |
988 | 0 | if (mInputBuffer.Length() < framesPerPacketFarend * channelCountFarend) { |
989 | 0 | mInputBuffer.SetLength(framesPerPacketFarend * channelCountFarend); |
990 | 0 | } |
991 | 0 |
992 | 0 | size_t offset = 0; |
993 | 0 | for (size_t i = 0; i < deinterleavedPacketDataChannelPointers.Length(); ++i) { |
994 | 0 | deinterleavedPacketDataChannelPointers[i] = mInputBuffer.Data() + offset; |
995 | 0 | offset += framesPerPacketFarend; |
996 | 0 | } |
997 | 0 |
998 | 0 | // Deinterleave into the channel-pointer array prepared above, which has |
999 | 0 | // enough storage for the frames. |
1000 | 0 | DeinterleaveAndConvertBuffer(interleavedFarend, |
1001 | 0 | framesPerPacketFarend, |
1002 | 0 | channelCountFarend, |
1003 | 0 | deinterleavedPacketDataChannelPointers.Elements()); |
1004 | 0 |
1005 | 0 | // Having the same config for input and output means we potentially save |
1006 | 0 | // some CPU. |
1007 | 0 | StreamConfig inputConfig(aRate, channelCountFarend, false); |
1008 | 0 | StreamConfig outputConfig = inputConfig; |
1009 | 0 |
1010 | 0 | // Passing the same pointers here saves a copy inside this function. |
1011 | 0 | DebugOnly<int> err = |
1012 | 0 | mAudioProcessing->ProcessReverseStream(deinterleavedPacketDataChannelPointers.Elements(), |
1013 | 0 | inputConfig, |
1014 | 0 | outputConfig, |
1015 | 0 | deinterleavedPacketDataChannelPointers.Elements()); |
1016 | 0 |
1017 | 0 | MOZ_ASSERT(!err, "Could not process the reverse stream."); |
1018 | 0 | } |
1019 | 0 | } |
1020 | | |
1021 | | // Only called if we're not in passthrough mode |
1022 | | void |
1023 | | MediaEngineWebRTCMicrophoneSource::PacketizeAndProcess(MediaStreamGraphImpl* aGraph, |
1024 | | const AudioDataValue* aBuffer, |
1025 | | size_t aFrames, |
1026 | | TrackRate aRate, |
1027 | | uint32_t aChannels) |
1028 | 0 | { |
1029 | 0 | MOZ_ASSERT(!PassThrough(aGraph), "This should be bypassed when in PassThrough mode."); |
1030 | 0 | size_t offset = 0; |
1031 | 0 |
1032 | 0 | if (!mPacketizerInput || |
1033 | 0 | mPacketizerInput->PacketSize() != aRate/100u || |
1034 | 0 | mPacketizerInput->Channels() != aChannels) { |
1035 | 0 | // It's ok to drop the audio still in the packetizer here. |
1036 | 0 | mPacketizerInput = |
1037 | 0 | new AudioPacketizer<AudioDataValue, float>(aRate/100, aChannels); |
1038 | 0 | } |
1039 | 0 |
1040 | 0 | // On initial capture, throw away all far-end data except the most recent |
1041 | 0 | // sample since it's already irrelevant and we want to avoid confusing the AEC |
1042 | 0 | // far-end input code with "old" audio. |
1043 | 0 | if (!mStarted) { |
1044 | 0 | mStarted = true; |
1045 | 0 | } |
1046 | 0 |
1047 | 0 | // Packetize our input data into 10ms chunks, deinterleave into planar channel |
1048 | 0 | // buffers, process, and append to the right MediaStreamTrack. |
1049 | 0 | mPacketizerInput->Input(aBuffer, static_cast<uint32_t>(aFrames)); |
1050 | 0 |
1051 | 0 | while (mPacketizerInput->PacketsAvailable()) { |
1052 | 0 | uint32_t samplesPerPacket = mPacketizerInput->PacketSize() * |
1053 | 0 | mPacketizerInput->Channels(); |
1054 | 0 | if (mInputBuffer.Length() < samplesPerPacket) { |
1055 | 0 | mInputBuffer.SetLength(samplesPerPacket); |
1056 | 0 | } |
1057 | 0 | if (mDeinterleavedBuffer.Length() < samplesPerPacket) { |
1058 | 0 | mDeinterleavedBuffer.SetLength(samplesPerPacket); |
1059 | 0 | } |
1060 | 0 | float* packet = mInputBuffer.Data(); |
1061 | 0 | mPacketizerInput->Output(packet); |
1062 | 0 |
1063 | 0 | // Deinterleave the input data |
1064 | 0 | // Prepare an array pointing to deinterleaved channels. |
1065 | 0 | AutoTArray<float*, 8> deinterleavedPacketizedInputDataChannelPointers; |
1066 | 0 | deinterleavedPacketizedInputDataChannelPointers.SetLength(aChannels); |
1067 | 0 | offset = 0; |
1068 | 0 | for (size_t i = 0; i < deinterleavedPacketizedInputDataChannelPointers.Length(); ++i) { |
1069 | 0 | deinterleavedPacketizedInputDataChannelPointers[i] = mDeinterleavedBuffer.Data() + offset; |
1070 | 0 | offset += mPacketizerInput->PacketSize(); |
1071 | 0 | } |
1072 | 0 |
1073 | 0 | // Deinterleave to mDeinterleavedBuffer, via the channel pointers set up above. |
1074 | 0 | Deinterleave(packet, mPacketizerInput->PacketSize(), aChannels, |
1075 | 0 | deinterleavedPacketizedInputDataChannelPointers.Elements()); |
1076 | 0 |
1077 | 0 | StreamConfig inputConfig(aRate, |
1078 | 0 | aChannels, |
1079 | 0 | false /* we don't use typing detection*/); |
1080 | 0 | StreamConfig outputConfig = inputConfig; |
1081 | 0 |
1082 | 0 | // Bug 1404965: Get the right delay here, it saves some work down the line. |
1083 | 0 | mAudioProcessing->set_stream_delay_ms(0); |
1084 | 0 |
1085 | 0 | // Bug 1414837: find a way to not allocate here. |
1086 | 0 | RefPtr<SharedBuffer> buffer = |
1087 | 0 | SharedBuffer::Create(mPacketizerInput->PacketSize() * aChannels * sizeof(float)); |
1088 | 0 |
1089 | 0 | // Prepare channel pointers to the SharedBuffer created above. |
1090 | 0 | AutoTArray<float*, 8> processedOutputChannelPointers; |
1091 | 0 | AutoTArray<const float*, 8> processedOutputChannelPointersConst; |
1092 | 0 | processedOutputChannelPointers.SetLength(aChannels); |
1093 | 0 | processedOutputChannelPointersConst.SetLength(aChannels); |
1094 | 0 |
1095 | 0 | offset = 0; |
1096 | 0 | for (size_t i = 0; i < processedOutputChannelPointers.Length(); ++i) { |
1097 | 0 | processedOutputChannelPointers[i] = static_cast<float*>(buffer->Data()) + offset; |
1098 | 0 | processedOutputChannelPointersConst[i] = static_cast<float*>(buffer->Data()) + offset; |
1099 | 0 | offset += mPacketizerInput->PacketSize(); |
1100 | 0 | } |
1101 | 0 |
1102 | 0 | mAudioProcessing->ProcessStream(deinterleavedPacketizedInputDataChannelPointers.Elements(), |
1103 | 0 | inputConfig, |
1104 | 0 | outputConfig, |
1105 | 0 | processedOutputChannelPointers.Elements()); |
1106 | 0 | MutexAutoLock lock(mMutex); |
1107 | 0 | if (mState != kStarted) { |
1108 | 0 | return; |
1109 | 0 | } |
1110 | 0 | |
1111 | 0 | AudioSegment segment; |
1112 | 0 | for (Allocation& allocation : mAllocations) { |
1113 | 0 | if (!allocation.mStream) { |
1114 | 0 | continue; |
1115 | 0 | } |
1116 | 0 | |
1117 | 0 | if (!allocation.mStream->GraphImpl()) { |
1118 | 0 | // The DOMMediaStream that owns allocation.mStream has been cleaned up |
1119 | 0 | // and MediaStream::DestroyImpl() has run in the MSG. This is fine and |
1120 | 0 | // can happen before the MediaManager thread gets to stop capture for |
1121 | 0 | // this allocation. |
1122 | 0 | continue; |
1123 | 0 | } |
1124 | 0 | |
1125 | 0 | if (!allocation.mEnabled) { |
1126 | 0 | continue; |
1127 | 0 | } |
1128 | 0 | |
1129 | 0 | LOG_FRAMES(("Appending %" PRIu32 " frames of packetized audio for allocation %p", |
1130 | 0 | mPacketizerInput->PacketSize(), allocation.mHandle.get())); |
1131 | 0 |
1132 | | #ifdef DEBUG |
1133 | | allocation.mLastCallbackAppendTime = |
1134 | | allocation.mStream->GraphImpl()->IterationEnd(); |
1135 | | #endif |
1136 | | allocation.mLiveFramesAppended = true; |
1137 | 0 |
1138 | 0 | // We already have planar audio data of the right format. Insert into the |
1139 | 0 | // MSG. |
1140 | 0 | MOZ_ASSERT(processedOutputChannelPointers.Length() == aChannels); |
1141 | 0 | RefPtr<SharedBuffer> other = buffer; |
1142 | 0 | segment.AppendFrames(other.forget(), |
1143 | 0 | processedOutputChannelPointersConst, |
1144 | 0 | mPacketizerInput->PacketSize(), |
1145 | 0 | allocation.mPrincipal); |
1146 | 0 | allocation.mStream->AppendToTrack(allocation.mTrackID, &segment); |
1147 | 0 | } |
1148 | 0 | } |
1149 | 0 | } |
1150 | | |
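 | | // InsertInGraph is the unprocessed path: it copies the interleaved input |
 | | // into a SharedBuffer (deinterleaving when multi-channel) and appends it to |
 | | // each enabled allocation's track without touching the APM. |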
1151 | | template<typename T> |
1152 | | void |
1153 | | MediaEngineWebRTCMicrophoneSource::InsertInGraph(const T* aBuffer, |
1154 | | size_t aFrames, |
1155 | | uint32_t aChannels) |
1156 | 0 | { |
1157 | 0 | MutexAutoLock lock(mMutex); |
1158 | 0 |
1159 | 0 | if (mState != kStarted) { |
1160 | 0 | return; |
1161 | 0 | } |
1162 | 0 | |
1163 | 0 | if (MOZ_LOG_TEST(AudioLogModule(), LogLevel::Debug)) { |
1164 | 0 | mTotalFrames += aFrames; |
1165 | 0 | if (mAllocations[0].mStream && |
1166 | 0 | mTotalFrames > mLastLogFrames + |
1167 | 0 | mAllocations[0].mStream->GraphRate()) { // ~ 1 second |
1168 | 0 | MOZ_LOG(AudioLogModule(), LogLevel::Debug, |
1169 | 0 | ("%p: Inserting %zu samples into graph, total frames = %" PRIu64, |
1170 | 0 | (void*)this, aFrames, mTotalFrames)); |
1171 | 0 | mLastLogFrames = mTotalFrames; |
1172 | 0 | } |
1173 | 0 | } |
1174 | 0 |
1175 | 0 | for (Allocation& allocation : mAllocations) { |
1176 | 0 | if (!allocation.mStream) { |
1177 | 0 | continue; |
1178 | 0 | } |
1179 | 0 | |
1180 | 0 | if (!allocation.mStream->GraphImpl()) { |
1181 | 0 | // The DOMMediaStream that owns allocation.mStream has been cleaned up |
1182 | 0 | // and MediaStream::DestroyImpl() has run in the MSG. This is fine and |
1183 | 0 | // can happen before the MediaManager thread gets to stop capture for |
1184 | 0 | // this allocation. |
1185 | 0 | continue; |
1186 | 0 | } |
1187 | 0 | |
1188 | 0 | if (!allocation.mEnabled) { |
1189 | 0 | continue; |
1190 | 0 | } |
1191 | 0 | |
1192 | | #ifdef DEBUG |
1193 | | allocation.mLastCallbackAppendTime = |
1194 | | allocation.mStream->GraphImpl()->IterationEnd(); |
1195 | | #endif |
1196 | 0 | allocation.mLiveFramesAppended = true; |
1197 | 0 |
1198 | 0 | TimeStamp insertTime; |
1199 | 0 | // Make sure we include the stream and the track. |
1200 | 0 | // The 0:1 is a flag to note when we've done the final insert for a given input block. |
1201 | 0 | LogTime(AsyncLatencyLogger::AudioTrackInsertion, |
1202 | 0 | LATENCY_STREAM_ID(allocation.mStream.get(), allocation.mTrackID), |
1203 | 0 | (&allocation != &mAllocations.LastElement()) ? 0 : 1, insertTime); |
1204 | 0 |
1205 | 0 | // Bug 971528 - Support stereo capture in gUM |
1206 | 0 | MOZ_ASSERT(aChannels >= 1 && aChannels <= 8, "Support up to 8 channels"); |
1207 | 0 |
1208 | 0 | AudioSegment segment; |
1209 | 0 | RefPtr<SharedBuffer> buffer = |
1210 | 0 | SharedBuffer::Create(aFrames * aChannels * sizeof(T)); |
1211 | 0 | AutoTArray<const T*, 8> channels; |
1212 | 0 | if (aChannels == 1) { |
1213 | 0 | PodCopy(static_cast<T*>(buffer->Data()), aBuffer, aFrames); |
1214 | 0 | channels.AppendElement(static_cast<T*>(buffer->Data())); |
1215 | 0 | } else { |
1216 | 0 | channels.SetLength(aChannels); |
1217 | 0 | AutoTArray<T*, 8> write_channels; |
1218 | 0 | write_channels.SetLength(aChannels); |
1219 | 0 | T * samples = static_cast<T*>(buffer->Data()); |
1220 | 0 |
1221 | 0 | size_t offset = 0; |
1222 | 0 | for(uint32_t i = 0; i < aChannels; ++i) { |
1223 | 0 | channels[i] = write_channels[i] = samples + offset; |
1224 | 0 | offset += aFrames; |
1225 | 0 | } |
1226 | 0 |
1227 | 0 | DeinterleaveAndConvertBuffer(aBuffer, |
1228 | 0 | aFrames, |
1229 | 0 | aChannels, |
1230 | 0 | write_channels.Elements()); |
1231 | 0 | } |
1232 | 0 |
1233 | 0 | LOG_FRAMES(("Appending %zu frames of raw audio for allocation %p", |
1234 | 0 | aFrames, allocation.mHandle.get())); |
1235 | 0 |
1236 | 0 | MOZ_ASSERT(aChannels == channels.Length()); |
1237 | 0 | segment.AppendFrames(buffer.forget(), channels, aFrames, |
1238 | 0 | allocation.mPrincipal); |
1239 | 0 | segment.GetStartTime(insertTime); |
1240 | 0 |
1241 | 0 | allocation.mStream->AppendToTrack(allocation.mTrackID, &segment); |
1242 | 0 | } |
1243 | 0 | } |
1244 | | |
1245 | | // Called back on GraphDriver thread! |
1246 | | // Note this can be called back after ::Shutdown() |
1247 | | void |
1248 | | MediaEngineWebRTCMicrophoneSource::NotifyInputData(MediaStreamGraphImpl* aGraph, |
1249 | | const AudioDataValue* aBuffer, |
1250 | | size_t aFrames, |
1251 | | TrackRate aRate, |
1252 | | uint32_t aChannels) |
1253 | 0 | { |
1254 | 0 | MOZ_ASSERT(aGraph->CurrentDriver()->OnThread()); |
1255 | 0 | TRACE_AUDIO_CALLBACK(); |
1256 | 0 |
1257 | 0 | { |
1258 | 0 | MutexAutoLock lock(mMutex); |
1259 | 0 | if (mAllocations.IsEmpty()) { |
1260 | 0 | // This can happen because mAllocations is not yet using message passing, and |
1261 | 0 | // is accessed both on the media manager thread and the MSG thread. This is to |
1262 | 0 | // be fixed soon. |
1263 | 0 | // When deallocating, the listener is removed via message passing, while the |
1264 | 0 | // allocation is removed immediately, so there can be a few iterations where |
1265 | 0 | // we need to return early here. |
1266 | 0 | return; |
1267 | 0 | } |
1268 | 0 | } |
1269 | 0 | // If some processing is necessary, packetize and insert in the WebRTC.org |
1270 | 0 | // code. Otherwise, directly insert the mic data in the MSG, bypassing all processing. |
1271 | 0 | if (PassThrough(aGraph)) { |
1272 | 0 | InsertInGraph<AudioDataValue>(aBuffer, aFrames, aChannels); |
1273 | 0 | } else { |
1274 | 0 | PacketizeAndProcess(aGraph, aBuffer, aFrames, aRate, aChannels); |
1275 | 0 | } |
1276 | 0 | } |
1277 | | |
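 | | // Toggling an APM sub-module off and back on discards its internal state. |
 | | // This is used on device changes, where stale echo/gain/noise estimates |
 | | // would otherwise carry over to the new device. |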
1278 | 0 | #define ResetProcessingIfNeeded(_processing) \ |
1279 | 0 | do { \ |
1280 | 0 | bool enabled = mAudioProcessing->_processing()->is_enabled(); \ |
1281 | 0 | \ |
1282 | 0 | if (enabled) { \ |
1283 | 0 | int rv = mAudioProcessing->_processing()->Enable(!enabled); \ |
1284 | 0 | if (rv) { \ |
1285 | 0 | NS_WARNING("Could not reset the status of the " \ |
1286 | 0 | #_processing " on device change."); \ |
1287 | 0 | return; \ |
1288 | 0 | } \ |
1289 | 0 | rv = mAudioProcessing->_processing()->Enable(enabled); \ |
1290 | 0 | if (rv) { \ |
1291 | 0 | NS_WARNING("Could not reset the status of the " \ |
1292 | 0 | #_processing " on device change."); \ |
1293 | 0 | return; \ |
1294 | 0 | } \ |
1295 | 0 | \ |
1296 | 0 | } \ |
1297 | 0 | } while(0) |
1298 | | |
1299 | | void |
1300 | | MediaEngineWebRTCMicrophoneSource::DeviceChanged(MediaStreamGraphImpl* aGraph) |
1301 | 0 | { |
1302 | 0 | MOZ_ASSERT(aGraph->CurrentDriver()->OnThread()); |
1303 | 0 | // Reset some processing |
1304 | 0 | ResetProcessingIfNeeded(gain_control); |
1305 | 0 | ResetProcessingIfNeeded(echo_cancellation); |
1306 | 0 | ResetProcessingIfNeeded(noise_suppression); |
1307 | 0 | } |
1308 | | |
1309 | | void |
1310 | | MediaEngineWebRTCMicrophoneSource::Disconnect(MediaStreamGraphImpl* aGraph) |
1311 | 0 | { |
1312 | 0 | // This method is just for asserts. |
1313 | 0 | MOZ_ASSERT(aGraph->CurrentDriver()->OnThread()); |
1314 | 0 | MOZ_ASSERT(!mListener); |
1315 | 0 | } |
1316 | | |
1317 | | void |
1318 | | MediaEngineWebRTCMicrophoneSource::Shutdown() |
1319 | 0 | { |
1320 | 0 | AssertIsOnOwningThread(); |
1321 | 0 |
1322 | 0 | if (mState == kStarted) { |
1323 | 0 | for (const Allocation& allocation : mAllocations) { |
1324 | 0 | if (allocation.mEnabled) { |
1325 | 0 | Stop(allocation.mHandle); |
1326 | 0 | } |
1327 | 0 | } |
1328 | 0 | MOZ_ASSERT(mState == kStopped); |
1329 | 0 | } |
1330 | 0 |
1331 | 0 | while (!mAllocations.IsEmpty()) { |
1332 | 0 | MOZ_ASSERT(mState == kAllocated || mState == kStopped); |
1333 | 0 | Deallocate(mAllocations[0].mHandle); |
1334 | 0 | } |
1335 | 0 | MOZ_ASSERT(mState == kReleased); |
1336 | 0 | } |
1337 | | |
1338 | | nsString |
1339 | | MediaEngineWebRTCAudioCaptureSource::GetName() const |
1340 | 0 | { |
1341 | 0 | return NS_LITERAL_STRING(u"AudioCapture"); |
1342 | 0 | } |
1343 | | |
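 | | // AudioCapture has no underlying hardware device to key off of, so a fresh |
 | | // random UUID is generated on every call. |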
1344 | | nsCString |
1345 | | MediaEngineWebRTCAudioCaptureSource::GetUUID() const |
1346 | 0 | { |
1347 | 0 | nsID uuid; |
1348 | 0 | char uuidBuffer[NSID_LENGTH]; |
1349 | 0 | nsCString asciiString; |
1350 | 0 | ErrorResult rv; |
1351 | 0 |
1352 | 0 | rv = nsContentUtils::GenerateUUIDInPlace(uuid); |
1353 | 0 | if (rv.Failed()) { |
1354 | 0 | return NS_LITERAL_CSTRING(""); |
1355 | 0 | } |
1356 | 0 |
1357 | 0 | uuid.ToProvidedString(uuidBuffer); |
1358 | 0 | asciiString.AssignASCII(uuidBuffer); |
1359 | 0 |
1360 | 0 | // Remove {} and the null terminator |
1361 | 0 | return nsCString(Substring(asciiString, 1, NSID_LENGTH - 3)); |
1362 | 0 | } |
1363 | | |
1364 | | bool |
1365 | | MediaEngineWebRTCMicrophoneSource::HasEnabledTrack() const |
1366 | 0 | { |
1367 | 0 | AssertIsOnOwningThread(); |
1368 | 0 | for (const Allocation& allocation : mAllocations) { |
1369 | 0 | if (allocation.mEnabled) { |
1370 | 0 | return true; |
1371 | 0 | } |
1372 | 0 | } |
1373 | 0 | return false; |
1374 | 0 | } |
1375 | | |
1376 | | nsresult |
1377 | | MediaEngineWebRTCAudioCaptureSource::SetTrack(const RefPtr<const AllocationHandle>& aHandle, |
1378 | | const RefPtr<SourceMediaStream>& aStream, |
1379 | | TrackID aTrackID, |
1380 | | const PrincipalHandle& aPrincipalHandle) |
1381 | 0 | { |
1382 | 0 | AssertIsOnOwningThread(); |
1383 | 0 | // Nothing to do here. aStream is a placeholder dummy and not exposed. |
1384 | 0 | return NS_OK; |
1385 | 0 | } |
1386 | | |
1387 | | nsresult |
1388 | | MediaEngineWebRTCAudioCaptureSource::Start(const RefPtr<const AllocationHandle>& aHandle) |
1389 | 0 | { |
1390 | 0 | AssertIsOnOwningThread(); |
1391 | 0 | return NS_OK; |
1392 | 0 | } |
1393 | | |
1394 | | nsresult |
1395 | | MediaEngineWebRTCAudioCaptureSource::Stop(const RefPtr<const AllocationHandle>& aHandle) |
1396 | 0 | { |
1397 | 0 | AssertIsOnOwningThread(); |
1398 | 0 | return NS_OK; |
1399 | 0 | } |
1400 | | |
1401 | | nsresult |
1402 | | MediaEngineWebRTCAudioCaptureSource::Reconfigure( |
1403 | | const RefPtr<AllocationHandle>& aHandle, |
1404 | | const dom::MediaTrackConstraints& aConstraints, |
1405 | | const MediaEnginePrefs &aPrefs, |
1406 | | const nsString& aDeviceId, |
1407 | | const char** aOutBadConstraint) |
1408 | 0 | { |
1409 | 0 | MOZ_ASSERT(!aHandle); |
1410 | 0 | return NS_OK; |
1411 | 0 | } |
1412 | | |
1413 | | uint32_t |
1414 | | MediaEngineWebRTCAudioCaptureSource::GetBestFitnessDistance( |
1415 | | const nsTArray<const NormalizedConstraintSet*>& aConstraintSets, |
1416 | | const nsString& aDeviceId) const |
1417 | 0 | { |
1418 | 0 | // There is only one way of capturing audio for now, and it's always adequate. |
1419 | 0 | return 0; |
1420 | 0 | } |
1421 | | |
1422 | | } |