FreeBSD Bugzilla – Attachment 227770 Details for
Bug 257639
www/firefox: broken mic in WebRTC in 91.0
Home
|
New
|
Browse
|
Search
|
[?]
|
Reports
|
Help
|
New Account
|
Log In
Remember
[x]
|
Forgot Password
Login:
[x]
[patch]
v1 (use "git am")
0001-www-firefox-unbreak-WebRTC-microphone-after-2d661b18.patch (text/plain), 49.59 KB, created by
Ghost
on 2021-09-08 19:26:34 UTC
(
hide
)
Description:
v1 (use "git am")
Filename:
MIME Type:
Creator:
Ghost
Created:
2021-09-08 19:26:34 UTC
Size:
49.59 KB
patch
obsolete
>From 1aae4f9ae7e9385a1e12321d535add104a70706a Mon Sep 17 00:00:00 2001 >From: Evgeniy Khramtsov <evgeniy@khramtsov.org> >Date: Wed, 8 Sep 2021 21:42:24 +0300 >Subject: [PATCH] www/firefox: unbreak WebRTC microphone after 2d661b18c2c37a > >PR: 257639 >Approved by: ? >Tested by: ? >--- > www/firefox/Makefile | 2 +- > www/firefox/files/patch-bug1702646 | 1183 ++++++++++++++++++++++++++++ > 2 files changed, 1184 insertions(+), 1 deletion(-) > create mode 100644 www/firefox/files/patch-bug1702646 > >diff --git a/www/firefox/Makefile b/www/firefox/Makefile >index 5bda986e7c34..9772f8c97ba7 100644 >--- a/www/firefox/Makefile >+++ b/www/firefox/Makefile >@@ -2,7 +2,7 @@ > > PORTNAME= firefox > DISTVERSION= 92.0 >-PORTREVISION= 2 >+PORTREVISION= 3 > PORTEPOCH= 2 > CATEGORIES= www > MASTER_SITES= MOZILLA/${PORTNAME}/releases/${DISTVERSION}/source \ >diff --git a/www/firefox/files/patch-bug1702646 b/www/firefox/files/patch-bug1702646 >new file mode 100644 >index 000000000000..8b1c5acf68d5 >--- /dev/null >+++ b/www/firefox/files/patch-bug1702646 >@@ -0,0 +1,1183 @@ >+From 569e6df7ac64e586c7cbdc0ccd2ccf7d34f6984d Mon Sep 17 00:00:00 2001 >+From: Evgeniy Khramtsov <evgeniy@khramtsov.org> >+Date: Wed, 8 Sep 2021 21:37:04 +0300 >+Subject: [PATCH] Revert bug 1702646 >+ >+Revert: >+https://github.com/mozilla/gecko-dev/commit/0a9065018a56a7812b15411051143c2c8f7b1a08 >+https://github.com/mozilla/gecko-dev/commit/d2f2ea20bbc9e1e7bc6858ea19d689624f27055a >+https://github.com/mozilla/gecko-dev/commit/c95e8979b6b673658cdc51313d01db388bc3ff5d >+https://github.com/mozilla/gecko-dev/commit/1f72372ece815dd5c59e9a34ca0754ea84124713 >+https://github.com/mozilla/gecko-dev/commit/81d623f7fc5f4e9acbe31203bc9a0868078dbe09 >+ >+See: >+https://bugzilla.mozilla.org/show_bug.cgi?id=1702646 >+https://bugzilla.mozilla.org/show_bug.cgi?id=1725810 >+https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=257639 >+--- >+ dom/media/AudioSegment.h | 45 --- >+ dom/media/MediaTrackGraph.cpp | 369 +++---------- 
>+ dom/media/MediaTrackGraph.h | 16 +- >+ dom/media/MediaTrackGraphImpl.h | 123 +------ >+ dom/media/gtest/TestAudioInputProcessing.cpp | 10 +- >+ dom/media/webrtc/MediaEngineWebRTCAudio.cpp | 127 +++---- >+ dom/media/webrtc/MediaEngineWebRTCAudio.h | 19 +- >+ 7 files changed, 133 insertions(+), 576 deletions(-) >+ >+diff --git dom/media/AudioSegment.h dom/media/AudioSegment.h >+index 4ec71c92bc..a0aee574e5 100644 >+--- dom/media/AudioSegment.h >++++ dom/media/AudioSegment.h >+@@ -377,51 +377,6 @@ class AudioSegment : public MediaSegmentBase<AudioSegment, AudioChunk> { >+ chunk->mBufferFormat = AUDIO_FORMAT_S16; >+ chunk->mPrincipalHandle = aPrincipalHandle; >+ } >+- void AppendSegment(const AudioSegment* aSegment, >+- const PrincipalHandle& aPrincipalHandle) { >+- MOZ_ASSERT(aSegment); >+- >+- for (const AudioChunk& c : aSegment->mChunks) { >+- AudioChunk* chunk = AppendChunk(c.GetDuration()); >+- chunk->mBuffer = c.mBuffer; >+- chunk->mChannelData = c.mChannelData; >+- chunk->mBufferFormat = c.mBufferFormat; >+- chunk->mPrincipalHandle = aPrincipalHandle; >+- } >+- } >+- template <typename T> >+- void AppendFromInterleavedBuffer(const T* aBuffer, size_t aFrames, >+- uint32_t aChannels, >+- const PrincipalHandle& aPrincipalHandle) { >+- MOZ_ASSERT(aChannels >= 1 && aChannels <= 8, "Support up to 8 channels"); >+- >+- CheckedInt<size_t> bufferSize(sizeof(T)); >+- bufferSize *= aFrames; >+- bufferSize *= aChannels; >+- RefPtr<SharedBuffer> buffer = SharedBuffer::Create(bufferSize); >+- AutoTArray<const T*, 8> channels; >+- if (aChannels == 1) { >+- PodCopy(static_cast<T*>(buffer->Data()), aBuffer, aFrames); >+- channels.AppendElement(static_cast<T*>(buffer->Data())); >+- } else { >+- channels.SetLength(aChannels); >+- AutoTArray<T*, 8> writeChannels; >+- writeChannels.SetLength(aChannels); >+- T* samples = static_cast<T*>(buffer->Data()); >+- >+- size_t offset = 0; >+- for (uint32_t i = 0; i < aChannels; ++i) { >+- channels[i] = writeChannels[i] = samples + offset; 
>+- offset += aFrames; >+- } >+- >+- DeinterleaveAndConvertBuffer(aBuffer, aFrames, aChannels, >+- writeChannels.Elements()); >+- } >+- >+- MOZ_ASSERT(aChannels == channels.Length()); >+- AppendFrames(buffer.forget(), channels, aFrames, aPrincipalHandle); >+- } >+ // Consumes aChunk, and returns a pointer to the persistent copy of aChunk >+ // in the segment. >+ AudioChunk* AppendAndConsumeChunk(AudioChunk&& aChunk) { >+diff --git dom/media/MediaTrackGraph.cpp dom/media/MediaTrackGraph.cpp >+index 3b5d376028..14f918558d 100644 >+--- dom/media/MediaTrackGraph.cpp >++++ dom/media/MediaTrackGraph.cpp >+@@ -62,157 +62,6 @@ LazyLogModule gMediaTrackGraphLog("MediaTrackGraph"); >+ */ >+ static nsTHashMap<nsUint32HashKey, MediaTrackGraphImpl*> gGraphs; >+ >+-void NativeInputTrack::AudioDataBuffers::SetOutputData(AudioDataValue* aBuffer, >+- size_t aFrames, >+- uint32_t aChannels, >+- TrackRate aRate) { >+- mOutputData = Some(BufferInfo{aBuffer, aFrames, aChannels, aRate}); >+-} >+- >+-void NativeInputTrack::AudioDataBuffers::SetInputData(AudioDataValue* aBuffer, >+- size_t aFrames, >+- uint32_t aChannels, >+- TrackRate aRate) { >+- mInputData = Some(BufferInfo{aBuffer, aFrames, aChannels, aRate}); >+-} >+- >+-void NativeInputTrack::AudioDataBuffers::Clear(Scope aScope) { >+- if (aScope & Scope::Input) { >+- mInputData.take(); >+- } >+- >+- if (aScope & Scope::Output) { >+- mOutputData.take(); >+- } >+-} >+- >+-NativeInputTrack* NativeInputTrack::Create(MediaTrackGraphImpl* aGraph) { >+- MOZ_ASSERT(NS_IsMainThread()); >+- >+- NativeInputTrack* track = new NativeInputTrack(aGraph->GraphRate()); >+- aGraph->AddTrack(track); >+- return track; >+-} >+- >+-size_t NativeInputTrack::AddUser() { >+- MOZ_ASSERT(NS_IsMainThread()); >+- mUserCount += 1; >+- return mUserCount; >+-} >+- >+-size_t NativeInputTrack::RemoveUser() { >+- MOZ_ASSERT(NS_IsMainThread()); >+- MOZ_ASSERT(mUserCount > 0); >+- mUserCount -= 1; >+- return mUserCount; >+-} >+- >+-void NativeInputTrack::DestroyImpl() 
{ >+- MOZ_ASSERT(mGraph->OnGraphThreadOrNotRunning()); >+- if (mDataHolder) { >+- mDataHolder->Clear(static_cast<AudioDataBuffers::Scope>( >+- AudioDataBuffers::Scope::Input | AudioDataBuffers::Scope::Output)); >+- } >+- ProcessedMediaTrack::DestroyImpl(); >+-} >+- >+-void NativeInputTrack::ProcessInput(GraphTime aFrom, GraphTime aTo, >+- uint32_t aFlags) { >+- MOZ_ASSERT(mGraph->OnGraphThreadOrNotRunning()); >+- TRACE_COMMENT("NativeInputTrack %p", this); >+- >+- if (!mDataHolder || !mDataHolder->mInputData) { >+- return; >+- } >+- >+- // One NotifyInputData might have multiple following ProcessInput calls, but >+- // we only process one input per NotifyInputData call. >+- NativeInputTrack::AudioDataBuffers::BufferInfo inputInfo = >+- mDataHolder->mInputData.extract(); >+- >+- MOZ_ASSERT(mInputChannels == inputInfo.mChannels); >+- MOZ_ASSERT(inputInfo.mChannels >= 1 && inputInfo.mChannels <= 8, >+- "Support up to 8 channels"); >+- >+- GetData<AudioSegment>()->Clear(); >+- GetData<AudioSegment>()->AppendFromInterleavedBuffer( >+- inputInfo.mBuffer, inputInfo.mFrames, inputInfo.mChannels, >+- PRINCIPAL_HANDLE_NONE); >+- >+- LOG(LogLevel::Verbose, >+- ("NativeInputTrack %p Appending %zu frames of raw audio", this, >+- inputInfo.mFrames)); >+-} >+- >+-uint32_t NativeInputTrack::NumberOfChannels() const { >+- MOZ_ASSERT(mGraph->OnGraphThreadOrNotRunning()); >+- return mInputChannels; >+-} >+- >+-void NativeInputTrack::InitDataHolderIfNeeded() { >+- MOZ_ASSERT(mGraph->OnGraphThreadOrNotRunning()); >+- if (!mDataHolder) { >+- mDataHolder.emplace(); >+- } >+-} >+- >+-void NativeInputTrack::NotifyOutputData(MediaTrackGraphImpl* aGraph, >+- AudioDataValue* aBuffer, size_t aFrames, >+- TrackRate aRate, uint32_t aChannels) { >+- MOZ_ASSERT(aGraph->OnGraphThreadOrNotRunning()); >+- MOZ_ASSERT(aGraph == mGraph, "Receive output data from another graph"); >+- MOZ_ASSERT(mDataHolder); >+- mDataHolder->SetOutputData(aBuffer, aFrames, aChannels, aRate); >+- for (auto& listener : 
mDataUsers) { >+- listener->NotifyOutputData(aGraph, mDataHolder->mOutputData.value()); >+- } >+-} >+- >+-void NativeInputTrack::NotifyInputStopped(MediaTrackGraphImpl* aGraph) { >+- MOZ_ASSERT(aGraph->OnGraphThreadOrNotRunning()); >+- MOZ_ASSERT(aGraph == mGraph, >+- "Receive input stopped signal from another graph"); >+- MOZ_ASSERT(mDataHolder); >+- mInputChannels = 0; >+- mDataHolder->Clear(AudioDataBuffers::Scope::Input); >+- for (auto& listener : mDataUsers) { >+- listener->NotifyInputStopped(aGraph); >+- } >+-} >+- >+-void NativeInputTrack::NotifyInputData(MediaTrackGraphImpl* aGraph, >+- const AudioDataValue* aBuffer, >+- size_t aFrames, TrackRate aRate, >+- uint32_t aChannels, >+- uint32_t aAlreadyBuffered) { >+- MOZ_ASSERT(aGraph->OnGraphThreadOrNotRunning()); >+- MOZ_ASSERT(aGraph == mGraph, "Receive input data from another graph"); >+- >+- MOZ_ASSERT(mDataHolder); >+- MOZ_ASSERT(aChannels); >+- if (!mInputChannels) { >+- mInputChannels = aChannels; >+- } >+- mDataHolder->SetInputData(const_cast<AudioDataValue*>(aBuffer), aFrames, >+- aChannels, aRate); >+- for (auto& listener : mDataUsers) { >+- listener->NotifyInputData(aGraph, mDataHolder->mInputData.value(), >+- aAlreadyBuffered); >+- } >+-} >+- >+-void NativeInputTrack::DeviceChanged(MediaTrackGraphImpl* aGraph) { >+- MOZ_ASSERT(aGraph->OnGraphThreadOrNotRunning()); >+- MOZ_ASSERT(aGraph == mGraph, >+- "Receive device changed signal from another graph"); >+- MOZ_ASSERT(mDataHolder); >+- mDataHolder->Clear(static_cast<AudioDataBuffers::Scope>( >+- AudioDataBuffers::Scope::Input | AudioDataBuffers::Scope::Output)); >+- for (auto& listener : mDataUsers) { >+- listener->DeviceChanged(aGraph); >+- } >+-} >+- >+ MediaTrackGraphImpl::~MediaTrackGraphImpl() { >+ MOZ_ASSERT(mTracks.IsEmpty() && mSuspendedTracks.IsEmpty(), >+ "All tracks should have been destroyed by messages from the main " >+@@ -430,7 +279,7 @@ bool MediaTrackGraphImpl::AudioTrackPresent() { >+ // XXX For some reason, there are race 
conditions when starting an audio input >+ // where we find no active audio tracks. In any case, if we have an active >+ // audio input we should not allow a switch back to a SystemClockDriver >+- if (!audioTrackPresent && mDeviceTrackMap.Count() != 0) { >++ if (!audioTrackPresent && mInputDeviceUsers.Count() != 0) { >+ NS_WARNING("No audio tracks, but full-duplex audio is enabled!!!!!"); >+ audioTrackPresent = true; >+ } >+@@ -779,51 +628,23 @@ TrackTime MediaTrackGraphImpl::PlayAudio(AudioMixer* aMixer, >+ return ticksWritten; >+ } >+ >+-ProcessedMediaTrack* MediaTrackGraphImpl::GetDeviceTrack( >+- CubebUtils::AudioDeviceID aID) { >+- MOZ_ASSERT(NS_IsMainThread()); >+- >+- RefPtr<NativeInputTrack>& t = mDeviceTracks.LookupOrInsertWith( >+- aID, [self = RefPtr<MediaTrackGraphImpl>(this), aID] { >+- NativeInputTrack* track = NativeInputTrack::Create(self); >+- LOG(LogLevel::Debug, >+- ("Create NativeInputTrack %p for device %p", track, aID)); >+- return do_AddRef(track); >+- }); >+- >+- return t.get(); >+-} >+- >+ void MediaTrackGraphImpl::OpenAudioInputImpl(CubebUtils::AudioDeviceID aID, >+- AudioDataListener* aListener, >+- NativeInputTrack* aInputTrack) { >++ AudioDataListener* aListener) { >+ MOZ_ASSERT(OnGraphThread()); >+- LOG(LogLevel::Debug, >+- ("%p OpenAudioInputImpl: NativeInputTrack %p for device %p", this, >+- aInputTrack, aID)); >+- >+- if (mDeviceTrackMap.Count() > 0 && !mDeviceTrackMap.Get(aID, nullptr)) { >++ // Only allow one device per MTG (hence, per document), but allow opening a >++ // device multiple times >++ nsTArray<RefPtr<AudioDataListener>>& listeners = >++ mInputDeviceUsers.LookupOrInsert(aID); >++ if (listeners.IsEmpty() && mInputDeviceUsers.Count() > 1) { >+ // We don't support opening multiple input device in a graph for now. >+- LOG(LogLevel::Debug, ("%p Device %p is not native device. 
Cannot open %p!", >+- this, aID, aInputTrack)); >++ listeners.RemoveElement(aID); >+ return; >+ } >+ >+- LOG(LogLevel::Debug, >+- ("%p Device %p is native device. Open %p", this, aID, aInputTrack)); >+- >+- // Only allow one device per MTG (hence, per document), but allow opening a >+- // device multiple times >+- NativeInputTrack* track = mDeviceTrackMap.LookupOrInsertWith( >+- aID, [inputTrack = RefPtr<NativeInputTrack>(aInputTrack)] { >+- return inputTrack.get(); >+- }); >+- MOZ_ASSERT(track); >+- track->InitDataHolderIfNeeded(); >+- >+- nsTArray<RefPtr<AudioDataListener>>& listeners = track->mDataUsers; >+ MOZ_ASSERT(!listeners.Contains(aListener), "Don't add a listener twice."); >++ >+ listeners.AppendElement(aListener); >++ >+ if (listeners.Length() == 1) { // first open for this device >+ mInputDeviceID = aID; >+ // Switch Drivers since we're adding input (to input-only or full-duplex) >+@@ -844,60 +665,28 @@ nsresult MediaTrackGraphImpl::OpenAudioInput(CubebUtils::AudioDeviceID aID, >+ class Message : public ControlMessage { >+ public: >+ Message(MediaTrackGraphImpl* aGraph, CubebUtils::AudioDeviceID aID, >+- AudioDataListener* aListener, NativeInputTrack* aInputTrack) >++ AudioDataListener* aListener) >+ : ControlMessage(nullptr), >+ mGraph(aGraph), >+ mID(aID), >+- mListener(aListener), >+- mInputTrack(aInputTrack) {} >+- void Run() override { >+- mGraph->OpenAudioInputImpl(mID, mListener, mInputTrack); >+- } >++ mListener(aListener) {} >++ void Run() override { mGraph->OpenAudioInputImpl(mID, mListener); } >+ MediaTrackGraphImpl* mGraph; >+ CubebUtils::AudioDeviceID mID; >+ RefPtr<AudioDataListener> mListener; >+- NativeInputTrack* mInputTrack; >+ }; >+- >+- auto result = mDeviceTracks.Lookup(aID); >+- MOZ_ASSERT(result); >+- MOZ_ASSERT(result.Data()); >+- size_t users = result.Data()->AddUser(); >+- >+- LOG(LogLevel::Debug, >+- ("%p OpenInput: NativeInputTrack %p for device %p has %zu users now", >+- this, result.Data().get(), aID, users)); >+- >+ // 
XXX Check not destroyed! >+- this->AppendMessage( >+- MakeUnique<Message>(this, aID, aListener, result.Data().get())); >++ this->AppendMessage(MakeUnique<Message>(this, aID, aListener)); >+ return NS_OK; >+ } >+ >+ void MediaTrackGraphImpl::CloseAudioInputImpl(CubebUtils::AudioDeviceID aID, >+- AudioDataListener* aListener, >+- NativeInputTrack* aInputTrack) { >++ AudioDataListener* aListener) { >+ MOZ_ASSERT(OnGraphThread()); >+ >+- LOG(LogLevel::Debug, >+- ("%p CloseAudioInputImpl: NativeInputTrack %p for device %p", this, >+- aInputTrack, aID)); >+- >+- auto result = mDeviceTrackMap.Lookup(aID); >+- if (!result) { >+- LOG(LogLevel::Debug, >+- ("%p Device %p is not native device. Do nothing for %p", this, aID, >+- aInputTrack)); >+- return; >+- } >+- >+- LOG(LogLevel::Debug, >+- ("%p Device %p is native device. Close %p", this, aID, aInputTrack)); >+- >+- NativeInputTrack* track = result.Data(); >+- MOZ_ASSERT(track == aInputTrack); >+- nsTArray<RefPtr<AudioDataListener>>& listeners = track->mDataUsers; >+- bool wasPresent = listeners.RemoveElement(aListener); >++ auto listeners = mInputDeviceUsers.Lookup(aID); >++ MOZ_ASSERT(listeners); >++ bool wasPresent = listeners->RemoveElement(aListener); >+ MOZ_ASSERT(wasPresent); >+ >+ if (wasPresent) { >+@@ -907,22 +696,13 @@ void MediaTrackGraphImpl::CloseAudioInputImpl(CubebUtils::AudioDeviceID aID, >+ // Breaks the cycle between the MTG and the listener. 
>+ aListener->Disconnect(this); >+ >+- if (!listeners.IsEmpty()) { >+- LOG(LogLevel::Debug, >+- ("%p NativeInputTrack %p for device %p still has consumer", this, track, >+- aID)); >++ if (!listeners->IsEmpty()) { >++ // There is still a consumer for this audio input device >+ return; >+ } >+ >+- LOG(LogLevel::Debug, >+- ("%p NativeInputTrack %p for device %p has no consumer now", this, track, >+- aID)); >+- >+ mInputDeviceID = nullptr; // reset to default >+- >+- bool r = mDeviceTrackMap.Remove(aID); >+- MOZ_ASSERT(r); >+- Unused << r; >++ Unused << mInputDeviceUsers.Remove(aID); >+ >+ // Switch Drivers since we're adding or removing an input (to nothing/system >+ // or output only) >+@@ -989,55 +769,17 @@ void MediaTrackGraphImpl::CloseAudioInput(CubebUtils::AudioDeviceID aID, >+ class Message : public ControlMessage { >+ public: >+ Message(MediaTrackGraphImpl* aGraph, CubebUtils::AudioDeviceID aID, >+- AudioDataListener* aListener, NativeInputTrack* aInputTrack) >++ AudioDataListener* aListener) >+ : ControlMessage(nullptr), >+ mGraph(aGraph), >+ mID(aID), >+- mListener(aListener), >+- mInputTrack(aInputTrack) {} >+- void Run() override { >+- mGraph->CloseAudioInputImpl(mID, mListener, mInputTrack); >+- } >++ mListener(aListener) {} >++ void Run() override { mGraph->CloseAudioInputImpl(mID, mListener); } >+ MediaTrackGraphImpl* mGraph; >+ CubebUtils::AudioDeviceID mID; >+ RefPtr<AudioDataListener> mListener; >+- NativeInputTrack* mInputTrack; >+ }; >+- >+- auto result = mDeviceTracks.Lookup(aID); >+- MOZ_ASSERT(result); >+- MOZ_ASSERT(result.Data()); >+- size_t users = result.Data()->RemoveUser(); >+- >+- LOG(LogLevel::Debug, >+- ("%p: CloseInput: NativeInputTrack %p for device %p has %zu users now", >+- this, result.Data().get(), aID, users)); >+- >+- this->AppendMessage( >+- MakeUnique<Message>(this, aID, aListener, result.Data().get())); >+- >+- // Remove the NativeInputTrack from mDeviceTracks if no AudioInputTrack needs >+- // it, so NativeInputTrack::Create 
can create a new NativeInputTrack when it's >+- // called for the same aID. The paired value in mDeviceTrackMap will be >+- // removed later in CloseAudioInputImpl. The NativeInputTrack will still be >+- // alive after it's removed from mDeviceTracks since AddTrack called via >+- // NativeInputTrack::Create will call NS_ADDREF to it and it will be alive >+- // until its NS_RELEASE is called via NativeInputTrack::DestroyImpl(). >+- // Note that NativeInputTrack::Destroy() must be called after the above >+- // message is appended so NativeInputTrack::DestroyImpl() will be run after >+- // CloseAudioInputImpl(). Therefore, the NativeInputTrack will be alive before >+- // it's removed from mDeviceTrackMap in CloseAudioInputImpl() >+- if (users == 0) { >+- LOG(LogLevel::Debug, >+- ("%p: CloseInput: NativeInputTrack %p for device %p is removed from " >+- "mDeviceTracks", >+- this, result.Data().get(), aID)); >+- >+- result.Data()->Destroy(); >+- bool r = mDeviceTracks.Remove(aID); >+- MOZ_ASSERT(r); >+- Unused << r; >+- } >++ this->AppendMessage(MakeUnique<Message>(this, aID, aListener)); >+ } >+ >+ // All AudioInput listeners get the same speaker data (at least for now). >+@@ -1049,7 +791,7 @@ void MediaTrackGraphImpl::NotifyOutputData(AudioDataValue* aBuffer, >+ // device. >+ // The absence of an input consumer is enough to know we need to bail out >+ // here. >+- if (!mDeviceTrackMap.Contains(mInputDeviceID)) { >++ if (!mInputDeviceUsers.Contains(mInputDeviceID)) { >+ return; >+ } >+ #else >+@@ -1059,16 +801,14 @@ void MediaTrackGraphImpl::NotifyOutputData(AudioDataValue* aBuffer, >+ #endif >+ // When/if we decide to support multiple input devices per graph, this needs >+ // to loop over them. 
>+- auto result = mDeviceTrackMap.Lookup(mInputDeviceID); >+- MOZ_ASSERT(result); >+- NativeInputTrack* track = result.Data(); >+- MOZ_ASSERT(track); >+- track->NotifyOutputData(this, aBuffer, aFrames, aRate, aChannels); >++ for (auto& listener : *mInputDeviceUsers.Lookup(mInputDeviceID)) { >++ listener->NotifyOutputData(this, aBuffer, aFrames, aRate, aChannels); >++ } >+ } >+ >+ void MediaTrackGraphImpl::NotifyInputStopped() { >+ #ifdef ANDROID >+- if (!mDeviceTrackMap.Contains(mInputDeviceID)) { >++ if (!mInputDeviceUsers.Contains(mInputDeviceID)) { >+ return; >+ } >+ #else >+@@ -1076,11 +816,9 @@ void MediaTrackGraphImpl::NotifyInputStopped() { >+ return; >+ } >+ #endif >+- auto result = mDeviceTrackMap.Lookup(mInputDeviceID); >+- MOZ_ASSERT(result); >+- NativeInputTrack* track = result.Data(); >+- MOZ_ASSERT(track); >+- track->NotifyInputStopped(this); >++ for (auto& listener : *mInputDeviceUsers.Lookup(mInputDeviceID)) { >++ listener->NotifyInputStopped(this); >++ } >+ } >+ >+ void MediaTrackGraphImpl::NotifyInputData(const AudioDataValue* aBuffer, >+@@ -1088,7 +826,7 @@ void MediaTrackGraphImpl::NotifyInputData(const AudioDataValue* aBuffer, >+ uint32_t aChannels, >+ uint32_t aAlreadyBuffered) { >+ #ifdef ANDROID >+- if (!mDeviceTrackMap.Contains(mInputDeviceID)) { >++ if (!mInputDeviceUsers.Contains(mInputDeviceID)) { >+ return; >+ } >+ #else >+@@ -1100,18 +838,17 @@ void MediaTrackGraphImpl::NotifyInputData(const AudioDataValue* aBuffer, >+ return; >+ } >+ #endif >+- auto result = mDeviceTrackMap.Lookup(mInputDeviceID); >+- MOZ_ASSERT(result); >+- NativeInputTrack* track = result.Data(); >+- MOZ_ASSERT(track); >+- track->NotifyInputData(this, aBuffer, aFrames, aRate, aChannels, >+- aAlreadyBuffered); >++ for (auto& listener : *mInputDeviceUsers.Lookup(mInputDeviceID)) { >++ listener->NotifyInputData(this, aBuffer, aFrames, aRate, aChannels, >++ aAlreadyBuffered); >++ } >+ } >+ >+ void MediaTrackGraphImpl::DeviceChangedImpl() { >+ 
MOZ_ASSERT(OnGraphThread()); >++ >+ #ifdef ANDROID >+- if (!mDeviceTrackMap.Contains(mInputDeviceID)) { >++ if (!mInputDeviceUsers.Contains(mInputDeviceID)) { >+ return; >+ } >+ #else >+@@ -1119,11 +856,10 @@ void MediaTrackGraphImpl::DeviceChangedImpl() { >+ return; >+ } >+ #endif >+- auto result = mDeviceTrackMap.Lookup(mInputDeviceID); >+- MOZ_ASSERT(result); >+- NativeInputTrack* track = result.Data(); >+- MOZ_ASSERT(track); >+- track->DeviceChanged(this); >++ >++ for (auto& listener : *mInputDeviceUsers.Lookup(mInputDeviceID)) { >++ listener->DeviceChanged(this); >++ } >+ } >+ >+ void MediaTrackGraphImpl::SetMaxOutputChannelCount(uint32_t aMaxChannelCount) { >+@@ -1361,10 +1097,6 @@ void MediaTrackGraphImpl::ProduceDataForTracksBlockByBlock( >+ for (uint32_t i = aTrackIndex; i < mTracks.Length(); ++i) { >+ ProcessedMediaTrack* pt = mTracks[i]->AsProcessedTrack(); >+ if (pt) { >+- if (pt->AsNativeInputTrack()) { >+- // NativeInputTracks are processed in Process. Skip them. >+- continue; >+- } >+ pt->ProcessInput( >+ mProcessedTime, next, >+ (next == mStateComputedTime) ? 
ProcessedMediaTrack::ALLOW_END : 0); >+@@ -1509,23 +1241,12 @@ void MediaTrackGraphImpl::Process(AudioMixer* aMixer) { >+ bool doneAllProducing = false; >+ const GraphTime oldProcessedTime = mProcessedTime; >+ >+- // Process NativeInputTracks first since they are data source of other tracks >+- for (uint32_t i = 0; i < mTracks.Length(); ++i) { >+- NativeInputTrack* track = mTracks[i]->AsNativeInputTrack(); >+- if (track) { >+- track->ProcessInput(mProcessedTime, mStateComputedTime, >+- ProcessedMediaTrack::ALLOW_END); >+- } >+- } >+- >+ // Figure out what each track wants to do >+ for (uint32_t i = 0; i < mTracks.Length(); ++i) { >+ MediaTrack* track = mTracks[i]; >+ if (!doneAllProducing) { >+ ProcessedMediaTrack* pt = track->AsProcessedTrack(); >+- // NativeInputTrack::ProcessInput are called above so we can skip them >+- bool isNativeInputTrack = track->AsNativeInputTrack(); >+- if (pt && !isNativeInputTrack) { >++ if (pt) { >+ AudioNodeTrack* n = track->AsAudioNodeTrack(); >+ if (n) { >+ #ifdef DEBUG >+diff --git dom/media/MediaTrackGraph.h dom/media/MediaTrackGraph.h >+index d85cb502b9..1c34060abb 100644 >+--- dom/media/MediaTrackGraph.h >++++ dom/media/MediaTrackGraph.h >+@@ -97,7 +97,6 @@ class MediaTrack; >+ class MediaTrackGraph; >+ class MediaTrackGraphImpl; >+ class MediaTrackListener; >+-class NativeInputTrack; >+ class ProcessedMediaTrack; >+ class SourceMediaTrack; >+ >+@@ -107,14 +106,6 @@ class AudioDataListenerInterface { >+ virtual ~AudioDataListenerInterface() = default; >+ >+ public: >+- // Information for the interleaved buffer coming from the audio callbacks >+- struct BufferInfo { >+- AudioDataValue* mBuffer = nullptr; >+- size_t mFrames = 0; >+- uint32_t mChannels = 0; >+- TrackRate mRate = 0; >+- }; >+- >+ /* These are for cubeb audio input & output streams: */ >+ /** >+ * Output data to speakers, for use as the "far-end" data for echo >+@@ -122,7 +113,8 @@ class AudioDataListenerInterface { >+ * chunks. 
>+ */ >+ virtual void NotifyOutputData(MediaTrackGraphImpl* aGraph, >+- BufferInfo aInfo) = 0; >++ AudioDataValue* aBuffer, size_t aFrames, >++ TrackRate aRate, uint32_t aChannels) = 0; >+ /** >+ * An AudioCallbackDriver with an input stream signaling that it has stopped >+ * for any reason and the AudioDataListener will not be notified of input data >+@@ -134,7 +126,8 @@ class AudioDataListenerInterface { >+ * guaranteed to be in any particular size chunks. >+ */ >+ virtual void NotifyInputData(MediaTrackGraphImpl* aGraph, >+- const BufferInfo aInfo, >++ const AudioDataValue* aBuffer, size_t aFrames, >++ TrackRate aRate, uint32_t aChannels, >+ uint32_t aAlreadyBuffered) = 0; >+ >+ /** >+@@ -389,7 +382,6 @@ class MediaTrack : public mozilla::LinkedListElement<MediaTrack> { >+ virtual ForwardedInputTrack* AsForwardedInputTrack() { return nullptr; } >+ virtual CrossGraphTransmitter* AsCrossGraphTransmitter() { return nullptr; } >+ virtual CrossGraphReceiver* AsCrossGraphReceiver() { return nullptr; } >+- virtual NativeInputTrack* AsNativeInputTrack() { return nullptr; } >+ >+ // These Impl methods perform the core functionality of the control methods >+ // above, on the media graph thread. >+diff --git dom/media/MediaTrackGraphImpl.h dom/media/MediaTrackGraphImpl.h >+index 8baeb25a7d..d0c3ac8499 100644 >+--- dom/media/MediaTrackGraphImpl.h >++++ dom/media/MediaTrackGraphImpl.h >+@@ -11,7 +11,6 @@ >+ #include "AudioMixer.h" >+ #include "GraphDriver.h" >+ #include "mozilla/Atomics.h" >+-#include "mozilla/Maybe.h" >+ #include "mozilla/Monitor.h" >+ #include "mozilla/TimeStamp.h" >+ #include "mozilla/UniquePtr.h" >+@@ -35,79 +34,6 @@ template <typename T> >+ class LinkedList; >+ class GraphRunner; >+ >+-// MediaTrack subclass storing the raw audio data from microphone. 
>+-class NativeInputTrack : public ProcessedMediaTrack { >+- ~NativeInputTrack() = default; >+- explicit NativeInputTrack(TrackRate aSampleRate) >+- : ProcessedMediaTrack(aSampleRate, MediaSegment::AUDIO, >+- new AudioSegment()) {} >+- >+- public: >+- // Main Thread API >+- static NativeInputTrack* Create(MediaTrackGraphImpl* aGraph); >+- >+- size_t AddUser(); >+- size_t RemoveUser(); >+- >+- // Graph Thread APIs, for ProcessedMediaTrack >+- void DestroyImpl() override; >+- void ProcessInput(GraphTime aFrom, GraphTime aTo, uint32_t aFlags) override; >+- uint32_t NumberOfChannels() const override; >+- >+- // Graph thread APIs: Redirect calls from GraphDriver to mDataUsers >+- void NotifyOutputData(MediaTrackGraphImpl* aGraph, AudioDataValue* aBuffer, >+- size_t aFrames, TrackRate aRate, uint32_t aChannels); >+- void NotifyInputStopped(MediaTrackGraphImpl* aGraph); >+- void NotifyInputData(MediaTrackGraphImpl* aGraph, >+- const AudioDataValue* aBuffer, size_t aFrames, >+- TrackRate aRate, uint32_t aChannels, >+- uint32_t aAlreadyBuffered); >+- void DeviceChanged(MediaTrackGraphImpl* aGraph); >+- >+- // Other Graph Thread APIs >+- void InitDataHolderIfNeeded(); >+- >+- // Any thread >+- NativeInputTrack* AsNativeInputTrack() override { return this; } >+- >+- public: >+- // Only accessed on the graph thread. 
>+- nsTArray<RefPtr<AudioDataListener>> mDataUsers; >+- >+- private: >+- class AudioDataBuffers { >+- public: >+- AudioDataBuffers() = default; >+- void SetOutputData(AudioDataValue* aBuffer, size_t aFrames, >+- uint32_t aChannels, TrackRate aRate); >+- void SetInputData(AudioDataValue* aBuffer, size_t aFrames, >+- uint32_t aChannels, TrackRate aRate); >+- >+- enum Scope : unsigned char { >+- Input = 0x01, >+- Output = 0x02, >+- }; >+- void Clear(Scope aScope); >+- >+- typedef AudioDataListenerInterface::BufferInfo BufferInfo; >+- // Storing the audio output data coming from NotifyOutputData >+- Maybe<BufferInfo> mOutputData; >+- // Storing the audio input data coming from NotifyInputData >+- Maybe<BufferInfo> mInputData; >+- }; >+- >+- // Only accessed on the graph thread. >+- // Storing the audio data coming from GraphDriver directly. >+- Maybe<AudioDataBuffers> mDataHolder; >+- >+- // Only accessed on the graph thread. >+- uint32_t mInputChannels = 0; >+- >+- // Only accessed on the main thread. >+- // When this becomes zero, this NativeInputTrack is no longer needed. >+- int32_t mUserCount = 0; >+-}; >+- >+ /** >+ * A per-track update message passed from the media graph thread to the >+ * main thread. >+@@ -462,28 +388,20 @@ class MediaTrackGraphImpl : public MediaTrackGraph, >+ }; >+ TrackTime PlayAudio(AudioMixer* aMixer, const TrackKeyAndVolume& aTkv, >+ GraphTime aPlayedTime); >+- >+- /* Called on the main thread when AudioInputTrack requests audio data from an >+- * input device aID. */ >+- ProcessedMediaTrack* GetDeviceTrack(CubebUtils::AudioDeviceID aID); >+- >+ /* Runs off a message on the graph thread when something requests audio from >+ * an input audio device of ID aID, and delivers the input audio frames to >+ * aListener. 
*/ >+ void OpenAudioInputImpl(CubebUtils::AudioDeviceID aID, >+- AudioDataListener* aListener, >+- NativeInputTrack* aInputTrack); >++ AudioDataListener* aListener); >+ /* Called on the main thread when something requests audio from an input >+ * audio device aID. */ >+ virtual nsresult OpenAudioInput(CubebUtils::AudioDeviceID aID, >+ AudioDataListener* aListener) override; >+- >+ /* Runs off a message on the graph when input audio from aID is not needed >+ * anymore, for a particular track. It can be that other tracks still need >+ * audio from this audio input device. */ >+ void CloseAudioInputImpl(CubebUtils::AudioDeviceID aID, >+- AudioDataListener* aListener, >+- NativeInputTrack* aInputTrack); >++ AudioDataListener* aListener); >+ /* Called on the main thread when input audio from aID is not needed >+ * anymore, for a particular track. It can be that other tracks still need >+ * audio from this audio input device. */ >+@@ -578,12 +496,12 @@ class MediaTrackGraphImpl : public MediaTrackGraph, >+ MOZ_ASSERT(OnGraphThreadOrNotRunning()); >+ >+ #ifdef ANDROID >+- if (!mDeviceTrackMap.Contains(mInputDeviceID)) { >++ if (!mInputDeviceUsers.Contains(mInputDeviceID)) { >+ return 0; >+ } >+ #else >+ if (!mInputDeviceID) { >+- MOZ_ASSERT(mDeviceTrackMap.Count() == 0, >++ MOZ_ASSERT(mInputDeviceUsers.Count() == 0, >+ "If running on a platform other than android," >+ "an explicit device id should be present"); >+ return 0; >+@@ -592,12 +510,7 @@ class MediaTrackGraphImpl : public MediaTrackGraph, >+ uint32_t maxInputChannels = 0; >+ // When/if we decide to support multiple input device per graph, this needs >+ // loop over them. 
>+- auto result = mDeviceTrackMap.Lookup(mInputDeviceID); >+- MOZ_ASSERT(result); >+- if (!result) { >+- return maxInputChannels; >+- } >+- for (const auto& listener : result.Data()->mDataUsers) { >++ for (const auto& listener : *mInputDeviceUsers.Lookup(mInputDeviceID)) { >+ maxInputChannels = std::max(maxInputChannels, >+ listener->RequestedInputChannelCount(this)); >+ } >+@@ -607,8 +520,8 @@ class MediaTrackGraphImpl : public MediaTrackGraph, >+ AudioInputType AudioInputDevicePreference() { >+ MOZ_ASSERT(OnGraphThreadOrNotRunning()); >+ >+- auto result = mDeviceTrackMap.Lookup(mInputDeviceID); >+- if (!result) { >++ auto listeners = mInputDeviceUsers.Lookup(mInputDeviceID); >++ if (!listeners) { >+ return AudioInputType::Unknown; >+ } >+ bool voiceInput = false; >+@@ -617,7 +530,7 @@ class MediaTrackGraphImpl : public MediaTrackGraph, >+ >+ // If at least one track is considered to be voice, >+ // XXX This could use short-circuit evaluation resp. std::any_of. >+- for (const auto& listener : result.Data()->mDataUsers) { >++ for (const auto& listener : *listeners) { >+ voiceInput |= listener->IsVoiceInput(this); >+ } >+ if (voiceInput) { >+@@ -842,7 +755,7 @@ class MediaTrackGraphImpl : public MediaTrackGraph, >+ /** >+ * Devices to use for cubeb input & output, or nullptr for default device. >+ * A MediaTrackGraph always has an output (even if silent). >+- * If `mDeviceTrackMap.Count() != 0`, this MediaTrackGraph wants audio >++ * If `mInputDeviceUsers.Count() != 0`, this MediaTrackGraph wants audio >+ * input. >+ * >+ * All mInputDeviceID access is on the graph thread except for reads via >+@@ -853,13 +766,12 @@ class MediaTrackGraphImpl : public MediaTrackGraph, >+ */ >+ std::atomic<CubebUtils::AudioDeviceID> mInputDeviceID; >+ CubebUtils::AudioDeviceID mOutputDeviceID; >+- >+- // Maps AudioDeviceID to a device track that delivers audio input/output >+- // data and send device-changed signals to its listeners. This is only >+- // touched on the graph thread. 
The NativeInputTrack* here is used for >+- // for bookkeeping on the graph thread. The owner of the NativeInputTrack is >+- // mDeviceTracks, which is only touched by main thread. >+- nsTHashMap<CubebUtils::AudioDeviceID, NativeInputTrack*> mDeviceTrackMap; >++ // Maps AudioDeviceID to an array of their users (that are listeners). This is >++ // used to deliver audio input frames and to notify the listeners that the >++ // audio device that delivers the audio frames has changed. >++ // This is only touched on the graph thread. >++ nsTHashMap<nsVoidPtrHashKey, nsTArray<RefPtr<AudioDataListener>>> >++ mInputDeviceUsers; >+ >+ /** >+ * List of resume operations waiting for a switch to an AudioCallbackDriver. >+@@ -1131,11 +1043,6 @@ class MediaTrackGraphImpl : public MediaTrackGraph, >+ * ctor, and the read/write only on the graph thread. >+ */ >+ uint32_t mMaxOutputChannelCount; >+- >+- /* >+- * Hold the NativeInputTrack for a certain device >+- */ >+- nsTHashMap<CubebUtils::AudioDeviceID, RefPtr<NativeInputTrack>> mDeviceTracks; >+ }; >+ >+ } // namespace mozilla >+diff --git dom/media/gtest/TestAudioInputProcessing.cpp dom/media/gtest/TestAudioInputProcessing.cpp >+index ed5d14fcb8..c78a56080a 100644 >+--- dom/media/gtest/TestAudioInputProcessing.cpp >++++ dom/media/gtest/TestAudioInputProcessing.cpp >+@@ -70,11 +70,8 @@ TEST(TestAudioInputProcessing, UnaccountedPacketizerBuffering) >+ processedTime = 0; >+ nextTime = MediaTrackGraphImpl::RoundUpToEndOfAudioBlock(nrFrames); >+ generator.GenerateInterleaved(buffer.Elements(), nrFrames); >+- aip->NotifyInputData(graph, >+- AudioInputProcessing::BufferInfo{ >+- buffer.Elements(), nrFrames, channels, rate}, >++ aip->NotifyInputData(graph, buffer.Elements(), nrFrames, rate, channels, >+ nextTime - nrFrames); >+- aip->ProcessInput(graph, nullptr); >+ aip->Pull(graph, processedTime, nextTime, segment.GetDuration(), &segment, >+ true, &ended); >+ EXPECT_EQ(aip->NumBufferedFrames(graph), 24U); >+@@ -90,11 +87,8 @@ 
TEST(TestAudioInputProcessing, UnaccountedPacketizerBuffering) >+ processedTime = nextTime; >+ nextTime = MediaTrackGraphImpl::RoundUpToEndOfAudioBlock(2 * nrFrames); >+ generator.GenerateInterleaved(buffer.Elements(), nrFrames); >+- aip->NotifyInputData(graph, >+- AudioInputProcessing::BufferInfo{ >+- buffer.Elements(), nrFrames, channels, rate}, >++ aip->NotifyInputData(graph, buffer.Elements(), nrFrames, rate, channels, >+ nextTime - (2 * nrFrames)); >+- aip->ProcessInput(graph, nullptr); >+ aip->Pull(graph, processedTime, nextTime, segment.GetDuration(), &segment, >+ true, &ended); >+ EXPECT_EQ(aip->NumBufferedFrames(graph), 120U); >+diff --git dom/media/webrtc/MediaEngineWebRTCAudio.cpp dom/media/webrtc/MediaEngineWebRTCAudio.cpp >+index fb59fc195f..77da55c0a1 100644 >+--- dom/media/webrtc/MediaEngineWebRTCAudio.cpp >++++ dom/media/webrtc/MediaEngineWebRTCAudio.cpp >+@@ -867,20 +867,21 @@ void AudioInputProcessing::Pull(MediaTrackGraphImpl* aGraph, GraphTime aFrom, >+ } >+ >+ void AudioInputProcessing::NotifyOutputData(MediaTrackGraphImpl* aGraph, >+- BufferInfo aInfo) { >++ AudioDataValue* aBuffer, >++ size_t aFrames, TrackRate aRate, >++ uint32_t aChannels) { >+ MOZ_ASSERT(aGraph->OnGraphThread()); >+ MOZ_ASSERT(mEnabled); >+ >+- if (!mPacketizerOutput || >+- mPacketizerOutput->mPacketSize != aInfo.mRate / 100u || >+- mPacketizerOutput->mChannels != aInfo.mChannels) { >++ if (!mPacketizerOutput || mPacketizerOutput->mPacketSize != aRate / 100u || >++ mPacketizerOutput->mChannels != aChannels) { >+ // It's ok to drop the audio still in the packetizer here: if this changes, >+ // we changed devices or something. 
>+ mPacketizerOutput = MakeUnique<AudioPacketizer<AudioDataValue, float>>( >+- aInfo.mRate / 100, aInfo.mChannels); >++ aRate / 100, aChannels); >+ } >+ >+- mPacketizerOutput->Input(aInfo.mBuffer, aInfo.mFrames); >++ mPacketizerOutput->Input(aBuffer, aFrames); >+ >+ while (mPacketizerOutput->PacketsAvailable()) { >+ uint32_t samplesPerPacket = >+@@ -899,11 +900,11 @@ void AudioInputProcessing::NotifyOutputData(MediaTrackGraphImpl* aGraph, >+ uint32_t channelCountFarend = 0; >+ uint32_t framesPerPacketFarend = 0; >+ >+- // Downmix from aInfo.mChannels to MAX_CHANNELS if needed. We always have >+- // floats here, the packetized performed the conversion. >+- if (aInfo.mChannels > MAX_CHANNELS) { >++ // Downmix from aChannels to MAX_CHANNELS if needed. We always have floats >++ // here, the packetized performed the conversion. >++ if (aChannels > MAX_CHANNELS) { >+ AudioConverter converter( >+- AudioConfig(aInfo.mChannels, 0, AudioConfig::FORMAT_FLT), >++ AudioConfig(aChannels, 0, AudioConfig::FORMAT_FLT), >+ AudioConfig(MAX_CHANNELS, 0, AudioConfig::FORMAT_FLT)); >+ framesPerPacketFarend = mPacketizerOutput->mPacketSize; >+ framesPerPacketFarend = >+@@ -913,9 +914,9 @@ void AudioInputProcessing::NotifyOutputData(MediaTrackGraphImpl* aGraph, >+ deinterleavedPacketDataChannelPointers.SetLength(MAX_CHANNELS); >+ } else { >+ interleavedFarend = packet; >+- channelCountFarend = aInfo.mChannels; >++ channelCountFarend = aChannels; >+ framesPerPacketFarend = mPacketizerOutput->mPacketSize; >+- deinterleavedPacketDataChannelPointers.SetLength(aInfo.mChannels); >++ deinterleavedPacketDataChannelPointers.SetLength(aChannels); >+ } >+ >+ MOZ_ASSERT(interleavedFarend && >+@@ -941,7 +942,7 @@ void AudioInputProcessing::NotifyOutputData(MediaTrackGraphImpl* aGraph, >+ >+ // Having the same config for input and output means we potentially save >+ // some CPU. 
>+- StreamConfig inputConfig(aInfo.mRate, channelCountFarend, false); >++ StreamConfig inputConfig(aRate, channelCountFarend, false); >+ StreamConfig outputConfig = inputConfig; >+ >+ // Passing the same pointers here saves a copy inside this function. >+@@ -1078,34 +1079,45 @@ void AudioInputProcessing::PacketizeAndProcess(MediaTrackGraphImpl* aGraph, >+ } >+ } >+ >+-void AudioInputProcessing::ProcessInput(MediaTrackGraphImpl* aGraph, >+- const AudioSegment* aSegment) { >+- MOZ_ASSERT(aGraph); >+- MOZ_ASSERT(aGraph->OnGraphThread()); >+- >+- if (mEnded || !mEnabled || !mLiveFramesAppended || !mInputData) { >++template <typename T> >++void AudioInputProcessing::InsertInGraph(MediaTrackGraphImpl* aGraph, >++ const T* aBuffer, size_t aFrames, >++ uint32_t aChannels) { >++ if (mEnded) { >+ return; >+ } >+ >+- // One NotifyInputData might have multiple following ProcessInput calls, but >+- // we only process one input per NotifyInputData call. >+- BufferInfo inputInfo = mInputData.extract(); >++ MOZ_ASSERT(aChannels >= 1 && aChannels <= 8, "Support up to 8 channels"); >+ >+- // If some processing is necessary, packetize and insert in the WebRTC.org >+- // code. Otherwise, directly insert the mic data in the MTG, bypassing all >+- // processing. 
>+- if (PassThrough(aGraph)) { >+- if (aSegment) { >+- mSegment.AppendSegment(aSegment, mPrincipal); >+- } else { >+- mSegment.AppendFromInterleavedBuffer(inputInfo.mBuffer, inputInfo.mFrames, >+- inputInfo.mChannels, mPrincipal); >+- } >++ CheckedInt<size_t> bufferSize(sizeof(T)); >++ bufferSize *= aFrames; >++ bufferSize *= aChannels; >++ RefPtr<SharedBuffer> buffer = SharedBuffer::Create(bufferSize); >++ AutoTArray<const T*, 8> channels; >++ if (aChannels == 1) { >++ PodCopy(static_cast<T*>(buffer->Data()), aBuffer, aFrames); >++ channels.AppendElement(static_cast<T*>(buffer->Data())); >+ } else { >+- MOZ_ASSERT(aGraph->GraphRate() == inputInfo.mRate); >+- PacketizeAndProcess(aGraph, inputInfo.mBuffer, inputInfo.mFrames, >+- inputInfo.mRate, inputInfo.mChannels); >++ channels.SetLength(aChannels); >++ AutoTArray<T*, 8> write_channels; >++ write_channels.SetLength(aChannels); >++ T* samples = static_cast<T*>(buffer->Data()); >++ >++ size_t offset = 0; >++ for (uint32_t i = 0; i < aChannels; ++i) { >++ channels[i] = write_channels[i] = samples + offset; >++ offset += aFrames; >++ } >++ >++ DeinterleaveAndConvertBuffer(aBuffer, aFrames, aChannels, >++ write_channels.Elements()); >+ } >++ >++ LOG_FRAME("AudioInputProcessing %p Appending %zu frames of raw audio", this, >++ aFrames); >++ >++ MOZ_ASSERT(aChannels == channels.Length()); >++ mSegment.AppendFrames(buffer.forget(), channels, aFrames, mPrincipal); >+ } >+ >+ void AudioInputProcessing::NotifyInputStopped(MediaTrackGraphImpl* aGraph) { >+@@ -1119,13 +1131,14 @@ void AudioInputProcessing::NotifyInputStopped(MediaTrackGraphImpl* aGraph) { >+ if (mPacketizerInput) { >+ mPacketizerInput->Clear(); >+ } >+- mInputData.take(); >+ } >+ >+ // Called back on GraphDriver thread! 
>+ // Note this can be called back after ::Stop() >+ void AudioInputProcessing::NotifyInputData(MediaTrackGraphImpl* aGraph, >+- const BufferInfo aInfo, >++ const AudioDataValue* aBuffer, >++ size_t aFrames, TrackRate aRate, >++ uint32_t aChannels, >+ uint32_t aAlreadyBuffered) { >+ MOZ_ASSERT(aGraph->OnGraphThread()); >+ TRACE(); >+@@ -1139,7 +1152,14 @@ void AudioInputProcessing::NotifyInputData(MediaTrackGraphImpl* aGraph, >+ mLiveBufferingAppended = aAlreadyBuffered; >+ } >+ >+- mInputData = Some(aInfo); >++ // If some processing is necessary, packetize and insert in the WebRTC.org >++ // code. Otherwise, directly insert the mic data in the MTG, bypassing all >++ // processing. >++ if (PassThrough(aGraph)) { >++ InsertInGraph<AudioDataValue>(aGraph, aBuffer, aFrames, aChannels); >++ } else { >++ PacketizeAndProcess(aGraph, aBuffer, aFrames, aRate, aChannels); >++ } >+ } >+ >+ #define ResetProcessingIfNeeded(_processing) \ >+@@ -1173,7 +1193,6 @@ void AudioInputProcessing::DeviceChanged(MediaTrackGraphImpl* aGraph) { >+ void AudioInputProcessing::End() { >+ mEnded = true; >+ mSegment.Clear(); >+- mInputData.take(); >+ } >+ >+ TrackTime AudioInputProcessing::NumBufferedFrames( >+@@ -1230,28 +1249,6 @@ void AudioInputTrack::DestroyImpl() { >+ void AudioInputTrack::ProcessInput(GraphTime aFrom, GraphTime aTo, >+ uint32_t aFlags) { >+ TRACE_COMMENT("AudioInputTrack %p", this); >+- >+- // Check if there is a connected NativeInputTrack >+- NativeInputTrack* source = nullptr; >+- if (!mInputs.IsEmpty()) { >+- for (const MediaInputPort* input : mInputs) { >+- MOZ_ASSERT(input->GetSource()); >+- if (input->GetSource()->AsNativeInputTrack()) { >+- source = input->GetSource()->AsNativeInputTrack(); >+- break; >+- } >+- } >+- } >+- >+- // Push the input data from the connected NativeInputTrack to mInputProcessing >+- if (source) { >+- MOZ_ASSERT(source->GraphImpl() == GraphImpl()); >+- MOZ_ASSERT(source->mSampleRate == mSampleRate); >+- MOZ_ASSERT(GraphImpl()->GraphRate() 
== mSampleRate); >+- mInputProcessing->ProcessInput(GraphImpl(), >+- source->GetData<AudioSegment>()); >+- } >+- >+ bool ended = false; >+ mInputProcessing->Pull( >+ GraphImpl(), aFrom, aTo, TrackTimeToGraphTime(GetEnd()), >+@@ -1275,10 +1272,6 @@ nsresult AudioInputTrack::OpenAudioInput(CubebUtils::AudioDeviceID aId, >+ MOZ_ASSERT(!mInputListener); >+ MOZ_ASSERT(mDeviceId.isNothing()); >+ mInputListener = aListener; >+- ProcessedMediaTrack* input = GraphImpl()->GetDeviceTrack(aId); >+- MOZ_ASSERT(input); >+- LOG("Open device %p (InputTrack=%p) for Mic source %p", aId, input, this); >+- mPort = AllocateInputPort(input); >+ mDeviceId.emplace(aId); >+ return GraphImpl()->OpenAudioInput(aId, aListener); >+ } >+@@ -1289,11 +1282,7 @@ void AudioInputTrack::CloseAudioInput() { >+ if (!mInputListener) { >+ return; >+ } >+- MOZ_ASSERT(mPort); >+ MOZ_ASSERT(mDeviceId.isSome()); >+- LOG("Close device %p (InputTrack=%p) for Mic source %p ", mDeviceId.value(), >+- mPort->GetSource(), this); >+- mPort->Destroy(); >+ GraphImpl()->CloseAudioInput(mDeviceId.extract(), mInputListener); >+ mInputListener = nullptr; >+ } >+diff --git dom/media/webrtc/MediaEngineWebRTCAudio.h dom/media/webrtc/MediaEngineWebRTCAudio.h >+index 46a66d9a7a..9065b28f7a 100644 >+--- dom/media/webrtc/MediaEngineWebRTCAudio.h >++++ dom/media/webrtc/MediaEngineWebRTCAudio.h >+@@ -141,9 +141,13 @@ class AudioInputProcessing : public AudioDataListener { >+ GraphTime aTrackEnd, AudioSegment* aSegment, >+ bool aLastPullThisIteration, bool* aEnded); >+ >+- void NotifyOutputData(MediaTrackGraphImpl* aGraph, BufferInfo aInfo) override; >++ void NotifyOutputData(MediaTrackGraphImpl* aGraph, AudioDataValue* aBuffer, >++ size_t aFrames, TrackRate aRate, >++ uint32_t aChannels) override; >+ void NotifyInputStopped(MediaTrackGraphImpl* aGraph) override; >+- void NotifyInputData(MediaTrackGraphImpl* aGraph, const BufferInfo aInfo, >++ void NotifyInputData(MediaTrackGraphImpl* aGraph, >++ const AudioDataValue* aBuffer, 
size_t aFrames, >++ TrackRate aRate, uint32_t aChannels, >+ uint32_t aAlreadyBuffered) override; >+ bool IsVoiceInput(MediaTrackGraphImpl* aGraph) const override { >+ // If we're passing data directly without AEC or any other process, this >+@@ -163,8 +167,9 @@ class AudioInputProcessing : public AudioDataListener { >+ >+ void Disconnect(MediaTrackGraphImpl* aGraph) override; >+ >+- // aSegment stores the unprocessed non-interleaved audio input data from mic >+- void ProcessInput(MediaTrackGraphImpl* aGraph, const AudioSegment* aSegment); >++ template <typename T> >++ void InsertInGraph(MediaTrackGraphImpl* aGraph, const T* aBuffer, >++ size_t aFrames, uint32_t aChannels); >+ >+ void PacketizeAndProcess(MediaTrackGraphImpl* aGraph, >+ const AudioDataValue* aBuffer, size_t aFrames, >+@@ -242,8 +247,6 @@ class AudioInputProcessing : public AudioDataListener { >+ bool mEnabled; >+ // Whether or not we've ended and removed the AudioInputTrack. >+ bool mEnded; >+- // Store the unprocessed interleaved audio input data >+- Maybe<BufferInfo> mInputData; >+ }; >+ >+ // MediaTrack subclass tailored for MediaEngineWebRTCMicrophoneSource. >+@@ -251,10 +254,6 @@ class AudioInputTrack : public ProcessedMediaTrack { >+ // Only accessed on the graph thread. >+ RefPtr<AudioInputProcessing> mInputProcessing; >+ >+- // Only accessed on the main thread. Link to the track producing raw audio >+- // input data. Graph thread should use mInputs to get the source >+- RefPtr<MediaInputPort> mPort; >+- >+ // Only accessed on the main thread. Used for bookkeeping on main thread, such >+ // that CloseAudioInput can be idempotent. >+ // XXX Should really be a CubebUtils::AudioDeviceID, but they aren't >+-- >+2.32.0 >+ >-- >2.32.0 >
You cannot view the attachment while viewing its details because your browser does not support IFRAMEs.
View the attachment on a separate page
.
View Attachment As Diff
View Attachment As Raw
Flags:
2khramtsov
:
maintainer-approval?
Actions:
View
|
Diff
Attachments on
bug 257639
: 227770 |
227789