From 80112dfa8aef0f6391e629c3b1e67c6bb1d8554c Mon Sep 17 00:00:00 2001
From: Evgeniy Khramtsov <evgeniy@khramtsov.org>
Date: Thu, 9 Sep 2021 17:52:48 +0300
Subject: [PATCH] Revert bug 1702646

Revert changesets:
https://github.com/mozilla/gecko-dev/commit/0a9065018a56a7812b15411051143c2c8f7b1a08
https://github.com/mozilla/gecko-dev/commit/d2f2ea20bbc9e1e7bc6858ea19d689624f27055a
https://github.com/mozilla/gecko-dev/commit/c95e8979b6b673658cdc51313d01db388bc3ff5d
https://github.com/mozilla/gecko-dev/commit/1f72372ece815dd5c59e9a34ca0754ea84124713

1f* is the first bad changeset; it cannot be reverted cleanly on its own.

See:
https://bugzilla.mozilla.org/show_bug.cgi?id=1702646
https://bugzilla.mozilla.org/show_bug.cgi?id=1725810
https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=257639
---
 dom/media/AudioSegment.h                      |  45 -------
 dom/media/MediaTrackGraph.cpp                 |  63 +++-------
 dom/media/MediaTrackGraph.h                   |  14 +--
 dom/media/MediaTrackGraphImpl.h               |  35 ++++--
 dom/media/gtest/TestAudioInputProcessing.cpp  |  10 +-
 dom/media/webrtc/MediaEngineWebRTCAudio.cpp   | 119 +++++++++----------
 dom/media/webrtc/MediaEngineWebRTCAudio.h     |  15 ++-
 7 files changed, 116 insertions(+), 185 deletions(-)

diff --git dom/media/AudioSegment.h dom/media/AudioSegment.h
index 4ec71c92bc..a0aee574e5 100644
--- dom/media/AudioSegment.h
+++ dom/media/AudioSegment.h
@@ -377,51 +377,6 @@ class AudioSegment : public MediaSegmentBase<AudioSegment, AudioChunk> {
     chunk->mBufferFormat = AUDIO_FORMAT_S16;
     chunk->mPrincipalHandle = aPrincipalHandle;
   }
-  void AppendSegment(const AudioSegment* aSegment,
-                     const PrincipalHandle& aPrincipalHandle) {
-    MOZ_ASSERT(aSegment);
-
-    for (const AudioChunk& c : aSegment->mChunks) {
-      AudioChunk* chunk = AppendChunk(c.GetDuration());
-      chunk->mBuffer = c.mBuffer;
-      chunk->mChannelData = c.mChannelData;
-      chunk->mBufferFormat = c.mBufferFormat;
-      chunk->mPrincipalHandle = aPrincipalHandle;
-    }
-  }
-  template <typename T>
-  void AppendFromInterleavedBuffer(const T* aBuffer, size_t aFrames,
-                                   uint32_t aChannels,
-                                   const PrincipalHandle& aPrincipalHandle) {
-    MOZ_ASSERT(aChannels >= 1 && aChannels <= 8, "Support up to 8 channels");
-
-    CheckedInt<size_t> bufferSize(sizeof(T));
-    bufferSize *= aFrames;
-    bufferSize *= aChannels;
-    RefPtr<SharedBuffer> buffer = SharedBuffer::Create(bufferSize);
-    AutoTArray<const T*, 8> channels;
-    if (aChannels == 1) {
-      PodCopy(static_cast<T*>(buffer->Data()), aBuffer, aFrames);
-      channels.AppendElement(static_cast<T*>(buffer->Data()));
-    } else {
-      channels.SetLength(aChannels);
-      AutoTArray<T*, 8> writeChannels;
-      writeChannels.SetLength(aChannels);
-      T* samples = static_cast<T*>(buffer->Data());
-
-      size_t offset = 0;
-      for (uint32_t i = 0; i < aChannels; ++i) {
-        channels[i] = writeChannels[i] = samples + offset;
-        offset += aFrames;
-      }
-
-      DeinterleaveAndConvertBuffer(aBuffer, aFrames, aChannels,
-                                   writeChannels.Elements());
-    }
-
-    MOZ_ASSERT(aChannels == channels.Length());
-    AppendFrames(buffer.forget(), channels, aFrames, aPrincipalHandle);
-  }
   // Consumes aChunk, and returns a pointer to the persistent copy of aChunk
   // in the segment.
   AudioChunk* AppendAndConsumeChunk(AudioChunk&& aChunk) {
diff --git dom/media/MediaTrackGraph.cpp dom/media/MediaTrackGraph.cpp
index 3b5d376028..d97eb12b90 100644
--- dom/media/MediaTrackGraph.cpp
+++ dom/media/MediaTrackGraph.cpp
@@ -62,27 +62,23 @@ LazyLogModule gMediaTrackGraphLog("MediaTrackGraph");
  */
 static nsTHashMap<nsUint32HashKey, MediaTrackGraphImpl*> gGraphs;
 
-void NativeInputTrack::AudioDataBuffers::SetOutputData(AudioDataValue* aBuffer,
-                                                       size_t aFrames,
-                                                       uint32_t aChannels,
-                                                       TrackRate aRate) {
-  mOutputData = Some(BufferInfo{aBuffer, aFrames, aChannels, aRate});
+void NativeInputTrack::AudioDataBuffers::SetOutputData(
+    const AudioDataValue* aBuffer, size_t aFrames, uint32_t aChannels) {
+  mOutputData.Set(aBuffer, aFrames, aChannels);
 }
 
-void NativeInputTrack::AudioDataBuffers::SetInputData(AudioDataValue* aBuffer,
-                                                      size_t aFrames,
-                                                      uint32_t aChannels,
-                                                      TrackRate aRate) {
-  mInputData = Some(BufferInfo{aBuffer, aFrames, aChannels, aRate});
+void NativeInputTrack::AudioDataBuffers::SetInputData(
+    const AudioDataValue* aBuffer, size_t aFrames, uint32_t aChannels) {
+  mInputData.Set(aBuffer, aFrames, aChannels);
 }
 
 void NativeInputTrack::AudioDataBuffers::Clear(Scope aScope) {
   if (aScope & Scope::Input) {
-    mInputData.take();
+    mInputData.Clear();
   }
 
   if (aScope & Scope::Output) {
-    mOutputData.take();
+    mOutputData.Clear();
   }
 }
 
@@ -120,33 +116,12 @@ void NativeInputTrack::ProcessInput(GraphTime aFrom, GraphTime aTo,
                                     uint32_t aFlags) {
   MOZ_ASSERT(mGraph->OnGraphThreadOrNotRunning());
   TRACE_COMMENT("NativeInputTrack %p", this);
-
-  if (!mDataHolder || !mDataHolder->mInputData) {
-    return;
-  }
-
-  // One NotifyInputData might have multiple following ProcessInput calls, but
-  // we only process one input per NotifyInputData call.
-  NativeInputTrack::AudioDataBuffers::BufferInfo inputInfo =
-      mDataHolder->mInputData.extract();
-
-  MOZ_ASSERT(mInputChannels == inputInfo.mChannels);
-  MOZ_ASSERT(inputInfo.mChannels >= 1 && inputInfo.mChannels <= 8,
-             "Support up to 8 channels");
-
-  GetData<AudioSegment>()->Clear();
-  GetData<AudioSegment>()->AppendFromInterleavedBuffer(
-      inputInfo.mBuffer, inputInfo.mFrames, inputInfo.mChannels,
-      PRINCIPAL_HANDLE_NONE);
-
-  LOG(LogLevel::Verbose,
-      ("NativeInputTrack %p Appending %zu frames of raw audio", this,
-       inputInfo.mFrames));
+  // TODO: Put input data to mSegment
 }
 
 uint32_t NativeInputTrack::NumberOfChannels() const {
   MOZ_ASSERT(mGraph->OnGraphThreadOrNotRunning());
-  return mInputChannels;
+  return mDataHolder ? mDataHolder->mInputData.mChannels : 0;
 }
 
 void NativeInputTrack::InitDataHolderIfNeeded() {
@@ -162,9 +137,11 @@ void NativeInputTrack::NotifyOutputData(MediaTrackGraphImpl* aGraph,
   MOZ_ASSERT(aGraph->OnGraphThreadOrNotRunning());
   MOZ_ASSERT(aGraph == mGraph, "Receive output data from another graph");
   MOZ_ASSERT(mDataHolder);
-  mDataHolder->SetOutputData(aBuffer, aFrames, aChannels, aRate);
+  mDataHolder->SetOutputData(aBuffer, aFrames, aChannels);
   for (auto& listener : mDataUsers) {
-    listener->NotifyOutputData(aGraph, mDataHolder->mOutputData.value());
+    listener->NotifyOutputData(aGraph, mDataHolder->mOutputData.mBuffer,
+                               mDataHolder->mOutputData.mFrames, aRate,
+                               mDataHolder->mOutputData.mChannels);
   }
 }
 
@@ -173,7 +150,6 @@ void NativeInputTrack::NotifyInputStopped(MediaTrackGraphImpl* aGraph) {
   MOZ_ASSERT(aGraph == mGraph,
              "Receive input stopped signal from another graph");
   MOZ_ASSERT(mDataHolder);
-  mInputChannels = 0;
   mDataHolder->Clear(AudioDataBuffers::Scope::Input);
   for (auto& listener : mDataUsers) {
     listener->NotifyInputStopped(aGraph);
@@ -189,14 +165,11 @@ void NativeInputTrack::NotifyInputData(MediaTrackGraphImpl* aGraph,
   MOZ_ASSERT(aGraph == mGraph, "Receive input data from another graph");
 
   MOZ_ASSERT(mDataHolder);
-  MOZ_ASSERT(aChannels);
-  if (!mInputChannels) {
-    mInputChannels = aChannels;
-  }
-  mDataHolder->SetInputData(const_cast<AudioDataValue*>(aBuffer), aFrames,
-                            aChannels, aRate);
+  mDataHolder->SetInputData(aBuffer, aFrames, aChannels);
   for (auto& listener : mDataUsers) {
-    listener->NotifyInputData(aGraph, mDataHolder->mInputData.value(),
+    listener->NotifyInputData(aGraph, mDataHolder->mInputData.mBuffer,
+                              mDataHolder->mInputData.mFrames, aRate,
+                              mDataHolder->mInputData.mChannels,
                               aAlreadyBuffered);
   }
 }
diff --git dom/media/MediaTrackGraph.h dom/media/MediaTrackGraph.h
index d85cb502b9..6d92f01b48 100644
--- dom/media/MediaTrackGraph.h
+++ dom/media/MediaTrackGraph.h
@@ -107,14 +107,6 @@ class AudioDataListenerInterface {
   virtual ~AudioDataListenerInterface() = default;
 
  public:
-  // Information for the interleaved buffer coming from the audio callbacks
-  struct BufferInfo {
-    AudioDataValue* mBuffer = nullptr;
-    size_t mFrames = 0;
-    uint32_t mChannels = 0;
-    TrackRate mRate = 0;
-  };
-
   /* These are for cubeb audio input & output streams: */
   /**
   * Output data to speakers, for use as the "far-end" data for echo
@@ -122,7 +114,8 @@ class AudioDataListenerInterface {
   * chunks.
   */
   virtual void NotifyOutputData(MediaTrackGraphImpl* aGraph,
-                                BufferInfo aInfo) = 0;
+                                AudioDataValue* aBuffer, size_t aFrames,
+                                TrackRate aRate, uint32_t aChannels) = 0;
   /**
   * An AudioCallbackDriver with an input stream signaling that it has stopped
   * for any reason and the AudioDataListener will not be notified of input data
@@ -134,7 +127,8 @@ class AudioDataListenerInterface {
   * guaranteed to be in any particular size chunks.
   */
   virtual void NotifyInputData(MediaTrackGraphImpl* aGraph,
-                               const BufferInfo aInfo,
+                               const AudioDataValue* aBuffer, size_t aFrames,
+                               TrackRate aRate, uint32_t aChannels,
                               uint32_t aAlreadyBuffered) = 0;
 
   /**
diff --git dom/media/MediaTrackGraphImpl.h dom/media/MediaTrackGraphImpl.h
index 8baeb25a7d..52c08112b5 100644
--- dom/media/MediaTrackGraphImpl.h
+++ dom/media/MediaTrackGraphImpl.h
@@ -75,13 +75,32 @@ class NativeInputTrack : public ProcessedMediaTrack {
   nsTArray<RefPtr<AudioDataListener>> mDataUsers;
 
  private:
+  struct BufferInfo {
+    AudioDataValue* mBuffer = nullptr;
+    size_t mFrames = 0;
+    uint32_t mChannels = 0;
+
+    void Set(const AudioDataValue* aBuffer, size_t aFrames,
+             uint32_t aChannels) {
+      mBuffer = const_cast<AudioDataValue*>(aBuffer);
+      mFrames = aFrames;
+      mChannels = aChannels;
+    }
+
+    void Clear() {
+      mBuffer = nullptr;
+      mFrames = 0;
+      mChannels = 0;
+    }
+  };
+
   class AudioDataBuffers {
    public:
     AudioDataBuffers() = default;
-    void SetOutputData(AudioDataValue* aBuffer, size_t aFrames,
-                       uint32_t aChannels, TrackRate aRate);
-    void SetInputData(AudioDataValue* aBuffer, size_t aFrames,
-                      uint32_t aChannels, TrackRate aRate);
+    void SetOutputData(const AudioDataValue* aBuffer, size_t aFrames,
+                       uint32_t aChannels);
+    void SetInputData(const AudioDataValue* aBuffer, size_t aFrames,
+                      uint32_t aChannels);
 
     enum Scope : unsigned char {
       Input = 0x01,
@@ -89,20 +108,16 @@ class NativeInputTrack : public ProcessedMediaTrack {
     };
     void Clear(Scope aScope);
 
-    typedef AudioDataListenerInterface::BufferInfo BufferInfo;
    // Storing the audio output data coming from NotifyOutputData
-    Maybe<BufferInfo> mOutputData;
+    BufferInfo mOutputData;
    // Storing the audio input data coming from NotifyInputData
-    Maybe<BufferInfo> mInputData;
+    BufferInfo mInputData;
   };
 
   // Only accessed on the graph thread.
   // Storing the audio data coming from GraphDriver directly.
   Maybe<AudioDataBuffers> mDataHolder;
 
-  // Only accessed on the graph thread.
-  uint32_t mInputChannels = 0;
-
   // Only accessed on the main thread.
   // When this becomes zero, this NativeInputTrack is no longer needed.
   int32_t mUserCount = 0;
diff --git dom/media/gtest/TestAudioInputProcessing.cpp dom/media/gtest/TestAudioInputProcessing.cpp
index ed5d14fcb8..c78a56080a 100644
--- dom/media/gtest/TestAudioInputProcessing.cpp
+++ dom/media/gtest/TestAudioInputProcessing.cpp
@@ -70,11 +70,8 @@ TEST(TestAudioInputProcessing, UnaccountedPacketizerBuffering)
   processedTime = 0;
   nextTime = MediaTrackGraphImpl::RoundUpToEndOfAudioBlock(nrFrames);
   generator.GenerateInterleaved(buffer.Elements(), nrFrames);
-  aip->NotifyInputData(graph,
-                       AudioInputProcessing::BufferInfo{
-                           buffer.Elements(), nrFrames, channels, rate},
+  aip->NotifyInputData(graph, buffer.Elements(), nrFrames, rate, channels,
                        nextTime - nrFrames);
-  aip->ProcessInput(graph, nullptr);
   aip->Pull(graph, processedTime, nextTime, segment.GetDuration(), &segment,
             true, &ended);
   EXPECT_EQ(aip->NumBufferedFrames(graph), 24U);
@@ -90,11 +87,8 @@ TEST(TestAudioInputProcessing, UnaccountedPacketizerBuffering)
   processedTime = nextTime;
   nextTime = MediaTrackGraphImpl::RoundUpToEndOfAudioBlock(2 * nrFrames);
   generator.GenerateInterleaved(buffer.Elements(), nrFrames);
-  aip->NotifyInputData(graph,
-                       AudioInputProcessing::BufferInfo{
-                           buffer.Elements(), nrFrames, channels, rate},
+  aip->NotifyInputData(graph, buffer.Elements(), nrFrames, rate, channels,
                        nextTime - (2 * nrFrames));
-  aip->ProcessInput(graph, nullptr);
   aip->Pull(graph, processedTime, nextTime, segment.GetDuration(), &segment,
             true, &ended);
   EXPECT_EQ(aip->NumBufferedFrames(graph), 120U);
diff --git dom/media/webrtc/MediaEngineWebRTCAudio.cpp dom/media/webrtc/MediaEngineWebRTCAudio.cpp
index fb59fc195f..9c2d1f2ff2 100644
--- dom/media/webrtc/MediaEngineWebRTCAudio.cpp
+++ dom/media/webrtc/MediaEngineWebRTCAudio.cpp
@@ -867,20 +867,21 @@ void AudioInputProcessing::Pull(MediaTrackGraphImpl* aGraph, GraphTime aFrom,
 }
 
 void AudioInputProcessing::NotifyOutputData(MediaTrackGraphImpl* aGraph,
-                                            BufferInfo aInfo) {
+                                            AudioDataValue* aBuffer,
+                                            size_t aFrames, TrackRate aRate,
+                                            uint32_t aChannels) {
   MOZ_ASSERT(aGraph->OnGraphThread());
   MOZ_ASSERT(mEnabled);
 
-  if (!mPacketizerOutput ||
-      mPacketizerOutput->mPacketSize != aInfo.mRate / 100u ||
-      mPacketizerOutput->mChannels != aInfo.mChannels) {
+  if (!mPacketizerOutput || mPacketizerOutput->mPacketSize != aRate / 100u ||
+      mPacketizerOutput->mChannels != aChannels) {
     // It's ok to drop the audio still in the packetizer here: if this changes,
     // we changed devices or something.
     mPacketizerOutput = MakeUnique<AudioPacketizer<AudioDataValue, float>>(
-        aInfo.mRate / 100, aInfo.mChannels);
+        aRate / 100, aChannels);
   }
 
-  mPacketizerOutput->Input(aInfo.mBuffer, aInfo.mFrames);
+  mPacketizerOutput->Input(aBuffer, aFrames);
 
   while (mPacketizerOutput->PacketsAvailable()) {
     uint32_t samplesPerPacket =
@@ -899,11 +900,11 @@ void AudioInputProcessing::NotifyOutputData(MediaTrackGraphImpl* aGraph,
     uint32_t channelCountFarend = 0;
     uint32_t framesPerPacketFarend = 0;
 
-    // Downmix from aInfo.mChannels to MAX_CHANNELS if needed. We always have
-    // floats here, the packetized performed the conversion.
-    if (aInfo.mChannels > MAX_CHANNELS) {
+    // Downmix from aChannels to MAX_CHANNELS if needed. We always have floats
+    // here, the packetized performed the conversion.
+    if (aChannels > MAX_CHANNELS) {
       AudioConverter converter(
-          AudioConfig(aInfo.mChannels, 0, AudioConfig::FORMAT_FLT),
+          AudioConfig(aChannels, 0, AudioConfig::FORMAT_FLT),
           AudioConfig(MAX_CHANNELS, 0, AudioConfig::FORMAT_FLT));
       framesPerPacketFarend = mPacketizerOutput->mPacketSize;
       framesPerPacketFarend =
@@ -913,9 +914,9 @@ void AudioInputProcessing::NotifyOutputData(MediaTrackGraphImpl* aGraph,
       deinterleavedPacketDataChannelPointers.SetLength(MAX_CHANNELS);
     } else {
       interleavedFarend = packet;
-      channelCountFarend = aInfo.mChannels;
+      channelCountFarend = aChannels;
       framesPerPacketFarend = mPacketizerOutput->mPacketSize;
-      deinterleavedPacketDataChannelPointers.SetLength(aInfo.mChannels);
+      deinterleavedPacketDataChannelPointers.SetLength(aChannels);
     }
 
     MOZ_ASSERT(interleavedFarend &&
@@ -941,7 +942,7 @@ void AudioInputProcessing::NotifyOutputData(MediaTrackGraphImpl* aGraph,
 
     // Having the same config for input and output means we potentially save
    // some CPU.
-    StreamConfig inputConfig(aInfo.mRate, channelCountFarend, false);
+    StreamConfig inputConfig(aRate, channelCountFarend, false);
     StreamConfig outputConfig = inputConfig;
 
     // Passing the same pointers here saves a copy inside this function.
@@ -1078,34 +1079,45 @@ void AudioInputProcessing::PacketizeAndProcess(MediaTrackGraphImpl* aGraph,
   }
 }
 
-void AudioInputProcessing::ProcessInput(MediaTrackGraphImpl* aGraph,
-                                        const AudioSegment* aSegment) {
-  MOZ_ASSERT(aGraph);
-  MOZ_ASSERT(aGraph->OnGraphThread());
-
-  if (mEnded || !mEnabled || !mLiveFramesAppended || !mInputData) {
+template <typename T>
+void AudioInputProcessing::InsertInGraph(MediaTrackGraphImpl* aGraph,
+                                         const T* aBuffer, size_t aFrames,
+                                         uint32_t aChannels) {
+  if (mEnded) {
     return;
   }
 
-  // One NotifyInputData might have multiple following ProcessInput calls, but
-  // we only process one input per NotifyInputData call.
-  BufferInfo inputInfo = mInputData.extract();
+  MOZ_ASSERT(aChannels >= 1 && aChannels <= 8, "Support up to 8 channels");
 
-  // If some processing is necessary, packetize and insert in the WebRTC.org
-  // code. Otherwise, directly insert the mic data in the MTG, bypassing all
-  // processing.
-  if (PassThrough(aGraph)) {
-    if (aSegment) {
-      mSegment.AppendSegment(aSegment, mPrincipal);
-    } else {
-      mSegment.AppendFromInterleavedBuffer(inputInfo.mBuffer, inputInfo.mFrames,
-                                           inputInfo.mChannels, mPrincipal);
-    }
+  CheckedInt<size_t> bufferSize(sizeof(T));
+  bufferSize *= aFrames;
+  bufferSize *= aChannels;
+  RefPtr<SharedBuffer> buffer = SharedBuffer::Create(bufferSize);
+  AutoTArray<const T*, 8> channels;
+  if (aChannels == 1) {
+    PodCopy(static_cast<T*>(buffer->Data()), aBuffer, aFrames);
+    channels.AppendElement(static_cast<T*>(buffer->Data()));
   } else {
-    MOZ_ASSERT(aGraph->GraphRate() == inputInfo.mRate);
-    PacketizeAndProcess(aGraph, inputInfo.mBuffer, inputInfo.mFrames,
-                        inputInfo.mRate, inputInfo.mChannels);
+    channels.SetLength(aChannels);
+    AutoTArray<T*, 8> write_channels;
+    write_channels.SetLength(aChannels);
+    T* samples = static_cast<T*>(buffer->Data());
+
+    size_t offset = 0;
+    for (uint32_t i = 0; i < aChannels; ++i) {
+      channels[i] = write_channels[i] = samples + offset;
+      offset += aFrames;
+    }
+
+    DeinterleaveAndConvertBuffer(aBuffer, aFrames, aChannels,
+                                 write_channels.Elements());
   }
+
+  LOG_FRAME("AudioInputProcessing %p Appending %zu frames of raw audio", this,
+            aFrames);
+
+  MOZ_ASSERT(aChannels == channels.Length());
+  mSegment.AppendFrames(buffer.forget(), channels, aFrames, mPrincipal);
 }
 
 void AudioInputProcessing::NotifyInputStopped(MediaTrackGraphImpl* aGraph) {
@@ -1119,13 +1131,14 @@ void AudioInputProcessing::NotifyInputStopped(MediaTrackGraphImpl* aGraph) {
   if (mPacketizerInput) {
     mPacketizerInput->Clear();
   }
-  mInputData.take();
 }
 
 // Called back on GraphDriver thread!
 // Note this can be called back after ::Stop()
 void AudioInputProcessing::NotifyInputData(MediaTrackGraphImpl* aGraph,
-                                           const BufferInfo aInfo,
+                                           const AudioDataValue* aBuffer,
+                                           size_t aFrames, TrackRate aRate,
+                                           uint32_t aChannels,
                                            uint32_t aAlreadyBuffered) {
   MOZ_ASSERT(aGraph->OnGraphThread());
   TRACE();
@@ -1139,7 +1152,14 @@ void AudioInputProcessing::NotifyInputData(MediaTrackGraphImpl* aGraph,
     mLiveBufferingAppended = aAlreadyBuffered;
   }
 
-  mInputData = Some(aInfo);
+  // If some processing is necessary, packetize and insert in the WebRTC.org
+  // code. Otherwise, directly insert the mic data in the MTG, bypassing all
+  // processing.
+  if (PassThrough(aGraph)) {
+    InsertInGraph<AudioDataValue>(aGraph, aBuffer, aFrames, aChannels);
+  } else {
+    PacketizeAndProcess(aGraph, aBuffer, aFrames, aRate, aChannels);
+  }
 }
 
 #define ResetProcessingIfNeeded(_processing) \
@@ -1173,7 +1193,6 @@ void AudioInputProcessing::DeviceChanged(MediaTrackGraphImpl* aGraph) {
 void AudioInputProcessing::End() {
   mEnded = true;
   mSegment.Clear();
-  mInputData.take();
 }
 
 TrackTime AudioInputProcessing::NumBufferedFrames(
@@ -1230,28 +1249,6 @@ void AudioInputTrack::DestroyImpl() {
 void AudioInputTrack::ProcessInput(GraphTime aFrom, GraphTime aTo,
                                    uint32_t aFlags) {
   TRACE_COMMENT("AudioInputTrack %p", this);
-
-  // Check if there is a connected NativeInputTrack
-  NativeInputTrack* source = nullptr;
-  if (!mInputs.IsEmpty()) {
-    for (const MediaInputPort* input : mInputs) {
-      MOZ_ASSERT(input->GetSource());
-      if (input->GetSource()->AsNativeInputTrack()) {
-        source = input->GetSource()->AsNativeInputTrack();
-        break;
-      }
-    }
-  }
-
-  // Push the input data from the connected NativeInputTrack to mInputProcessing
-  if (source) {
-    MOZ_ASSERT(source->GraphImpl() == GraphImpl());
-    MOZ_ASSERT(source->mSampleRate == mSampleRate);
-    MOZ_ASSERT(GraphImpl()->GraphRate() == mSampleRate);
-    mInputProcessing->ProcessInput(GraphImpl(),
-                                   source->GetData<AudioSegment>());
-  }
-
   bool ended = false;
   mInputProcessing->Pull(
       GraphImpl(), aFrom, aTo, TrackTimeToGraphTime(GetEnd()),
diff --git dom/media/webrtc/MediaEngineWebRTCAudio.h dom/media/webrtc/MediaEngineWebRTCAudio.h
index 46a66d9a7a..408bbc6b6e 100644
--- dom/media/webrtc/MediaEngineWebRTCAudio.h
+++ dom/media/webrtc/MediaEngineWebRTCAudio.h
@@ -141,9 +141,13 @@ class AudioInputProcessing : public AudioDataListener {
             GraphTime aTrackEnd, AudioSegment* aSegment,
             bool aLastPullThisIteration, bool* aEnded);
 
-  void NotifyOutputData(MediaTrackGraphImpl* aGraph, BufferInfo aInfo) override;
+  void NotifyOutputData(MediaTrackGraphImpl* aGraph, AudioDataValue* aBuffer,
+                        size_t aFrames, TrackRate aRate,
+                        uint32_t aChannels) override;
   void NotifyInputStopped(MediaTrackGraphImpl* aGraph) override;
-  void NotifyInputData(MediaTrackGraphImpl* aGraph, const BufferInfo aInfo,
+  void NotifyInputData(MediaTrackGraphImpl* aGraph,
+                       const AudioDataValue* aBuffer, size_t aFrames,
+                       TrackRate aRate, uint32_t aChannels,
                        uint32_t aAlreadyBuffered) override;
   bool IsVoiceInput(MediaTrackGraphImpl* aGraph) const override {
     // If we're passing data directly without AEC or any other process, this
@@ -163,8 +167,9 @@ class AudioInputProcessing : public AudioDataListener {
 
   void Disconnect(MediaTrackGraphImpl* aGraph) override;
 
-  // aSegment stores the unprocessed non-interleaved audio input data from mic
-  void ProcessInput(MediaTrackGraphImpl* aGraph, const AudioSegment* aSegment);
+  template <typename T>
+  void InsertInGraph(MediaTrackGraphImpl* aGraph, const T* aBuffer,
+                     size_t aFrames, uint32_t aChannels);
 
   void PacketizeAndProcess(MediaTrackGraphImpl* aGraph,
                            const AudioDataValue* aBuffer, size_t aFrames,
@@ -242,8 +247,6 @@ class AudioInputProcessing : public AudioDataListener {
   bool mEnabled;
   // Whether or not we've ended and removed the AudioInputTrack.
   bool mEnded;
-  // Store the unprocessed interleaved audio input data
-  Maybe<BufferInfo> mInputData;
 };
 
 // MediaTrack subclass tailored for MediaEngineWebRTCMicrophoneSource.
-- 
2.32.0
