/src/mozilla-central/dom/media/encoder/VP8TrackEncoder.cpp
Line | Count | Source (jump to first uncovered line) |
1 | | /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-*/ |
2 | | /* This Source Code Form is subject to the terms of the Mozilla Public |
3 | | * License, v. 2.0. If a copy of the MPL was not distributed with this file, |
4 | | * You can obtain one at http://mozilla.org/MPL/2.0/. */ |
5 | | |
6 | | #include "VP8TrackEncoder.h" |
7 | | |
8 | | #include "GeckoProfiler.h" |
9 | | #include "ImageToI420.h" |
10 | | #include "LayersLogging.h" |
11 | | #include "mozilla/gfx/2D.h" |
12 | | #include "prsystem.h" |
13 | | #include "VideoSegment.h" |
14 | | #include "VideoUtils.h" |
15 | | #include "vpx/vp8cx.h" |
16 | | #include "vpx/vpx_encoder.h" |
17 | | #include "WebMWriter.h" |
18 | | #include "mozilla/media/MediaUtils.h" |
19 | | #include "mozilla/dom/ImageUtils.h" |
20 | | #include "mozilla/dom/ImageBitmapBinding.h" |
21 | | |
22 | | namespace mozilla { |
23 | | |
24 | | LazyLogModule gVP8TrackEncoderLog("VP8TrackEncoder"); |
25 | 0 | #define VP8LOG(level, msg, ...) MOZ_LOG(gVP8TrackEncoderLog, \ |
26 | 0 | level, \ |
27 | 0 | (msg, ##__VA_ARGS__)) |
28 | | |
29 | 0 | #define DEFAULT_BITRATE_BPS 2500000 |
30 | 0 | #define MAX_KEYFRAME_INTERVAL 600 |
31 | | |
32 | | using namespace mozilla::gfx; |
33 | | using namespace mozilla::layers; |
34 | | using namespace mozilla::media; |
35 | | using namespace mozilla::dom; |
36 | | |
// Allocates a fresh libvpx codec context and image wrapper. Neither is
// initialized here; the real encoder setup happens in Init().
VP8TrackEncoder::VP8TrackEncoder(TrackRate aTrackRate,
                                 FrameDroppingMode aFrameDroppingMode)
  : VideoTrackEncoder(aTrackRate, aFrameDroppingMode)
  , mVPXContext(new vpx_codec_ctx_t())
  , mVPXImageWrapper(new vpx_image_t())
{
  MOZ_COUNT_CTOR(VP8TrackEncoder);
}
45 | | |
VP8TrackEncoder::~VP8TrackEncoder()
{
  // Destroy() tears down the codec context and frees the image wrapper.
  Destroy();
  MOZ_COUNT_DTOR(VP8TrackEncoder);
}
51 | | |
// Releases libvpx resources. Safe to call more than once: after the first
// call mInitialized is false, so the codec is not destroyed twice.
void
VP8TrackEncoder::Destroy()
{
  // The codec context is only valid once vpx_codec_enc_init succeeded.
  if (mInitialized) {
    vpx_codec_destroy(mVPXContext);
  }

  // The wrapper only wraps externally-owned pixel data (see vpx_img_wrap in
  // Init()), so this releases no pixel buffers.
  if (mVPXImageWrapper) {
    vpx_img_free(mVPXImageWrapper);
  }
  mInitialized = false;
}
64 | | |
65 | | nsresult |
66 | | VP8TrackEncoder::Init(int32_t aWidth, int32_t aHeight, int32_t aDisplayWidth, |
67 | | int32_t aDisplayHeight) |
68 | 0 | { |
69 | 0 | if (aWidth < 1 || aHeight < 1 || aDisplayWidth < 1 || aDisplayHeight < 1) { |
70 | 0 | return NS_ERROR_FAILURE; |
71 | 0 | } |
72 | 0 | |
73 | 0 | if (mInitialized) { |
74 | 0 | MOZ_ASSERT(false); |
75 | 0 | return NS_ERROR_FAILURE; |
76 | 0 | } |
77 | 0 |
|
78 | 0 | // Encoder configuration structure. |
79 | 0 | vpx_codec_enc_cfg_t config; |
80 | 0 | nsresult rv = SetConfigurationValues(aWidth, aHeight, aDisplayWidth, aDisplayHeight, config); |
81 | 0 | NS_ENSURE_SUCCESS(rv, NS_ERROR_FAILURE); |
82 | 0 |
|
83 | 0 | // Creating a wrapper to the image - setting image data to NULL. Actual |
84 | 0 | // pointer will be set in encode. Setting align to 1, as it is meaningless |
85 | 0 | // (actual memory is not allocated). |
86 | 0 | vpx_img_wrap(mVPXImageWrapper, VPX_IMG_FMT_I420, |
87 | 0 | mFrameWidth, mFrameHeight, 1, nullptr); |
88 | 0 |
|
89 | 0 | vpx_codec_flags_t flags = 0; |
90 | 0 | flags |= VPX_CODEC_USE_OUTPUT_PARTITION; |
91 | 0 | if (vpx_codec_enc_init(mVPXContext, vpx_codec_vp8_cx(), &config, flags)) { |
92 | 0 | return NS_ERROR_FAILURE; |
93 | 0 | } |
94 | 0 | |
95 | 0 | vpx_codec_control(mVPXContext, VP8E_SET_STATIC_THRESHOLD, 1); |
96 | 0 | vpx_codec_control(mVPXContext, VP8E_SET_CPUUSED, -6); |
97 | 0 | vpx_codec_control(mVPXContext, VP8E_SET_TOKEN_PARTITIONS, |
98 | 0 | VP8_ONE_TOKENPARTITION); |
99 | 0 |
|
100 | 0 | SetInitialized(); |
101 | 0 |
|
102 | 0 | return NS_OK; |
103 | 0 | } |
104 | | |
105 | | nsresult |
106 | | VP8TrackEncoder::Reconfigure(int32_t aWidth, int32_t aHeight, |
107 | | int32_t aDisplayWidth, int32_t aDisplayHeight) |
108 | 0 | { |
109 | 0 | if(aWidth <= 0 || aHeight <= 0 || aDisplayWidth <= 0 || aDisplayHeight <= 0) { |
110 | 0 | MOZ_ASSERT(false); |
111 | 0 | return NS_ERROR_FAILURE; |
112 | 0 | } |
113 | 0 |
|
114 | 0 | if (!mInitialized) { |
115 | 0 | MOZ_ASSERT(false); |
116 | 0 | return NS_ERROR_FAILURE; |
117 | 0 | } |
118 | 0 |
|
119 | 0 | // Recreate image wrapper |
120 | 0 | vpx_img_free(mVPXImageWrapper); |
121 | 0 | vpx_img_wrap(mVPXImageWrapper, VPX_IMG_FMT_I420, aWidth, aHeight, 1, nullptr); |
122 | 0 | // Encoder configuration structure. |
123 | 0 | vpx_codec_enc_cfg_t config; |
124 | 0 | nsresult rv = SetConfigurationValues(aWidth, aHeight, aDisplayWidth, aDisplayHeight, config); |
125 | 0 | NS_ENSURE_SUCCESS(rv, NS_ERROR_FAILURE); |
126 | 0 | // Set new configuration |
127 | 0 | if (vpx_codec_enc_config_set(mVPXContext.get(), &config) != VPX_CODEC_OK) { |
128 | 0 | VP8LOG(LogLevel::Error, "Failed to set new configuration"); |
129 | 0 | return NS_ERROR_FAILURE; |
130 | 0 | } |
131 | 0 | return NS_OK; |
132 | 0 | } |
133 | | |
134 | | nsresult |
135 | | VP8TrackEncoder::SetConfigurationValues(int32_t aWidth, int32_t aHeight, int32_t aDisplayWidth, |
136 | | int32_t aDisplayHeight, vpx_codec_enc_cfg_t& config) |
137 | 0 | { |
138 | 0 | mFrameWidth = aWidth; |
139 | 0 | mFrameHeight = aHeight; |
140 | 0 | mDisplayWidth = aDisplayWidth; |
141 | 0 | mDisplayHeight = aDisplayHeight; |
142 | 0 |
|
143 | 0 | // Encoder configuration structure. |
144 | 0 | memset(&config, 0, sizeof(vpx_codec_enc_cfg_t)); |
145 | 0 | if (vpx_codec_enc_config_default(vpx_codec_vp8_cx(), &config, 0)) { |
146 | 0 | VP8LOG(LogLevel::Error, "Failed to get default configuration"); |
147 | 0 | return NS_ERROR_FAILURE; |
148 | 0 | } |
149 | 0 |
|
150 | 0 | config.g_w = mFrameWidth; |
151 | 0 | config.g_h = mFrameHeight; |
152 | 0 | // TODO: Maybe we should have various aFrameRate bitrate pair for each devices? |
153 | 0 | // or for different platform |
154 | 0 |
|
155 | 0 | // rc_target_bitrate needs kbit/s |
156 | 0 | config.rc_target_bitrate = (mVideoBitrate != 0 ? mVideoBitrate : DEFAULT_BITRATE_BPS)/1000; |
157 | 0 |
|
158 | 0 | // Setting the time base of the codec |
159 | 0 | config.g_timebase.num = 1; |
160 | 0 | config.g_timebase.den = mTrackRate; |
161 | 0 |
|
162 | 0 | config.g_error_resilient = 0; |
163 | 0 |
|
164 | 0 | config.g_lag_in_frames = 0; // 0- no frame lagging |
165 | 0 |
|
166 | 0 | int32_t number_of_cores = PR_GetNumberOfProcessors(); |
167 | 0 | if (mFrameWidth * mFrameHeight > 1280 * 960 && number_of_cores >= 6) { |
168 | 0 | config.g_threads = 3; // 3 threads for 1080p. |
169 | 0 | } else if (mFrameWidth * mFrameHeight > 640 * 480 && number_of_cores >= 3) { |
170 | 0 | config.g_threads = 2; // 2 threads for qHD/HD. |
171 | 0 | } else { |
172 | 0 | config.g_threads = 1; // 1 thread for VGA or less |
173 | 0 | } |
174 | 0 |
|
175 | 0 | // rate control settings |
176 | 0 | config.rc_dropframe_thresh = 0; |
177 | 0 | config.rc_end_usage = VPX_VBR; |
178 | 0 | config.g_pass = VPX_RC_ONE_PASS; |
179 | 0 | // ffmpeg doesn't currently support streams that use resize. |
180 | 0 | // Therefore, for safety, we should turn it off until it does. |
181 | 0 | config.rc_resize_allowed = 0; |
182 | 0 | config.rc_undershoot_pct = 100; |
183 | 0 | config.rc_overshoot_pct = 15; |
184 | 0 | config.rc_buf_initial_sz = 500; |
185 | 0 | config.rc_buf_optimal_sz = 600; |
186 | 0 | config.rc_buf_sz = 1000; |
187 | 0 |
|
188 | 0 | // we set key frame interval to automatic and later manually |
189 | 0 | // force key frame by setting VPX_EFLAG_FORCE_KF when mKeyFrameInterval > 0 |
190 | 0 | config.kf_mode = VPX_KF_AUTO; |
191 | 0 | config.kf_max_dist = MAX_KEYFRAME_INTERVAL; |
192 | 0 |
|
193 | 0 | return NS_OK; |
194 | 0 | } |
195 | | |
196 | | already_AddRefed<TrackMetadataBase> |
197 | | VP8TrackEncoder::GetMetadata() |
198 | 0 | { |
199 | 0 | AUTO_PROFILER_LABEL("VP8TrackEncoder::GetMetadata", OTHER); |
200 | 0 |
|
201 | 0 | MOZ_ASSERT(mInitialized || mCanceled); |
202 | 0 |
|
203 | 0 | if (mCanceled || mEncodingComplete) { |
204 | 0 | return nullptr; |
205 | 0 | } |
206 | 0 | |
207 | 0 | if (!mInitialized) { |
208 | 0 | return nullptr; |
209 | 0 | } |
210 | 0 | |
211 | 0 | RefPtr<VP8Metadata> meta = new VP8Metadata(); |
212 | 0 | meta->mWidth = mFrameWidth; |
213 | 0 | meta->mHeight = mFrameHeight; |
214 | 0 | meta->mDisplayWidth = mDisplayWidth; |
215 | 0 | meta->mDisplayHeight = mDisplayHeight; |
216 | 0 |
|
217 | 0 | VP8LOG(LogLevel::Info, "GetMetadata() width=%d, height=%d, " |
218 | 0 | "displayWidht=%d, displayHeight=%d", |
219 | 0 | meta->mWidth, meta->mHeight, meta->mDisplayWidth, meta->mDisplayHeight); |
220 | 0 |
|
221 | 0 | return meta.forget(); |
222 | 0 | } |
223 | | |
/**
 * Drains at most one complete frame's worth of packets from libvpx and, when
 * data was produced, appends a new EncodedFrame (with timestamp/duration in
 * microseconds) to aData.
 *
 * Returns NS_ERROR_NOT_AVAILABLE when libvpx returned no packet at all —
 * the flush loop in GetEncodedTrack uses that to detect end of data — and
 * NS_ERROR_DOM_MEDIA_OVERFLOW_ERR on microsecond conversion overflow.
 */
nsresult
VP8TrackEncoder::GetEncodedPartitions(EncodedFrameContainer& aData)
{
  vpx_codec_iter_t iter = nullptr;
  // Assume a P-frame until a key-frame flag is seen on the final packet.
  EncodedFrame::FrameType frameType = EncodedFrame::VP8_P_FRAME;
  nsTArray<uint8_t> frameData;
  const vpx_codec_cx_pkt_t *pkt = nullptr;
  while ((pkt = vpx_codec_get_cx_data(mVPXContext, &iter)) != nullptr) {
    switch (pkt->kind) {
      case VPX_CODEC_CX_FRAME_PKT: {
        // Copy the encoded data from libvpx to frameData
        frameData.AppendElements((uint8_t*)pkt->data.frame.buf,
                                 pkt->data.frame.sz);
        break;
      }
      default: {
        break;
      }
    }
    // End of frame: stop once the packet is not a fragment of a larger
    // frame (we emit one EncodedFrame per call).
    if ((pkt->data.frame.flags & VPX_FRAME_IS_FRAGMENT) == 0) {
      if (pkt->data.frame.flags & VPX_FRAME_IS_KEY) {
        frameType = EncodedFrame::VP8_I_FRAME;
      }
      break;
    }
  }

  if (!frameData.IsEmpty()) {
    // NOTE(review): this branch reads pkt->data.frame below. If libvpx ever
    // ended the packet stream mid-fragment, pkt would be null here while
    // frameData is non-empty — presumably libvpx always terminates a frame's
    // fragments before returning null; confirm against the libvpx docs.
    // Copy the encoded data to aData.
    EncodedFrame* videoData = new EncodedFrame();
    videoData->SetFrameType(frameType);

    // Convert the timestamp and duration to Usecs.
    CheckedInt64 timestamp = FramesToUsecs(pkt->data.frame.pts, mTrackRate);
    if (!timestamp.isValid()) {
      NS_ERROR("Microsecond timestamp overflow");
      return NS_ERROR_DOM_MEDIA_OVERFLOW_ERR;
    }
    videoData->SetTimeStamp((uint64_t)timestamp.value());

    // Track the total extracted duration in track-rate units, with overflow
    // checking.
    mExtractedDuration += pkt->data.frame.duration;
    if (!mExtractedDuration.isValid()) {
      NS_ERROR("Duration overflow");
      return NS_ERROR_DOM_MEDIA_OVERFLOW_ERR;
    }

    CheckedInt64 totalDuration =
      FramesToUsecs(mExtractedDuration.value(), mTrackRate);
    if (!totalDuration.isValid()) {
      NS_ERROR("Duration overflow");
      return NS_ERROR_DOM_MEDIA_OVERFLOW_ERR;
    }

    // This frame's duration is the delta of cumulative microseconds; doing
    // it this way avoids accumulating per-frame rounding error.
    CheckedInt64 duration = totalDuration - mExtractedDurationUs;
    if (!duration.isValid()) {
      NS_ERROR("Duration overflow");
      return NS_ERROR_DOM_MEDIA_OVERFLOW_ERR;
    }

    mExtractedDurationUs = totalDuration;
    videoData->SetDuration((uint64_t)duration.value());
    videoData->SwapInFrameData(frameData);
    VP8LOG(LogLevel::Verbose,
           "GetEncodedPartitions TimeStamp %" PRIu64 ", Duration %" PRIu64 ", FrameType %d",
           videoData->GetTimeStamp(), videoData->GetDuration(),
           videoData->GetFrameType());
    aData.AppendEncodedFrame(videoData);
  }

  return pkt ? NS_OK : NS_ERROR_NOT_AVAILABLE;
}
296 | | |
297 | | |
// Rounds aValue up to the nearest multiple of N, with a floor of N
// (non-positive and sub-N inputs all map to N).
template<int N>
static int Aligned(int aValue)
{
  if (aValue < N) {
    return N;
  }

  // Classic round-up-to-multiple; exact multiples are left unchanged.
  return ((aValue + N - 1) / N) * N;
}
308 | | |
/**
 * Converts aChunk's image to I420 in mI420Frame and points mVPXImageWrapper's
 * planes at it, reconfiguring or re-creating the encoder first if the frame
 * size changed. Null or forced-black chunks are replaced by a cached black
 * image (mMuteFrame).
 */
nsresult VP8TrackEncoder::PrepareRawFrame(VideoChunk &aChunk)
{
  RefPtr<Image> img;
  if (aChunk.mFrame.GetForceBlack() || aChunk.IsNull()) {
    // Lazily create (and cache) a black frame for null/muted chunks.
    if (!mMuteFrame) {
      mMuteFrame = VideoFrame::CreateBlackImage(gfx::IntSize(mFrameWidth, mFrameHeight));
    }
    if (!mMuteFrame) {
      VP8LOG(LogLevel::Warning, "Failed to allocate black image of size %dx%d",
             mFrameWidth, mFrameHeight);
      // NOTE(review): returning NS_OK here leaves mVPXImageWrapper pointing
      // at the previous frame's planes, so the caller re-encodes stale data —
      // presumably a deliberate best-effort fallback; confirm.
      return NS_OK;
    }
    img = mMuteFrame;
  } else {
    img = aChunk.mFrame.GetImage();
  }

  if (img->GetSize() != IntSize(mFrameWidth, mFrameHeight)) {
    VP8LOG(LogLevel::Info,
           "Dynamic resolution change (was %dx%d, now %dx%d).",
           mFrameWidth, mFrameHeight, img->GetSize().width, img->GetSize().height);


    gfx::IntSize intrinsicSize = aChunk.mFrame.GetIntrinsicSize();
    gfx::IntSize imgSize = aChunk.mFrame.GetImage()->GetSize();
    if (imgSize <= IntSize(mFrameWidth, mFrameHeight) && // check buffer size instead
        // If the new size is less than or equal to old,
        // the existing encoder instance can continue.
        NS_SUCCEEDED(Reconfigure(imgSize.width,
                                 imgSize.height,
                                 intrinsicSize.width,
                                 intrinsicSize.height))) {
      VP8LOG(LogLevel::Info, "Reconfigured VP8 encoder.");
    } else {
      // New frame size is larger; re-create the encoder.
      Destroy();
      nsresult rv = Init(imgSize.width,
                         imgSize.height,
                         intrinsicSize.width,
                         intrinsicSize.height);
      VP8LOG(LogLevel::Info, "Recreated VP8 encoder.");
      NS_ENSURE_SUCCESS(rv, rv);
    }
  }

  // Clear image state from last frame
  mVPXImageWrapper->planes[VPX_PLANE_Y] = nullptr;
  mVPXImageWrapper->stride[VPX_PLANE_Y] = 0;
  mVPXImageWrapper->planes[VPX_PLANE_U] = nullptr;
  mVPXImageWrapper->stride[VPX_PLANE_U] = 0;
  mVPXImageWrapper->planes[VPX_PLANE_V] = nullptr;
  mVPXImageWrapper->stride[VPX_PLANE_V] = 0;

  // I420 layout: full-resolution Y plane followed by half-resolution U and V
  // planes; strides are padded to 16-byte multiples via Aligned<16>.
  int yStride = Aligned<16>(mFrameWidth);
  int yHeight = mFrameHeight;
  size_t yPlaneSize = yStride * yHeight;

  int uvStride = Aligned<16>((mFrameWidth + 1) / 2);
  int uvHeight = (mFrameHeight + 1) / 2;
  size_t uvPlaneSize = uvStride * uvHeight;

  size_t neededSize = yPlaneSize + uvPlaneSize * 2;

  // Grow the conversion buffer only when the new frame needs more space;
  // a smaller frame reuses the existing allocation.
  if (neededSize > mI420FrameSize) {
    mI420Frame.reset(new (fallible) uint8_t[neededSize]);
  }

  if (!mI420Frame) {
    VP8LOG(LogLevel::Warning,
           "Allocating I420 frame of size %zu failed",
           neededSize);
    return NS_ERROR_FAILURE;
  }
  // NOTE(review): mI420FrameSize is set to neededSize even when the old,
  // larger buffer is reused, which forgets the real capacity and can cause a
  // needless reallocation later — harmless, but worth confirming.
  mI420FrameSize = neededSize;

  uint8_t* yChannel = &mI420Frame[0];
  uint8_t* uChannel = &mI420Frame[yPlaneSize];
  uint8_t* vChannel = &mI420Frame[yPlaneSize + uvPlaneSize];

  nsresult rv = ConvertToI420(
    img,
    yChannel,
    yStride,
    uChannel,
    uvStride,
    vChannel,
    uvStride);

  if (NS_FAILED(rv)) {
    VP8LOG(LogLevel::Error, "Converting to I420 failed");
    return rv;
  }

  // Point the libvpx image wrapper at the freshly converted planes.
  mVPXImageWrapper->planes[VPX_PLANE_Y] = yChannel;
  mVPXImageWrapper->stride[VPX_PLANE_Y] = yStride;
  mVPXImageWrapper->planes[VPX_PLANE_U] = uChannel;
  mVPXImageWrapper->stride[VPX_PLANE_U] = uvStride;
  mVPXImageWrapper->planes[VPX_PLANE_V] = vChannel;
  mVPXImageWrapper->stride[VPX_PLANE_V] = uvStride;

  return NS_OK;
}
411 | | |
412 | | // These two define value used in GetNextEncodeOperation to determine the |
413 | | // EncodeOperation for next target frame. |
414 | 0 | #define I_FRAME_RATIO (0.5) |
415 | 0 | #define SKIP_FRAME_RATIO (0.75) |
416 | | |
417 | | /** |
418 | | * Compares the elapsed time from the beginning of GetEncodedTrack and |
419 | | * the processed frame duration in mSourceSegment |
420 | | * in order to set the nextEncodeOperation for next target frame. |
421 | | */ |
422 | | VP8TrackEncoder::EncodeOperation |
423 | | VP8TrackEncoder::GetNextEncodeOperation(TimeDuration aTimeElapsed, |
424 | | StreamTime aProcessedDuration) |
425 | 0 | { |
426 | 0 | if (mFrameDroppingMode == FrameDroppingMode::DISALLOW) { |
427 | 0 | return ENCODE_NORMAL_FRAME; |
428 | 0 | } |
429 | 0 | |
430 | 0 | int64_t durationInUsec = |
431 | 0 | FramesToUsecs(aProcessedDuration, mTrackRate).value(); |
432 | 0 | if (aTimeElapsed.ToMicroseconds() > (durationInUsec * SKIP_FRAME_RATIO)) { |
433 | 0 | // The encoder is too slow. |
434 | 0 | // We should skip next frame to consume the mSourceSegment. |
435 | 0 | return SKIP_FRAME; |
436 | 0 | } else if (aTimeElapsed.ToMicroseconds() > (durationInUsec * I_FRAME_RATIO)) { |
437 | 0 | // The encoder is a little slow. |
438 | 0 | // We force the encoder to encode an I-frame to accelerate. |
439 | 0 | return ENCODE_I_FRAME; |
440 | 0 | } else { |
441 | 0 | return ENCODE_NORMAL_FRAME; |
442 | 0 | } |
443 | 0 | } |
444 | | |
/**
 * Encoding flow in GetEncodedTrack():
 * 1: Check the mInitialized state and the packet duration.
 * 2: Move the data from mRawSegment to mSourceSegment.
 * 3: Encode the video chunks in mSourceSegment in a for-loop.
 * 3.1: The duration is taken straight from the video chunk's duration.
 * 3.2: Setup the video chunk with mVPXImageWrapper by PrepareRawFrame().
 * 3.3: Pass frame to vp8 encoder by vpx_codec_encode().
 * 3.4: Get the encoded frame from encoder by GetEncodedPartitions().
 * 3.5: Set the nextEncodeOperation for the next target frame.
 *      There is a heuristic: If the frame duration we have processed in
 *      mSourceSegment is 100ms, means that we can't spend more than 100ms to
 *      encode it.
 * 4. Remove the encoded chunks in mSourceSegment after for-loop.
 */
nsresult
VP8TrackEncoder::GetEncodedTrack(EncodedFrameContainer& aData)
{
  AUTO_PROFILER_LABEL("VP8TrackEncoder::GetEncodedTrack", OTHER);

  MOZ_ASSERT(mInitialized || mCanceled);

  if (mCanceled || mEncodingComplete) {
    return NS_ERROR_FAILURE;
  }

  if (!mInitialized) {
    return NS_ERROR_FAILURE;
  }

  // Pull the pending raw video into mSourceSegment.
  TakeTrackData(mSourceSegment);

  StreamTime totalProcessedDuration = 0;
  TimeStamp timebase = TimeStamp::Now();
  EncodeOperation nextEncodeOperation = ENCODE_NORMAL_FRAME;

  for (VideoSegment::ChunkIterator iter(mSourceSegment);
       !iter.IsEnded(); iter.Next()) {
    VideoChunk &chunk = *iter;
    VP8LOG(LogLevel::Verbose, "nextEncodeOperation is %d for frame of duration %" PRId64,
           nextEncodeOperation, chunk.GetDuration());

    // Encode frame.
    if (nextEncodeOperation != SKIP_FRAME) {
      nsresult rv = PrepareRawFrame(chunk);
      NS_ENSURE_SUCCESS(rv, NS_ERROR_FAILURE);

      // Encode the data with VP8 encoder
      int flags = 0;
      if (nextEncodeOperation == ENCODE_I_FRAME) {
        VP8LOG(LogLevel::Warning, "MediaRecorder lagging behind. Encoding keyframe.");
        flags |= VPX_EFLAG_FORCE_KF;
      }

      // Sum duration of non-key frames and force keyframe if exceeded the given keyframe interval
      if (mKeyFrameInterval > 0)
      {
        // mKeyFrameInterval is compared in milliseconds, so convert the
        // accumulated track-rate duration before the comparison.
        if ((mDurationSinceLastKeyframe * 1000 / mTrackRate) >= mKeyFrameInterval)
        {
          mDurationSinceLastKeyframe = 0;
          flags |= VPX_EFLAG_FORCE_KF;
        }
        mDurationSinceLastKeyframe += chunk.GetDuration();
      }

      if (vpx_codec_encode(mVPXContext, mVPXImageWrapper, mEncodedTimestamp,
                           (unsigned long)chunk.GetDuration(), flags,
                           VPX_DL_REALTIME)) {
        VP8LOG(LogLevel::Error, "vpx_codec_encode failed to encode the frame.");
        return NS_ERROR_FAILURE;
      }
      // Get the encoded data from VP8 encoder.
      rv = GetEncodedPartitions(aData);
      NS_ENSURE_SUCCESS(rv, NS_ERROR_FAILURE);
    } else {
      // SKIP_FRAME
      // Extend the duration of the last encoded data in aData
      // because this frame will be skipped.
      VP8LOG(LogLevel::Warning, "MediaRecorder lagging behind. Skipping a frame.");
      RefPtr<EncodedFrame> last = aData.GetEncodedFrames().LastElement();
      if (last) {
        mExtractedDuration += chunk.mDuration;
        if (!mExtractedDuration.isValid()) {
          NS_ERROR("skipped duration overflow");
          return NS_ERROR_DOM_MEDIA_OVERFLOW_ERR;
        }

        // Same delta-of-cumulative-microseconds scheme as
        // GetEncodedPartitions, to avoid accumulating rounding error.
        CheckedInt64 totalDuration = FramesToUsecs(mExtractedDuration.value(), mTrackRate);
        CheckedInt64 skippedDuration = totalDuration - mExtractedDurationUs;
        mExtractedDurationUs = totalDuration;
        if (!skippedDuration.isValid()) {
          NS_ERROR("skipped duration overflow");
          return NS_ERROR_DOM_MEDIA_OVERFLOW_ERR;
        }
        last->SetDuration(last->GetDuration() +
                          (static_cast<uint64_t>(skippedDuration.value())));
      }
    }

    // Move forward the mEncodedTimestamp.
    mEncodedTimestamp += chunk.GetDuration();
    totalProcessedDuration += chunk.GetDuration();

    // Check what to do next.
    TimeDuration elapsedTime = TimeStamp::Now() - timebase;
    nextEncodeOperation = GetNextEncodeOperation(elapsedTime,
                                                 totalProcessedDuration);
  }

  // Remove the chunks we have processed.
  mSourceSegment.Clear();

  // End of stream, pull the rest frames in encoder.
  if (mEndOfStream) {
    VP8LOG(LogLevel::Debug, "mEndOfStream is true");
    mEncodingComplete = true;
    // Bug 1243611, keep calling vpx_codec_encode and vpx_codec_get_cx_data
    // until vpx_codec_get_cx_data return null.
    do {
      if (vpx_codec_encode(mVPXContext, nullptr, mEncodedTimestamp,
                           0, 0, VPX_DL_REALTIME)) {
        return NS_ERROR_FAILURE;
      }
    } while(NS_SUCCEEDED(GetEncodedPartitions(aData)));
  }

  return NS_OK ;
}
573 | | |
574 | | } // namespace mozilla |