Skip to content

Commit 7ef8093

Browse files
Guido Urdaneta authored and Commit Bot committed
Revert "NV12 support for VP8 simulcast"
This reverts commit 76d3e7a. Reason for revert: Causes multiple Chromium WPT tests to crash, preventing rolls. Sample failed run: https://ci.chromium.org/p/chromium/builders/try/win10_chromium_x64_rel_ng/685757? Sample stack trace: #0 0x7ff8623fbde9 base::debug::CollectStackTrace() STDERR: #1 0x7ff862311ca3 [2665012:17:1009/162250.249660:WARNING:timestamp_aligner.cc(131)] too short translated timestamp interval: system time (us) = 3042652370324, interval (us) = 834 STDERR: base::debug::StackTrace::StackTrace() STDERR: #2 0x7ff8623fb93b base::debug::(anonymous namespace)::StackDumpSignalHandler() STDERR: #3 0x7ff857a70140 [2665012:17:1009/162250.249947:WARNING:timestamp_aligner.cc(131)] too short translated timestamp interval: system time (us) = 3042652370634, interval (us) = 742 STDERR: (/lib/x86_64-linux-gnu/libpthread-2.31.so+0x1413f) STDERR: #4 0x7ff85778edb1 gsignal STDERR: #5 0x7ff857778537 abort STDERR: #6 0x7ff855d5eee2 [2665012:17:1009/162250.250342:WARNING:timestamp_aligner.cc(131)] too short translated timestamp interval: system time (us) = 3042652371030, interval (us) = 706 STDERR: [2665012:17:1009/162250.250514:WARNING:timestamp_aligner.cc(131)] too short translated timestamp interval: system time (us) = 3042652371204, interval (us) = 963 STDERR: rtc::webrtc_checks_impl::FatalLog() STDERR: #7 0x7ff855f14e62 webrtc::LibvpxVp8Encoder::PrepareRawImagesForEncoding() STDERR: #8 0x7ff855f14412 webrtc::LibvpxVp8Encoder::Encode() STDERR: #9 0x7ff855bae765 webrtc::SimulcastEncoderAdapter::Encode() STDERR: #10 0x7ff85607d598 webrtc::VideoStreamEncoder::EncodeVideoFrame() STDERR: #11 0x7ff85607c60d webrtc::VideoStreamEncoder::MaybeEncodeVideoFrame() STDERR: #12 0x7ff8560816f5 webrtc::webrtc_new_closure_impl::ClosureTask<>::Run() STDERR: #13 0x7ff855b352b5 (anonymous namespace)::WebrtcTaskQueue::RunTask() STDERR: #14 0x7ff855b3531e base::internal::Invoker<>::RunOnce() STDERR: #15
0x7ff86239785b base::TaskAnnotator::RunTask() STDERR: #16 0x7ff8623c8557 base::internal::TaskTracker::RunSkipOnShutdown() STDERR: #17 0x7ff8623c7d92 base::internal::TaskTracker::RunTask() STDERR: #18 0x7ff862415a06 base::internal::TaskTrackerPosix::RunTask() STDERR: #19 0x7ff8623c75e6 base::internal::TaskTracker::RunAndPopNextTask() STDERR: #20 0x7ff8623d3a8d base::internal::WorkerThread::RunWorker() STDERR: #21 0x7ff8623d368d base::internal::WorkerThread::RunPooledWorker() STDERR: #22 0x7ff862416509 base::(anonymous namespace)::ThreadFunc() STDERR: #23 0x7ff857a64ea7 start_thread Original change's description: > NV12 support for VP8 simulcast > > Tested using video_loopback with generated NV12 frames. > > Bug: webrtc:11635, webrtc:11975 > Change-Id: I14b2d663c55a83d80e48e226fcf706cb18903193 > Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/186722 > Commit-Queue: Evan Shrubsole <[email protected]> > Reviewed-by: Ilya Nikolaevskiy <[email protected]> > Cr-Commit-Position: refs/heads/master@{#32325} [email protected],[email protected] # Not skipping CQ checks because original CL landed > 1 day ago. Bug: webrtc:11635 Bug: webrtc:11975 Change-Id: I61c8aed1270bc9c2f7f0577fa5ca717a325f548a Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/187484 Reviewed-by: Guido Urdaneta <[email protected]> Reviewed-by: Ilya Nikolaevskiy <[email protected]> Commit-Queue: Guido Urdaneta <[email protected]> Cr-Commit-Position: refs/heads/master@{#32369}
1 parent 6f04b65 commit 7ef8093

File tree

3 files changed

+42
-158
lines changed

3 files changed

+42
-158
lines changed

Diff for: modules/video_coding/codecs/vp8/libvpx_vp8_encoder.cc

+42-114
Original file line number | Diff line number | Diff line change
@@ -497,10 +497,6 @@ int LibvpxVp8Encoder::InitEncode(const VideoCodec* inst,
497497
return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
498498
}
499499

500-
// Use the previous pixel format to avoid extra image allocations.
501-
vpx_img_fmt_t pixel_format =
502-
raw_images_.empty() ? VPX_IMG_FMT_I420 : raw_images_[0].fmt;
503-
504500
int retVal = Release();
505501
if (retVal < 0) {
506502
return retVal;
@@ -654,8 +650,8 @@ int LibvpxVp8Encoder::InitEncode(const VideoCodec* inst,
654650
// Creating a wrapper to the image - setting image data to NULL.
655651
// Actual pointer will be set in encode. Setting align to 1, as it
656652
// is meaningless (no memory allocation is done here).
657-
libvpx_->img_wrap(&raw_images_[0], pixel_format, inst->width, inst->height, 1,
658-
NULL);
653+
libvpx_->img_wrap(&raw_images_[0], VPX_IMG_FMT_I420, inst->width,
654+
inst->height, 1, NULL);
659655

660656
// Note the order we use is different from webm, we have lowest resolution
661657
// at position 0 and they have highest resolution at position 0.
@@ -703,9 +699,10 @@ int LibvpxVp8Encoder::InitEncode(const VideoCodec* inst,
703699
// Setting alignment to 32 - as that ensures at least 16 for all
704700
// planes (32 for Y, 16 for U,V). Libvpx sets the requested stride for
705701
// the y plane, but only half of it to the u and v planes.
706-
libvpx_->img_alloc(
707-
&raw_images_[i], pixel_format, inst->simulcastStream[stream_idx].width,
708-
inst->simulcastStream[stream_idx].height, kVp832ByteAlign);
702+
libvpx_->img_alloc(&raw_images_[i], VPX_IMG_FMT_I420,
703+
inst->simulcastStream[stream_idx].width,
704+
inst->simulcastStream[stream_idx].height,
705+
kVp832ByteAlign);
709706
SetStreamState(stream_bitrates[stream_idx] > 0, stream_idx);
710707
vpx_configs_[i].rc_target_bitrate = stream_bitrates[stream_idx];
711708
if (stream_bitrates[stream_idx] > 0) {
@@ -1017,12 +1014,26 @@ int LibvpxVp8Encoder::Encode(const VideoFrame& frame,
10171014
flags[i] = send_key_frame ? VPX_EFLAG_FORCE_KF : EncodeFlags(tl_configs[i]);
10181015
}
10191016

1020-
rtc::scoped_refptr<VideoFrameBuffer> input_image = frame.video_frame_buffer();
1021-
if (input_image->type() != VideoFrameBuffer::Type::kI420 &&
1022-
input_image->type() != VideoFrameBuffer::Type::kNV12) {
1023-
input_image = input_image->ToI420();
1024-
}
1025-
PrepareRawImagesForEncoding(input_image);
1017+
rtc::scoped_refptr<I420BufferInterface> input_image =
1018+
frame.video_frame_buffer()->ToI420();
1019+
// Since we are extracting raw pointers from |input_image| to
1020+
// |raw_images_[0]|, the resolution of these frames must match.
1021+
RTC_DCHECK_EQ(input_image->width(), raw_images_[0].d_w);
1022+
RTC_DCHECK_EQ(input_image->height(), raw_images_[0].d_h);
1023+
1024+
// Image in vpx_image_t format.
1025+
// Input image is const. VP8's raw image is not defined as const.
1026+
raw_images_[0].planes[VPX_PLANE_Y] =
1027+
const_cast<uint8_t*>(input_image->DataY());
1028+
raw_images_[0].planes[VPX_PLANE_U] =
1029+
const_cast<uint8_t*>(input_image->DataU());
1030+
raw_images_[0].planes[VPX_PLANE_V] =
1031+
const_cast<uint8_t*>(input_image->DataV());
1032+
1033+
raw_images_[0].stride[VPX_PLANE_Y] = input_image->StrideY();
1034+
raw_images_[0].stride[VPX_PLANE_U] = input_image->StrideU();
1035+
raw_images_[0].stride[VPX_PLANE_V] = input_image->StrideV();
1036+
10261037
struct CleanUpOnExit {
10271038
explicit CleanUpOnExit(vpx_image_t& raw_image) : raw_image_(raw_image) {}
10281039
~CleanUpOnExit() {
@@ -1033,6 +1044,22 @@ int LibvpxVp8Encoder::Encode(const VideoFrame& frame,
10331044
vpx_image_t& raw_image_;
10341045
} clean_up_on_exit(raw_images_[0]);
10351046

1047+
for (size_t i = 1; i < encoders_.size(); ++i) {
1048+
// Scale the image down a number of times by downsampling factor
1049+
libyuv::I420Scale(
1050+
raw_images_[i - 1].planes[VPX_PLANE_Y],
1051+
raw_images_[i - 1].stride[VPX_PLANE_Y],
1052+
raw_images_[i - 1].planes[VPX_PLANE_U],
1053+
raw_images_[i - 1].stride[VPX_PLANE_U],
1054+
raw_images_[i - 1].planes[VPX_PLANE_V],
1055+
raw_images_[i - 1].stride[VPX_PLANE_V], raw_images_[i - 1].d_w,
1056+
raw_images_[i - 1].d_h, raw_images_[i].planes[VPX_PLANE_Y],
1057+
raw_images_[i].stride[VPX_PLANE_Y], raw_images_[i].planes[VPX_PLANE_U],
1058+
raw_images_[i].stride[VPX_PLANE_U], raw_images_[i].planes[VPX_PLANE_V],
1059+
raw_images_[i].stride[VPX_PLANE_V], raw_images_[i].d_w,
1060+
raw_images_[i].d_h, libyuv::kFilterBilinear);
1061+
}
1062+
10361063
if (send_key_frame) {
10371064
// Adapt the size of the key frame when in screenshare with 1 temporal
10381065
// layer.
@@ -1284,105 +1311,6 @@ int LibvpxVp8Encoder::RegisterEncodeCompleteCallback(
12841311
return WEBRTC_VIDEO_CODEC_OK;
12851312
}
12861313

1287-
void LibvpxVp8Encoder::PrepareRawImagesForEncoding(
1288-
const rtc::scoped_refptr<VideoFrameBuffer>& frame) {
1289-
// Since we are extracting raw pointers from |input_image| to
1290-
// |raw_images_[0]|, the resolution of these frames must match.
1291-
RTC_DCHECK_EQ(frame->width(), raw_images_[0].d_w);
1292-
RTC_DCHECK_EQ(frame->height(), raw_images_[0].d_h);
1293-
switch (frame->type()) {
1294-
case VideoFrameBuffer::Type::kI420:
1295-
return PrepareI420Image(frame->GetI420());
1296-
case VideoFrameBuffer::Type::kNV12:
1297-
return PrepareNV12Image(frame->GetNV12());
1298-
default:
1299-
RTC_NOTREACHED();
1300-
}
1301-
}
1302-
1303-
void LibvpxVp8Encoder::MaybeUpdatePixelFormat(vpx_img_fmt fmt) {
1304-
RTC_DCHECK(!raw_images_.empty());
1305-
if (raw_images_[0].fmt == fmt) {
1306-
RTC_DCHECK(std::all_of(
1307-
std::next(raw_images_.begin()), raw_images_.end(),
1308-
[fmt](const vpx_image_t& raw_img) { return raw_img.fmt == fmt; }))
1309-
<< "Not all raw images had the right format!";
1310-
return;
1311-
}
1312-
RTC_LOG(INFO) << "Updating vp8 encoder pixel format to "
1313-
<< (fmt == VPX_IMG_FMT_NV12 ? "NV12" : "I420");
1314-
for (size_t i = 0; i < raw_images_.size(); ++i) {
1315-
vpx_image_t& img = raw_images_[i];
1316-
auto d_w = img.d_w;
1317-
auto d_h = img.d_h;
1318-
libvpx_->img_free(&img);
1319-
// First image is wrapping the input frame, the rest are allocated.
1320-
if (i == 0) {
1321-
libvpx_->img_wrap(&img, fmt, d_w, d_h, 1, NULL);
1322-
} else {
1323-
libvpx_->img_alloc(&img, fmt, d_w, d_h, kVp832ByteAlign);
1324-
}
1325-
}
1326-
}
1327-
1328-
void LibvpxVp8Encoder::PrepareI420Image(const I420BufferInterface* frame) {
1329-
RTC_DCHECK(!raw_images_.empty());
1330-
MaybeUpdatePixelFormat(VPX_IMG_FMT_I420);
1331-
// Image in vpx_image_t format.
1332-
// Input image is const. VP8's raw image is not defined as const.
1333-
raw_images_[0].planes[VPX_PLANE_Y] = const_cast<uint8_t*>(frame->DataY());
1334-
raw_images_[0].planes[VPX_PLANE_U] = const_cast<uint8_t*>(frame->DataU());
1335-
raw_images_[0].planes[VPX_PLANE_V] = const_cast<uint8_t*>(frame->DataV());
1336-
1337-
raw_images_[0].stride[VPX_PLANE_Y] = frame->StrideY();
1338-
raw_images_[0].stride[VPX_PLANE_U] = frame->StrideU();
1339-
raw_images_[0].stride[VPX_PLANE_V] = frame->StrideV();
1340-
1341-
for (size_t i = 1; i < encoders_.size(); ++i) {
1342-
// Scale the image down a number of times by downsampling factor
1343-
libyuv::I420Scale(
1344-
raw_images_[i - 1].planes[VPX_PLANE_Y],
1345-
raw_images_[i - 1].stride[VPX_PLANE_Y],
1346-
raw_images_[i - 1].planes[VPX_PLANE_U],
1347-
raw_images_[i - 1].stride[VPX_PLANE_U],
1348-
raw_images_[i - 1].planes[VPX_PLANE_V],
1349-
raw_images_[i - 1].stride[VPX_PLANE_V], raw_images_[i - 1].d_w,
1350-
raw_images_[i - 1].d_h, raw_images_[i].planes[VPX_PLANE_Y],
1351-
raw_images_[i].stride[VPX_PLANE_Y], raw_images_[i].planes[VPX_PLANE_U],
1352-
raw_images_[i].stride[VPX_PLANE_U], raw_images_[i].planes[VPX_PLANE_V],
1353-
raw_images_[i].stride[VPX_PLANE_V], raw_images_[i].d_w,
1354-
raw_images_[i].d_h, libyuv::kFilterBilinear);
1355-
}
1356-
}
1357-
1358-
void LibvpxVp8Encoder::PrepareNV12Image(const NV12BufferInterface* frame) {
1359-
RTC_DCHECK(!raw_images_.empty());
1360-
MaybeUpdatePixelFormat(VPX_IMG_FMT_NV12);
1361-
// Image in vpx_image_t format.
1362-
// Input image is const. VP8's raw image is not defined as const.
1363-
raw_images_[0].planes[VPX_PLANE_Y] = const_cast<uint8_t*>(frame->DataY());
1364-
raw_images_[0].planes[VPX_PLANE_U] = const_cast<uint8_t*>(frame->DataUV());
1365-
raw_images_[0].planes[VPX_PLANE_V] = raw_images_[0].planes[VPX_PLANE_U] + 1;
1366-
raw_images_[0].stride[VPX_PLANE_Y] = frame->StrideY();
1367-
raw_images_[0].stride[VPX_PLANE_U] = frame->StrideUV();
1368-
raw_images_[0].stride[VPX_PLANE_V] = frame->StrideUV();
1369-
1370-
for (size_t i = 1; i < encoders_.size(); ++i) {
1371-
// Scale the image down a number of times by downsampling factor
1372-
libyuv::NV12Scale(
1373-
raw_images_[i - 1].planes[VPX_PLANE_Y],
1374-
raw_images_[i - 1].stride[VPX_PLANE_Y],
1375-
raw_images_[i - 1].planes[VPX_PLANE_U],
1376-
raw_images_[i - 1].stride[VPX_PLANE_U], raw_images_[i - 1].d_w,
1377-
raw_images_[i - 1].d_h, raw_images_[i].planes[VPX_PLANE_Y],
1378-
raw_images_[i].stride[VPX_PLANE_Y], raw_images_[i].planes[VPX_PLANE_U],
1379-
raw_images_[i].stride[VPX_PLANE_U], raw_images_[i].d_w,
1380-
raw_images_[i].d_h, libyuv::kFilterBilinear);
1381-
raw_images_[i].planes[VPX_PLANE_V] = raw_images_[i].planes[VPX_PLANE_U] + 1;
1382-
raw_images_[i].stride[VPX_PLANE_V] = raw_images_[i].stride[VPX_PLANE_U] + 1;
1383-
}
1384-
}
1385-
13861314
// static
13871315
LibvpxVp8Encoder::VariableFramerateExperiment
13881316
LibvpxVp8Encoder::ParseVariableFramerateConfig(std::string group_name) {

Diff for: modules/video_coding/codecs/vp8/libvpx_vp8_encoder.h

-6
Original file line number | Diff line number | Diff line change
@@ -93,12 +93,6 @@ class LibvpxVp8Encoder : public VideoEncoder {
9393

9494
bool UpdateVpxConfiguration(size_t stream_index);
9595

96-
void PrepareRawImagesForEncoding(
97-
const rtc::scoped_refptr<VideoFrameBuffer>& frame);
98-
void MaybeUpdatePixelFormat(vpx_img_fmt fmt);
99-
void PrepareI420Image(const I420BufferInterface* frame);
100-
void PrepareNV12Image(const NV12BufferInterface* frame);
101-
10296
const std::unique_ptr<LibvpxInterface> libvpx_;
10397

10498
const CpuSpeedExperiment experimental_cpu_speed_config_arm_;

Diff for: modules/video_coding/codecs/vp8/test/vp8_impl_unittest.cc

-38
Original file line number | Diff line number | Diff line change
@@ -266,44 +266,6 @@ TEST_F(TestVp8Impl, EncodeFrameAndRelease) {
266266
encoder_->Encode(NextInputFrame(), nullptr));
267267
}
268268

269-
TEST_F(TestVp8Impl, EncodeNv12FrameSimulcast) {
270-
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Release());
271-
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
272-
encoder_->InitEncode(&codec_settings_, kSettings));
273-
274-
EncodedImage encoded_frame;
275-
CodecSpecificInfo codec_specific_info;
276-
input_frame_generator_ = test::CreateSquareFrameGenerator(
277-
kWidth, kHeight, test::FrameGeneratorInterface::OutputType::kNV12,
278-
absl::nullopt);
279-
EncodeAndWaitForFrame(NextInputFrame(), &encoded_frame, &codec_specific_info);
280-
281-
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Release());
282-
EXPECT_EQ(WEBRTC_VIDEO_CODEC_UNINITIALIZED,
283-
encoder_->Encode(NextInputFrame(), nullptr));
284-
}
285-
286-
TEST_F(TestVp8Impl, EncodeI420FrameAfterNv12Frame) {
287-
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Release());
288-
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
289-
encoder_->InitEncode(&codec_settings_, kSettings));
290-
291-
EncodedImage encoded_frame;
292-
CodecSpecificInfo codec_specific_info;
293-
input_frame_generator_ = test::CreateSquareFrameGenerator(
294-
kWidth, kHeight, test::FrameGeneratorInterface::OutputType::kNV12,
295-
absl::nullopt);
296-
EncodeAndWaitForFrame(NextInputFrame(), &encoded_frame, &codec_specific_info);
297-
input_frame_generator_ = test::CreateSquareFrameGenerator(
298-
kWidth, kHeight, test::FrameGeneratorInterface::OutputType::kI420,
299-
absl::nullopt);
300-
EncodeAndWaitForFrame(NextInputFrame(), &encoded_frame, &codec_specific_info);
301-
302-
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Release());
303-
EXPECT_EQ(WEBRTC_VIDEO_CODEC_UNINITIALIZED,
304-
encoder_->Encode(NextInputFrame(), nullptr));
305-
}
306-
307269
TEST_F(TestVp8Impl, InitDecode) {
308270
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Release());
309271
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,

0 commit comments

Comments
 (0)