Diffstat (limited to 'chromium/media/gpu')
-rw-r--r-- chromium/media/gpu/BUILD.gn | 4
-rw-r--r-- chromium/media/gpu/OWNERS | 1
-rw-r--r-- chromium/media/gpu/android/codec_image.cc | 34
-rw-r--r-- chromium/media/gpu/android/codec_image.h | 22
-rw-r--r-- chromium/media/gpu/android/codec_image_unittest.cc | 8
-rw-r--r-- chromium/media/gpu/android/frame_info_helper.cc | 229
-rw-r--r-- chromium/media/gpu/android/frame_info_helper.h | 21
-rw-r--r-- chromium/media/gpu/android/media_codec_video_decoder.cc | 76
-rw-r--r-- chromium/media/gpu/android/media_codec_video_decoder.h | 37
-rw-r--r-- chromium/media/gpu/android/media_codec_video_decoder_unittest.cc | 46
-rw-r--r-- chromium/media/gpu/android/video_frame_factory_impl.cc | 65
-rw-r--r-- chromium/media/gpu/android/video_frame_factory_impl.h | 16
-rw-r--r-- chromium/media/gpu/android/video_frame_factory_impl_unittest.cc | 120
-rw-r--r-- chromium/media/gpu/chromeos/BUILD.gn | 1
-rw-r--r-- chromium/media/gpu/chromeos/chromeos_video_decoder_factory.cc | 28
-rw-r--r-- chromium/media/gpu/chromeos/chromeos_video_decoder_factory.h | 9
-rw-r--r-- chromium/media/gpu/chromeos/fourcc.cc | 13
-rw-r--r-- chromium/media/gpu/chromeos/fourcc.h | 4
-rw-r--r-- chromium/media/gpu/chromeos/fourcc_unittests.cc | 10
-rw-r--r-- chromium/media/gpu/chromeos/image_processor.cc | 7
-rw-r--r-- chromium/media/gpu/chromeos/image_processor.h | 2
-rw-r--r-- chromium/media/gpu/chromeos/image_processor_backend.cc | 2
-rw-r--r-- chromium/media/gpu/chromeos/image_processor_backend.h | 5
-rw-r--r-- chromium/media/gpu/chromeos/image_processor_factory.cc | 11
-rw-r--r-- chromium/media/gpu/chromeos/image_processor_factory.h | 1
-rw-r--r-- chromium/media/gpu/chromeos/image_processor_test.cc | 42
-rw-r--r-- chromium/media/gpu/chromeos/libyuv_image_processor_backend.cc | 82
-rw-r--r-- chromium/media/gpu/chromeos/libyuv_image_processor_backend.h | 2
-rw-r--r-- chromium/media/gpu/chromeos/mailbox_video_frame_converter.cc | 11
-rw-r--r-- chromium/media/gpu/chromeos/platform_video_frame_pool.cc | 29
-rw-r--r-- chromium/media/gpu/chromeos/platform_video_frame_pool.h | 15
-rw-r--r-- chromium/media/gpu/chromeos/platform_video_frame_pool_unittest.cc | 115
-rw-r--r-- chromium/media/gpu/chromeos/platform_video_frame_utils.cc | 14
-rw-r--r-- chromium/media/gpu/chromeos/vd_video_decode_accelerator.cc | 7
-rw-r--r-- chromium/media/gpu/chromeos/vd_video_decode_accelerator.h | 7
-rw-r--r-- chromium/media/gpu/chromeos/video_decoder_pipeline.cc | 144
-rw-r--r-- chromium/media/gpu/chromeos/video_decoder_pipeline.h | 57
-rw-r--r-- chromium/media/gpu/chromeos/video_decoder_pipeline_unittest.cc | 229
-rw-r--r-- chromium/media/gpu/h264_decoder.cc | 8
-rw-r--r-- chromium/media/gpu/h264_dpb.cc | 1
-rw-r--r-- chromium/media/gpu/ipc/client/gpu_video_decode_accelerator_host.cc | 4
-rw-r--r-- chromium/media/gpu/ipc/service/gpu_video_decode_accelerator.cc | 4
-rw-r--r-- chromium/media/gpu/ipc/service/picture_buffer_manager.cc | 11
-rw-r--r-- chromium/media/gpu/ipc/service/vda_video_decoder.cc | 64
-rw-r--r-- chromium/media/gpu/ipc/service/vda_video_decoder.h | 62
-rw-r--r-- chromium/media/gpu/ipc/service/vda_video_decoder_unittest.cc | 17
-rw-r--r-- chromium/media/gpu/mac/vt_video_encode_accelerator_mac.cc | 8
-rw-r--r-- chromium/media/gpu/test/BUILD.gn | 14
-rw-r--r-- chromium/media/gpu/v4l2/BUILD.gn | 6
-rw-r--r-- chromium/media/gpu/v4l2/v4l2_decode_surface.cc | 5
-rw-r--r-- chromium/media/gpu/v4l2/v4l2_device.cc | 104
-rw-r--r-- chromium/media/gpu/v4l2/v4l2_device.h | 28
-rw-r--r-- chromium/media/gpu/v4l2/v4l2_image_processor_backend.cc | 18
-rw-r--r-- chromium/media/gpu/v4l2/v4l2_image_processor_backend.h | 3
-rw-r--r-- chromium/media/gpu/v4l2/v4l2_slice_video_decode_accelerator.cc | 4
-rw-r--r-- chromium/media/gpu/v4l2/v4l2_vda_helpers.cc | 7
-rw-r--r-- chromium/media/gpu/v4l2/v4l2_vda_helpers.h | 3
-rw-r--r-- chromium/media/gpu/v4l2/v4l2_video_decode_accelerator.cc | 21
-rw-r--r-- chromium/media/gpu/v4l2/v4l2_video_decode_accelerator.h | 10
-rw-r--r-- chromium/media/gpu/v4l2/v4l2_video_decoder.cc (renamed from chromium/media/gpu/v4l2/v4l2_slice_video_decoder.cc) | 171
-rw-r--r-- chromium/media/gpu/v4l2/v4l2_video_decoder.h (renamed from chromium/media/gpu/v4l2/v4l2_slice_video_decoder.h) | 39
-rw-r--r-- chromium/media/gpu/v4l2/v4l2_video_decoder_backend.h | 16
-rw-r--r-- chromium/media/gpu/v4l2/v4l2_video_decoder_backend_stateful.cc | 608
-rw-r--r-- chromium/media/gpu/v4l2/v4l2_video_decoder_backend_stateful.h | 151
-rw-r--r-- chromium/media/gpu/v4l2/v4l2_video_decoder_backend_stateless.cc | 6
-rw-r--r-- chromium/media/gpu/v4l2/v4l2_video_decoder_backend_stateless.h | 3
-rw-r--r-- chromium/media/gpu/v4l2/v4l2_video_encode_accelerator.cc | 15
-rw-r--r-- chromium/media/gpu/vaapi/BUILD.gn | 15
-rw-r--r-- chromium/media/gpu/vaapi/accelerated_video_encoder.cc | 17
-rw-r--r-- chromium/media/gpu/vaapi/accelerated_video_encoder.h | 27
-rw-r--r-- chromium/media/gpu/vaapi/test_utils.cc | 12
-rw-r--r-- chromium/media/gpu/vaapi/va.sigs | 3
-rw-r--r-- chromium/media/gpu/vaapi/vaapi_image_processor_backend.cc | 18
-rw-r--r-- chromium/media/gpu/vaapi/vaapi_image_processor_backend.h | 2
-rw-r--r-- chromium/media/gpu/vaapi/vaapi_unittest.cc | 37
-rw-r--r-- chromium/media/gpu/vaapi/vaapi_video_decode_accelerator.h | 1
-rw-r--r-- chromium/media/gpu/vaapi/vaapi_video_decoder.cc | 43
-rw-r--r-- chromium/media/gpu/vaapi/vaapi_video_decoder.h | 4
-rw-r--r-- chromium/media/gpu/vaapi/vaapi_video_encode_accelerator.cc | 134
-rw-r--r-- chromium/media/gpu/vaapi/vaapi_video_encode_accelerator.h | 15
-rw-r--r-- chromium/media/gpu/vaapi/vaapi_video_encode_accelerator_unittest.cc | 299
-rw-r--r-- chromium/media/gpu/vaapi/vaapi_wrapper.cc | 313
-rw-r--r-- chromium/media/gpu/vaapi/vaapi_wrapper.h | 69
-rw-r--r-- chromium/media/gpu/vaapi/vp9_encoder.cc | 151
-rw-r--r-- chromium/media/gpu/vaapi/vp9_encoder.h | 13
-rw-r--r-- chromium/media/gpu/vaapi/vp9_encoder_unittest.cc | 381
-rw-r--r-- chromium/media/gpu/vaapi/vp9_rate_control.cc | 53
-rw-r--r-- chromium/media/gpu/vaapi/vp9_rate_control.h | 38
-rw-r--r-- chromium/media/gpu/video_encode_accelerator_perf_tests.cc | 32
-rw-r--r-- chromium/media/gpu/video_encode_accelerator_tests.cc | 275
-rw-r--r-- chromium/media/gpu/video_encode_accelerator_unittest.cc | 158
-rw-r--r-- chromium/media/gpu/vp8_decoder.cc | 3
-rw-r--r-- chromium/media/gpu/vp9_reference_frame_vector.cc | 22
-rw-r--r-- chromium/media/gpu/vp9_reference_frame_vector.h | 5
-rw-r--r-- chromium/media/gpu/windows/av1_guids.h | 52
-rw-r--r-- chromium/media/gpu/windows/d3d11_decoder_configurator.cc | 5
-rw-r--r-- chromium/media/gpu/windows/d3d11_decoder_configurator.h | 4
-rw-r--r-- chromium/media/gpu/windows/d3d11_h264_accelerator.cc | 16
-rw-r--r-- chromium/media/gpu/windows/d3d11_h264_accelerator.h | 3
-rw-r--r-- chromium/media/gpu/windows/d3d11_picture_buffer.cc | 13
-rw-r--r-- chromium/media/gpu/windows/d3d11_picture_buffer.h | 15
-rw-r--r-- chromium/media/gpu/windows/d3d11_texture_selector.cc | 2
-rw-r--r-- chromium/media/gpu/windows/d3d11_texture_wrapper.cc | 19
-rw-r--r-- chromium/media/gpu/windows/d3d11_video_decoder.cc | 418
-rw-r--r-- chromium/media/gpu/windows/d3d11_video_decoder.h | 21
-rw-r--r-- chromium/media/gpu/windows/d3d11_video_decoder_client.h | 10
-rw-r--r-- chromium/media/gpu/windows/d3d11_video_decoder_unittest.cc | 2
-rw-r--r-- chromium/media/gpu/windows/d3d11_vp9_accelerator.cc | 25
-rw-r--r-- chromium/media/gpu/windows/d3d11_vp9_accelerator.h | 3
-rw-r--r-- chromium/media/gpu/windows/d3d11_vp9_picture.cc | 3
-rw-r--r-- chromium/media/gpu/windows/d3d11_vp9_picture.h | 4
-rw-r--r-- chromium/media/gpu/windows/dxva_video_decode_accelerator_win.cc | 140
-rw-r--r-- chromium/media/gpu/windows/dxva_video_decode_accelerator_win.h | 18
-rw-r--r-- chromium/media/gpu/windows/supported_profile_helpers.cc | 218
-rw-r--r-- chromium/media/gpu/windows/supported_profile_helpers.h | 56
-rw-r--r-- chromium/media/gpu/windows/supported_profile_helpers_unittest.cc | 261
116 files changed, 4558 insertions(+), 1827 deletions(-)
diff --git a/chromium/media/gpu/BUILD.gn b/chromium/media/gpu/BUILD.gn
index 767a3316092..ec65dea1c67 100644
--- a/chromium/media/gpu/BUILD.gn
+++ b/chromium/media/gpu/BUILD.gn
@@ -30,6 +30,7 @@ component("gpu") {
"//chrome/gpu",
"//chromecast/*",
"//components/arc/mojom:media",
+ "//components/arc/mojom:media_mojolpm",
"//components/arc/video_accelerator",
"//components/mirroring/service:mirroring_service",
"//components/chromeos_camera/*",
@@ -166,6 +167,7 @@ component("gpu") {
if (is_win) {
sources += [
+ "windows/av1_guids.h",
"windows/d3d11_com_defs.h",
"windows/d3d11_copying_texture_wrapper.cc",
"windows/d3d11_copying_texture_wrapper.h",
@@ -559,7 +561,9 @@ if (use_v4l2_codec || use_vaapi) {
data = [ "//media/test/data/" ]
deps = [
":buildflags",
+ "test:frame_validator",
"test:helpers",
+ "test:test_helpers",
"test:video_encoder",
"test:video_encoder_test_environment",
"//media:test_support",
diff --git a/chromium/media/gpu/OWNERS b/chromium/media/gpu/OWNERS
index 2b325453c62..c805b55640c 100644
--- a/chromium/media/gpu/OWNERS
+++ b/chromium/media/gpu/OWNERS
@@ -5,6 +5,7 @@ sandersd@chromium.org
# For chromeos/, linux/, v4l2/, and vaapi/ -specific changes.
acourbot@chromium.org
+frkoenig@chromium.org
hiroh@chromium.org
jcliang@chromium.org
jkardatzke@chromium.org
diff --git a/chromium/media/gpu/android/codec_image.cc b/chromium/media/gpu/android/codec_image.cc
index ae3a90da5c9..1a0b7d93184 100644
--- a/chromium/media/gpu/android/codec_image.cc
+++ b/chromium/media/gpu/android/codec_image.cc
@@ -25,11 +25,11 @@ CodecImage::~CodecImage() {
void CodecImage::Initialize(
std::unique_ptr<CodecOutputBufferRenderer> output_buffer_renderer,
- scoped_refptr<CodecBufferWaitCoordinator> codec_buffer_wait_coordinator,
+ bool is_texture_owner_backed,
PromotionHintAggregator::NotifyPromotionHintCB promotion_hint_cb) {
DCHECK(output_buffer_renderer);
output_buffer_renderer_ = std::move(output_buffer_renderer);
- codec_buffer_wait_coordinator_ = std::move(codec_buffer_wait_coordinator);
+ is_texture_owner_backed_ = is_texture_owner_backed;
promotion_hint_cb_ = std::move(promotion_hint_cb);
}
@@ -42,7 +42,6 @@ void CodecImage::NotifyUnused() {
// our reference to the TextureOwner (if any). In other words, undo anything
// that we did in Initialize.
ReleaseCodecBuffer();
- codec_buffer_wait_coordinator_.reset();
promotion_hint_cb_ = base::NullCallback();
for (auto& cb : unused_cbs_)
@@ -65,7 +64,7 @@ unsigned CodecImage::GetDataType() {
CodecImage::BindOrCopy CodecImage::ShouldBindOrCopy() {
// If we're using an overlay, then pretend it's bound. That way, we'll get
// calls to ScheduleOverlayPlane. Otherwise, CopyTexImage needs to be called.
- return !codec_buffer_wait_coordinator_ ? BIND : COPY;
+ return is_texture_owner_backed_ ? COPY : BIND;
}
bool CodecImage::BindTexImage(unsigned target) {
@@ -82,16 +81,17 @@ bool CodecImage::CopyTexImage(unsigned target) {
if (target != GL_TEXTURE_EXTERNAL_OES)
return false;
+ if (!output_buffer_renderer_)
+ return true;
+
GLint bound_service_id = 0;
glGetIntegerv(GL_TEXTURE_BINDING_EXTERNAL_OES, &bound_service_id);
// The currently bound texture should be the texture owner's texture.
if (bound_service_id !=
static_cast<GLint>(
- codec_buffer_wait_coordinator_->texture_owner()->GetTextureId()))
+ output_buffer_renderer_->texture_owner()->GetTextureId()))
return false;
- if (!output_buffer_renderer_)
- return true;
output_buffer_renderer_->RenderToTextureOwnerFrontBuffer(
BindingsMode::kEnsureTexImageBound);
@@ -113,7 +113,7 @@ bool CodecImage::ScheduleOverlayPlane(
bool enable_blend,
std::unique_ptr<gfx::GpuFence> gpu_fence) {
TRACE_EVENT0("media", "CodecImage::ScheduleOverlayPlane");
- if (codec_buffer_wait_coordinator_) {
+ if (is_texture_owner_backed_) {
DVLOG(1) << "Invalid call to ScheduleOverlayPlane; this image is "
"TextureOwner backed.";
return false;
@@ -131,7 +131,7 @@ void CodecImage::NotifyOverlayPromotion(bool promotion,
if (!promotion_hint_cb_)
return;
- if (!codec_buffer_wait_coordinator_ && promotion) {
+ if (!is_texture_owner_backed_ && promotion) {
// When |CodecImage| is already backed by SurfaceView, and it should be used
// as overlay.
@@ -157,16 +157,6 @@ void CodecImage::OnMemoryDump(base::trace_event::ProcessMemoryDump* pmd,
uint64_t process_tracing_id,
const std::string& dump_name) {}
-void CodecImage::GetTextureMatrix(float matrix[16]) {
- static constexpr float kIdentity[16]{
- 1, 0, 0, 0, //
- 0, 1, 0, 0, //
- 0, 0, 1, 0, //
- 0, 0, 0, 1 //
- };
- memcpy(matrix, kIdentity, sizeof(kIdentity));
-}
-
void CodecImage::NotifyPromotionHint(bool promotion_hint,
int display_x,
int display_y,
@@ -174,7 +164,7 @@ void CodecImage::NotifyPromotionHint(bool promotion_hint,
int display_height) {
// TODO(crbug.com/1004859): Add back early skip due to suspecting affecting
// video smoothness.
- if (promotion_hint && !codec_buffer_wait_coordinator_)
+ if (promotion_hint && !is_texture_owner_backed_)
return;
NotifyOverlayPromotion(
@@ -241,11 +231,11 @@ CodecImage::GetAHardwareBuffer() {
// as free when viz is still using us for drawing. This can happen if the
// renderer crashes before receiving returns. It's hard to catch elsewhere,
// so just handle it gracefully here.
- if (!codec_buffer_wait_coordinator_)
+ if (!output_buffer_renderer_)
return nullptr;
RenderToTextureOwnerFrontBuffer(BindingsMode::kDontRestoreIfBound);
- return codec_buffer_wait_coordinator_->texture_owner()->GetAHardwareBuffer();
+ return output_buffer_renderer_->texture_owner()->GetAHardwareBuffer();
}
gfx::Rect CodecImage::GetCropRect() {
diff --git a/chromium/media/gpu/android/codec_image.h b/chromium/media/gpu/android/codec_image.h
index 8693118f918..c765e24dbfe 100644
--- a/chromium/media/gpu/android/codec_image.h
+++ b/chromium/media/gpu/android/codec_image.h
@@ -15,7 +15,6 @@
#include "base/memory/ref_counted_delete_on_sequence.h"
#include "gpu/command_buffer/service/gl_stream_texture_image.h"
#include "gpu/command_buffer/service/stream_texture_shared_image_interface.h"
-#include "media/gpu/android/codec_buffer_wait_coordinator.h"
#include "media/gpu/android/codec_output_buffer_renderer.h"
#include "media/gpu/android/promotion_hint_aggregator.h"
#include "media/gpu/media_gpu_export.h"
@@ -54,7 +53,7 @@ class MEDIA_GPU_EXPORT CodecImage
// not in use.
void Initialize(
std::unique_ptr<CodecOutputBufferRenderer> output_buffer_renderer,
- scoped_refptr<CodecBufferWaitCoordinator> codec_buffer_wait_coordinator,
+ bool is_texture_owner_backed,
PromotionHintAggregator::NotifyPromotionHintCB promotion_hint_cb);
// Add a callback that will be called when we're marked as unused. Does not
@@ -91,7 +90,6 @@ class MEDIA_GPU_EXPORT CodecImage
GetAHardwareBuffer() override;
gfx::Rect GetCropRect() override;
// gpu::gles2::GLStreamTextureMatrix implementation
- void GetTextureMatrix(float xform[16]) override;
// Currently this API is implemented by the NotifyOverlayPromotion, since this
// API is expected to be removed.
void NotifyPromotionHint(bool promotion_hint,
@@ -129,18 +127,11 @@ class MEDIA_GPU_EXPORT CodecImage
// Whether this image is backed by a texture owner.
- // We want to check for texture_owner owned by
- // |codec_buffer_wait_coordinator_| and hence only checking for
- // |codec_buffer_wait_coordinator_| is enough here.
- // TODO(vikassoni): Update the method name in future refactorings.
- bool is_texture_owner_backed() const {
- return !!codec_buffer_wait_coordinator_;
- }
+ bool is_texture_owner_backed() const { return is_texture_owner_backed_; }
scoped_refptr<gpu::TextureOwner> texture_owner() const {
- return codec_buffer_wait_coordinator_
- ? codec_buffer_wait_coordinator_->texture_owner()
- : nullptr;
+ return output_buffer_renderer_ ? output_buffer_renderer_->texture_owner()
+ : nullptr;
}
// Renders this image to the front buffer of its backing surface.
@@ -180,9 +171,8 @@ class MEDIA_GPU_EXPORT CodecImage
// frame available event before calling UpdateTexImage().
bool RenderToTextureOwnerFrontBuffer(BindingsMode bindings_mode);
- // The CodecBufferWaitCoordinator that |output_buffer_| will be rendered to.
- // Or null, if this image is backed by an overlay.
- scoped_refptr<CodecBufferWaitCoordinator> codec_buffer_wait_coordinator_;
+ // Whether this image is texture_owner or overlay backed.
+ bool is_texture_owner_backed_ = false;
// The bounds last sent to the overlay.
gfx::Rect most_recent_bounds_;
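
The net effect of the CodecImage change above is that callers now pass a plain flag instead of a CodecBufferWaitCoordinator, and the texture owner is reached through the output buffer renderer. A caller-side sketch of the new Initialize() signature, assuming hypothetical |buffer_renderer| and |hint_cb| variables (the call shapes match the unittest changes below):

    // Overlay-backed image: no texture owner behind it.
    image->Initialize(std::move(buffer_renderer),
                      /*is_texture_owner_backed=*/false,
                      PromotionHintAggregator::NotifyPromotionHintCB());

    // TextureOwner-backed image: the owner is now reachable via
    // output_buffer_renderer_->texture_owner() rather than the coordinator.
    image->Initialize(std::move(buffer_renderer),
                      /*is_texture_owner_backed=*/true,
                      std::move(hint_cb));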
diff --git a/chromium/media/gpu/android/codec_image_unittest.cc b/chromium/media/gpu/android/codec_image_unittest.cc
index 7fcd7eeba13..87496e335c9 100644
--- a/chromium/media/gpu/android/codec_image_unittest.cc
+++ b/chromium/media/gpu/android/codec_image_unittest.cc
@@ -94,7 +94,7 @@ class CodecImageTest : public testing::Test {
scoped_refptr<CodecImage> image = new CodecImage(buffer_renderer->size());
image->Initialize(
- std::move(buffer_renderer), codec_buffer_wait_coordinator,
+ std::move(buffer_renderer), kind == kTextureOwner,
base::BindRepeating(&PromotionHintReceiver::OnPromotionHint,
base::Unretained(&promotion_hint_receiver_)));
@@ -144,7 +144,7 @@ TEST_F(CodecImageTest, UnusedCBRunsOnNotifyUnused) {
base::MockCallback<CodecImage::UnusedCB> cb_2;
auto i = NewImage(kTextureOwner);
ASSERT_TRUE(i->get_codec_output_buffer_for_testing());
- ASSERT_TRUE(i->is_texture_owner_backed());
+ ASSERT_TRUE(i->HasTextureOwner());
i->AddUnusedCB(cb_1.Get());
i->AddUnusedCB(cb_2.Get());
EXPECT_CALL(cb_1, Run(i.get()));
@@ -153,7 +153,7 @@ TEST_F(CodecImageTest, UnusedCBRunsOnNotifyUnused) {
// Also verify that the output buffer and texture owner are released.
i->NotifyUnused();
EXPECT_FALSE(i->get_codec_output_buffer_for_testing());
- EXPECT_FALSE(i->is_texture_owner_backed());
+ EXPECT_FALSE(i->HasTextureOwner());
// Verify that an additional call doesn't crash. It should do nothing.
i->NotifyUnused();
@@ -391,7 +391,7 @@ TEST_F(CodecImageTest, CodedSizeVsVisibleSize) {
std::make_unique<CodecOutputBufferRenderer>(std::move(buffer), nullptr);
scoped_refptr<CodecImage> image = new CodecImage(coded_size);
- image->Initialize(std::move(buffer_renderer), nullptr,
+ image->Initialize(std::move(buffer_renderer), false,
PromotionHintAggregator::NotifyPromotionHintCB());
// Verify that CodecImage::GetSize returns coded_size and not visible_size
diff --git a/chromium/media/gpu/android/frame_info_helper.cc b/chromium/media/gpu/android/frame_info_helper.cc
index efe1873cb1c..b3cf5299aa7 100644
--- a/chromium/media/gpu/android/frame_info_helper.cc
+++ b/chromium/media/gpu/android/frame_info_helper.cc
@@ -4,10 +4,13 @@
#include "media/gpu/android/frame_info_helper.h"
+#include "base/threading/sequence_bound.h"
#include "gpu/command_buffer/service/shared_image_video.h"
#include "gpu/ipc/service/command_buffer_stub.h"
#include "gpu/ipc/service/gpu_channel.h"
#include "gpu/ipc/service/gpu_channel_manager.h"
+#include "media/base/bind_to_current_loop.h"
+#include "media/gpu/android/codec_output_buffer_renderer.h"
namespace media {
@@ -21,100 +24,188 @@ FrameInfoHelper::FrameInfo& FrameInfoHelper::FrameInfo::operator=(
// Concrete implementation of FrameInfoHelper that renders output buffers and
// gets the FrameInfo they need.
-class FrameInfoHelperImpl : public FrameInfoHelper,
- public gpu::CommandBufferStub::DestructionObserver {
+class FrameInfoHelperImpl : public FrameInfoHelper {
public:
- FrameInfoHelperImpl(SharedImageVideoProvider::GetStubCB get_stub_cb) {
- stub_ = get_stub_cb.Run();
- if (stub_)
- stub_->AddDestructionObserver(this);
+ FrameInfoHelperImpl(scoped_refptr<base::SequencedTaskRunner> gpu_task_runner,
+ SharedImageVideoProvider::GetStubCB get_stub_cb) {
+ on_gpu_ = base::SequenceBound<OnGpu>(std::move(gpu_task_runner),
+ std::move(get_stub_cb));
}
- ~FrameInfoHelperImpl() override {
- if (stub_)
- stub_->RemoveDestructionObserver(this);
+ ~FrameInfoHelperImpl() override = default;
+
+ void GetFrameInfo(std::unique_ptr<CodecOutputBufferRenderer> buffer_renderer,
+ FrameInfoReadyCB callback) override {
+ Request request = {.buffer_renderer = std::move(buffer_renderer),
+ .callback = std::move(callback)};
+ requests_.push(std::move(request));
+ // If there were no pending requests start processing queue now.
+ if (requests_.size() == 1)
+ ProcessRequestsQueue();
}
- void GetFrameInfo(
- std::unique_ptr<CodecOutputBufferRenderer> buffer_renderer,
- base::OnceCallback<
- void(std::unique_ptr<CodecOutputBufferRenderer>, FrameInfo, bool)> cb)
- override {
- if (!buffer_renderer) {
- std::move(cb).Run(nullptr, FrameInfo(), false);
- return;
+ private:
+ struct Request {
+ std::unique_ptr<CodecOutputBufferRenderer> buffer_renderer;
+ FrameInfoReadyCB callback;
+ };
+
+ class OnGpu : public gpu::CommandBufferStub::DestructionObserver {
+ public:
+ OnGpu(SharedImageVideoProvider::GetStubCB get_stub_cb) {
+ stub_ = get_stub_cb.Run();
+ if (stub_)
+ stub_->AddDestructionObserver(this);
}
- auto texture_owner = buffer_renderer->texture_owner();
+ ~OnGpu() override {
+ if (stub_)
+ stub_->RemoveDestructionObserver(this);
+ }
- FrameInfo info;
+ void OnWillDestroyStub(bool have_context) override {
+ DCHECK(stub_);
+ stub_ = nullptr;
+ }
- // Indicates that the FrameInfo is reliable and can be cached by caller.
- // It's true if we either return cached values or we attempted to render
- // frame and succeeded.
- bool success = true;
-
- // We default to visible size if if we can't get real size
- info.coded_size = buffer_renderer->size();
- info.visible_rect = gfx::Rect(info.coded_size);
-
- if (texture_owner) {
- if (visible_size_ == buffer_renderer->size()) {
- info = frame_info_;
- } else if (buffer_renderer->RenderToTextureOwnerFrontBuffer(
- CodecOutputBufferRenderer::BindingsMode::
- kDontRestoreIfBound)) {
- visible_size_ = buffer_renderer->size();
- texture_owner->GetCodedSizeAndVisibleRect(
- visible_size_, &frame_info_.coded_size, &frame_info_.visible_rect);
-
- frame_info_.ycbcr_info = GetYCbCrInfo(texture_owner.get());
- info = frame_info_;
- } else {
- // We attempted to render frame and failed, mark request as failed so
- // caller won't cache best-guess values.
- success = false;
+ void GetFrameInfo(
+ std::unique_ptr<CodecOutputBufferRenderer> buffer_renderer,
+ base::OnceCallback<void(std::unique_ptr<CodecOutputBufferRenderer>,
+ base::Optional<FrameInfo>)> cb) {
+ DCHECK(buffer_renderer);
+
+ auto texture_owner = buffer_renderer->texture_owner();
+ DCHECK(texture_owner);
+
+ base::Optional<FrameInfo> info;
+
+ if (buffer_renderer->RenderToTextureOwnerFrontBuffer(
+ CodecOutputBufferRenderer::BindingsMode::kDontRestoreIfBound)) {
+ gfx::Size coded_size;
+ gfx::Rect visible_rect;
+ if (texture_owner->GetCodedSizeAndVisibleRect(
+ buffer_renderer->size(), &coded_size, &visible_rect)) {
+ info.emplace();
+ info->coded_size = coded_size;
+ info->visible_rect = visible_rect;
+ info->ycbcr_info = GetYCbCrInfo(texture_owner.get());
+ }
}
+
+ std::move(cb).Run(std::move(buffer_renderer), info);
+ }
+
+ private:
+ // Gets YCbCrInfo from last rendered frame.
+ base::Optional<gpu::VulkanYCbCrInfo> GetYCbCrInfo(
+ gpu::TextureOwner* texture_owner) {
+ gpu::ContextResult result;
+
+ if (!stub_)
+ return base::nullopt;
+
+ auto shared_context =
+ stub_->channel()->gpu_channel_manager()->GetSharedContextState(
+ &result);
+ auto context_provider =
+ (result == gpu::ContextResult::kSuccess) ? shared_context : nullptr;
+ if (!context_provider)
+ return base::nullopt;
+
+ return gpu::SharedImageVideo::GetYcbcrInfo(texture_owner,
+ context_provider);
}
- std::move(cb).Run(std::move(buffer_renderer), frame_info_, success);
+ gpu::CommandBufferStub* stub_ = nullptr;
+ };
+
+ FrameInfo GetFrameInfoWithVisibleSize(const gfx::Size& visible_size) {
+ FrameInfo info;
+ info.coded_size = visible_size;
+ info.visible_rect = gfx::Rect(visible_size);
+ return info;
}
- void OnWillDestroyStub(bool have_context) override {
- DCHECK(stub_);
- stub_ = nullptr;
+ void OnFrameInfoReady(
+ std::unique_ptr<CodecOutputBufferRenderer> buffer_renderer,
+ base::Optional<FrameInfo> frame_info) {
+ DCHECK(buffer_renderer);
+ DCHECK(!requests_.empty());
+
+ auto& request = requests_.front();
+
+ if (frame_info) {
+ visible_size_ = buffer_renderer->size();
+ frame_info_ = *frame_info;
+ std::move(request.callback).Run(std::move(buffer_renderer), frame_info_);
+ } else {
+ // It's possible that we will fail to render the frame and so won't be able
+ // to obtain FrameInfo. In this case we don't cache new values; we complete
+ // the current request with the visible size and will attempt to render the
+ // next frame with the next request.
+ auto info = GetFrameInfoWithVisibleSize(buffer_renderer->size());
+ std::move(request.callback)
+ .Run(std::move(buffer_renderer), std::move(info));
+ }
+ requests_.pop();
+ ProcessRequestsQueue();
}
- private:
- // Gets YCbCrInfo from last rendered frame.
- base::Optional<gpu::VulkanYCbCrInfo> GetYCbCrInfo(
- gpu::TextureOwner* texture_owner) {
- gpu::ContextResult result;
- if (!stub_)
- return base::nullopt;
-
- auto shared_context =
- stub_->channel()->gpu_channel_manager()->GetSharedContextState(&result);
- auto context_provider =
- (result == gpu::ContextResult::kSuccess) ? shared_context : nullptr;
- if (!context_provider)
- return base::nullopt;
-
- return gpu::SharedImageVideo::GetYcbcrInfo(texture_owner, context_provider);
+ void ProcessRequestsQueue() {
+ while (!requests_.empty()) {
+ auto& request = requests_.front();
+
+ if (!request.buffer_renderer) {
+ // If we don't have buffer_renderer we can Run callback immediately.
+ std::move(request.callback).Run(nullptr, FrameInfo());
+ } else if (!request.buffer_renderer->texture_owner()) {
+ // If there is no texture_owner (SurfaceView case), we can't render
+ // frame and get proper size. But as Display Compositor won't render
+ // this frame the actual size is not important, assume coded_size =
+ // visible_size.
+ auto info =
+ GetFrameInfoWithVisibleSize(request.buffer_renderer->size());
+ std::move(request.callback)
+ .Run(std::move(request.buffer_renderer), std::move(info));
+ } else if (visible_size_ == request.buffer_renderer->size()) {
+ // We have cached the results of last frame info request with the same
+ // size. We assume that coded_size doesn't change if the visible_size
+ // stays the same.
+ std::move(request.callback)
+ .Run(std::move(request.buffer_renderer), frame_info_);
+ } else {
+ // We have texture_owner and we don't have cached value, so we need to
+ // hop to GPU thread and render the frame to get proper size.
+ auto cb = BindToCurrentLoop(
+ base::BindOnce(&FrameInfoHelperImpl::OnFrameInfoReady,
+ weak_factory_.GetWeakPtr()));
+
+ on_gpu_.Post(FROM_HERE, &OnGpu::GetFrameInfo,
+ std::move(request.buffer_renderer), std::move(cb));
+ // We didn't complete this request quite yet, so we can't process queue
+ // any further.
+ break;
+ }
+ requests_.pop();
+ }
}
- gpu::CommandBufferStub* stub_ = nullptr;
+ base::SequenceBound<OnGpu> on_gpu_;
+ std::queue<Request> requests_;
+ // Cached values.
FrameInfo frame_info_;
gfx::Size visible_size_;
+
+ base::WeakPtrFactory<FrameInfoHelperImpl> weak_factory_{this};
};
// static
-base::SequenceBound<FrameInfoHelper> FrameInfoHelper::Create(
+std::unique_ptr<FrameInfoHelper> FrameInfoHelper::Create(
scoped_refptr<base::SequencedTaskRunner> gpu_task_runner,
SharedImageVideoProvider::GetStubCB get_stub_cb) {
- return base::SequenceBound<FrameInfoHelperImpl>(std::move(gpu_task_runner),
- std::move(get_stub_cb));
+ return std::make_unique<FrameInfoHelperImpl>(std::move(gpu_task_runner),
+ std::move(get_stub_cb));
}
-} // namespace media
+} // namespace media
\ No newline at end of file
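
The rewritten helper splits its work across two sequences: the request queue stays on the caller's sequence, while the OnGpu half is constructed, invoked, and destroyed on the GPU task runner via base::SequenceBound (the same mechanism used for on_gpu_ above). A minimal sketch of that pattern; GpuSideWorker and Example are illustrative names, not part of this change:

    #include "base/threading/sequence_bound.h"

    class GpuSideWorker {
     public:
      explicit GpuSideWorker(int config) {}
      void DoWork() {}  // Always runs on the bound task runner.
    };

    void Example(scoped_refptr<base::SequencedTaskRunner> gpu_task_runner) {
      // Constructed from the caller's sequence, but the instance lives on
      // |gpu_task_runner|: posted calls run there, as does the destructor
      // when |worker| goes out of scope.
      base::SequenceBound<GpuSideWorker> worker(gpu_task_runner, /*config=*/1);
      worker.Post(FROM_HERE, &GpuSideWorker::DoWork);
    }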
diff --git a/chromium/media/gpu/android/frame_info_helper.h b/chromium/media/gpu/android/frame_info_helper.h
index 5fc4ffca328..1f60bceb094 100644
--- a/chromium/media/gpu/android/frame_info_helper.h
+++ b/chromium/media/gpu/android/frame_info_helper.h
@@ -6,12 +6,11 @@
#define MEDIA_GPU_ANDROID_FRAME_INFO_HELPER_H_
#include "base/optional.h"
-#include "base/threading/sequence_bound.h"
-#include "media/gpu/android/codec_image.h"
#include "media/gpu/android/shared_image_video_provider.h"
#include "media/gpu/media_gpu_export.h"
namespace media {
+class CodecOutputBufferRenderer;
// Helper class to fetch YCbCrInfo for Vulkan from a CodecImage.
class MEDIA_GPU_EXPORT FrameInfoHelper {
@@ -29,7 +28,11 @@ class MEDIA_GPU_EXPORT FrameInfoHelper {
base::Optional<gpu::VulkanYCbCrInfo> ycbcr_info;
};
- static base::SequenceBound<FrameInfoHelper> Create(
+ using FrameInfoReadyCB =
+ base::OnceCallback<void(std::unique_ptr<CodecOutputBufferRenderer>,
+ FrameInfo)>;
+
+ static std::unique_ptr<FrameInfoHelper> Create(
scoped_refptr<base::SequencedTaskRunner> gpu_task_runner,
SharedImageVideoProvider::GetStubCB get_stub_cb);
@@ -40,9 +43,11 @@ class MEDIA_GPU_EXPORT FrameInfoHelper {
// attempt to get YCbCrInfo and cache it. If all necessary info is cached the
// call will leave buffer_renderer intact and it can be rendered later.
// Rendering can fail for various reasons. This function will make best efforts to
- // fill FrameInfo which can be used to create VideoFrame, but shouldn't be
- // cached by caller. Last parameter in |cb| is bool that indicates that info
- // is reliable.
+ // fill FrameInfo which can be used to create VideoFrame.
+ //
+ // Callbacks will be executed on the caller's sequence and guaranteed to be
+ // called in order of GetFrameInfo calls. Callback can be called before this
+ // function returns if all necessary info is available right away.
//
// While this API might seem to be out of its Vulkan mind, it's this
// complicated to (a) prevent rendering frames out of order to the front
@@ -50,9 +55,7 @@ class MEDIA_GPU_EXPORT FrameInfoHelper {
// can't get a YCbCrInfo from a CodecImage due to timeouts.
virtual void GetFrameInfo(
std::unique_ptr<CodecOutputBufferRenderer> buffer_renderer,
- base::OnceCallback<void(std::unique_ptr<CodecOutputBufferRenderer>,
- FrameInfo,
- bool)> cb) = 0;
+ FrameInfoReadyCB callback) = 0;
protected:
FrameInfoHelper() = default;
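
With the bool "reliable" parameter gone, a caller of the new interface looks roughly like this; the handler below is an illustrative free function, not part of the diff:

    void OnFrameInfo(std::unique_ptr<CodecOutputBufferRenderer> buffer_renderer,
                     FrameInfoHelper::FrameInfo info) {
      // |buffer_renderer| comes back intact so the frame can still be rendered
      // later; |info| always holds a usable coded_size and visible_rect, even
      // when rendering failed and the values are a best-effort guess.
    }

    frame_info_helper->GetFrameInfo(std::move(buffer_renderer),
                                    base::BindOnce(&OnFrameInfo));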
diff --git a/chromium/media/gpu/android/media_codec_video_decoder.cc b/chromium/media/gpu/android/media_codec_video_decoder.cc
index 6e127512fc2..848983075f6 100644
--- a/chromium/media/gpu/android/media_codec_video_decoder.cc
+++ b/chromium/media/gpu/android/media_codec_video_decoder.cc
@@ -19,6 +19,7 @@
#include "base/trace_event/trace_event.h"
#include "media/base/android/media_codec_bridge_impl.h"
#include "media/base/android/media_codec_util.h"
+#include "media/base/async_destroy_video_decoder.h"
#include "media/base/bind_to_current_loop.h"
#include "media/base/cdm_context.h"
#include "media/base/decoder_buffer.h"
@@ -227,7 +228,9 @@ MediaCodecVideoDecoder::MediaCodecVideoDecoder(
overlay_factory_cb_(std::move(overlay_factory_cb)),
device_info_(device_info),
enable_threaded_texture_mailboxes_(
- gpu_preferences.enable_threaded_texture_mailboxes) {
+ gpu_preferences.enable_threaded_texture_mailboxes),
+ allow_nonsecure_overlays_(
+ base::FeatureList::IsEnabled(media::kAllowNonSecureOverlays)) {
DVLOG(2) << __func__;
surface_chooser_helper_.chooser()->SetClientCallbacks(
base::Bind(&MediaCodecVideoDecoder::OnSurfaceChosen,
@@ -236,44 +239,69 @@ MediaCodecVideoDecoder::MediaCodecVideoDecoder(
weak_factory_.GetWeakPtr(), nullptr));
}
+std::unique_ptr<VideoDecoder> MediaCodecVideoDecoder::Create(
+ const gpu::GpuPreferences& gpu_preferences,
+ const gpu::GpuFeatureInfo& gpu_feature_info,
+ std::unique_ptr<MediaLog> media_log,
+ DeviceInfo* device_info,
+ CodecAllocator* codec_allocator,
+ std::unique_ptr<AndroidVideoSurfaceChooser> surface_chooser,
+ AndroidOverlayMojoFactoryCB overlay_factory_cb,
+ RequestOverlayInfoCB request_overlay_info_cb,
+ std::unique_ptr<VideoFrameFactory> video_frame_factory) {
+ auto* decoder = new MediaCodecVideoDecoder(
+ gpu_preferences, gpu_feature_info, std::move(media_log), device_info,
+ codec_allocator, std::move(surface_chooser),
+ std::move(overlay_factory_cb), std::move(request_overlay_info_cb),
+ std::move(video_frame_factory));
+ return std::make_unique<AsyncDestroyVideoDecoder<MediaCodecVideoDecoder>>(
+ base::WrapUnique(decoder));
+}
+
MediaCodecVideoDecoder::~MediaCodecVideoDecoder() {
DVLOG(2) << __func__;
TRACE_EVENT0("media", "MediaCodecVideoDecoder::~MediaCodecVideoDecoder");
ReleaseCodec();
}
-void MediaCodecVideoDecoder::Destroy() {
+void MediaCodecVideoDecoder::DestroyAsync(
+ std::unique_ptr<MediaCodecVideoDecoder> decoder) {
DVLOG(1) << __func__;
TRACE_EVENT0("media", "MediaCodecVideoDecoder::Destroy");
+ DCHECK(decoder);
+
+ // This will be destroyed by a call to |DeleteSoon|
+ // in |OnCodecDrained|.
+ auto* self = decoder.release();
// Cancel pending callbacks.
//
// WARNING: This will lose the callback we've given to MediaCodecBridge for
// asynchronous notifications; so we must not leave this function with any
// work necessary from StartTimerOrPumpCodec().
- weak_factory_.InvalidateWeakPtrs();
+ self->weak_factory_.InvalidateWeakPtrs();
- if (media_crypto_context_) {
+ if (self->media_crypto_context_) {
// Cancel previously registered callback (if any).
- media_crypto_context_->SetMediaCryptoReadyCB(base::NullCallback());
- if (cdm_registration_id_)
- media_crypto_context_->UnregisterPlayer(cdm_registration_id_);
- media_crypto_context_ = nullptr;
- cdm_registration_id_ = 0;
+ self->media_crypto_context_->SetMediaCryptoReadyCB(base::NullCallback());
+ if (self->cdm_registration_id_)
+ self->media_crypto_context_->UnregisterPlayer(self->cdm_registration_id_);
+ self->media_crypto_context_ = nullptr;
+ self->cdm_registration_id_ = 0;
}
// Mojo callbacks require that they're run before destruction.
- if (reset_cb_)
- std::move(reset_cb_).Run();
+ if (self->reset_cb_)
+ std::move(self->reset_cb_).Run();
// Cancel callbacks we no longer want.
- codec_allocator_weak_factory_.InvalidateWeakPtrs();
- CancelPendingDecodes(DecodeStatus::ABORTED);
- StartDrainingCodec(DrainType::kForDestroy);
+ self->codec_allocator_weak_factory_.InvalidateWeakPtrs();
+ self->CancelPendingDecodes(DecodeStatus::ABORTED);
+ self->StartDrainingCodec(DrainType::kForDestroy);
// Per the WARNING above. Validate that no draining work remains.
- if (using_async_api_)
- DCHECK(!drain_type_.has_value());
+ if (self->using_async_api_)
+ DCHECK(!self->drain_type_.has_value());
}
void MediaCodecVideoDecoder::Initialize(const VideoDecoderConfig& config,
@@ -471,6 +499,12 @@ void MediaCodecVideoDecoder::StartLazyInit() {
overlay_mode = VideoFrameFactory::OverlayMode::kRequestPromotionHints;
}
+ // Regardless of whether we're using SurfaceControl or Dialog overlays, don't
+ // allow any overlays in A/B power testing mode, unless this requires a
+ // secure surface. Don't fail the playback for power testing.
+ if (!requires_secure_codec_ && !allow_nonsecure_overlays_)
+ overlay_mode = VideoFrameFactory::OverlayMode::kDontRequestPromotionHints;
+
video_frame_factory_->Initialize(
overlay_mode,
base::Bind(&MediaCodecVideoDecoder::OnVideoFrameFactoryInitialized,
@@ -488,10 +522,16 @@ void MediaCodecVideoDecoder::OnVideoFrameFactoryInitialized(
}
texture_owner_bundle_ = new CodecSurfaceBundle(std::move(texture_owner));
+ // This is for A/B power testing only. Turn off Dialog-based overlays in
+ // power testing mode, unless we need them for L1 content.
+ // See https://crbug.com/1081346 .
+ const bool allowed_for_experiment =
+ requires_secure_codec_ || allow_nonsecure_overlays_;
+
// Overlays are disabled when |enable_threaded_texture_mailboxes| is true
// (http://crbug.com/582170).
if (enable_threaded_texture_mailboxes_ ||
- !device_info_->SupportsOverlaySurfaces()) {
+ !device_info_->SupportsOverlaySurfaces() || !allowed_for_experiment) {
OnSurfaceChosen(nullptr);
return;
}
@@ -975,7 +1015,7 @@ void MediaCodecVideoDecoder::ForwardVideoFrame(
if (reset_generation == reset_generation_) {
// TODO(liberato): We might actually have a SW decoder. Consider setting
// this to false if so, especially for higher bitrates.
- frame->metadata()->SetBoolean(VideoFrameMetadata::POWER_EFFICIENT, true);
+ frame->metadata()->power_efficient = true;
output_cb_.Run(std::move(frame));
}
}
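
The Destroy() override is gone; teardown now goes through AsyncDestroyVideoDecoder, which requires the wrapped type to expose a static DestroyAsync() taking ownership of the decoder. A sketch of that contract, with an illustrative MyDecoder standing in for MediaCodecVideoDecoder (VideoDecoder overrides elided):

    class MyDecoder : public VideoDecoder {
     public:
      // Invoked by AsyncDestroyVideoDecoder instead of the destructor; the
      // implementation may defer deletion, e.g. until the codec is drained.
      static void DestroyAsync(std::unique_ptr<MyDecoder> decoder);
      // ... VideoDecoder overrides ...
    };

    std::unique_ptr<VideoDecoder> decoder =
        std::make_unique<AsyncDestroyVideoDecoder<MyDecoder>>(
            base::WrapUnique(new MyDecoder()));
    decoder.reset();  // Routes through MyDecoder::DestroyAsync().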
diff --git a/chromium/media/gpu/android/media_codec_video_decoder.h b/chromium/media/gpu/android/media_codec_video_decoder.h
index 59055e4d359..7e87139ae32 100644
--- a/chromium/media/gpu/android/media_codec_video_decoder.h
+++ b/chromium/media/gpu/android/media_codec_video_decoder.h
@@ -58,11 +58,14 @@ struct PendingDecode {
// playbacks that need them.
// TODO: Lazy initialization should be handled at a higher layer of the media
// stack for both simplicity and cross platform support.
-class MEDIA_GPU_EXPORT MediaCodecVideoDecoder : public VideoDecoder {
+class MEDIA_GPU_EXPORT MediaCodecVideoDecoder final : public VideoDecoder {
public:
static std::vector<SupportedVideoDecoderConfig> GetSupportedConfigs();
- MediaCodecVideoDecoder(
+ ~MediaCodecVideoDecoder() override;
+ static void DestroyAsync(std::unique_ptr<MediaCodecVideoDecoder>);
+
+ static std::unique_ptr<VideoDecoder> Create(
const gpu::GpuPreferences& gpu_preferences,
const gpu::GpuFeatureInfo& gpu_feature_info,
std::unique_ptr<MediaLog> media_log,
@@ -87,9 +90,20 @@ class MEDIA_GPU_EXPORT MediaCodecVideoDecoder : public VideoDecoder {
bool CanReadWithoutStalling() const override;
int GetMaxDecodeRequests() const override;
- protected:
- // Protected for testing.
- ~MediaCodecVideoDecoder() override;
+ private:
+ // The test has access for PumpCodec() and the constructor.
+ friend class MediaCodecVideoDecoderTest;
+
+ MediaCodecVideoDecoder(
+ const gpu::GpuPreferences& gpu_preferences,
+ const gpu::GpuFeatureInfo& gpu_feature_info,
+ std::unique_ptr<MediaLog> media_log,
+ DeviceInfo* device_info,
+ CodecAllocator* codec_allocator,
+ std::unique_ptr<AndroidVideoSurfaceChooser> surface_chooser,
+ AndroidOverlayMojoFactoryCB overlay_factory_cb,
+ RequestOverlayInfoCB request_overlay_info_cb,
+ std::unique_ptr<VideoFrameFactory> video_frame_factory);
// Set up |cdm_context| as part of initialization. Guarantees that |init_cb|
// will be called depending on the outcome, though not necessarily before this
@@ -102,11 +116,6 @@ class MEDIA_GPU_EXPORT MediaCodecVideoDecoder : public VideoDecoder {
JavaObjectPtr media_crypto,
bool requires_secure_video_codec);
- private:
- // The test has access for PumpCodec().
- friend class MediaCodecVideoDecoderTest;
- friend class base::DeleteHelper<MediaCodecVideoDecoder>;
-
enum class State {
// Initializing resources required to create a codec.
kInitializing,
@@ -124,9 +133,6 @@ class MEDIA_GPU_EXPORT MediaCodecVideoDecoder : public VideoDecoder {
enum class DrainType { kForReset, kForDestroy };
- // Starts teardown.
- void Destroy() override;
-
// Finishes initialization.
void StartLazyInit();
void OnVideoFrameFactoryInitialized(
@@ -327,6 +333,11 @@ class MEDIA_GPU_EXPORT MediaCodecVideoDecoder : public VideoDecoder {
// Optional crypto object from the Cdm.
base::android::ScopedJavaGlobalRef<jobject> media_crypto_;
+ // For A/B power testing, this causes all non-L1 content to avoid overlays.
+ // This is only for A/B power testing, and can be removed after that.
+ // See https://crbug.com/1081346 .
+ bool allow_nonsecure_overlays_ = true;
+
base::WeakPtrFactory<MediaCodecVideoDecoder> weak_factory_{this};
base::WeakPtrFactory<MediaCodecVideoDecoder> codec_allocator_weak_factory_{
this};
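
Note the two WeakPtrFactory members above: keeping a dedicated factory for codec-allocator callbacks lets DestroyAsync() cancel just that subset by invalidating one factory while weak pointers vended by the other remain valid. The pattern, sketched with illustrative names:

    class Client {
     public:
      void RequestCodec() {
        allocator_->Allocate(base::BindOnce(
            &Client::OnCodecReady, codec_weak_factory_.GetWeakPtr()));
      }
      void CancelCodecCallbacksOnly() {
        // Leaves callbacks bound through |weak_factory_| untouched.
        codec_weak_factory_.InvalidateWeakPtrs();
      }

     private:
      void OnCodecReady() {}
      CodecAllocator* allocator_ = nullptr;
      base::WeakPtrFactory<Client> weak_factory_{this};
      base::WeakPtrFactory<Client> codec_weak_factory_{this};
    };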
diff --git a/chromium/media/gpu/android/media_codec_video_decoder_unittest.cc b/chromium/media/gpu/android/media_codec_video_decoder_unittest.cc
index 61d4c13f3ed..16f3a5f4871 100644
--- a/chromium/media/gpu/android/media_codec_video_decoder_unittest.cc
+++ b/chromium/media/gpu/android/media_codec_video_decoder_unittest.cc
@@ -17,6 +17,7 @@
#include "media/base/android/media_codec_util.h"
#include "media/base/android/mock_android_overlay.h"
#include "media/base/android/mock_media_crypto_context.h"
+#include "media/base/async_destroy_video_decoder.h"
#include "media/base/decoder_buffer.h"
#include "media/base/media_util.h"
#include "media/base/test_helpers.h"
@@ -52,12 +53,6 @@ std::unique_ptr<AndroidOverlay> CreateAndroidOverlayCb(
return nullptr;
}
-// Make MCVD's destruction observable for teardown tests.
-struct DestructionObservableMCVD : public DestructionObservable,
- public MediaCodecVideoDecoder {
- using MediaCodecVideoDecoder::MediaCodecVideoDecoder;
-};
-
} // namespace
class MockVideoFrameFactory : public VideoFrameFactory {
@@ -149,18 +144,16 @@ class MediaCodecVideoDecoderTest : public testing::TestWithParam<VideoCodec> {
ON_CALL(*video_frame_factory_, Initialize(ExpectedOverlayMode(), _))
.WillByDefault(RunCallback<1>(texture_owner));
- auto* observable_mcvd = new DestructionObservableMCVD(
+ auto* mcvd = new MediaCodecVideoDecoder(
gpu_preferences_, gpu_feature_info_, std::make_unique<NullMediaLog>(),
device_info_.get(), codec_allocator_.get(), std::move(surface_chooser),
base::BindRepeating(&CreateAndroidOverlayCb),
base::BindRepeating(&MediaCodecVideoDecoderTest::RequestOverlayInfoCb,
base::Unretained(this)),
std::move(video_frame_factory));
- mcvd_.reset(observable_mcvd);
- mcvd_raw_ = observable_mcvd;
- destruction_observer_ = observable_mcvd->CreateDestructionObserver();
- // Ensure MCVD doesn't leak by default.
- destruction_observer_->ExpectDestruction();
+ mcvd_ = std::make_unique<AsyncDestroyVideoDecoder<MediaCodecVideoDecoder>>(
+ base::WrapUnique(mcvd));
+ mcvd_raw_ = mcvd;
}
VideoFrameFactory::OverlayMode ExpectedOverlayMode() const {
@@ -291,7 +284,6 @@ class MediaCodecVideoDecoderTest : public testing::TestWithParam<VideoCodec> {
gpu::MockTextureOwner* texture_owner_;
MockVideoFrameFactory* video_frame_factory_;
NiceMock<base::MockCallback<VideoDecoder::DecodeCB>> decode_cb_;
- std::unique_ptr<DestructionObserver> destruction_observer_;
ProvideOverlayInfoCB provide_overlay_info_cb_;
bool restart_for_transitions_;
gpu::GpuPreferences gpu_preferences_;
@@ -308,7 +300,7 @@ class MediaCodecVideoDecoderTest : public testing::TestWithParam<VideoCodec> {
// |mcvd_raw_| lets us call PumpCodec() even after |mcvd_| is dropped, for
// testing the teardown path.
MediaCodecVideoDecoder* mcvd_raw_;
- std::unique_ptr<MediaCodecVideoDecoder> mcvd_;
+ std::unique_ptr<VideoDecoder> mcvd_;
};
// Tests which only work for a single codec.
@@ -687,9 +679,6 @@ TEST_P(MediaCodecVideoDecoderVp8Test, UnregisterPlayerBeforeAsyncDestruction) {
// before the decoder is actually destructed, asynchronously.
EXPECT_CALL(*cdm_, UnregisterPlayer(MockMediaCryptoContext::kRegistrationId));
mcvd_.reset();
-
- // Make sure the decoder has not been destroyed yet.
- destruction_observer_->DoNotAllowDestruction();
}
// A reference test for UnregisterPlayerBeforeAsyncDestruction.
@@ -704,9 +693,6 @@ TEST_P(MediaCodecVideoDecoderVp8Test, UnregisterPlayerBeforeSyncDestruction) {
// When |mcvd_| is reset, expect that it will unregister itself immediately.
EXPECT_CALL(*cdm_, UnregisterPlayer(MockMediaCryptoContext::kRegistrationId));
mcvd_.reset();
-
- // Make sure the decoder is now destroyed.
- destruction_observer_->ExpectDestruction();
}
TEST_P(MediaCodecVideoDecoderVp8Test, ResetDoesNotDrainVp8WithAsyncApi) {
@@ -818,18 +804,9 @@ TEST_P(MediaCodecVideoDecoderTest, EosDecodeCbIsRunAfterEosIsDequeued) {
std::move(video_frame_factory_->last_closure_).Run();
}
-TEST_P(MediaCodecVideoDecoderTest, TeardownBeforeInitWorks) {
- // Since we assert that MCVD is destructed by default, this test verifies that
- // MCVD is destructed safely before Initialize().
-}
-
TEST_P(MediaCodecVideoDecoderTest, TeardownInvalidatesCodecCreationWeakPtr) {
InitializeWithTextureOwner_OneDecodePending(TestVideoConfig::Large(codec_));
- destruction_observer_->DoNotAllowDestruction();
mcvd_.reset();
- // DeleteSoon() is now pending. Ensure it's safe if the codec creation
- // completes before it runs.
- destruction_observer_->ExpectDestruction();
EXPECT_CALL(*codec_allocator_, MockReleaseMediaCodec(NotNull()));
ASSERT_TRUE(codec_allocator_->ProvideMockCodecAsync());
}
@@ -837,11 +814,7 @@ TEST_P(MediaCodecVideoDecoderTest, TeardownInvalidatesCodecCreationWeakPtr) {
TEST_P(MediaCodecVideoDecoderTest,
TeardownInvalidatesCodecCreationWeakPtrButDoesNotCallReleaseMediaCodec) {
InitializeWithTextureOwner_OneDecodePending(TestVideoConfig::Large(codec_));
- destruction_observer_->DoNotAllowDestruction();
mcvd_.reset();
- // DeleteSoon() is now pending. Ensure it's safe if the codec creation
- // completes before it runs.
- destruction_observer_->ExpectDestruction();
// A null codec should not be released via ReleaseMediaCodec().
EXPECT_CALL(*codec_allocator_, MockReleaseMediaCodec(_)).Times(0);
@@ -880,7 +853,6 @@ TEST_P(MediaCodecVideoDecoderVp8Test,
PumpCodec();
// MCVD should not be destructed immediately.
- destruction_observer_->DoNotAllowDestruction();
mcvd_.reset();
base::RunLoop().RunUntilIdle();
@@ -888,7 +860,6 @@ TEST_P(MediaCodecVideoDecoderVp8Test,
codec->AcceptOneInput(MockMediaCodecBridge::kEos);
codec->ProduceOneOutput(MockMediaCodecBridge::kEos);
EXPECT_CALL(*codec, Flush()).Times(0);
- destruction_observer_->ExpectDestruction();
PumpCodec();
base::RunLoop().RunUntilIdle();
}
@@ -1000,10 +971,7 @@ TEST_P(MediaCodecVideoDecoderTest, VideoFramesArePowerEfficient) {
base::RunLoop().RunUntilIdle();
EXPECT_TRUE(!!most_recent_frame_);
- bool power_efficient = false;
- EXPECT_TRUE(most_recent_frame_->metadata()->GetBoolean(
- VideoFrameMetadata::POWER_EFFICIENT, &power_efficient));
- EXPECT_TRUE(power_efficient);
+ EXPECT_TRUE(most_recent_frame_->metadata()->power_efficient);
}
TEST_P(MediaCodecVideoDecoderH264Test, CsdIsIncludedInCodecConfig) {
diff --git a/chromium/media/gpu/android/video_frame_factory_impl.cc b/chromium/media/gpu/android/video_frame_factory_impl.cc
index b7c768bae0c..1132f5995de 100644
--- a/chromium/media/gpu/android/video_frame_factory_impl.cc
+++ b/chromium/media/gpu/android/video_frame_factory_impl.cc
@@ -81,7 +81,7 @@ VideoFrameFactoryImpl::VideoFrameFactoryImpl(
const gpu::GpuPreferences& gpu_preferences,
std::unique_ptr<SharedImageVideoProvider> image_provider,
std::unique_ptr<MaybeRenderEarlyManager> mre_manager,
- base::SequenceBound<FrameInfoHelper> frame_info_helper)
+ std::unique_ptr<FrameInfoHelper> frame_info_helper)
: image_provider_(std::move(image_provider)),
gpu_task_runner_(std::move(gpu_task_runner)),
enable_threaded_texture_mailboxes_(
@@ -171,7 +171,7 @@ void VideoFrameFactoryImpl::CreateVideoFrame(
auto image_ready_cb =
base::BindOnce(&VideoFrameFactoryImpl::CreateVideoFrame_OnImageReady,
weak_factory_.GetWeakPtr(), std::move(output_cb),
- timestamp, natural_size, codec_buffer_wait_coordinator_,
+ timestamp, natural_size, !!codec_buffer_wait_coordinator_,
std::move(promotion_hint_cb), pixel_format, overlay_mode_,
enable_threaded_texture_mailboxes_, gpu_task_runner_);
@@ -181,48 +181,20 @@ void VideoFrameFactoryImpl::CreateVideoFrame(
void VideoFrameFactoryImpl::RequestImage(
std::unique_ptr<CodecOutputBufferRenderer> buffer_renderer,
ImageWithInfoReadyCB image_ready_cb) {
- if (buffer_renderer && visible_size_ == buffer_renderer->size()) {
- auto cb = base::BindOnce(std::move(image_ready_cb),
- std::move(buffer_renderer), frame_info_);
-
- image_provider_->RequestImage(
- std::move(cb), image_spec_,
- codec_buffer_wait_coordinator_
- ? codec_buffer_wait_coordinator_->texture_owner()
- : nullptr);
- return;
- }
-
- // We need to reset size to make sure VFFI pipeline is still ordered.
- // e.g: CreateVideoFrame is called with new size. We post task to GPU thread
- // to get new frame info. While we wait CreateVideoFrame might be called with
- // old size again and if we don't reset size here we will skip GPU hop and new
- // frame will be created earlier than first one.
- visible_size_ = gfx::Size();
-
- auto info_cb = BindToCurrentLoop(
+ auto info_cb =
base::BindOnce(&VideoFrameFactoryImpl::CreateVideoFrame_OnFrameInfoReady,
weak_factory_.GetWeakPtr(), std::move(image_ready_cb),
- codec_buffer_wait_coordinator_));
+ codec_buffer_wait_coordinator_);
- frame_info_helper_.Post(FROM_HERE, &FrameInfoHelper::GetFrameInfo,
- std::move(buffer_renderer), std::move(info_cb));
+ frame_info_helper_->GetFrameInfo(std::move(buffer_renderer),
+ std::move(info_cb));
}
void VideoFrameFactoryImpl::CreateVideoFrame_OnFrameInfoReady(
ImageWithInfoReadyCB image_ready_cb,
scoped_refptr<CodecBufferWaitCoordinator> codec_buffer_wait_coordinator,
std::unique_ptr<CodecOutputBufferRenderer> output_buffer_renderer,
- FrameInfoHelper::FrameInfo frame_info,
- bool success) {
- // To get frame info we need to render frame which might fail for variety of
- // reason. FrameInfoHelper will provide best values we can proceed with, but
- // we should not cache it and attempt to get info for next frame.
- if (success) {
- frame_info_ = frame_info;
- visible_size_ = output_buffer_renderer->size();
- }
-
+ FrameInfoHelper::FrameInfo frame_info) {
// If we don't have output buffer here we can't rely on reply from
// FrameInfoHelper as there might be not cached value and we can't render
// nothing. But in this case call comes from RunAfterPendingVideoFrames and we
@@ -246,7 +218,7 @@ void VideoFrameFactoryImpl::CreateVideoFrame_OnImageReady(
OnceOutputCB output_cb,
base::TimeDelta timestamp,
gfx::Size natural_size,
- scoped_refptr<CodecBufferWaitCoordinator> codec_buffer_wait_coordinator,
+ bool is_texture_owner_backed,
PromotionHintAggregator::NotifyPromotionHintCB promotion_hint_cb,
VideoPixelFormat pixel_format,
OverlayMode overlay_mode,
@@ -268,7 +240,7 @@ void VideoFrameFactoryImpl::CreateVideoFrame_OnImageReady(
// When we remove the output buffer management from CodecImage, then that's
// what we'd have a reference to here rather than CodecImage.
record.codec_image_holder->codec_image_raw()->Initialize(
- std::move(output_buffer_renderer), codec_buffer_wait_coordinator,
+ std::move(output_buffer_renderer), is_texture_owner_backed,
std::move(promotion_hint_cb));
// Send the CodecImage (via holder, since we can't touch the refcount here) to
@@ -301,7 +273,7 @@ void VideoFrameFactoryImpl::CreateVideoFrame_OnImageReady(
// The frames must be copied when threaded texture mailboxes are in use
// (http://crbug.com/582170).
if (enable_threaded_texture_mailboxes)
- frame->metadata()->SetBoolean(VideoFrameMetadata::COPY_REQUIRED, true);
+ frame->metadata()->copy_required = true;
const bool is_surface_control =
overlay_mode == OverlayMode::kSurfaceControlSecure ||
@@ -309,25 +281,20 @@ void VideoFrameFactoryImpl::CreateVideoFrame_OnImageReady(
const bool wants_promotion_hints =
overlay_mode == OverlayMode::kRequestPromotionHints;
- // Remember that we can't access |codec_buffer_wait_coordinator|, but we can
- // check if we have one here.
bool allow_overlay = false;
if (is_surface_control) {
- DCHECK(codec_buffer_wait_coordinator);
+ DCHECK(is_texture_owner_backed);
allow_overlay = true;
} else {
// We unconditionally mark the picture as overlayable, even if
- // |!codec_buffer_wait_coordinator|, if we want to get hints. It's
+ // |!is_texture_owner_backed|, if we want to get hints. It's
// required, else we won't get hints.
- allow_overlay = !codec_buffer_wait_coordinator || wants_promotion_hints;
+ allow_overlay = !is_texture_owner_backed || wants_promotion_hints;
}
- frame->metadata()->SetBoolean(VideoFrameMetadata::ALLOW_OVERLAY,
- allow_overlay);
- frame->metadata()->SetBoolean(VideoFrameMetadata::WANTS_PROMOTION_HINT,
- wants_promotion_hints);
- frame->metadata()->SetBoolean(VideoFrameMetadata::TEXTURE_OWNER,
- !!codec_buffer_wait_coordinator);
+ frame->metadata()->allow_overlay = allow_overlay;
+ frame->metadata()->wants_promotion_hint = wants_promotion_hints;
+ frame->metadata()->texture_owner = is_texture_owner_backed;
// TODO(liberato): if this is run via being dropped, then it would be nice
// to find that out rather than treating the image as unused. If the renderer
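
Several hunks above also pick up the VideoFrameMetadata migration from key/value setters to plain struct members; the before/after shapes, both taken directly from this diff:

    // Before:
    frame->metadata()->SetBoolean(VideoFrameMetadata::ALLOW_OVERLAY,
                                  allow_overlay);
    // After:
    frame->metadata()->allow_overlay = allow_overlay;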
diff --git a/chromium/media/gpu/android/video_frame_factory_impl.h b/chromium/media/gpu/android/video_frame_factory_impl.h
index 624d7d2b650..489149eb765 100644
--- a/chromium/media/gpu/android/video_frame_factory_impl.h
+++ b/chromium/media/gpu/android/video_frame_factory_impl.h
@@ -10,7 +10,6 @@
#include "base/memory/weak_ptr.h"
#include "base/optional.h"
#include "base/single_thread_task_runner.h"
-#include "base/threading/sequence_bound.h"
#include "gpu/config/gpu_preferences.h"
#include "media/base/video_frame.h"
#include "media/gpu/android/codec_buffer_wait_coordinator.h"
@@ -52,7 +51,7 @@ class MEDIA_GPU_EXPORT VideoFrameFactoryImpl : public VideoFrameFactory {
const gpu::GpuPreferences& gpu_preferences,
std::unique_ptr<SharedImageVideoProvider> image_provider,
std::unique_ptr<MaybeRenderEarlyManager> mre_manager,
- base::SequenceBound<FrameInfoHelper> frame_info_helper);
+ std::unique_ptr<FrameInfoHelper> frame_info_helper);
~VideoFrameFactoryImpl() override;
void Initialize(OverlayMode overlay_mode, InitCB init_cb) override;
@@ -91,7 +90,7 @@ class MEDIA_GPU_EXPORT VideoFrameFactoryImpl : public VideoFrameFactory {
OnceOutputCB output_cb,
base::TimeDelta timestamp,
gfx::Size natural_size,
- scoped_refptr<CodecBufferWaitCoordinator> codec_buffer_wait_coordinator,
+ bool is_texture_owner_backed,
PromotionHintAggregator::NotifyPromotionHintCB promotion_hint_cb,
VideoPixelFormat pixel_format,
OverlayMode overlay_mode,
@@ -105,8 +104,7 @@ class MEDIA_GPU_EXPORT VideoFrameFactoryImpl : public VideoFrameFactory {
ImageWithInfoReadyCB image_ready_cb,
scoped_refptr<CodecBufferWaitCoordinator> codec_buffer_wait_coordinator,
std::unique_ptr<CodecOutputBufferRenderer> output_buffer_renderer,
- FrameInfoHelper::FrameInfo frame_info,
- bool success);
+ FrameInfoHelper::FrameInfo frame_info);
MaybeRenderEarlyManager* mre_manager() const { return mre_manager_.get(); }
@@ -128,12 +126,8 @@ class MEDIA_GPU_EXPORT VideoFrameFactoryImpl : public VideoFrameFactory {
std::unique_ptr<MaybeRenderEarlyManager> mre_manager_;
- // Caches FrameInfo and visible size it was cached for.
- gfx::Size visible_size_;
- FrameInfoHelper::FrameInfo frame_info_;
-
- // Optional helper to get the Vulkan YCbCrInfo.
- base::SequenceBound<FrameInfoHelper> frame_info_helper_;
+ // Helper to get coded_size and optional Vulkan YCbCrInfo.
+ std::unique_ptr<FrameInfoHelper> frame_info_helper_;
// The current image spec that we'll use to request images.
SharedImageVideoProvider::ImageSpec image_spec_;
diff --git a/chromium/media/gpu/android/video_frame_factory_impl_unittest.cc b/chromium/media/gpu/android/video_frame_factory_impl_unittest.cc
index ade0a27c05d..13231efe252 100644
--- a/chromium/media/gpu/android/video_frame_factory_impl_unittest.cc
+++ b/chromium/media/gpu/android/video_frame_factory_impl_unittest.cc
@@ -44,46 +44,14 @@ class MockMaybeRenderEarlyManager : public MaybeRenderEarlyManager {
class MockFrameInfoHelper : public FrameInfoHelper,
public DestructionObservable {
public:
- MockFrameInfoHelper(MockFrameInfoHelper** thiz) { *thiz = this; }
-
- void GetFrameInfo(
- std::unique_ptr<CodecOutputBufferRenderer> buffer_renderer,
- base::OnceCallback<
- void(std::unique_ptr<CodecOutputBufferRenderer>, FrameInfo, bool)> cb)
- override {
- MockGetFrameInfo(buffer_renderer.get());
- cb_ = std::move(cb);
- buffer_renderer_ = std::move(buffer_renderer);
-
- if (run_callback_automatically_) {
- RunWithYcbCrInfo(true);
- base::RunLoop().RunUntilIdle();
- }
- }
-
- void RunWithYcbCrInfo(bool success) {
- DCHECK(buffer_renderer_);
-
+ void GetFrameInfo(std::unique_ptr<CodecOutputBufferRenderer> buffer_renderer,
+ FrameInfoReadyCB cb) override {
FrameInfo info;
- info.coded_size = buffer_renderer_->size();
+ info.coded_size = buffer_renderer->size();
info.visible_rect = gfx::Rect(info.coded_size);
- std::move(cb_).Run(std::move(buffer_renderer_), info, success);
- }
-
- void set_run_callback_automatically(bool run_callback_automatically) {
- run_callback_automatically_ = run_callback_automatically;
+ std::move(cb).Run(std::move(buffer_renderer), info);
}
-
- MOCK_METHOD1(MockGetFrameInfo,
- void(CodecOutputBufferRenderer* buffer_renderer));
-
- private:
- bool run_callback_automatically_ = true;
- base::OnceCallback<
- void(std::unique_ptr<CodecOutputBufferRenderer>, FrameInfo, bool)>
- cb_;
- std::unique_ptr<CodecOutputBufferRenderer> buffer_renderer_;
};
class VideoFrameFactoryImplTest : public testing::Test {
@@ -96,15 +64,11 @@ class VideoFrameFactoryImplTest : public testing::Test {
auto mre_manager = std::make_unique<MockMaybeRenderEarlyManager>();
mre_manager_raw_ = mre_manager.get();
- auto ycbcr_helper = base::SequenceBound<MockFrameInfoHelper>(
- task_runner_, &ycbcr_helper_raw_);
- base::RunLoop().RunUntilIdle(); // Init |ycbcr_helper_raw_|.
- ycbcr_destruction_observer_ =
- ycbcr_helper_raw_->CreateDestructionObserver();
+ auto info_helper = std::make_unique<MockFrameInfoHelper>();
impl_ = std::make_unique<VideoFrameFactoryImpl>(
task_runner_, gpu_preferences_, std::move(image_provider),
- std::move(mre_manager), std::move(ycbcr_helper));
+ std::move(mre_manager), std::move(info_helper));
auto texture_owner = base::MakeRefCounted<NiceMock<gpu::MockTextureOwner>>(
0, nullptr, nullptr, true);
auto codec_buffer_wait_coordinator =
@@ -177,7 +141,6 @@ class VideoFrameFactoryImplTest : public testing::Test {
// Sent to |impl_| by RequestVideoFrame..
base::MockCallback<VideoFrameFactory::OnceOutputCB> output_cb_;
- MockFrameInfoHelper* ycbcr_helper_raw_ = nullptr;
std::unique_ptr<DestructionObserver> ycbcr_destruction_observer_;
gpu::GpuPreferences gpu_preferences_;
@@ -272,75 +235,4 @@ TEST_F(VideoFrameFactoryImplTest,
impl_ = nullptr;
base::RunLoop().RunUntilIdle();
}
-
-TEST_F(VideoFrameFactoryImplTest, DoesCallFrameInfoHelperIfVulkan) {
- // We will be driving callback by ourselves in this test.
- ycbcr_helper_raw_->set_run_callback_automatically(false);
- // Expect call to get info for the first frame.
- EXPECT_CALL(*ycbcr_helper_raw_, MockGetFrameInfo(_)).Times(1);
-
- RequestVideoFrame();
-
- // Provide info. It should send image request.
- ycbcr_helper_raw_->RunWithYcbCrInfo(true);
- base::RunLoop().RunUntilIdle();
-
- testing::Mock::VerifyAndClearExpectations(ycbcr_helper_raw_);
-
- // Fulfilling image request should provide video frame.
- EXPECT_CALL(output_cb_, Run(_)).Times(1);
-
- auto image_record = MakeImageRecord();
- image_provider_raw_->ProvideOneRequestedImage(&image_record);
- base::RunLoop().RunUntilIdle();
-
- // Verify that no more calls happen, since we don't want thread hops on every
- // frame. Note that multiple could be dispatched before now. It should still
- // send along a VideoFrame, though.
- EXPECT_CALL(*ycbcr_helper_raw_, MockGetFrameInfo(_)).Times(0);
- EXPECT_CALL(output_cb_, Run(_)).Times(1);
-
- RequestVideoFrame();
- auto other_image_record = MakeImageRecord();
- // If the helper hasn't been destroyed, then we don't expect it to be called.
- image_provider_raw_->ProvideOneRequestedImage(&other_image_record);
- base::RunLoop().RunUntilIdle();
-}
-
-TEST_F(VideoFrameFactoryImplTest, NullYCbCrInfoDoesntCrash) {
- // We will be driving callback by ourselves in this test.
- ycbcr_helper_raw_->set_run_callback_automatically(false);
-
- // Expect call to get info for the first frame.
- EXPECT_CALL(*ycbcr_helper_raw_, MockGetFrameInfo(_)).Times(1);
-
- RequestVideoFrame();
-
- // Provide info. It should send image request.
- ycbcr_helper_raw_->RunWithYcbCrInfo(false);
- base::RunLoop().RunUntilIdle();
-
- testing::Mock::VerifyAndClearExpectations(ycbcr_helper_raw_);
-
- // Fulfilling image request should provide video frame.
- EXPECT_CALL(output_cb_, Run(_)).Times(1);
-
- auto image_record = MakeImageRecord();
- image_provider_raw_->ProvideOneRequestedImage(&image_record);
- base::RunLoop().RunUntilIdle();
-
- // Verify that we will get call to GetFrameInfo as previous one failed.
- EXPECT_CALL(*ycbcr_helper_raw_, MockGetFrameInfo(_)).Times(1);
- EXPECT_CALL(output_cb_, Run(_)).Times(1);
-
- RequestVideoFrame();
- ycbcr_helper_raw_->RunWithYcbCrInfo(true);
- base::RunLoop().RunUntilIdle();
-
- auto other_image_record = MakeImageRecord();
- // If the helper hasn't been destroyed, then we don't expect it to be called.
- image_provider_raw_->ProvideOneRequestedImage(&other_image_record);
- base::RunLoop().RunUntilIdle();
-}
-
} // namespace media
diff --git a/chromium/media/gpu/chromeos/BUILD.gn b/chromium/media/gpu/chromeos/BUILD.gn
index a209dbf6652..ce07f94b380 100644
--- a/chromium/media/gpu/chromeos/BUILD.gn
+++ b/chromium/media/gpu/chromeos/BUILD.gn
@@ -149,6 +149,7 @@ source_set("unit_tests") {
"mailbox_video_frame_converter_unittest.cc",
"platform_video_frame_pool_unittest.cc",
"platform_video_frame_utils_unittest.cc",
+ "video_decoder_pipeline_unittest.cc",
]
}
diff --git a/chromium/media/gpu/chromeos/chromeos_video_decoder_factory.cc b/chromium/media/gpu/chromeos/chromeos_video_decoder_factory.cc
index b0c1595e6b5..9e2367128f3 100644
--- a/chromium/media/gpu/chromeos/chromeos_video_decoder_factory.cc
+++ b/chromium/media/gpu/chromeos/chromeos_video_decoder_factory.cc
@@ -7,6 +7,7 @@
#include <utility>
#include "base/sequenced_task_runner.h"
+#include "media/base/media_log.h"
#include "media/base/video_decoder.h"
#include "media/gpu/buildflags.h"
#include "media/gpu/chromeos/mailbox_video_frame_converter.h"
@@ -18,32 +19,27 @@
#endif
#if BUILDFLAG(USE_V4L2_CODEC)
-#include "media/gpu/v4l2/v4l2_slice_video_decoder.h"
+#include "media/gpu/v4l2/v4l2_video_decoder.h"
#endif
namespace media {
namespace {
-// Get a list of the available functions for creating VideoDeocoder.
-base::queue<VideoDecoderPipeline::CreateVDFunc> GetCreateVDFunctions(
- VideoDecoderPipeline::CreateVDFunc cur_create_vd_func) {
- static constexpr VideoDecoderPipeline::CreateVDFunc kCreateVDFuncs[] = {
+// Gets a list of the available functions for creating VideoDecoders.
+VideoDecoderPipeline::CreateDecoderFunctions GetCreateDecoderFunctions() {
+ constexpr VideoDecoderPipeline::CreateDecoderFunction kCreateVDFuncs[] = {
#if BUILDFLAG(USE_VAAPI)
&VaapiVideoDecoder::Create,
#endif // BUILDFLAG(USE_VAAPI)
#if BUILDFLAG(USE_V4L2_CODEC)
- &V4L2SliceVideoDecoder::Create,
+ &V4L2VideoDecoder::Create,
#endif // BUILDFLAG(USE_V4L2_CODEC)
};
- base::queue<VideoDecoderPipeline::CreateVDFunc> ret;
- for (const auto& func : kCreateVDFuncs) {
- if (func != cur_create_vd_func)
- ret.push(func);
- }
- return ret;
+ return VideoDecoderPipeline::CreateDecoderFunctions(
+ kCreateVDFuncs, kCreateVDFuncs + base::size(kCreateVDFuncs));
}
} // namespace
@@ -61,7 +57,7 @@ ChromeosVideoDecoderFactory::GetSupportedConfigs() {
#endif // BUILDFLAG(USE_VAAPI)
#if BUILDFLAG(USE_V4L2_CODEC)
- configs = V4L2SliceVideoDecoder::GetSupportedConfigs();
+ configs = V4L2VideoDecoder::GetSupportedConfigs();
supported_configs.insert(supported_configs.end(), configs.begin(),
configs.end());
#endif // BUILDFLAG(USE_V4L2_CODEC)
@@ -74,11 +70,11 @@ std::unique_ptr<VideoDecoder> ChromeosVideoDecoderFactory::Create(
scoped_refptr<base::SequencedTaskRunner> client_task_runner,
std::unique_ptr<DmabufVideoFramePool> frame_pool,
std::unique_ptr<VideoFrameConverter> frame_converter,
- gpu::GpuMemoryBufferFactory* const gpu_memory_buffer_factory) {
+ std::unique_ptr<MediaLog> media_log) {
return VideoDecoderPipeline::Create(
std::move(client_task_runner), std::move(frame_pool),
- std::move(frame_converter), gpu_memory_buffer_factory,
- base::BindRepeating(&GetCreateVDFunctions));
+ std::move(frame_converter), std::move(media_log),
+ base::BindRepeating(&GetCreateDecoderFunctions));
}
} // namespace media
diff --git a/chromium/media/gpu/chromeos/chromeos_video_decoder_factory.h b/chromium/media/gpu/chromeos/chromeos_video_decoder_factory.h
index 15d4e5830c9..ee61ce2a5db 100644
--- a/chromium/media/gpu/chromeos/chromeos_video_decoder_factory.h
+++ b/chromium/media/gpu/chromeos/chromeos_video_decoder_factory.h
@@ -15,13 +15,10 @@ namespace base {
class SequencedTaskRunner;
} // namespace base
-namespace gpu {
-class GpuMemoryBufferFactory;
-} // namespace gpu
-
namespace media {
class DmabufVideoFramePool;
+class MediaLog;
class VideoDecoder;
class VideoFrameConverter;
@@ -31,13 +28,11 @@ class MEDIA_GPU_EXPORT ChromeosVideoDecoderFactory {
// Create VideoDecoder instance that allocates VideoFrame from |frame_pool|
// and converts the output VideoFrame using |frame_converter|.
- // Note the caller is responsible for keeping |gpu_memory_buffer_factory|
- // alive during the returned VideoDecoder lifetime.
static std::unique_ptr<VideoDecoder> Create(
scoped_refptr<base::SequencedTaskRunner> client_task_runner,
std::unique_ptr<DmabufVideoFramePool> frame_pool,
std::unique_ptr<VideoFrameConverter> frame_converter,
- gpu::GpuMemoryBufferFactory* const gpu_memory_buffer_factory);
+ std::unique_ptr<MediaLog> media_log);
};
} // namespace media
diff --git a/chromium/media/gpu/chromeos/fourcc.cc b/chromium/media/gpu/chromeos/fourcc.cc
index e8d514df9fa..fb179e65bb1 100644
--- a/chromium/media/gpu/chromeos/fourcc.cc
+++ b/chromium/media/gpu/chromeos/fourcc.cc
@@ -5,6 +5,7 @@
#include "media/gpu/chromeos/fourcc.h"
#include "base/logging.h"
+#include "base/notreached.h"
#include "base/strings/stringprintf.h"
#include "media/gpu/macros.h"
@@ -42,6 +43,7 @@ base::Optional<Fourcc> Fourcc::FromUint32(uint32_t fourcc) {
case YM16:
case MT21:
case MM21:
+ case P010:
return Fourcc(static_cast<Value>(fourcc));
}
DVLOGF(3) << "Unmapped fourcc: " << FourccToString(fourcc);
@@ -74,6 +76,8 @@ base::Optional<Fourcc> Fourcc::FromVideoPixelFormat(
return Fourcc(NV12);
case PIXEL_FORMAT_NV21:
return Fourcc(NV21);
+ case PIXEL_FORMAT_P016LE:
+ return Fourcc(P010);
case PIXEL_FORMAT_UYVY:
NOTREACHED();
FALLTHROUGH;
@@ -92,7 +96,6 @@ base::Optional<Fourcc> Fourcc::FromVideoPixelFormat(
case PIXEL_FORMAT_YUV422P12:
case PIXEL_FORMAT_YUV444P12:
case PIXEL_FORMAT_Y16:
- case PIXEL_FORMAT_P016LE:
case PIXEL_FORMAT_XR30:
case PIXEL_FORMAT_XB30:
case PIXEL_FORMAT_UNKNOWN:
@@ -186,6 +189,8 @@ VideoPixelFormat Fourcc::ToVideoPixelFormat() const {
// be mapped to PIXEL_FORMAT_NV12.
case MM21:
return PIXEL_FORMAT_NV12;
+ case P010:
+ return PIXEL_FORMAT_P016LE;
}
NOTREACHED() << "Unmapped Fourcc: " << ToString();
return PIXEL_FORMAT_UNKNOWN;
@@ -230,6 +235,8 @@ base::Optional<Fourcc> Fourcc::FromVAFourCC(uint32_t va_fourcc) {
return Fourcc(XR24);
case VA_FOURCC_ARGB:
return Fourcc(RGB4);
+ case VA_FOURCC_P010:
+ return Fourcc(P010);
}
DVLOGF(3) << "Unmapped VAFourCC: " << FourccToString(va_fourcc);
return base::nullopt;
@@ -257,6 +264,8 @@ base::Optional<uint32_t> Fourcc::ToVAFourCC() const {
return VA_FOURCC_BGRX;
case RGB4:
return VA_FOURCC_ARGB;
+ case P010:
+ return VA_FOURCC_P010;
case YM12:
case YM21:
case NM12:
@@ -287,6 +296,7 @@ base::Optional<Fourcc> Fourcc::ToSinglePlanar() const {
case YUYV:
case NV12:
case NV21:
+ case P010:
return Fourcc(value_);
case YM12:
return Fourcc(YU12);
@@ -319,6 +329,7 @@ bool Fourcc::IsMultiPlanar() const {
case YUYV:
case NV12:
case NV21:
+ case P010:
return false;
case YM12:
case YM21:
diff --git a/chromium/media/gpu/chromeos/fourcc.h b/chromium/media/gpu/chromeos/fourcc.h
index 85172e16d52..652f203e02a 100644
--- a/chromium/media/gpu/chromeos/fourcc.h
+++ b/chromium/media/gpu/chromeos/fourcc.h
@@ -108,6 +108,10 @@ class MEDIA_GPU_EXPORT Fourcc {
// Maps to V4L2_PIX_FMT_MM21.
// It is used for the MT8183 hardware video decoder.
MM21 = ComposeFourcc('M', 'M', '2', '1'),
+
+ // Two-plane 10-bit YUV 4:2:0. Each sample is a two-byte little-endian value
+ // holding its ten significant bits in the high bits; the bottom six bits are
+ // ignored.
+ P010 = ComposeFourcc('P', '0', '1', '0'),
};
explicit Fourcc(Fourcc::Value fourcc);
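
A minimal sketch, not part of the patch, of how a value is recovered from one
P010 sample, assuming the MSB-aligned layout described in the new enum comment:

#include <cstdint>

// Hypothetical helper, illustration only: P010 keeps the ten significant
// bits of each sample in the high bits of a little-endian 16-bit word, so
// the value is recovered by dropping the six ignored low bits.
uint16_t P010SampleTo10Bit(uint16_t sample) {
  return sample >> 6;  // Result is in [0, 1023].
}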
diff --git a/chromium/media/gpu/chromeos/fourcc_unittests.cc b/chromium/media/gpu/chromeos/fourcc_unittests.cc
index ade4a4b663c..d59b317ee0b 100644
--- a/chromium/media/gpu/chromeos/fourcc_unittests.cc
+++ b/chromium/media/gpu/chromeos/fourcc_unittests.cc
@@ -32,11 +32,11 @@ TEST(FourccTest, V4L2PixFmtToV4L2PixFmt) {
CheckFromV4L2PixFmtAndBack(V4L2_PIX_FMT_ABGR32);
#ifdef V4L2_PIX_FMT_RGBA32
- V4L2PixFmtIsEqual(V4L2_PIX_FMT_RGBA32);
+ CheckFromV4L2PixFmtAndBack(V4L2_PIX_FMT_RGBA32);
#endif
CheckFromV4L2PixFmtAndBack(V4L2_PIX_FMT_XBGR32);
#ifdef V4L2_PIX_FMT_RGBX32
- V4L2PixFmtIsEqual(V4L2_PIX_FMT_RGBX32);
+ CheckFromV4L2PixFmtAndBack(V4L2_PIX_FMT_RGBX32);
#endif
CheckFromV4L2PixFmtAndBack(V4L2_PIX_FMT_RGB32);
CheckFromV4L2PixFmtAndBack(V4L2_PIX_FMT_YUV420);
@@ -133,6 +133,7 @@ TEST(FourccTest, FromVaFourCCAndBack) {
CheckFromVAFourCCAndBack(VA_FOURCC_BGRA);
CheckFromVAFourCCAndBack(VA_FOURCC_BGRX);
CheckFromVAFourCCAndBack(VA_FOURCC_ARGB);
+ CheckFromVAFourCCAndBack(VA_FOURCC_P010);
}
TEST(FourccTest, VAFourCCToVideoPixelFormat) {
@@ -154,6 +155,8 @@ TEST(FourccTest, VAFourCCToVideoPixelFormat) {
Fourcc::FromVAFourCC(VA_FOURCC_BGRA)->ToVideoPixelFormat());
EXPECT_EQ(PIXEL_FORMAT_XRGB,
Fourcc::FromVAFourCC(VA_FOURCC_BGRX)->ToVideoPixelFormat());
+ EXPECT_EQ(PIXEL_FORMAT_P016LE,
+ Fourcc::FromVAFourCC(VA_FOURCC_P010)->ToVideoPixelFormat());
}
TEST(FourccTest, VideoPixelFormatToVAFourCC) {
@@ -175,6 +178,8 @@ TEST(FourccTest, VideoPixelFormatToVAFourCC) {
*Fourcc::FromVideoPixelFormat(PIXEL_FORMAT_ARGB)->ToVAFourCC());
EXPECT_EQ(static_cast<uint32_t>(VA_FOURCC_BGRX),
*Fourcc::FromVideoPixelFormat(PIXEL_FORMAT_XRGB)->ToVAFourCC());
+ EXPECT_EQ(static_cast<uint32_t>(VA_FOURCC_P010),
+ *Fourcc::FromVideoPixelFormat(PIXEL_FORMAT_P016LE)->ToVAFourCC());
}
#endif // BUILDFLAG(USE_VAAPI)
@@ -189,6 +194,7 @@ TEST(FourccTest, FourccToSinglePlanar) {
EXPECT_EQ(Fourcc(Fourcc::YUYV).ToSinglePlanar(), Fourcc(Fourcc::YUYV));
EXPECT_EQ(Fourcc(Fourcc::NV12).ToSinglePlanar(), Fourcc(Fourcc::NV12));
EXPECT_EQ(Fourcc(Fourcc::NV21).ToSinglePlanar(), Fourcc(Fourcc::NV21));
+ EXPECT_EQ(Fourcc(Fourcc::P010).ToSinglePlanar(), Fourcc(Fourcc::P010));
EXPECT_EQ(Fourcc(Fourcc::YM12).ToSinglePlanar(),
Fourcc(Fourcc::YU12).ToSinglePlanar());
EXPECT_EQ(Fourcc(Fourcc::YM21).ToSinglePlanar(),
diff --git a/chromium/media/gpu/chromeos/image_processor.cc b/chromium/media/gpu/chromeos/image_processor.cc
index cde32f09a80..c3227c88154 100644
--- a/chromium/media/gpu/chromeos/image_processor.cc
+++ b/chromium/media/gpu/chromeos/image_processor.cc
@@ -70,6 +70,7 @@ std::unique_ptr<ImageProcessor> ImageProcessor::Create(
const PortConfig& input_config,
const PortConfig& output_config,
const std::vector<OutputMode>& preferred_output_modes,
+ VideoRotation relative_rotation,
ErrorCB error_cb,
scoped_refptr<base::SequencedTaskRunner> client_task_runner) {
scoped_refptr<base::SequencedTaskRunner> backend_task_runner =
@@ -77,9 +78,9 @@ std::unique_ptr<ImageProcessor> ImageProcessor::Create(
auto wrapped_error_cb = base::BindRepeating(
base::IgnoreResult(&base::SequencedTaskRunner::PostTask),
client_task_runner, FROM_HERE, std::move(error_cb));
- std::unique_ptr<ImageProcessorBackend> backend =
- create_backend_cb.Run(input_config, output_config, preferred_output_modes,
- std::move(wrapped_error_cb), backend_task_runner);
+ std::unique_ptr<ImageProcessorBackend> backend = create_backend_cb.Run(
+ input_config, output_config, preferred_output_modes, relative_rotation,
+ std::move(wrapped_error_cb), backend_task_runner);
if (!backend)
return nullptr;
diff --git a/chromium/media/gpu/chromeos/image_processor.h b/chromium/media/gpu/chromeos/image_processor.h
index d0ce7acc8e3..ac62dbaf8cd 100644
--- a/chromium/media/gpu/chromeos/image_processor.h
+++ b/chromium/media/gpu/chromeos/image_processor.h
@@ -42,6 +42,7 @@ class MEDIA_GPU_EXPORT ImageProcessor {
const PortConfig& input_config,
const PortConfig& output_config,
const std::vector<OutputMode>& preferred_output_modes,
+ VideoRotation relative_rotation,
ErrorCB error_cb,
scoped_refptr<base::SequencedTaskRunner> backend_task_runner)>;
@@ -50,6 +51,7 @@ class MEDIA_GPU_EXPORT ImageProcessor {
const PortConfig& input_config,
const PortConfig& output_config,
const std::vector<OutputMode>& preferred_output_modes,
+ VideoRotation relative_rotation,
ErrorCB error_cb,
scoped_refptr<base::SequencedTaskRunner> client_task_runner);
diff --git a/chromium/media/gpu/chromeos/image_processor_backend.cc b/chromium/media/gpu/chromeos/image_processor_backend.cc
index 27c5a056e81..0d7924766ba 100644
--- a/chromium/media/gpu/chromeos/image_processor_backend.cc
+++ b/chromium/media/gpu/chromeos/image_processor_backend.cc
@@ -63,11 +63,13 @@ ImageProcessorBackend::ImageProcessorBackend(
const PortConfig& input_config,
const PortConfig& output_config,
OutputMode output_mode,
+ VideoRotation relative_rotation,
ErrorCB error_cb,
scoped_refptr<base::SequencedTaskRunner> backend_task_runner)
: input_config_(input_config),
output_config_(output_config),
output_mode_(output_mode),
+ relative_rotation_(relative_rotation),
error_cb_(error_cb),
backend_task_runner_(std::move(backend_task_runner)) {
DETACH_FROM_SEQUENCE(backend_sequence_checker_);
diff --git a/chromium/media/gpu/chromeos/image_processor_backend.h b/chromium/media/gpu/chromeos/image_processor_backend.h
index 85fcdf76f59..6b0c86f5bc8 100644
--- a/chromium/media/gpu/chromeos/image_processor_backend.h
+++ b/chromium/media/gpu/chromeos/image_processor_backend.h
@@ -113,6 +113,7 @@ class MEDIA_GPU_EXPORT ImageProcessorBackend {
const PortConfig& input_config,
const PortConfig& output_config,
OutputMode output_mode,
+ VideoRotation relative_rotation,
ErrorCB error_cb,
scoped_refptr<base::SequencedTaskRunner> backend_task_runner);
virtual ~ImageProcessorBackend();
@@ -125,6 +126,10 @@ class MEDIA_GPU_EXPORT ImageProcessorBackend {
// works as IMPORT mode for output.
const OutputMode output_mode_;
+ // ImageProcessor performs a rotation if |relative_rotation_| is not
+ // VIDEO_ROTATION_0.
+ const VideoRotation relative_rotation_;
+
// Call this callback when any error occurs.
const ErrorCB error_cb_;
diff --git a/chromium/media/gpu/chromeos/image_processor_factory.cc b/chromium/media/gpu/chromeos/image_processor_factory.cc
index 0daaab910f5..ccdfcf4c1ca 100644
--- a/chromium/media/gpu/chromeos/image_processor_factory.cc
+++ b/chromium/media/gpu/chromeos/image_processor_factory.cc
@@ -81,7 +81,8 @@ std::unique_ptr<ImageProcessor> CreateV4L2ImageProcessorWithInputCandidates(
return v4l2_vda_helpers::CreateImageProcessor(
input_fourcc, *output_fourcc, input_size, output_size, visible_size,
- num_buffers, V4L2Device::Create(), ImageProcessor::OutputMode::IMPORT,
+ VideoFrame::StorageType::STORAGE_GPU_MEMORY_BUFFER, num_buffers,
+ V4L2Device::Create(), ImageProcessor::OutputMode::IMPORT,
std::move(client_task_runner), std::move(error_cb));
}
return nullptr;
@@ -96,6 +97,7 @@ std::unique_ptr<ImageProcessor> ImageProcessorFactory::Create(
const ImageProcessor::PortConfig& output_config,
const std::vector<ImageProcessor::OutputMode>& preferred_output_modes,
size_t num_buffers,
+ VideoRotation relative_rotation,
scoped_refptr<base::SequencedTaskRunner> client_task_runner,
ImageProcessor::ErrorCB error_cb) {
std::vector<ImageProcessor::CreateBackendCB> create_funcs;
@@ -112,9 +114,10 @@ std::unique_ptr<ImageProcessor> ImageProcessorFactory::Create(
std::unique_ptr<ImageProcessor> image_processor;
for (auto& create_func : create_funcs) {
- image_processor = ImageProcessor::Create(
- std::move(create_func), input_config, output_config,
- preferred_output_modes, error_cb, client_task_runner);
+ image_processor =
+ ImageProcessor::Create(std::move(create_func), input_config,
+ output_config, preferred_output_modes,
+ relative_rotation, error_cb, client_task_runner);
if (image_processor)
return image_processor;
}
diff --git a/chromium/media/gpu/chromeos/image_processor_factory.h b/chromium/media/gpu/chromeos/image_processor_factory.h
index 7ab5b4cf56b..a81eddde318 100644
--- a/chromium/media/gpu/chromeos/image_processor_factory.h
+++ b/chromium/media/gpu/chromeos/image_processor_factory.h
@@ -54,6 +54,7 @@ class MEDIA_GPU_EXPORT ImageProcessorFactory {
const ImageProcessor::PortConfig& output_config,
const std::vector<ImageProcessor::OutputMode>& preferred_output_modes,
size_t num_buffers,
+ VideoRotation relative_rotation,
scoped_refptr<base::SequencedTaskRunner> client_task_runner,
ImageProcessor::ErrorCB error_cb);
diff --git a/chromium/media/gpu/chromeos/image_processor_test.cc b/chromium/media/gpu/chromeos/image_processor_test.cc
index a4192c90397..84c3535203b 100644
--- a/chromium/media/gpu/chromeos/image_processor_test.cc
+++ b/chromium/media/gpu/chromeos/image_processor_test.cc
@@ -76,6 +76,14 @@ const base::FilePath::CharType* kNV12Image180P =
const base::FilePath::CharType* kNV12Image360PIn480P =
FILE_PATH_LITERAL("puppets-640x360_in_640x480.nv12.yuv");
+// Files for rotation test.
+const base::FilePath::CharType* kNV12Image90 =
+ FILE_PATH_LITERAL("bear_192x320_90.nv12.yuv");
+const base::FilePath::CharType* kNV12Image180 =
+ FILE_PATH_LITERAL("bear_320x192_180.nv12.yuv");
+const base::FilePath::CharType* kNV12Image270 =
+ FILE_PATH_LITERAL("bear_192x320_270.nv12.yuv");
+
class ImageProcessorParamTest
: public ::testing::Test,
public ::testing::WithParamInterface<
@@ -115,6 +123,26 @@ class ImageProcessorParamTest
ImageProcessor::PortConfig output_config(
output_fourcc, output_image->Size(), output_layout->planes(),
output_image->VisibleRect(), output_storage_types);
+ int rotation =
+ ((output_image->Rotation() - input_image.Rotation() + 4) % 4) * 90;
+ VideoRotation relative_rotation = VIDEO_ROTATION_0;
+ switch (rotation) {
+ case 0:
+ relative_rotation = VIDEO_ROTATION_0;
+ break;
+ case 90:
+ relative_rotation = VIDEO_ROTATION_90;
+ break;
+ case 180:
+ relative_rotation = VIDEO_ROTATION_180;
+ break;
+ case 270:
+ relative_rotation = VIDEO_ROTATION_270;
+ break;
+ default:
+ NOTREACHED() << "Invalid rotation: " << rotation;
+ return nullptr;
+ }
// TODO(crbug.com/917951): Select more appropriate number of buffers.
constexpr size_t kNumBuffers = 1;
LOG_ASSERT(output_image->IsMetadataLoaded());
@@ -156,7 +184,8 @@ class ImageProcessorParamTest
}
auto ip_client = test::ImageProcessorClient::Create(
- input_config, output_config, kNumBuffers, std::move(frame_processors));
+ input_config, output_config, kNumBuffers, relative_rotation,
+ std::move(frame_processors));
return ip_client;
}
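
A worked instance of the relative-rotation arithmetic above, assuming
Rotation() reports quarter turns in [0, 3] (an assumption, not stated in the
patch):

// Input captured at 180 degrees (2 quarter turns), output expected at 90
// degrees (1 quarter turn):
//   ((1 - 2 + 4) % 4) * 90 == 270
// so the image processor must apply a further 270-degree rotation, which the
// switch above maps to VIDEO_ROTATION_270.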
@@ -294,6 +323,17 @@ INSTANTIATE_TEST_SUITE_P(NV12CroppingAndScaling,
::testing::Values(std::make_tuple(kNV12Image360PIn480P,
kNV12Image270P)));
+// Rotates the frame to the specified orientation.
+// Currently only the VA-API image processor may support rotation.
+INSTANTIATE_TEST_SUITE_P(
+ NV12Rotation,
+ ImageProcessorParamTest,
+ ::testing::Values(std::make_tuple(kNV12Image, kNV12Image90),
+ std::make_tuple(kNV12Image, kNV12Image180),
+ std::make_tuple(kNV12Image, kNV12Image270),
+ std::make_tuple(kNV12Image180, kNV12Image90),
+ std::make_tuple(kNV12Image180, kNV12Image)));
+
#if defined(OS_CHROMEOS)
// TODO(hiroh): Add more tests.
// MEM->DMABUF (V4L2VideoEncodeAccelerator),
diff --git a/chromium/media/gpu/chromeos/libyuv_image_processor_backend.cc b/chromium/media/gpu/chromeos/libyuv_image_processor_backend.cc
index ab55071a330..a9de6bc113d 100644
--- a/chromium/media/gpu/chromeos/libyuv_image_processor_backend.cc
+++ b/chromium/media/gpu/chromeos/libyuv_image_processor_backend.cc
@@ -12,6 +12,7 @@
#include "third_party/libyuv/include/libyuv/convert.h"
#include "third_party/libyuv/include/libyuv/convert_from.h"
#include "third_party/libyuv/include/libyuv/convert_from_argb.h"
+#include "third_party/libyuv/include/libyuv/rotate.h"
#include "third_party/libyuv/include/libyuv/scale.h"
namespace media {
@@ -72,6 +73,57 @@ void NV12Scale(uint8_t* tmp_buffer,
dst_stride_uv, dst_chroma_width, dst_chroma_height);
}
+// TODO(https://bugs.chromium.org/p/libyuv/issues/detail?id=840): Remove
+// this once libyuv implements NV12Rotate(), and use libyuv::NV12Rotate()
+// instead.
+bool NV12Rotate(uint8_t* tmp_buffer,
+ const uint8_t* src_y,
+ int src_stride_y,
+ const uint8_t* src_uv,
+ int src_stride_uv,
+ int src_width,
+ int src_height,
+ uint8_t* dst_y,
+ int dst_stride_y,
+ uint8_t* dst_uv,
+ int dst_stride_uv,
+ int dst_width,
+ int dst_height,
+ VideoRotation relative_rotation) {
+ libyuv::RotationModeEnum rotation = libyuv::kRotate0;
+ switch (relative_rotation) {
+ case VIDEO_ROTATION_0:
+ NOTREACHED() << "Unexpected rotation: " << relative_rotation;
+ return false;
+ case VIDEO_ROTATION_90:
+ rotation = libyuv::kRotate90;
+ break;
+ case VIDEO_ROTATION_180:
+ rotation = libyuv::kRotate180;
+ break;
+ case VIDEO_ROTATION_270:
+ rotation = libyuv::kRotate270;
+ break;
+ }
+
+ // Lay out temporary U and V planes inside |tmp_buffer|.
+ const int tmp_uv_width = (dst_width + 1) / 2;
+ const int tmp_uv_height = (dst_height + 1) / 2;
+ uint8_t* const tmp_u = tmp_buffer;
+ uint8_t* const tmp_v = tmp_u + tmp_uv_width * tmp_uv_height;
+
+ // Rotate the NV12 planes to I420.
+ int ret = libyuv::NV12ToI420Rotate(
+ src_y, src_stride_y, src_uv, src_stride_uv, dst_y, dst_stride_y, tmp_u,
+ tmp_uv_width, tmp_v, tmp_uv_width, src_width, src_height, rotation);
+ if (ret != 0)
+ return false;
+
+ // Merge the UV planes into the destination.
+ libyuv::MergeUVPlane(tmp_u, tmp_uv_width, tmp_v, tmp_uv_width, dst_uv,
+ dst_stride_uv, tmp_uv_width, tmp_uv_height);
+ return true;
+}
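
A minimal sketch (illustration only) of the temporary-buffer contract this
helper imposes on its caller: the rotated U and V planes are staged as I420
before MergeUVPlane() re-interleaves them, so |tmp_buffer| must hold two
quarter-resolution planes at the destination size:

#include <cstdint>
#include <vector>

// Hypothetical sizing helper for NV12Rotate()'s |tmp_buffer|: one U plane
// plus one V plane, each at half the destination width and height, i.e.
// about dst_width * dst_height / 2 bytes in total.
std::vector<uint8_t> MakeNV12RotateTmpBuffer(int dst_width, int dst_height) {
  const int uv_width = (dst_width + 1) / 2;
  const int uv_height = (dst_height + 1) / 2;
  return std::vector<uint8_t>(static_cast<size_t>(2) * uv_width * uv_height);
}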
+
enum class SupportResult {
Supported,
SupportedWithPivot,
@@ -90,7 +142,7 @@ SupportResult IsFormatSupported(Fourcc input_fourcc, Fourcc output_fourcc) {
{Fourcc::YV12, Fourcc::NV12, false},
{Fourcc::AB24, Fourcc::NV12, true},
{Fourcc::XB24, Fourcc::NV12, true},
- // Scaling.
+ // Scaling or rotating.
{Fourcc::NV12, Fourcc::NV12, true},
};
@@ -128,6 +180,7 @@ std::unique_ptr<ImageProcessorBackend> LibYUVImageProcessorBackend::Create(
const PortConfig& input_config,
const PortConfig& output_config,
const std::vector<OutputMode>& preferred_output_modes,
+ VideoRotation relative_rotation,
ErrorCB error_cb,
scoped_refptr<base::SequencedTaskRunner> backend_task_runner) {
VLOGF(2);
@@ -206,7 +259,8 @@ std::unique_ptr<ImageProcessorBackend> LibYUVImageProcessorBackend::Create(
// used as |tmp_buffer| in NV12Scale().
// TODO(hiroh): Remove this restriction once libyuv::NV12Scale() lands.
if (!gfx::Rect(input_config.visible_rect.size())
- .Contains(gfx::Rect(output_config.visible_rect.size()))) {
+ .Contains(gfx::Rect(output_config.visible_rect.size())) &&
+ relative_rotation == VIDEO_ROTATION_0) {
VLOGF(2) << "Down-scaling support only, input_config.visible_rect="
<< input_config.visible_rect.ToString()
<< ", output_config.visible_rect="
@@ -237,7 +291,7 @@ std::unique_ptr<ImageProcessorBackend> LibYUVImageProcessorBackend::Create(
PortConfig(output_config.fourcc, output_config.size,
output_config.planes, output_config.visible_rect,
{output_storage_type}),
- OutputMode::IMPORT, std::move(error_cb),
+ OutputMode::IMPORT, relative_rotation, std::move(error_cb),
std::move(backend_task_runner)));
VLOGF(2) << "LibYUVImageProcessorBackend created for converting from "
<< input_config.ToString() << " to " << output_config.ToString();
@@ -251,11 +305,13 @@ LibYUVImageProcessorBackend::LibYUVImageProcessorBackend(
const PortConfig& input_config,
const PortConfig& output_config,
OutputMode output_mode,
+ VideoRotation relative_rotation,
ErrorCB error_cb,
scoped_refptr<base::SequencedTaskRunner> backend_task_runner)
: ImageProcessorBackend(input_config,
output_config,
output_mode,
+ relative_rotation,
std::move(error_cb),
std::move(backend_task_runner)),
input_frame_mapper_(std::move(input_frame_mapper)),
@@ -353,6 +409,26 @@ int LibYUVImageProcessorBackend::DoConversion(const VideoFrame* const input,
return LIBYUV_FUNC(I420ToNV12, Y_U_V_DATA(intermediate_frame_),
Y_UV_DATA(output));
case PIXEL_FORMAT_NV12:
+ // Rotation mode.
+ if (relative_rotation_ != VIDEO_ROTATION_0) {
+ // The size of |tmp_buffer| of NV12Rotate() should be
+ // output_visible_rect().GetArea() / 2, which used to store temporary
+ // U and V planes for I420 data. Although
+ // |intermediate_frame_->data(0)| is much larger than the required
+ // size, we use the frame to simplify the code.
+ NV12Rotate(intermediate_frame_->data(0),
+ input->visible_data(VideoFrame::kYPlane),
+ input->stride(VideoFrame::kYPlane),
+ input->visible_data(VideoFrame::kUPlane),
+ input->stride(VideoFrame::kUPlane),
+ input->visible_rect().width(),
+ input->visible_rect().height(), Y_UV_DATA(output),
+ output->visible_rect().width(),
+ output->visible_rect().height(), relative_rotation_);
+ return 0;
+ }
+
+ // Scaling mode.
// The size of |tmp_buffer| of NV12Scale() should be
// input_visible_rect().GetArea() / 2 +
// output_visible_rect().GetArea() / 2. Although |intermediate_frame_|
diff --git a/chromium/media/gpu/chromeos/libyuv_image_processor_backend.h b/chromium/media/gpu/chromeos/libyuv_image_processor_backend.h
index f8836096bdd..cd6562bbf82 100644
--- a/chromium/media/gpu/chromeos/libyuv_image_processor_backend.h
+++ b/chromium/media/gpu/chromeos/libyuv_image_processor_backend.h
@@ -32,6 +32,7 @@ class MEDIA_GPU_EXPORT LibYUVImageProcessorBackend
const PortConfig& input_config,
const PortConfig& output_config,
const std::vector<OutputMode>& preferred_output_modes,
+ VideoRotation relative_rotation,
ErrorCB error_cb,
scoped_refptr<base::SequencedTaskRunner> backend_task_runner);
@@ -48,6 +49,7 @@ class MEDIA_GPU_EXPORT LibYUVImageProcessorBackend
const PortConfig& input_config,
const PortConfig& output_config,
OutputMode output_mode,
+ VideoRotation relative_rotation,
ErrorCB error_cb,
scoped_refptr<base::SequencedTaskRunner> backend_task_runner);
~LibYUVImageProcessorBackend() override;
diff --git a/chromium/media/gpu/chromeos/mailbox_video_frame_converter.cc b/chromium/media/gpu/chromeos/mailbox_video_frame_converter.cc
index 8bb25386dce..19c3829afa8 100644
--- a/chromium/media/gpu/chromeos/mailbox_video_frame_converter.cc
+++ b/chromium/media/gpu/chromeos/mailbox_video_frame_converter.cc
@@ -14,6 +14,7 @@
#include "base/trace_event/trace_event.h"
#include "gpu/command_buffer/common/shared_image_usage.h"
#include "gpu/command_buffer/service/scheduler.h"
+#include "gpu/ipc/common/gpu_client_ids.h"
#include "gpu/ipc/service/gpu_channel.h"
#include "gpu/ipc/service/shared_image_stub.h"
#include "media/base/format_utils.h"
@@ -154,7 +155,7 @@ void MailboxVideoFrameConverter::ConvertFrame(scoped_refptr<VideoFrame> frame) {
DCHECK(parent_task_runner_->RunsTasksInCurrentSequence());
DVLOGF(4);
- if (!frame || !frame->HasDmaBufs())
+ if (!frame || frame->storage_type() != VideoFrame::STORAGE_GPU_MEMORY_BUFFER)
return OnError(FROM_HERE, "Invalid frame.");
VideoFrame* origin_frame = unwrap_frame_cb_.Run(*frame);
@@ -225,9 +226,9 @@ void MailboxVideoFrameConverter::WrapMailboxAndVideoFrameAndOutput(
frame->format(), mailbox_holders, std::move(release_mailbox_cb),
frame->coded_size(), frame->visible_rect(), frame->natural_size(),
frame->timestamp());
- mailbox_frame->metadata()->MergeMetadataFrom(frame->metadata());
- mailbox_frame->metadata()->SetBoolean(
- VideoFrameMetadata::READ_LOCK_FENCES_ENABLED, true);
+ mailbox_frame->set_color_space(frame->ColorSpace());
+ mailbox_frame->set_metadata(*(frame->metadata()));
+ mailbox_frame->metadata()->read_lock_fences_enabled = true;
output_cb_.Run(mailbox_frame);
}
@@ -336,7 +337,7 @@ bool MailboxVideoFrameConverter::GenerateSharedImageOnGPUThread(
const uint32_t shared_image_usage =
gpu::SHARED_IMAGE_USAGE_DISPLAY | gpu::SHARED_IMAGE_USAGE_SCANOUT;
const bool success = shared_image_stub->CreateSharedImage(
- mailbox, shared_image_stub->channel()->client_id(),
+ mailbox, gpu::kPlatformVideoFramePoolClientId,
std::move(gpu_memory_buffer_handle), *buffer_format,
gpu::kNullSurfaceHandle, destination_visible_rect.size(),
video_frame->ColorSpace(), shared_image_usage);
diff --git a/chromium/media/gpu/chromeos/platform_video_frame_pool.cc b/chromium/media/gpu/chromeos/platform_video_frame_pool.cc
index 90a7db5ae52..eebdcb4d5b8 100644
--- a/chromium/media/gpu/chromeos/platform_video_frame_pool.cc
+++ b/chromium/media/gpu/chromeos/platform_video_frame_pool.cc
@@ -25,9 +25,9 @@ scoped_refptr<VideoFrame> DefaultCreateFrame(
const gfx::Rect& visible_rect,
const gfx::Size& natural_size,
base::TimeDelta timestamp) {
- return CreatePlatformVideoFrame(gpu_memory_buffer_factory, format, coded_size,
- visible_rect, natural_size, timestamp,
- gfx::BufferUsage::SCANOUT_VDA_WRITE);
+ return CreateGpuMemoryBufferVideoFrame(
+ gpu_memory_buffer_factory, format, coded_size, visible_rect, natural_size,
+ timestamp, gfx::BufferUsage::SCANOUT_VDA_WRITE);
}
} // namespace
@@ -51,6 +51,15 @@ PlatformVideoFramePool::~PlatformVideoFramePool() {
weak_this_factory_.InvalidateWeakPtrs();
}
+// static
+gfx::GpuMemoryBufferId PlatformVideoFramePool::GetGpuMemoryBufferId(
+ const VideoFrame& frame) {
+ DCHECK_EQ(frame.storage_type(),
+ VideoFrame::StorageType::STORAGE_GPU_MEMORY_BUFFER);
+ DCHECK(frame.GetGpuMemoryBuffer());
+ return frame.GetGpuMemoryBuffer()->GetId();
+}
+
scoped_refptr<VideoFrame> PlatformVideoFramePool::GetFrame() {
DCHECK(parent_task_runner_->RunsTasksInCurrentSequence());
DVLOGF(4);
@@ -61,7 +70,7 @@ scoped_refptr<VideoFrame> PlatformVideoFramePool::GetFrame() {
return nullptr;
}
- VideoPixelFormat format = frame_layout_->fourcc().ToVideoPixelFormat();
+ const VideoPixelFormat format = frame_layout_->fourcc().ToVideoPixelFormat();
const gfx::Size& coded_size = frame_layout_->size();
if (free_frames_.empty()) {
if (GetTotalNumFrames_Locked() >= max_num_frames_)
@@ -88,14 +97,15 @@ scoped_refptr<VideoFrame> PlatformVideoFramePool::GetFrame() {
scoped_refptr<VideoFrame> wrapped_frame = VideoFrame::WrapVideoFrame(
origin_frame, format, visible_rect_, natural_size_);
DCHECK(wrapped_frame);
- frames_in_use_.emplace(GetDmabufId(*wrapped_frame), origin_frame.get());
+ frames_in_use_.emplace(GetGpuMemoryBufferId(*wrapped_frame),
+ origin_frame.get());
wrapped_frame->AddDestructionObserver(
base::BindOnce(&PlatformVideoFramePool::OnFrameReleasedThunk, weak_this_,
parent_task_runner_, std::move(origin_frame)));
// Clear all metadata before returning to the client, in case the origin frame
// has any unrelated metadata.
- wrapped_frame->metadata()->Clear();
+ wrapped_frame->clear_metadata();
return wrapped_frame;
}
@@ -134,7 +144,8 @@ base::Optional<GpuBufferLayout> PlatformVideoFramePool::Initialize(
create_frame_cb_.Run(gpu_memory_buffer_factory_, format, coded_size,
visible_rect_, natural_size_, base::TimeDelta());
if (!frame) {
- VLOGF(1) << "Failed to create video frame";
+ VLOGF(1) << "Failed to create video frame " << format << " (fourcc "
+ << fourcc.ToString() << ")";
return base::nullopt;
}
frame_layout_ = GpuBufferLayout::Create(fourcc, frame->coded_size(),
@@ -168,7 +179,7 @@ VideoFrame* PlatformVideoFramePool::UnwrapFrame(
DVLOGF(4);
base::AutoLock auto_lock(lock_);
- auto it = frames_in_use_.find(GetDmabufId(wrapped_frame));
+ auto it = frames_in_use_.find(GetGpuMemoryBufferId(wrapped_frame));
return (it == frames_in_use_.end()) ? nullptr : it->second;
}
@@ -203,7 +214,7 @@ void PlatformVideoFramePool::OnFrameReleased(
DVLOGF(4);
base::AutoLock auto_lock(lock_);
- DmabufId frame_id = GetDmabufId(*origin_frame);
+ gfx::GpuMemoryBufferId frame_id = GetGpuMemoryBufferId(*origin_frame);
auto it = frames_in_use_.find(frame_id);
DCHECK(it != frames_in_use_.end());
frames_in_use_.erase(it);
diff --git a/chromium/media/gpu/chromeos/platform_video_frame_pool.h b/chromium/media/gpu/chromeos/platform_video_frame_pool.h
index b983f7c3393..b594d107c51 100644
--- a/chromium/media/gpu/chromeos/platform_video_frame_pool.h
+++ b/chromium/media/gpu/chromeos/platform_video_frame_pool.h
@@ -21,6 +21,7 @@
#include "media/base/video_types.h"
#include "media/gpu/chromeos/dmabuf_video_frame_pool.h"
#include "media/gpu/media_gpu_export.h"
+#include "ui/gfx/gpu_memory_buffer.h"
namespace gpu {
class GpuMemoryBufferFactory;
@@ -43,6 +44,9 @@ class MEDIA_GPU_EXPORT PlatformVideoFramePool : public DmabufVideoFramePool {
gpu::GpuMemoryBufferFactory* gpu_memory_buffer_factory);
~PlatformVideoFramePool() override;
+ // Returns the ID of the GpuMemoryBuffer wrapped by |frame|.
+ static gfx::GpuMemoryBufferId GetGpuMemoryBufferId(const VideoFrame& frame);
+
// DmabufVideoFramePool implementation.
base::Optional<GpuBufferLayout> Initialize(const Fourcc& fourcc,
const gfx::Size& coded_size,
@@ -58,12 +62,12 @@ class MEDIA_GPU_EXPORT PlatformVideoFramePool : public DmabufVideoFramePool {
// recycling, and bind destruction callback at original frames.
VideoFrame* UnwrapFrame(const VideoFrame& wrapped_frame);
- private:
- friend class PlatformVideoFramePoolTest;
-
// Returns the number of frames in the pool for testing purposes.
size_t GetPoolSizeForTesting();
+ private:
+ friend class PlatformVideoFramePoolTest;
+
// Thunk to post OnFrameReleased() to |task_runner|.
// Because this thunk may be called on any thread, we don't want to
// dereference WeakPtr. Therefore we wrap the WeakPtr in base::Optional to
@@ -116,8 +120,9 @@ class MEDIA_GPU_EXPORT PlatformVideoFramePool : public DmabufVideoFramePool {
// should be the same as |format_| and |coded_size_|.
base::circular_deque<scoped_refptr<VideoFrame>> free_frames_
GUARDED_BY(lock_);
- // Mapping from the unique_id of the wrapped frame to the original frame.
- std::map<DmabufId, VideoFrame*> frames_in_use_ GUARDED_BY(lock_);
+ // Mapping from the frame's GpuMemoryBuffer's ID to the original frame.
+ std::map<gfx::GpuMemoryBufferId, VideoFrame*> frames_in_use_
+ GUARDED_BY(lock_);
// The maximum number of frames created by the pool.
size_t max_num_frames_ GUARDED_BY(lock_) = 0;
diff --git a/chromium/media/gpu/chromeos/platform_video_frame_pool_unittest.cc b/chromium/media/gpu/chromeos/platform_video_frame_pool_unittest.cc
index 19b03688c81..ac7bb4ae5b1 100644
--- a/chromium/media/gpu/chromeos/platform_video_frame_pool_unittest.cc
+++ b/chromium/media/gpu/chromeos/platform_video_frame_pool_unittest.cc
@@ -9,44 +9,34 @@
#include <memory>
#include <vector>
-#include "base/files/file.h"
-#include "base/files/file_path.h"
-#include "base/files/file_util.h"
-#include "base/files/scoped_file.h"
+#include "base/bind_helpers.h"
#include "base/test/task_environment.h"
#include "base/threading/thread_task_runner_handle.h"
+#include "gpu/command_buffer/common/mailbox_holder.h"
+#include "media/base/format_utils.h"
#include "media/gpu/chromeos/fourcc.h"
+#include "media/video/fake_gpu_memory_buffer.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace media {
namespace {
-base::ScopedFD CreateTmpHandle() {
- base::FilePath path;
- DCHECK(CreateTemporaryFile(&path));
- base::File file(path, base::File::FLAG_OPEN | base::File::FLAG_READ);
- DCHECK(file.IsValid());
- return base::ScopedFD(file.TakePlatformFile());
-}
-
-scoped_refptr<VideoFrame> CreateDmabufVideoFrame(
+scoped_refptr<VideoFrame> CreateGpuMemoryBufferVideoFrame(
gpu::GpuMemoryBufferFactory* factory,
VideoPixelFormat format,
const gfx::Size& coded_size,
const gfx::Rect& visible_rect,
const gfx::Size& natural_size,
base::TimeDelta timestamp) {
- base::Optional<VideoFrameLayout> layout =
- VideoFrameLayout::Create(format, coded_size);
- DCHECK(layout);
-
- std::vector<base::ScopedFD> dmabuf_fds;
- for (size_t i = 0; i < VideoFrame::NumPlanes(format); ++i)
- dmabuf_fds.push_back(CreateTmpHandle());
-
- return VideoFrame::WrapExternalDmabufs(*layout, visible_rect, natural_size,
- std::move(dmabuf_fds), timestamp);
+ base::Optional<gfx::BufferFormat> gfx_format =
+ VideoPixelFormatToGfxBufferFormat(format);
+ DCHECK(gfx_format);
+ const gpu::MailboxHolder mailbox_holders[VideoFrame::kMaxPlanes] = {};
+ return VideoFrame::WrapExternalGpuMemoryBuffer(
+ visible_rect, natural_size,
+ std::make_unique<FakeGpuMemoryBuffer>(coded_size, *gfx_format),
+ mailbox_holders, base::NullCallback(), timestamp);
}
} // namespace
@@ -54,16 +44,14 @@ scoped_refptr<VideoFrame> CreateDmabufVideoFrame(
class PlatformVideoFramePoolTest
: public ::testing::TestWithParam<VideoPixelFormat> {
public:
- using DmabufId = DmabufVideoFramePool::DmabufId;
-
PlatformVideoFramePoolTest()
: task_environment_(base::test::TaskEnvironment::TimeSource::MOCK_TIME),
pool_(new PlatformVideoFramePool(nullptr)) {
- pool_->create_frame_cb_ = base::BindRepeating(&CreateDmabufVideoFrame);
+ SetCreateFrameCB(base::BindRepeating(&CreateGpuMemoryBufferVideoFrame));
pool_->set_parent_task_runner(base::ThreadTaskRunnerHandle::Get());
}
- void Initialize(const Fourcc& fourcc) {
+ bool Initialize(const Fourcc& fourcc) {
constexpr gfx::Size kCodedSize(320, 240);
constexpr size_t kNumFrames = 10;
@@ -72,7 +60,7 @@ class PlatformVideoFramePoolTest
layout_ = pool_->Initialize(fourcc, kCodedSize, visible_rect_,
natural_size_, kNumFrames);
- EXPECT_TRUE(layout_);
+ return !!layout_;
}
scoped_refptr<VideoFrame> GetFrame(int timestamp_ms) {
@@ -88,8 +76,8 @@ class PlatformVideoFramePoolTest
return frame;
}
- void CheckPoolSize(size_t size) const {
- EXPECT_EQ(size, pool_->GetPoolSizeForTesting());
+ void SetCreateFrameCB(PlatformVideoFramePool::CreateFrameCB cb) {
+ pool_->create_frame_cb_ = cb;
}
protected:
@@ -103,17 +91,18 @@ class PlatformVideoFramePoolTest
INSTANTIATE_TEST_SUITE_P(All,
PlatformVideoFramePoolTest,
- testing::Values(PIXEL_FORMAT_I420,
- PIXEL_FORMAT_YV12,
+ testing::Values(PIXEL_FORMAT_YV12,
PIXEL_FORMAT_NV12,
- PIXEL_FORMAT_ARGB));
+ PIXEL_FORMAT_ARGB,
+ PIXEL_FORMAT_P016LE));
TEST_P(PlatformVideoFramePoolTest, SingleFrameReuse) {
const auto fourcc = Fourcc::FromVideoPixelFormat(GetParam());
ASSERT_TRUE(fourcc.has_value());
- Initialize(fourcc.value());
+ ASSERT_TRUE(Initialize(fourcc.value()));
scoped_refptr<VideoFrame> frame = GetFrame(10);
- DmabufId id = DmabufVideoFramePool::GetDmabufId(*frame);
+ gfx::GpuMemoryBufferId id =
+ PlatformVideoFramePool::GetGpuMemoryBufferId(*frame);
// Clear frame reference to return the frame to the pool.
frame = nullptr;
@@ -121,38 +110,40 @@ TEST_P(PlatformVideoFramePoolTest, SingleFrameReuse) {
// Verify that the next frame from the pool uses the same memory.
scoped_refptr<VideoFrame> new_frame = GetFrame(20);
- EXPECT_EQ(id, DmabufVideoFramePool::GetDmabufId(*new_frame));
+ EXPECT_EQ(id, PlatformVideoFramePool::GetGpuMemoryBufferId(*new_frame));
}
TEST_P(PlatformVideoFramePoolTest, MultipleFrameReuse) {
const auto fourcc = Fourcc::FromVideoPixelFormat(GetParam());
ASSERT_TRUE(fourcc.has_value());
- Initialize(fourcc.value());
+ ASSERT_TRUE(Initialize(fourcc.value()));
scoped_refptr<VideoFrame> frame1 = GetFrame(10);
scoped_refptr<VideoFrame> frame2 = GetFrame(20);
- DmabufId id1 = DmabufVideoFramePool::GetDmabufId(*frame1);
- DmabufId id2 = DmabufVideoFramePool::GetDmabufId(*frame2);
+ gfx::GpuMemoryBufferId id1 =
+ PlatformVideoFramePool::GetGpuMemoryBufferId(*frame1);
+ gfx::GpuMemoryBufferId id2 =
+ PlatformVideoFramePool::GetGpuMemoryBufferId(*frame2);
frame1 = nullptr;
task_environment_.RunUntilIdle();
frame1 = GetFrame(30);
- EXPECT_EQ(id1, DmabufVideoFramePool::GetDmabufId(*frame1));
+ EXPECT_EQ(id1, PlatformVideoFramePool::GetGpuMemoryBufferId(*frame1));
frame2 = nullptr;
task_environment_.RunUntilIdle();
frame2 = GetFrame(40);
- EXPECT_EQ(id2, DmabufVideoFramePool::GetDmabufId(*frame2));
+ EXPECT_EQ(id2, PlatformVideoFramePool::GetGpuMemoryBufferId(*frame2));
frame1 = nullptr;
frame2 = nullptr;
task_environment_.RunUntilIdle();
- CheckPoolSize(2u);
+ EXPECT_EQ(2u, pool_->GetPoolSizeForTesting());
}
TEST_P(PlatformVideoFramePoolTest, InitializeWithDifferentFourcc) {
const auto fourcc = Fourcc::FromVideoPixelFormat(GetParam());
ASSERT_TRUE(fourcc.has_value());
- Initialize(fourcc.value());
+ ASSERT_TRUE(Initialize(fourcc.value()));
scoped_refptr<VideoFrame> frame_a = GetFrame(10);
scoped_refptr<VideoFrame> frame_b = GetFrame(10);
@@ -162,52 +153,68 @@ TEST_P(PlatformVideoFramePoolTest, InitializeWithDifferentFourcc) {
task_environment_.RunUntilIdle();
// Verify that both frames are in the pool.
- CheckPoolSize(2u);
+ EXPECT_EQ(2u, pool_->GetPoolSizeForTesting());
// Verify that requesting a frame with a different format causes the pool
// to get drained.
- const Fourcc different_fourcc(Fourcc::NV21);
+ const Fourcc different_fourcc(Fourcc::XR24);
ASSERT_NE(fourcc, different_fourcc);
- Initialize(different_fourcc);
+ ASSERT_TRUE(Initialize(different_fourcc));
scoped_refptr<VideoFrame> new_frame = GetFrame(10);
- CheckPoolSize(0u);
+ EXPECT_EQ(0u, pool_->GetPoolSizeForTesting());
}
TEST_P(PlatformVideoFramePoolTest, UnwrapVideoFrame) {
const auto fourcc = Fourcc::FromVideoPixelFormat(GetParam());
ASSERT_TRUE(fourcc.has_value());
- Initialize(fourcc.value());
+ ASSERT_TRUE(Initialize(fourcc.value()));
scoped_refptr<VideoFrame> frame_1 = GetFrame(10);
scoped_refptr<VideoFrame> frame_2 = VideoFrame::WrapVideoFrame(
frame_1, frame_1->format(), frame_1->visible_rect(),
frame_1->natural_size());
EXPECT_EQ(pool_->UnwrapFrame(*frame_1), pool_->UnwrapFrame(*frame_2));
- EXPECT_TRUE(frame_1->IsSameDmaBufsAs(*frame_2));
+ EXPECT_EQ(frame_1->GetGpuMemoryBuffer(), frame_2->GetGpuMemoryBuffer());
scoped_refptr<VideoFrame> frame_3 = GetFrame(20);
EXPECT_NE(pool_->UnwrapFrame(*frame_1), pool_->UnwrapFrame(*frame_3));
- EXPECT_FALSE(frame_1->IsSameDmaBufsAs(*frame_3));
+ EXPECT_NE(frame_1->GetGpuMemoryBuffer(), frame_3->GetGpuMemoryBuffer());
}
TEST_P(PlatformVideoFramePoolTest, InitializeWithSameFourcc) {
const auto fourcc = Fourcc::FromVideoPixelFormat(GetParam());
ASSERT_TRUE(fourcc.has_value());
- Initialize(fourcc.value());
+ ASSERT_TRUE(Initialize(fourcc.value()));
scoped_refptr<VideoFrame> frame1 = GetFrame(10);
- DmabufId id1 = DmabufVideoFramePool::GetDmabufId(*frame1);
+ gfx::GpuMemoryBufferId id1 =
+ PlatformVideoFramePool::GetGpuMemoryBufferId(*frame1);
// Clear frame references to return the frames to the pool.
frame1 = nullptr;
task_environment_.RunUntilIdle();
// Request frame with the same format. The pool should not request new frames.
- Initialize(fourcc.value());
+ ASSERT_TRUE(Initialize(fourcc.value()));
scoped_refptr<VideoFrame> frame2 = GetFrame(20);
- DmabufId id2 = DmabufVideoFramePool::GetDmabufId(*frame2);
+ gfx::GpuMemoryBufferId id2 =
+ PlatformVideoFramePool::GetGpuMemoryBufferId(*frame2);
EXPECT_EQ(id1, id2);
}
+TEST_P(PlatformVideoFramePoolTest, InitializeFail) {
+ const auto fourcc = Fourcc::FromVideoPixelFormat(GetParam());
+ ASSERT_TRUE(fourcc.has_value());
+ SetCreateFrameCB(base::BindRepeating(
+ [](gpu::GpuMemoryBufferFactory* factory, VideoPixelFormat format,
+ const gfx::Size& coded_size, const gfx::Rect& visible_rect,
+ const gfx::Size& natural_size, base::TimeDelta timestamp) {
+ auto frame = scoped_refptr<VideoFrame>(nullptr);
+ return frame;
+ }));
+
+ EXPECT_FALSE(Initialize(fourcc.value()));
+}
+
// TODO(akahuang): Add a testcase to verify calling Initialize() only with
// different |max_num_frames|.
diff --git a/chromium/media/gpu/chromeos/platform_video_frame_utils.cc b/chromium/media/gpu/chromeos/platform_video_frame_utils.cc
index 9e7994040b8..ce559f9a0c0 100644
--- a/chromium/media/gpu/chromeos/platform_video_frame_utils.cc
+++ b/chromium/media/gpu/chromeos/platform_video_frame_utils.cc
@@ -142,8 +142,7 @@ scoped_refptr<VideoFrame> CreatePlatformVideoFrame(
dmabuf_fds.emplace_back(plane.fd.release());
auto frame = VideoFrame::WrapExternalDmabufs(
- *layout, visible_rect, visible_rect.size(), std::move(dmabuf_fds),
- timestamp);
+ *layout, visible_rect, natural_size, std::move(dmabuf_fds), timestamp);
if (!frame)
return nullptr;
@@ -174,6 +173,11 @@ gfx::GpuMemoryBufferHandle CreateGpuMemoryBufferHandle(
switch (video_frame->storage_type()) {
case VideoFrame::STORAGE_GPU_MEMORY_BUFFER:
handle = video_frame->GetGpuMemoryBuffer()->CloneHandle();
+ // TODO(crbug.com/1097956): handle a failure gracefully.
+ CHECK_EQ(handle.type, gfx::NATIVE_PIXMAP)
+ << "The cloned handle has an unexpected type: " << handle.type;
+ CHECK(!handle.native_pixmap_handle.planes.empty())
+ << "The cloned handle has no planes";
break;
case VideoFrame::STORAGE_DMABUFS: {
const size_t num_planes = VideoFrame::NumPlanes(video_frame->format());
@@ -185,10 +189,8 @@ gfx::GpuMemoryBufferHandle CreateGpuMemoryBufferHandle(
while (num_planes != duped_fds.size()) {
int duped_fd = -1;
duped_fd = HANDLE_EINTR(dup(duped_fds.back().get()));
- if (duped_fd == -1) {
- DLOG(ERROR) << "Failed duplicating dmabuf fd";
- return handle;
- }
+ // TODO(crbug.com/1097956): handle a failure gracefully.
+ PCHECK(duped_fd >= 0) << "Failed duplicating a dma-buf fd";
duped_fds.emplace_back(duped_fd);
}
diff --git a/chromium/media/gpu/chromeos/vd_video_decode_accelerator.cc b/chromium/media/gpu/chromeos/vd_video_decode_accelerator.cc
index 9373a3d26d4..5dd05705614 100644
--- a/chromium/media/gpu/chromeos/vd_video_decode_accelerator.cc
+++ b/chromium/media/gpu/chromeos/vd_video_decode_accelerator.cc
@@ -10,6 +10,7 @@
#include "base/bind.h"
#include "base/location.h"
#include "base/macros.h"
+#include "media/base/media_util.h"
#include "media/base/video_color_space.h"
#include "media/base/video_decoder_config.h"
#include "media/base/video_frame.h"
@@ -137,7 +138,7 @@ bool VdVideoDecodeAccelerator::Initialize(const Config& config,
std::make_unique<VdaVideoFramePool>(weak_this_, client_task_runner_);
vd_ = create_vd_cb_.Run(client_task_runner_, std::move(frame_pool),
std::make_unique<VideoFrameConverter>(),
- nullptr /* gpu_memory_buffer_factory */);
+ std::make_unique<NullMediaLog>());
if (!vd_)
return false;
@@ -385,9 +386,7 @@ base::Optional<Picture> VdVideoDecodeAccelerator::GetPicture(
}
int32_t picture_buffer_id = it->second;
int32_t bitstream_id = FakeTimestampToBitstreamId(frame.timestamp());
- bool allow_overlay = false;
- ignore_result(frame.metadata()->GetBoolean(VideoFrameMetadata::ALLOW_OVERLAY,
- &allow_overlay));
+ bool allow_overlay = frame.metadata()->allow_overlay;
return base::make_optional(Picture(picture_buffer_id, bitstream_id,
frame.visible_rect(), frame.ColorSpace(),
allow_overlay));
diff --git a/chromium/media/gpu/chromeos/vd_video_decode_accelerator.h b/chromium/media/gpu/chromeos/vd_video_decode_accelerator.h
index 9b9481ca60a..ffdb43c8eb8 100644
--- a/chromium/media/gpu/chromeos/vd_video_decode_accelerator.h
+++ b/chromium/media/gpu/chromeos/vd_video_decode_accelerator.h
@@ -24,12 +24,9 @@
#include "media/gpu/media_gpu_export.h"
#include "media/video/video_decode_accelerator.h"
-namespace gpu {
-class GpuMemoryBufferFactory;
-} // namespace gpu
-
namespace media {
+class MediaLog;
class VideoFrame;
// Implements the VideoDecodeAccelerator backed by a VideoDecoder.
@@ -52,7 +49,7 @@ class MEDIA_GPU_EXPORT VdVideoDecodeAccelerator
scoped_refptr<base::SequencedTaskRunner>,
std::unique_ptr<DmabufVideoFramePool>,
std::unique_ptr<VideoFrameConverter>,
- gpu::GpuMemoryBufferFactory* const)>;
+ std::unique_ptr<MediaLog>)>;
// Create VdVideoDecodeAccelerator instance, and call Initialize().
// Return nullptr if Initialize() failed.
diff --git a/chromium/media/gpu/chromeos/video_decoder_pipeline.cc b/chromium/media/gpu/chromeos/video_decoder_pipeline.cc
index 906861ba788..3759f9f994f 100644
--- a/chromium/media/gpu/chromeos/video_decoder_pipeline.cc
+++ b/chromium/media/gpu/chromeos/video_decoder_pipeline.cc
@@ -14,7 +14,9 @@
#include "base/task/task_traits.h"
#include "base/task/thread_pool.h"
#include "build/build_config.h"
+#include "media/base/async_destroy_video_decoder.h"
#include "media/base/limits.h"
+#include "media/base/media_log.h"
#include "media/gpu/chromeos/dmabuf_video_frame_pool.h"
#include "media/gpu/chromeos/image_processor.h"
#include "media/gpu/chromeos/image_processor_factory.h"
@@ -54,6 +56,14 @@ base::Optional<Fourcc> PickRenderableFourcc(
return base::nullopt;
}
+// Appends |new_status| to |parent_status|, unless |parent_status| is kOk, in
+// which case there is nothing to append to and |new_status| is forwarded.
+Status AppendOrForwardStatus(Status parent_status, Status new_status) {
+ if (parent_status.is_ok())
+ return new_status;
+ return std::move(parent_status).AddCause(std::move(new_status));
+}
+
} // namespace
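
A hedged usage sketch of the status chaining above, mirroring how
CreateAndInitializeVD() uses it below: each failed creation or initialization
attempt contributes a cause, so the Status finally handed to |init_cb_|
carries the whole failure chain:

// First failure: the parent status is still kOk, so the new status is
// forwarded and becomes the chain's root.
Status error = AppendOrForwardStatus(Status(),
                                     StatusCode::kDecoderFailedCreation);
// Later failure: the parent is no longer kOk, so the new status is attached
// as a cause and the root code is preserved.
error = AppendOrForwardStatus(std::move(error),
                              StatusCode::kChromeOSVideoDecoderNoDecoders);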
DecoderInterface::DecoderInterface(
@@ -68,38 +78,36 @@ std::unique_ptr<VideoDecoder> VideoDecoderPipeline::Create(
scoped_refptr<base::SequencedTaskRunner> client_task_runner,
std::unique_ptr<DmabufVideoFramePool> frame_pool,
std::unique_ptr<VideoFrameConverter> frame_converter,
- gpu::GpuMemoryBufferFactory* const gpu_memory_buffer_factory,
- GetCreateVDFunctionsCB get_create_vd_functions_cb) {
+ std::unique_ptr<MediaLog> /*media_log*/,
+ GetCreateDecoderFunctionsCB get_create_decoder_functions_cb) {
if (!client_task_runner || !frame_pool || !frame_converter) {
VLOGF(1) << "One of arguments is nullptr.";
return nullptr;
}
- if (get_create_vd_functions_cb.Run(nullptr).empty()) {
+ if (get_create_decoder_functions_cb.Run().empty()) {
VLOGF(1) << "No available function to create video decoder.";
return nullptr;
}
- return base::WrapUnique<VideoDecoder>(new VideoDecoderPipeline(
+ auto* decoder = new VideoDecoderPipeline(
std::move(client_task_runner), std::move(frame_pool),
- std::move(frame_converter), gpu_memory_buffer_factory,
- std::move(get_create_vd_functions_cb)));
+ std::move(frame_converter), std::move(get_create_decoder_functions_cb));
+ return std::make_unique<AsyncDestroyVideoDecoder<VideoDecoderPipeline>>(
+ base::WrapUnique(decoder));
}
VideoDecoderPipeline::VideoDecoderPipeline(
scoped_refptr<base::SequencedTaskRunner> client_task_runner,
std::unique_ptr<DmabufVideoFramePool> frame_pool,
std::unique_ptr<VideoFrameConverter> frame_converter,
- gpu::GpuMemoryBufferFactory* const gpu_memory_buffer_factory,
- GetCreateVDFunctionsCB get_create_vd_functions_cb)
+ GetCreateDecoderFunctionsCB get_create_decoder_functions_cb)
: client_task_runner_(std::move(client_task_runner)),
decoder_task_runner_(base::ThreadPool::CreateSingleThreadTaskRunner(
{base::WithBaseSyncPrimitives(), base::TaskPriority::USER_VISIBLE},
base::SingleThreadTaskRunnerThreadMode::DEDICATED)),
main_frame_pool_(std::move(frame_pool)),
- gpu_memory_buffer_factory_(gpu_memory_buffer_factory),
- frame_converter_(std::move(frame_converter)),
- get_create_vd_functions_cb_(std::move(get_create_vd_functions_cb)) {
+ frame_converter_(std::move(frame_converter)) {
DCHECK_CALLED_ON_VALID_SEQUENCE(client_sequence_checker_);
DETACH_FROM_SEQUENCE(decoder_sequence_checker_);
DCHECK(main_frame_pool_);
@@ -110,6 +118,8 @@ VideoDecoderPipeline::VideoDecoderPipeline(
client_weak_this_ = client_weak_this_factory_.GetWeakPtr();
decoder_weak_this_ = decoder_weak_this_factory_.GetWeakPtr();
+ remaining_create_decoder_functions_ = get_create_decoder_functions_cb.Run();
+
main_frame_pool_->set_parent_task_runner(decoder_task_runner_);
frame_converter_->Initialize(
decoder_task_runner_,
@@ -118,37 +128,30 @@ VideoDecoderPipeline::VideoDecoderPipeline(
}
VideoDecoderPipeline::~VideoDecoderPipeline() {
- // We have to destroy |main_frame_pool_| on |decoder_task_runner_|, so the
- // destructor is also called on |decoder_task_runner_|.
- DCHECK_CALLED_ON_VALID_SEQUENCE(decoder_sequence_checker_);
- DVLOGF(3);
-}
-
-void VideoDecoderPipeline::Destroy() {
- DCHECK_CALLED_ON_VALID_SEQUENCE(client_sequence_checker_);
- DVLOGF(2);
-
- client_weak_this_factory_.InvalidateWeakPtrs();
-
- decoder_task_runner_->PostTask(
- FROM_HERE,
- base::BindOnce(&VideoDecoderPipeline::DestroyTask, decoder_weak_this_));
-}
-
-void VideoDecoderPipeline::DestroyTask() {
+ // We have to destroy |main_frame_pool_| and |frame_converter_| on
+ // |decoder_task_runner_|, so the destructor must be called on
+ // |decoder_task_runner_|.
DCHECK_CALLED_ON_VALID_SEQUENCE(decoder_sequence_checker_);
DVLOGF(3);
decoder_weak_this_factory_.InvalidateWeakPtrs();
- // The frame pool and converter should be destroyed on |decoder_task_runner_|.
main_frame_pool_.reset();
frame_converter_.reset();
decoder_.reset();
- used_create_vd_func_ = nullptr;
+ remaining_create_decoder_functions_.clear();
+}
+
+void VideoDecoderPipeline::DestroyAsync(
+ std::unique_ptr<VideoDecoderPipeline> decoder) {
+ DVLOGF(2);
+ DCHECK(decoder);
+ DCHECK_CALLED_ON_VALID_SEQUENCE(decoder->client_sequence_checker_);
- delete this;
+ decoder->client_weak_this_factory_.InvalidateWeakPtrs();
+ auto* decoder_task_runner = decoder->decoder_task_runner_.get();
+ decoder_task_runner->DeleteSoon(FROM_HERE, std::move(decoder));
}
std::string VideoDecoderPipeline::GetDisplayName() const {
@@ -182,11 +185,11 @@ bool VideoDecoderPipeline::CanReadWithoutStalling() const {
}
void VideoDecoderPipeline::Initialize(const VideoDecoderConfig& config,
- bool low_delay,
+ bool /* low_delay */,
CdmContext* cdm_context,
InitCB init_cb,
const OutputCB& output_cb,
- const WaitingCB& waiting_cb) {
+ const WaitingCB& /* waiting_cb */) {
DCHECK_CALLED_ON_VALID_SEQUENCE(client_sequence_checker_);
VLOGF(2) << "config: " << config.AsHumanReadableString();
@@ -217,79 +220,76 @@ void VideoDecoderPipeline::Initialize(const VideoDecoderConfig& config,
void VideoDecoderPipeline::InitializeTask(const VideoDecoderConfig& config,
InitCB init_cb,
const OutputCB& output_cb) {
+ DVLOGF(3);
DCHECK_CALLED_ON_VALID_SEQUENCE(decoder_sequence_checker_);
DCHECK(!init_cb_);
client_output_cb_ = std::move(output_cb);
init_cb_ = std::move(init_cb);
- base::queue<VideoDecoderPipeline::CreateVDFunc> create_vd_funcs =
- get_create_vd_functions_cb_.Run(used_create_vd_func_);
+ // Initialize(), and correspondingly InitializeTask(), are called both on
+ // first initialization and on subsequent stream |config| changes, e.g. a
+ // change of resolution. Subsequent initializations are marked by |decoder_|
+ // already existing.
if (!decoder_) {
- CreateAndInitializeVD(std::move(create_vd_funcs), config,
- StatusCode::kChromeOSVideoDecoderNoDecoders);
+ CreateAndInitializeVD(config, Status());
} else {
decoder_->Initialize(
config,
- // If it fails to re-initialize current |decoder_|, it will create
- // another decoder instance by trying available VD creation functions
- // again. See |OnInitializeDone| for detail.
base::BindOnce(&VideoDecoderPipeline::OnInitializeDone,
- decoder_weak_this_, std::move(create_vd_funcs), config,
- StatusCode::kChromeOSVideoDecoderNoDecoders),
+ decoder_weak_this_, config, Status()),
base::BindRepeating(&VideoDecoderPipeline::OnFrameDecoded,
decoder_weak_this_));
}
}
-void VideoDecoderPipeline::CreateAndInitializeVD(
- base::queue<VideoDecoderPipeline::CreateVDFunc> create_vd_funcs,
- VideoDecoderConfig config,
- ::media::Status parent_error) {
+void VideoDecoderPipeline::CreateAndInitializeVD(VideoDecoderConfig config,
+ Status parent_error) {
DCHECK_CALLED_ON_VALID_SEQUENCE(decoder_sequence_checker_);
DCHECK(init_cb_);
DCHECK(!decoder_);
- DCHECK(!used_create_vd_func_);
DVLOGF(3);
- if (create_vd_funcs.empty()) {
- DVLOGF(2) << "No available video decoder.";
+ if (remaining_create_decoder_functions_.empty()) {
+ DVLOGF(2) << "No remaining video decoder create functions to try";
client_task_runner_->PostTask(
- FROM_HERE, base::BindOnce(std::move(init_cb_), parent_error));
+ FROM_HERE,
+ base::BindOnce(
+ std::move(init_cb_),
+ AppendOrForwardStatus(
+ parent_error, StatusCode::kChromeOSVideoDecoderNoDecoders)));
return;
}
- used_create_vd_func_ = create_vd_funcs.front();
- create_vd_funcs.pop();
- decoder_ = used_create_vd_func_(decoder_task_runner_, decoder_weak_this_);
+ decoder_ = remaining_create_decoder_functions_.front()(decoder_task_runner_,
+ decoder_weak_this_);
+ remaining_create_decoder_functions_.pop_front();
+
if (!decoder_) {
- DVLOGF(2) << "Failed to create VideoDecoder.";
- used_create_vd_func_ = nullptr;
+ DVLOGF(2) << "|decoder_| creation failed, trying again with the next "
+ "available create function.";
return CreateAndInitializeVD(
- std::move(create_vd_funcs), config,
- std::move(parent_error).AddCause(StatusCode::kDecoderFailedCreation));
+ config, AppendOrForwardStatus(parent_error,
+ StatusCode::kDecoderFailedCreation));
}
decoder_->Initialize(
config,
base::BindOnce(&VideoDecoderPipeline::OnInitializeDone,
- decoder_weak_this_, std::move(create_vd_funcs), config,
- std::move(parent_error)),
+ decoder_weak_this_, config, std::move(parent_error)),
base::BindRepeating(&VideoDecoderPipeline::OnFrameDecoded,
decoder_weak_this_));
}
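
CreateAndInitializeVD() consumes |remaining_create_decoder_functions_| front to back until a candidate both Create()s and Initialize()s. In the real code the loop is asynchronous and is re-entered from OnInitializeDone(); below is a synchronous sketch of the same fallback logic, with Decoder and the factory signature standing in for DecoderInterface and CreateDecoderFunction:

    #include <list>
    #include <memory>

    struct Decoder {
      virtual ~Decoder() = default;
      virtual bool Initialize() = 0;  // Stands in for the callback-based API.
    };
    using CreateDecoderFunction = std::unique_ptr<Decoder> (*)();

    // Tries each factory at most once, in order; returning nullptr here maps
    // to kChromeOSVideoDecoderNoDecoders in the code above.
    std::unique_ptr<Decoder> CreateFirstWorkingDecoder(
        std::list<CreateDecoderFunction>& remaining) {
      while (!remaining.empty()) {
        CreateDecoderFunction create = remaining.front();
        remaining.pop_front();
        if (std::unique_ptr<Decoder> decoder = create()) {
          if (decoder->Initialize())
            return decoder;
        }
      }
      return nullptr;
    }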
-void VideoDecoderPipeline::OnInitializeDone(
- base::queue<VideoDecoderPipeline::CreateVDFunc> create_vd_funcs,
- VideoDecoderConfig config,
- ::media::Status parent_error,
- ::media::Status status) {
+void VideoDecoderPipeline::OnInitializeDone(VideoDecoderConfig config,
+ Status parent_error,
+ Status status) {
DCHECK_CALLED_ON_VALID_SEQUENCE(decoder_sequence_checker_);
DCHECK(init_cb_);
DVLOGF(4) << "Initialization status = " << status.code();
if (status.is_ok()) {
- DVLOGF(2) << "Initialize VD successfully.";
+ DVLOGF(2) << "|decoder_| successfully initialized.";
    // TODO(tmathmeyer): consider logging the causes of |parent_error| as they
    // might have info about why other decoders failed.
client_task_runner_->PostTask(
@@ -297,11 +297,11 @@ void VideoDecoderPipeline::OnInitializeDone(
return;
}
- DVLOGF(3) << "Reset VD, try the next create function.";
+ DVLOGF(3) << "|decoder_| initialization failed, trying again with the next "
+ "available create function.";
decoder_ = nullptr;
- used_create_vd_func_ = nullptr;
- CreateAndInitializeVD(std::move(create_vd_funcs), config,
- std::move(parent_error).AddCause(std::move(status)));
+ CreateAndInitializeVD(config,
+ AppendOrForwardStatus(parent_error, std::move(status)));
}
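
AppendOrForwardStatus() is not defined in this diff. Judging from its call sites and from the expectations in the new unit test below, it plausibly forwards the new Status when |parent_error| is still ok, and otherwise archives the new code as a cause while keeping the first failure as the primary code. A hypothetical sketch only, using the Status API visible in this diff:

    #include <utility>

    // Hypothetical shape, inferred from the call sites above; the real helper
    // lives elsewhere in media/ and may differ.
    Status AppendOrForwardStatus(Status parent, Status current) {
      if (parent.is_ok())
        return current;  // First failure: forward it unchanged.
      // Keep |parent| as the primary code; archive |current| as a cause.
      return std::move(parent).AddCause(std::move(current));
    }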
void VideoDecoderPipeline::Reset(base::OnceClosure closure) {
@@ -417,9 +417,9 @@ void VideoDecoderPipeline::OnFrameConverted(scoped_refptr<VideoFrame> frame) {
}
// Flag that the video frame is capable of being put in an overlay.
- frame->metadata()->SetBoolean(VideoFrameMetadata::ALLOW_OVERLAY, true);
+ frame->metadata()->allow_overlay = true;
// Flag that the video frame was decoded in a power efficient way.
- frame->metadata()->SetBoolean(VideoFrameMetadata::POWER_EFFICIENT, true);
+ frame->metadata()->power_efficient = true;
// MojoVideoDecoderService expects the |output_cb_| to be called on the client
// task runner, even though media::VideoDecoder states frames should be output
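
This hunk, like the picture_buffer_manager.cc one further down, migrates from the key-based VideoFrameMetadata::SetBoolean() API to plain struct fields. A reduced stand-in (not the real media::VideoFrameMetadata) showing the new style:

    struct FrameMetadata {
      bool allow_overlay = false;    // Was SetBoolean(ALLOW_OVERLAY, true).
      bool power_efficient = false;  // Was SetBoolean(POWER_EFFICIENT, true).
    };

    void MarkOverlayCapableAndEfficient(FrameMetadata& metadata) {
      // Plain member writes replace the enum-keyed setter calls, and readers
      // can test the flags directly instead of going through GetBoolean().
      metadata.allow_overlay = true;
      metadata.power_efficient = true;
    }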
diff --git a/chromium/media/gpu/chromeos/video_decoder_pipeline.h b/chromium/media/gpu/chromeos/video_decoder_pipeline.h
index 030ed9058e1..c0c6ac10c5f 100644
--- a/chromium/media/gpu/chromeos/video_decoder_pipeline.h
+++ b/chromium/media/gpu/chromeos/video_decoder_pipeline.h
@@ -8,7 +8,6 @@
#include <memory>
#include "base/callback_forward.h"
-#include "base/containers/queue.h"
#include "base/memory/weak_ptr.h"
#include "base/optional.h"
#include "base/sequence_checker.h"
@@ -24,13 +23,10 @@ namespace base {
class SequencedTaskRunner;
}
-namespace gpu {
-class GpuMemoryBufferFactory;
-} // namespace gpu
-
namespace media {
class DmabufVideoFramePool;
+class MediaLog;
// An interface that defines methods to operate on video decoder components
// inside the VideoDecoderPipeline. The interface is similar to
@@ -42,7 +38,7 @@ class DmabufVideoFramePool;
// Note: All methods and callbacks should be called on the same sequence.
class MEDIA_GPU_EXPORT DecoderInterface {
public:
- using InitCB = base::OnceCallback<void(::media::Status status)>;
+ using InitCB = base::OnceCallback<void(Status status)>;
  // TODO(crbug.com/998413): Replace VideoFrame with a GpuMemoryBuffer-based
  // instance.
using OutputCB = base::RepeatingCallback<void(scoped_refptr<VideoFrame>)>;
@@ -130,21 +126,22 @@ class MEDIA_GPU_EXPORT DecoderInterface {
class MEDIA_GPU_EXPORT VideoDecoderPipeline : public VideoDecoder,
public DecoderInterface::Client {
public:
- // Function signature for creating VideoDecoder.
- using CreateVDFunc = std::unique_ptr<DecoderInterface> (*)(
+ using CreateDecoderFunction = std::unique_ptr<DecoderInterface> (*)(
scoped_refptr<base::SequencedTaskRunner>,
base::WeakPtr<DecoderInterface::Client>);
- using GetCreateVDFunctionsCB =
- base::RepeatingCallback<base::queue<CreateVDFunc>(CreateVDFunc)>;
+ using CreateDecoderFunctions = std::list<CreateDecoderFunction>;
+ using GetCreateDecoderFunctionsCB =
+ base::RepeatingCallback<CreateDecoderFunctions()>;
static std::unique_ptr<VideoDecoder> Create(
scoped_refptr<base::SequencedTaskRunner> client_task_runner,
std::unique_ptr<DmabufVideoFramePool> frame_pool,
std::unique_ptr<VideoFrameConverter> frame_converter,
- gpu::GpuMemoryBufferFactory* const gpu_memory_buffer_factory,
- GetCreateVDFunctionsCB get_create_vd_functions_cb);
+ std::unique_ptr<MediaLog> media_log,
+ GetCreateDecoderFunctionsCB get_create_decoder_functions_cb);
~VideoDecoderPipeline() override;
+ static void DestroyAsync(std::unique_ptr<VideoDecoderPipeline>);
// VideoDecoder implementation
std::string GetDisplayName() const override;
@@ -152,7 +149,6 @@ class MEDIA_GPU_EXPORT VideoDecoderPipeline : public VideoDecoder,
int GetMaxDecodeRequests() const override;
bool NeedsBitstreamConversion() const override;
bool CanReadWithoutStalling() const override;
-
void Initialize(const VideoDecoderConfig& config,
bool low_delay,
CdmContext* cdm_context,
@@ -173,19 +169,13 @@ class MEDIA_GPU_EXPORT VideoDecoderPipeline : public VideoDecoder,
const gfx::Rect& visible_rect) override;
private:
- // Get a list of the available functions for creating VideoDeocoder except
- // |current_func| one.
- static base::queue<CreateVDFunc> GetCreateVDFunctions(
- CreateVDFunc current_func);
+ friend class VideoDecoderPipelineTest;
VideoDecoderPipeline(
scoped_refptr<base::SequencedTaskRunner> client_task_runner,
std::unique_ptr<DmabufVideoFramePool> frame_pool,
std::unique_ptr<VideoFrameConverter> frame_converter,
- gpu::GpuMemoryBufferFactory* const gpu_memory_buffer_factory,
- GetCreateVDFunctionsCB get_create_vd_functions_cb);
- void Destroy() override;
- void DestroyTask();
+ GetCreateDecoderFunctionsCB get_create_decoder_functions_cb);
void InitializeTask(const VideoDecoderConfig& config,
InitCB init_cb,
@@ -193,13 +183,10 @@ class MEDIA_GPU_EXPORT VideoDecoderPipeline : public VideoDecoder,
void ResetTask(base::OnceClosure closure);
void DecodeTask(scoped_refptr<DecoderBuffer> buffer, DecodeCB decode_cb);
- void CreateAndInitializeVD(base::queue<CreateVDFunc> create_vd_funcs,
- VideoDecoderConfig config,
- ::media::Status parent_error);
- void OnInitializeDone(base::queue<CreateVDFunc> create_vd_funcs,
- VideoDecoderConfig config,
- ::media::Status parent_error,
- ::media::Status success);
+ void CreateAndInitializeVD(VideoDecoderConfig config, Status parent_error);
+ void OnInitializeDone(VideoDecoderConfig config,
+ Status parent_error,
+ Status status);
void OnDecodeDone(bool eos_buffer, DecodeCB decode_cb, DecodeStatus status);
void OnResetDone();
@@ -241,10 +228,6 @@ class MEDIA_GPU_EXPORT VideoDecoderPipeline : public VideoDecoder,
// the client should be created using this pool.
// Used on |decoder_task_runner_|.
std::unique_ptr<DmabufVideoFramePool> main_frame_pool_;
- // Used to generate additional frame pools for intermediate results if
- // required. The instance is indirectly owned by GpuChildThread, therefore
- // alive as long as the GPU process is.
- gpu::GpuMemoryBufferFactory* const gpu_memory_buffer_factory_;
// The image processor is only created when the decoder cannot output frames
// with renderable format.
@@ -254,14 +237,14 @@ class MEDIA_GPU_EXPORT VideoDecoderPipeline : public VideoDecoder,
// |client_task_runner_|.
std::unique_ptr<VideoFrameConverter> frame_converter_;
- // The callback to get a list of function for creating DecoderInterface.
- GetCreateVDFunctionsCB get_create_vd_functions_cb_;
-
// The current video decoder implementation. Valid after initialization is
// successfully done.
std::unique_ptr<DecoderInterface> decoder_;
- // The create function of |decoder_|. nullptr iff |decoder_| is nullptr.
- CreateVDFunc used_create_vd_func_ = nullptr;
+
+ // |remaining_create_decoder_functions_| holds all the potential video decoder
+ // creation functions. We try them all in the given order until one succeeds.
+  // Only used after initialization, on the sequence checked by
+  // |decoder_sequence_checker_|.
+ CreateDecoderFunctions remaining_create_decoder_functions_;
// Callback from the client. These callback are called on
// |client_task_runner_|.
diff --git a/chromium/media/gpu/chromeos/video_decoder_pipeline_unittest.cc b/chromium/media/gpu/chromeos/video_decoder_pipeline_unittest.cc
new file mode 100644
index 00000000000..b95a52d7e64
--- /dev/null
+++ b/chromium/media/gpu/chromeos/video_decoder_pipeline_unittest.cc
@@ -0,0 +1,229 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/gpu/chromeos/video_decoder_pipeline.h"
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/test/gmock_callback_support.h"
+#include "base/test/task_environment.h"
+#include "base/threading/thread_task_runner_handle.h"
+#include "media/base/media_util.h"
+#include "media/base/status.h"
+#include "media/base/video_decoder_config.h"
+#include "media/gpu/chromeos/dmabuf_video_frame_pool.h"
+#include "media/gpu/chromeos/mailbox_video_frame_converter.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using base::test::RunClosure;
+using ::testing::_;
+using ::testing::TestWithParam;
+
+namespace media {
+
+MATCHER_P(MatchesStatusCode, status_code, "") {
+  // media::Status doesn't provide an operator==(...), so we add a simple one
+  // here.
+ return arg.code() == status_code;
+}
+
+class MockVideoFramePool : public DmabufVideoFramePool {
+ public:
+ MockVideoFramePool() = default;
+ ~MockVideoFramePool() override = default;
+
+ // DmabufVideoFramePool implementation.
+ MOCK_METHOD5(Initialize,
+ base::Optional<GpuBufferLayout>(const Fourcc&,
+ const gfx::Size&,
+ const gfx::Rect&,
+ const gfx::Size&,
+ size_t));
+ MOCK_METHOD0(GetFrame, scoped_refptr<VideoFrame>());
+ MOCK_METHOD0(IsExhausted, bool());
+ MOCK_METHOD1(NotifyWhenFrameAvailable, void(base::OnceClosure));
+};
+
+constexpr gfx::Size kCodedSize(48, 36);
+
+class MockDecoder : public DecoderInterface {
+ public:
+ MockDecoder()
+ : DecoderInterface(base::ThreadTaskRunnerHandle::Get(),
+ base::WeakPtr<DecoderInterface::Client>(nullptr)) {}
+ ~MockDecoder() override = default;
+
+ MOCK_METHOD3(Initialize,
+ void(const VideoDecoderConfig&, InitCB, const OutputCB&));
+ MOCK_METHOD2(Decode, void(scoped_refptr<DecoderBuffer>, DecodeCB));
+ MOCK_METHOD1(Reset, void(base::OnceClosure));
+ MOCK_METHOD0(ApplyResolutionChange, void());
+};
+
+struct DecoderPipelineTestParams {
+ VideoDecoderPipeline::CreateDecoderFunctions create_decoder_functions;
+ StatusCode status_code;
+};
+
+class VideoDecoderPipelineTest
+ : public testing::TestWithParam<DecoderPipelineTestParams> {
+ public:
+ VideoDecoderPipelineTest()
+ : config_(kCodecVP8,
+ VP8PROFILE_ANY,
+ VideoDecoderConfig::AlphaMode::kIsOpaque,
+ VideoColorSpace(),
+ kNoTransformation,
+ kCodedSize,
+ gfx::Rect(kCodedSize),
+ kCodedSize,
+ EmptyExtraData(),
+ EncryptionScheme::kUnencrypted),
+ pool_(new MockVideoFramePool),
+ converter_(new VideoFrameConverter),
+ decoder_(new VideoDecoderPipeline(
+ base::ThreadTaskRunnerHandle::Get(),
+ std::move(pool_),
+ std::move(converter_),
+ base::BindRepeating([]() {
+ // This callback needs to be configured in the individual tests.
+ return VideoDecoderPipeline::CreateDecoderFunctions();
+ }))) {}
+ ~VideoDecoderPipelineTest() override = default;
+
+ void TearDown() override {
+ VideoDecoderPipeline::DestroyAsync(std::move(decoder_));
+ task_environment_.RunUntilIdle();
+ }
+ MOCK_METHOD1(OnInit, void(Status));
+ MOCK_METHOD1(OnOutput, void(scoped_refptr<VideoFrame>));
+
+ void SetCreateDecoderFunctions(
+ VideoDecoderPipeline::CreateDecoderFunctions functions) {
+ decoder_->remaining_create_decoder_functions_ = functions;
+ }
+
+ void InitializeDecoder() {
+ decoder_->Initialize(
+ config_, false /* low_delay */, nullptr /* cdm_context */,
+ base::BindOnce(&VideoDecoderPipelineTest::OnInit,
+ base::Unretained(this)),
+ base::BindRepeating(&VideoDecoderPipelineTest::OnOutput,
+ base::Unretained(this)),
+ base::DoNothing());
+ }
+
+ static std::unique_ptr<DecoderInterface> CreateNullMockDecoder(
+ scoped_refptr<base::SequencedTaskRunner> /* decoder_task_runner */,
+ base::WeakPtr<DecoderInterface::Client> /* client */) {
+ return nullptr;
+ }
+
+ // Creates a MockDecoder with an EXPECT_CALL on Initialize that returns ok.
+ static std::unique_ptr<DecoderInterface> CreateGoodMockDecoder(
+ scoped_refptr<base::SequencedTaskRunner> /* decoder_task_runner */,
+ base::WeakPtr<DecoderInterface::Client> /* client */) {
+ std::unique_ptr<MockDecoder> decoder(new MockDecoder());
+ EXPECT_CALL(*decoder, Initialize(_, _, _))
+ .WillOnce(::testing::WithArgs<1>([](VideoDecoder::InitCB init_cb) {
+ std::move(init_cb).Run(OkStatus());
+ }));
+ return std::move(decoder);
+ }
+
+  // Creates a MockDecoder with an EXPECT_CALL on Initialize that returns an
+  // error.
+ static std::unique_ptr<DecoderInterface> CreateBadMockDecoder(
+ scoped_refptr<base::SequencedTaskRunner> /* decoder_task_runner */,
+ base::WeakPtr<DecoderInterface::Client> /* client */) {
+ std::unique_ptr<MockDecoder> decoder(new MockDecoder());
+ EXPECT_CALL(*decoder, Initialize(_, _, _))
+ .WillOnce(::testing::WithArgs<1>([](VideoDecoder::InitCB init_cb) {
+ std::move(init_cb).Run(StatusCode::kDecoderFailedInitialization);
+ }));
+ return std::move(decoder);
+ }
+
+ DecoderInterface* GetUnderlyingDecoder() { return decoder_->decoder_.get(); }
+
+ base::test::TaskEnvironment task_environment_;
+ const VideoDecoderConfig config_;
+ DecoderInterface* underlying_decoder_ptr_ = nullptr;
+
+ std::unique_ptr<MockVideoFramePool> pool_;
+ std::unique_ptr<VideoFrameConverter> converter_;
+ std::unique_ptr<VideoDecoderPipeline> decoder_;
+};
+
+// Verifies the status code for several typical CreateDecoderFunctions cases.
+TEST_P(VideoDecoderPipelineTest, Initialize) {
+ SetCreateDecoderFunctions(GetParam().create_decoder_functions);
+
+ base::RunLoop run_loop;
+ base::Closure quit_closure = run_loop.QuitClosure();
+ EXPECT_CALL(*this, OnInit(MatchesStatusCode(GetParam().status_code)))
+ .WillOnce(RunClosure(quit_closure));
+
+ InitializeDecoder();
+ run_loop.Run();
+
+ EXPECT_EQ(GetParam().status_code == StatusCode::kOk,
+ !!GetUnderlyingDecoder());
+}
+
+const struct DecoderPipelineTestParams kDecoderPipelineTestParams[] = {
+ // An empty set of CreateDecoderFunctions.
+ {{}, StatusCode::kChromeOSVideoDecoderNoDecoders},
+
+    // Just one CreateDecoderFunction that fails to Create() (i.e. returns a
+    // null Decoder).
+ {{&VideoDecoderPipelineTest::CreateNullMockDecoder},
+ StatusCode::kDecoderFailedCreation},
+
+    // Just one CreateDecoderFunction that works fine, i.e. Create()s and
+ // Initialize()s correctly.
+ {{&VideoDecoderPipelineTest::CreateGoodMockDecoder}, StatusCode::kOk},
+
+    // One CreateDecoderFunction that Create()s ok but fails to Initialize()
+    // correctly.
+ {{&VideoDecoderPipelineTest::CreateBadMockDecoder},
+ StatusCode::kDecoderFailedInitialization},
+
+ // Two CreateDecoderFunctions, one that fails to Create() (i.e. returns a
+ // null Decoder), and one that works. The first error StatusCode is lost
+ // because VideoDecoderPipeline::OnInitializeDone() throws it away.
+ {{&VideoDecoderPipelineTest::CreateNullMockDecoder,
+ &VideoDecoderPipelineTest::CreateGoodMockDecoder},
+ StatusCode::kOk},
+
+ // Two CreateDecoderFunctions, one that Create()s ok but fails to
+ // Initialize(), and one that works. The first error StatusCode is lost
+ // because VideoDecoderPipeline::OnInitializeDone() throws it away.
+ {{&VideoDecoderPipelineTest::CreateBadMockDecoder,
+ &VideoDecoderPipelineTest::CreateGoodMockDecoder},
+ StatusCode::kOk},
+
+ // Two CreateDecoderFunctions, one that fails to Create() (i.e. returns a
+ // null Decoder), and one that fails to Initialize(). The first error
+ // StatusCode is the only one we can check here: a Status object is created
+ // with a "primary" StatusCode, archiving subsequent ones in a private
+ // member.
+ {{&VideoDecoderPipelineTest::CreateNullMockDecoder,
+ &VideoDecoderPipelineTest::CreateBadMockDecoder},
+ StatusCode::kDecoderFailedCreation},
+    // The previous case, in reverse order.
+ {{&VideoDecoderPipelineTest::CreateBadMockDecoder,
+ &VideoDecoderPipelineTest::CreateNullMockDecoder},
+ StatusCode::kDecoderFailedInitialization},
+
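+    // Three CreateDecoderFunctions: two that Create() ok but fail to
+    // Initialize(), followed by one that works.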
+ {{&VideoDecoderPipelineTest::CreateBadMockDecoder,
+ &VideoDecoderPipelineTest::CreateBadMockDecoder,
+ &VideoDecoderPipelineTest::CreateGoodMockDecoder},
+ StatusCode::kOk},
+};
+
+INSTANTIATE_TEST_SUITE_P(All,
+ VideoDecoderPipelineTest,
+ testing::ValuesIn(kDecoderPipelineTestParams));
+
+} // namespace media
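
The file follows the standard gtest value-parameterized pattern: a single TEST_P body, a table of DecoderPipelineTestParams, and INSTANTIATE_TEST_SUITE_P with ValuesIn() to expand the table into individual cases. The same machinery in miniature, with illustrative names:

    #include "testing/gtest/include/gtest/gtest.h"

    struct MyParam {
      int input;
      int expected;
    };

    class MyTest : public testing::TestWithParam<MyParam> {};

    // Runs once per entry of |kParams|; GetParam() yields the current entry.
    TEST_P(MyTest, DoublesInput) {
      EXPECT_EQ(GetParam().expected, GetParam().input * 2);
    }

    const MyParam kParams[] = {{1, 2}, {21, 42}};
    INSTANTIATE_TEST_SUITE_P(All, MyTest, testing::ValuesIn(kParams));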
diff --git a/chromium/media/gpu/h264_decoder.cc b/chromium/media/gpu/h264_decoder.cc
index 59ab81d16ba..93abea8c715 100644
--- a/chromium/media/gpu/h264_decoder.cc
+++ b/chromium/media/gpu/h264_decoder.cc
@@ -8,6 +8,7 @@
#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/callback_helpers.h"
+#include "base/logging.h"
#include "base/numerics/safe_conversions.h"
#include "base/optional.h"
#include "base/stl_util.h"
@@ -567,6 +568,13 @@ bool H264Decoder::ModifyReferencePicList(const H264SliceHeader* slice_hdr,
DVLOG(1) << "Malformed stream, no pic num " << pic_num_lx;
return false;
}
+
+ if (ref_idx_lx > num_ref_idx_lX_active_minus1) {
+ DVLOG(1) << "Bounds mismatch: expected " << ref_idx_lx
+ << " <= " << num_ref_idx_lX_active_minus1;
+ return false;
+ }
+
ShiftRightAndInsert(ref_pic_listx, ref_idx_lx,
num_ref_idx_lX_active_minus1, pic);
ref_idx_lx++;
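
The added check rejects malformed streams in which the modification index has advanced past the active reference-list bound; without it, the shift-and-insert would write outside the region it may touch. A self-contained sketch of the guarded operation, with ShiftRightAndInsert() as a simplified analogue of the decoder's helper:

    #include <vector>

    // Simplified analogue: make room at |from| by shifting entries right up
    // to index |to| + 1, then insert |value| at |from|.
    void ShiftRightAndInsert(std::vector<int>& list, int from, int to,
                             int value) {
      list.resize(to + 2);
      for (int i = to + 1; i > from; --i)
        list[i] = list[i - 1];
      list[from] = value;
    }

    bool GuardedInsert(std::vector<int>& list, int from, int to, int value) {
      // The new bounds check in miniature: a malformed stream can drive
      // |from| past |to|, in which case list[from] would be out of bounds.
      if (from > to)
        return false;
      ShiftRightAndInsert(list, from, to, value);
      return true;
    }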
diff --git a/chromium/media/gpu/h264_dpb.cc b/chromium/media/gpu/h264_dpb.cc
index 02031457883..8ef3bafb255 100644
--- a/chromium/media/gpu/h264_dpb.cc
+++ b/chromium/media/gpu/h264_dpb.cc
@@ -7,6 +7,7 @@
#include <algorithm>
#include "base/logging.h"
+#include "base/notreached.h"
#include "base/stl_util.h"
#include "media/gpu/h264_dpb.h"
diff --git a/chromium/media/gpu/ipc/client/gpu_video_decode_accelerator_host.cc b/chromium/media/gpu/ipc/client/gpu_video_decode_accelerator_host.cc
index 464c10626ea..bd818a3e178 100644
--- a/chromium/media/gpu/ipc/client/gpu_video_decode_accelerator_host.cc
+++ b/chromium/media/gpu/ipc/client/gpu_video_decode_accelerator_host.cc
@@ -292,8 +292,8 @@ void GpuVideoDecodeAcceleratorHost::OnNotifyError(uint32_t error) {
// Client::NotifyError() may Destroy() |this|, so calling it needs to be the
// last thing done on this stack!
- VideoDecodeAccelerator::Client* client = nullptr;
- std::swap(client, client_);
+ VideoDecodeAccelerator::Client* client = client_;
+ client_ = nullptr;
client->NotifyError(static_cast<VideoDecodeAccelerator::Error>(error));
}
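
Both the old and the new version detach |client_| before invoking NotifyError(), since the callee may Destroy() the host; the rewrite merely swaps std::swap() for a clearer copy-then-null. The pattern in isolation:

    struct Client {
      virtual ~Client() = default;
      virtual void NotifyError(unsigned error) = 0;
    };

    struct Host {
      Client* client_ = nullptr;

      void OnNotifyError(unsigned error) {
        if (!client_)
          return;
        // Detach first: NotifyError() may delete |this|, so no member may be
        // touched after the call below.
        Client* client = client_;
        client_ = nullptr;
        client->NotifyError(error);  // Must be the last thing on this stack.
      }
    };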
diff --git a/chromium/media/gpu/ipc/service/gpu_video_decode_accelerator.cc b/chromium/media/gpu/ipc/service/gpu_video_decode_accelerator.cc
index efe9f6031cf..77a7881304d 100644
--- a/chromium/media/gpu/ipc/service/gpu_video_decode_accelerator.cc
+++ b/chromium/media/gpu/ipc/service/gpu_video_decode_accelerator.cc
@@ -121,9 +121,9 @@ class GpuVideoDecodeAccelerator::MessageFilter : public IPC::MessageFilter {
MessageFilter(GpuVideoDecodeAccelerator* owner, int32_t host_route_id)
: owner_(owner), host_route_id_(host_route_id) {}
- void OnChannelError() override { sender_ = NULL; }
+ void OnChannelError() override { sender_ = nullptr; }
- void OnChannelClosing() override { sender_ = NULL; }
+ void OnChannelClosing() override { sender_ = nullptr; }
void OnFilterAdded(IPC::Channel* channel) override { sender_ = channel; }
diff --git a/chromium/media/gpu/ipc/service/picture_buffer_manager.cc b/chromium/media/gpu/ipc/service/picture_buffer_manager.cc
index c9963a4ff87..d6164bb3820 100644
--- a/chromium/media/gpu/ipc/service/picture_buffer_manager.cc
+++ b/chromium/media/gpu/ipc/service/picture_buffer_manager.cc
@@ -218,15 +218,12 @@ class PictureBufferManagerImpl : public PictureBufferManager {
frame->set_color_space(picture.color_space());
- if (picture.allow_overlay())
- frame->metadata()->SetBoolean(VideoFrameMetadata::ALLOW_OVERLAY, true);
- if (picture.read_lock_fences_enabled()) {
- frame->metadata()->SetBoolean(
- VideoFrameMetadata::READ_LOCK_FENCES_ENABLED, true);
- }
+ frame->metadata()->allow_overlay = picture.allow_overlay();
+ frame->metadata()->read_lock_fences_enabled =
+ picture.read_lock_fences_enabled();
// TODO(sandersd): Provide an API for VDAs to control this.
- frame->metadata()->SetBoolean(VideoFrameMetadata::POWER_EFFICIENT, true);
+ frame->metadata()->power_efficient = true;
return frame;
}
diff --git a/chromium/media/gpu/ipc/service/vda_video_decoder.cc b/chromium/media/gpu/ipc/service/vda_video_decoder.cc
index 5dbc5214002..f925028013a 100644
--- a/chromium/media/gpu/ipc/service/vda_video_decoder.cc
+++ b/chromium/media/gpu/ipc/service/vda_video_decoder.cc
@@ -16,6 +16,7 @@
#include "gpu/config/gpu_driver_bug_workarounds.h"
#include "gpu/config/gpu_info.h"
#include "gpu/config/gpu_preferences.h"
+#include "media/base/async_destroy_video_decoder.h"
#include "media/base/bitstream_buffer.h"
#include "media/base/decoder_buffer.h"
#include "media/base/media_log.h"
@@ -102,8 +103,7 @@ bool IsProfileSupported(
} // namespace
// static
-std::unique_ptr<VdaVideoDecoder, std::default_delete<VideoDecoder>>
-VdaVideoDecoder::Create(
+std::unique_ptr<VideoDecoder> VdaVideoDecoder::Create(
scoped_refptr<base::SingleThreadTaskRunner> parent_task_runner,
scoped_refptr<base::SingleThreadTaskRunner> gpu_task_runner,
std::unique_ptr<MediaLog> media_log,
@@ -111,21 +111,19 @@ VdaVideoDecoder::Create(
const gpu::GpuPreferences& gpu_preferences,
const gpu::GpuDriverBugWorkarounds& gpu_workarounds,
GetStubCB get_stub_cb) {
- // Constructed in a variable to avoid _CheckUniquePtr() PRESUBMIT.py regular
- // expressions, which do not understand custom deleters.
- // TODO(sandersd): Extend base::WrapUnique() to handle this.
- std::unique_ptr<VdaVideoDecoder, std::default_delete<VideoDecoder>> ptr(
- new VdaVideoDecoder(
- std::move(parent_task_runner), std::move(gpu_task_runner),
- std::move(media_log), target_color_space,
- base::BindOnce(&PictureBufferManager::Create),
- base::BindOnce(&CreateCommandBufferHelper, std::move(get_stub_cb)),
- base::BindRepeating(&CreateAndInitializeVda, gpu_preferences,
- gpu_workarounds),
- GpuVideoAcceleratorUtil::ConvertGpuToMediaDecodeCapabilities(
- GpuVideoDecodeAcceleratorFactory::GetDecoderCapabilities(
- gpu_preferences, gpu_workarounds))));
- return ptr;
+ auto* decoder = new VdaVideoDecoder(
+ std::move(parent_task_runner), std::move(gpu_task_runner),
+ std::move(media_log), target_color_space,
+ base::BindOnce(&PictureBufferManager::Create),
+ base::BindOnce(&CreateCommandBufferHelper, std::move(get_stub_cb)),
+ base::BindRepeating(&CreateAndInitializeVda, gpu_preferences,
+ gpu_workarounds),
+ GpuVideoAcceleratorUtil::ConvertGpuToMediaDecodeCapabilities(
+ GpuVideoDecodeAcceleratorFactory::GetDecoderCapabilities(
+ gpu_preferences, gpu_workarounds)));
+
+ return std::make_unique<AsyncDestroyVideoDecoder<VdaVideoDecoder>>(
+ base::WrapUnique(decoder));
}
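
Create() now returns the decoder wrapped in AsyncDestroyVideoDecoder<VdaVideoDecoder>, replacing the old std::default_delete<VideoDecoder> custom-deleter arrangement. The wrapper presumably forwards the VideoDecoder interface and routes destruction through the static DestroyAsync() hook; a hedged sketch of just the ownership behavior (the real template in media/base/async_destroy_video_decoder.h may differ):

    #include <memory>
    #include <utility>

    template <typename T>
    class AsyncDestroyAdapter {
     public:
      explicit AsyncDestroyAdapter(std::unique_ptr<T> decoder)
          : decoder_(std::move(decoder)) {}

      // Instead of deleting in place, hand the wrapped decoder to its own
      // asynchronous teardown hook, which can finish up on another thread.
      ~AsyncDestroyAdapter() {
        if (decoder_)
          T::DestroyAsync(std::move(decoder_));
      }

     private:
      std::unique_ptr<T> decoder_;
    };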
VdaVideoDecoder::VdaVideoDecoder(
@@ -160,38 +158,40 @@ VdaVideoDecoder::VdaVideoDecoder(
gpu_weak_this_));
}
-void VdaVideoDecoder::Destroy() {
+void VdaVideoDecoder::DestroyAsync(std::unique_ptr<VdaVideoDecoder> decoder) {
DVLOG(1) << __func__;
- DCHECK(parent_task_runner_->BelongsToCurrentThread());
+ DCHECK(decoder);
+ DCHECK(decoder->parent_task_runner_->BelongsToCurrentThread());
- // TODO(sandersd): The documentation says that Destroy() fires any pending
- // callbacks.
+ // TODO(sandersd): The documentation says that DestroyAsync() fires any
+ // pending callbacks.
// Prevent any more callbacks to this thread.
- parent_weak_this_factory_.InvalidateWeakPtrs();
+ decoder->parent_weak_this_factory_.InvalidateWeakPtrs();
// Pass ownership of the destruction process over to the GPU thread.
- gpu_task_runner_->PostTask(
+ auto* gpu_task_runner = decoder->gpu_task_runner_.get();
+ gpu_task_runner->PostTask(
FROM_HERE,
- base::BindOnce(&VdaVideoDecoder::DestroyOnGpuThread, gpu_weak_this_));
+ base::BindOnce(&VdaVideoDecoder::CleanupOnGpuThread, std::move(decoder)));
}
-void VdaVideoDecoder::DestroyOnGpuThread() {
+void VdaVideoDecoder::CleanupOnGpuThread(
+ std::unique_ptr<VdaVideoDecoder> decoder) {
DVLOG(2) << __func__;
- DCHECK(gpu_task_runner_->BelongsToCurrentThread());
+ DCHECK(decoder);
+ DCHECK(decoder->gpu_task_runner_->BelongsToCurrentThread());
// VDA destruction is likely to result in reentrant calls to
// NotifyEndOfBitstreamBuffer(). Invalidating |gpu_weak_vda_| ensures that we
// don't call back into |vda_| during its destruction.
- gpu_weak_vda_factory_ = nullptr;
- vda_ = nullptr;
- media_log_ = nullptr;
+ decoder->gpu_weak_vda_factory_ = nullptr;
+ decoder->vda_ = nullptr;
+ decoder->media_log_ = nullptr;
// Because |parent_weak_this_| was invalidated in Destroy(), picture buffer
// dismissals since then have been dropped on the floor.
- picture_buffer_manager_->DismissAllPictureBuffers();
-
- delete this;
+ decoder->picture_buffer_manager_->DismissAllPictureBuffers();
}
VdaVideoDecoder::~VdaVideoDecoder() {
diff --git a/chromium/media/gpu/ipc/service/vda_video_decoder.h b/chromium/media/gpu/ipc/service/vda_video_decoder.h
index 07f475b7d43..72f5cf73ebb 100644
--- a/chromium/media/gpu/ipc/service/vda_video_decoder.h
+++ b/chromium/media/gpu/ipc/service/vda_video_decoder.h
@@ -64,14 +64,34 @@ class VdaVideoDecoder : public VideoDecoder,
// called on the GPU thread.
//
// See VdaVideoDecoder() for other arguments.
- static std::unique_ptr<VdaVideoDecoder, std::default_delete<VideoDecoder>>
- Create(scoped_refptr<base::SingleThreadTaskRunner> parent_task_runner,
- scoped_refptr<base::SingleThreadTaskRunner> gpu_task_runner,
- std::unique_ptr<MediaLog> media_log,
- const gfx::ColorSpace& target_color_space,
- const gpu::GpuPreferences& gpu_preferences,
- const gpu::GpuDriverBugWorkarounds& gpu_workarounds,
- GetStubCB get_stub_cb);
+ static std::unique_ptr<VideoDecoder> Create(
+ scoped_refptr<base::SingleThreadTaskRunner> parent_task_runner,
+ scoped_refptr<base::SingleThreadTaskRunner> gpu_task_runner,
+ std::unique_ptr<MediaLog> media_log,
+ const gfx::ColorSpace& target_color_space,
+ const gpu::GpuPreferences& gpu_preferences,
+ const gpu::GpuDriverBugWorkarounds& gpu_workarounds,
+ GetStubCB get_stub_cb);
+
+ ~VdaVideoDecoder() override;
+ static void DestroyAsync(std::unique_ptr<VdaVideoDecoder>);
+
+ // media::VideoDecoder implementation.
+ std::string GetDisplayName() const override;
+ void Initialize(const VideoDecoderConfig& config,
+ bool low_delay,
+ CdmContext* cdm_context,
+ InitCB init_cb,
+ const OutputCB& output_cb,
+ const WaitingCB& waiting_cb) override;
+ void Decode(scoped_refptr<DecoderBuffer> buffer, DecodeCB decode_cb) override;
+ void Reset(base::OnceClosure reset_cb) override;
+ bool NeedsBitstreamConversion() const override;
+ bool CanReadWithoutStalling() const override;
+ int GetMaxDecodeRequests() const override;
+
+ private:
+ friend class VdaVideoDecoderTest;
// |parent_task_runner|: Task runner that |this| should operate on. All
// methods must be called on |parent_task_runner| (should be the Mojo
@@ -95,30 +115,6 @@ class VdaVideoDecoder : public VideoDecoder,
CreateAndInitializeVdaCB create_and_initialize_vda_cb,
const VideoDecodeAccelerator::Capabilities& vda_capabilities);
- // media::VideoDecoder implementation.
- std::string GetDisplayName() const override;
- void Initialize(const VideoDecoderConfig& config,
- bool low_delay,
- CdmContext* cdm_context,
- InitCB init_cb,
- const OutputCB& output_cb,
- const WaitingCB& waiting_cb) override;
- void Decode(scoped_refptr<DecoderBuffer> buffer, DecodeCB decode_cb) override;
- void Reset(base::OnceClosure reset_cb) override;
- bool NeedsBitstreamConversion() const override;
- bool CanReadWithoutStalling() const override;
- int GetMaxDecodeRequests() const override;
-
- private:
- void Destroy() override;
-
- protected:
- // Owners should call Destroy(). This is automatic via
- // std::default_delete<media::VideoDecoder> when held by a
- // std::unique_ptr<media::VideoDecoder>.
- ~VdaVideoDecoder() override;
-
- private:
// media::VideoDecodeAccelerator::Client implementation.
void NotifyInitializationComplete(Status status) override;
void ProvidePictureBuffers(uint32_t requested_num_of_buffers,
@@ -134,7 +130,7 @@ class VdaVideoDecoder : public VideoDecoder,
void NotifyError(VideoDecodeAccelerator::Error error) override;
// Tasks and thread hopping.
- void DestroyOnGpuThread();
+ static void CleanupOnGpuThread(std::unique_ptr<VdaVideoDecoder>);
void InitializeOnGpuThread();
void ReinitializeOnGpuThread();
void InitializeDone(Status status);
diff --git a/chromium/media/gpu/ipc/service/vda_video_decoder_unittest.cc b/chromium/media/gpu/ipc/service/vda_video_decoder_unittest.cc
index 0bbf1b38a24..6bff33cdc50 100644
--- a/chromium/media/gpu/ipc/service/vda_video_decoder_unittest.cc
+++ b/chromium/media/gpu/ipc/service/vda_video_decoder_unittest.cc
@@ -16,6 +16,7 @@
#include "base/threading/thread.h"
#include "base/time/time.h"
#include "gpu/command_buffer/common/sync_token.h"
+#include "media/base/async_destroy_video_decoder.h"
#include "media/base/decode_status.h"
#include "media/base/decoder_buffer.h"
#include "media/base/media_util.h"
@@ -97,7 +98,7 @@ class VdaVideoDecoderTest : public testing::TestWithParam<bool> {
// In either case, vda_->Destroy() should be called once.
EXPECT_CALL(*vda_, Destroy());
- vdavd_.reset(new VdaVideoDecoder(
+ auto* vdavd = new VdaVideoDecoder(
parent_task_runner, gpu_task_runner, media_log_.Clone(),
gfx::ColorSpace(),
base::BindOnce(&VdaVideoDecoderTest::CreatePictureBufferManager,
@@ -106,8 +107,10 @@ class VdaVideoDecoderTest : public testing::TestWithParam<bool> {
base::Unretained(this)),
base::BindRepeating(&VdaVideoDecoderTest::CreateAndInitializeVda,
base::Unretained(this)),
- GetCapabilities()));
- client_ = vdavd_.get();
+ GetCapabilities());
+ vdavd_ = std::make_unique<AsyncDestroyVideoDecoder<VdaVideoDecoder>>(
+ base::WrapUnique(vdavd));
+ client_ = vdavd;
}
~VdaVideoDecoderTest() override {
@@ -137,7 +140,7 @@ class VdaVideoDecoderTest : public testing::TestWithParam<bool> {
}
void Initialize() {
- EXPECT_CALL(*vda_, Initialize(_, vdavd_.get())).WillOnce(Return(true));
+ EXPECT_CALL(*vda_, Initialize(_, client_)).WillOnce(Return(true));
EXPECT_CALL(*vda_, TryToSetupDecodeOnSeparateThread(_, _))
.WillOnce(Return(GetParam()));
EXPECT_CALL(init_cb_, Run(IsOkStatus()));
@@ -304,7 +307,7 @@ class VdaVideoDecoderTest : public testing::TestWithParam<bool> {
testing::StrictMock<MockVideoDecodeAccelerator>* vda_;
std::unique_ptr<VideoDecodeAccelerator> owned_vda_;
scoped_refptr<PictureBufferManager> pbm_;
- std::unique_ptr<VdaVideoDecoder, std::default_delete<VideoDecoder>> vdavd_;
+ std::unique_ptr<AsyncDestroyVideoDecoder<VdaVideoDecoder>> vdavd_;
VideoDecodeAccelerator::Client* client_;
uint64_t next_release_count_ = 1;
@@ -341,7 +344,7 @@ TEST_P(VdaVideoDecoderTest, Initialize_UnsupportedCodec) {
}
TEST_P(VdaVideoDecoderTest, Initialize_RejectedByVda) {
- EXPECT_CALL(*vda_, Initialize(_, vdavd_.get())).WillOnce(Return(false));
+ EXPECT_CALL(*vda_, Initialize(_, client_)).WillOnce(Return(false));
InitializeWithConfig(VideoDecoderConfig(
kCodecVP9, VP9PROFILE_PROFILE0, VideoDecoderConfig::AlphaMode::kIsOpaque,
VideoColorSpace::REC709(), kNoTransformation, gfx::Size(1920, 1088),
@@ -423,7 +426,7 @@ TEST_P(VdaVideoDecoderTest, Decode_OutputAndDismiss) {
TEST_P(VdaVideoDecoderTest, Decode_Output_MaintainsAspect) {
// Initialize with a config that has a 2:1 pixel aspect ratio.
- EXPECT_CALL(*vda_, Initialize(_, vdavd_.get())).WillOnce(Return(true));
+ EXPECT_CALL(*vda_, Initialize(_, client_)).WillOnce(Return(true));
EXPECT_CALL(*vda_, TryToSetupDecodeOnSeparateThread(_, _))
.WillOnce(Return(GetParam()));
InitializeWithConfig(VideoDecoderConfig(
diff --git a/chromium/media/gpu/mac/vt_video_encode_accelerator_mac.cc b/chromium/media/gpu/mac/vt_video_encode_accelerator_mac.cc
index 84ba11e6f85..2fa16cecbe4 100644
--- a/chromium/media/gpu/mac/vt_video_encode_accelerator_mac.cc
+++ b/chromium/media/gpu/mac/vt_video_encode_accelerator_mac.cc
@@ -6,6 +6,7 @@
#include <memory>
+#include "base/logging.h"
#include "base/memory/shared_memory_mapping.h"
#include "base/memory/unsafe_shared_memory_region.h"
#include "base/threading/thread_task_runner_handle.h"
@@ -267,11 +268,8 @@ void VTVideoEncodeAccelerator::EncodeTask(scoped_refptr<VideoFrame> frame,
kVTEncodeFrameOptionKey_ForceKeyFrame,
force_keyframe ? kCFBooleanTrue : kCFBooleanFalse);
- base::TimeTicks ref_time;
- if (!frame->metadata()->GetTimeTicks(VideoFrameMetadata::REFERENCE_TIME,
- &ref_time)) {
- ref_time = base::TimeTicks::Now();
- }
+ base::TimeTicks ref_time =
+ frame->metadata()->reference_time.value_or(base::TimeTicks::Now());
auto timestamp_cm =
CMTimeMake(frame->timestamp().InMicroseconds(), USEC_PER_SEC);
// Wrap information we'll need after the frame is encoded in a heap object.
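
The rewrite collapses the bool-returning out-parameter getter into one expression over an optional field. A stand-in illustration (illustrative names, not the real VideoFrameMetadata API), using std::optional in place of base::Optional:

    #include <optional>

    struct NewStyleMetadata {
      std::optional<double> reference_time;  // Unset when never populated.
    };

    double RefTimeOrFallback(const NewStyleMetadata& metadata, double now) {
      // value_or() replaces the declare/GetTimeTicks()/fallback-assign dance.
      return metadata.reference_time.value_or(now);
    }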
diff --git a/chromium/media/gpu/test/BUILD.gn b/chromium/media/gpu/test/BUILD.gn
index d1465599fa5..dde96cc47fe 100644
--- a/chromium/media/gpu/test/BUILD.gn
+++ b/chromium/media/gpu/test/BUILD.gn
@@ -69,10 +69,14 @@ source_set("test_helpers") {
"video_test_helpers.cc",
"video_test_helpers.h",
]
- public_deps = [ ":helpers" ]
+ public_deps = [
+ ":helpers",
+ "//media:test_support",
+ ]
deps = [
"//media/gpu",
"//testing/gtest",
+ "//third_party/libyuv",
]
if (use_ozone) {
deps += [ "//ui/ozone" ]
@@ -134,7 +138,14 @@ static_library("video_player_test_environment") {
static_library("video_encoder") {
testonly = true
sources = [
+ "bitstream_helpers.cc",
"bitstream_helpers.h",
+ "video_encoder/bitstream_file_writer.cc",
+ "video_encoder/bitstream_file_writer.h",
+ "video_encoder/bitstream_validator.cc",
+ "video_encoder/bitstream_validator.h",
+ "video_encoder/decoder_buffer_validator.cc",
+ "video_encoder/decoder_buffer_validator.h",
"video_encoder/video_encoder.cc",
"video_encoder/video_encoder.h",
"video_encoder/video_encoder_client.cc",
@@ -143,6 +154,7 @@ static_library("video_encoder") {
deps = [
":test_helpers",
"//media/gpu",
+ "//media/parsers",
"//testing/gtest:gtest",
]
}
diff --git a/chromium/media/gpu/v4l2/BUILD.gn b/chromium/media/gpu/v4l2/BUILD.gn
index 88b72e36308..38d0bb24ef5 100644
--- a/chromium/media/gpu/v4l2/BUILD.gn
+++ b/chromium/media/gpu/v4l2/BUILD.gn
@@ -41,16 +41,18 @@ source_set("v4l2") {
"v4l2_image_processor_backend.h",
"v4l2_slice_video_decode_accelerator.cc",
"v4l2_slice_video_decode_accelerator.h",
- "v4l2_slice_video_decoder.cc",
- "v4l2_slice_video_decoder.h",
"v4l2_stateful_workaround.cc",
"v4l2_stateful_workaround.h",
"v4l2_vda_helpers.cc",
"v4l2_vda_helpers.h",
"v4l2_video_decode_accelerator.cc",
"v4l2_video_decode_accelerator.h",
+ "v4l2_video_decoder.cc",
+ "v4l2_video_decoder.h",
"v4l2_video_decoder_backend.cc",
"v4l2_video_decoder_backend.h",
+ "v4l2_video_decoder_backend_stateful.cc",
+ "v4l2_video_decoder_backend_stateful.h",
"v4l2_video_decoder_backend_stateless.cc",
"v4l2_video_decoder_backend_stateless.h",
"v4l2_video_encode_accelerator.cc",
diff --git a/chromium/media/gpu/v4l2/v4l2_decode_surface.cc b/chromium/media/gpu/v4l2/v4l2_decode_surface.cc
index d4593868a6c..77206ba81c1 100644
--- a/chromium/media/gpu/v4l2/v4l2_decode_surface.cc
+++ b/chromium/media/gpu/v4l2/v4l2_decode_surface.cc
@@ -127,7 +127,7 @@ bool V4L2ConfigStoreDecodeSurface::Submit() {
case V4L2_MEMORY_MMAP:
return std::move(output_buffer()).QueueMMap();
case V4L2_MEMORY_DMABUF:
- return std::move(output_buffer()).QueueDMABuf(video_frame()->DmabufFds());
+ return std::move(output_buffer()).QueueDMABuf(video_frame());
default:
NOTREACHED() << "We should only use MMAP or DMABUF.";
}
@@ -174,8 +174,7 @@ bool V4L2RequestDecodeSurface::Submit() {
result = std::move(output_buffer()).QueueMMap();
break;
case V4L2_MEMORY_DMABUF:
- result = std::move(output_buffer())
- .QueueDMABuf(video_frame()->DmabufFds());
+ result = std::move(output_buffer()).QueueDMABuf(video_frame());
break;
default:
NOTREACHED() << "We should only use MMAP or DMABUF.";
diff --git a/chromium/media/gpu/v4l2/v4l2_device.cc b/chromium/media/gpu/v4l2/v4l2_device.cc
index 9b81f8046f2..ba9b5184914 100644
--- a/chromium/media/gpu/v4l2/v4l2_device.cc
+++ b/chromium/media/gpu/v4l2/v4l2_device.cc
@@ -27,6 +27,7 @@
#include "media/base/color_plane_layout.h"
#include "media/base/video_types.h"
#include "media/gpu/chromeos/fourcc.h"
+#include "media/gpu/chromeos/platform_video_frame_utils.h"
#include "media/gpu/macros.h"
#include "media/gpu/v4l2/generic_v4l2_device.h"
#include "ui/gfx/native_pixmap_handle.h"
@@ -313,7 +314,7 @@ class V4L2BufferRefBase {
base::WeakPtr<V4L2Queue> queue);
~V4L2BufferRefBase();
- bool QueueBuffer();
+ bool QueueBuffer(scoped_refptr<VideoFrame> video_frame);
void* GetPlaneMapping(const size_t plane);
scoped_refptr<VideoFrame> GetVideoFrame();
@@ -368,13 +369,13 @@ V4L2BufferRefBase::~V4L2BufferRefBase() {
return_to_->ReturnBuffer(BufferId());
}
-bool V4L2BufferRefBase::QueueBuffer() {
+bool V4L2BufferRefBase::QueueBuffer(scoped_refptr<VideoFrame> video_frame) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
if (!queue_)
return false;
- queued = queue_->QueueBuffer(&v4l2_buffer_);
+ queued = queue_->QueueBuffer(&v4l2_buffer_, std::move(video_frame));
return queued;
}
@@ -484,14 +485,15 @@ enum v4l2_memory V4L2WritableBufferRef::Memory() const {
return static_cast<enum v4l2_memory>(buffer_data_->v4l2_buffer_.memory);
}
-bool V4L2WritableBufferRef::DoQueue(V4L2RequestRef* request_ref) && {
+bool V4L2WritableBufferRef::DoQueue(V4L2RequestRef* request_ref,
+ scoped_refptr<VideoFrame> video_frame) && {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
DCHECK(buffer_data_);
if (request_ref && buffer_data_->queue_->SupportsRequests())
request_ref->ApplyQueueBuffer(&(buffer_data_->v4l2_buffer_));
- bool queued = buffer_data_->QueueBuffer();
+ bool queued = buffer_data_->QueueBuffer(std::move(video_frame));
// Clear our own reference.
buffer_data_.reset();
@@ -512,7 +514,7 @@ bool V4L2WritableBufferRef::QueueMMap(
return false;
}
- return std::move(self).DoQueue(request_ref);
+ return std::move(self).DoQueue(request_ref, nullptr);
}
bool V4L2WritableBufferRef::QueueUserPtr(
@@ -539,7 +541,7 @@ bool V4L2WritableBufferRef::QueueUserPtr(
self.buffer_data_->v4l2_buffer_.m.planes[i].m.userptr =
reinterpret_cast<unsigned long>(ptrs[i]);
- return std::move(self).DoQueue(request_ref);
+ return std::move(self).DoQueue(request_ref, nullptr);
}
bool V4L2WritableBufferRef::QueueDMABuf(
@@ -563,7 +565,52 @@ bool V4L2WritableBufferRef::QueueDMABuf(
for (size_t i = 0; i < num_planes; i++)
self.buffer_data_->v4l2_buffer_.m.planes[i].m.fd = fds[i].get();
- return std::move(self).DoQueue(request_ref);
+ return std::move(self).DoQueue(request_ref, nullptr);
+}
+
+bool V4L2WritableBufferRef::QueueDMABuf(scoped_refptr<VideoFrame> video_frame,
+ V4L2RequestRef* request_ref) && {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DCHECK(buffer_data_);
+
+  // Move ourselves so that our data gets freed no matter when we return.
+ V4L2WritableBufferRef self(std::move(*this));
+
+ if (self.Memory() != V4L2_MEMORY_DMABUF) {
+ VLOGF(1) << "Called on invalid buffer type!";
+ return false;
+ }
+
+  // TODO(andrescj): consider replacing this with a DCHECK.
+ if (video_frame->storage_type() != VideoFrame::STORAGE_GPU_MEMORY_BUFFER &&
+ video_frame->storage_type() != VideoFrame::STORAGE_DMABUFS) {
+ VLOGF(1) << "Only GpuMemoryBuffer and dma-buf VideoFrames are supported";
+ return false;
+ }
+
+  // The FDs duped by CreateGpuMemoryBufferHandle() will be closed after the
+  // call to DoQueue(), which uses the VIDIOC_QBUF ioctl and so ends up
+  // increasing the reference count of the dma-buf. Closing the FDs is
+  // therefore safe.
+ // TODO(andrescj): for dma-buf VideoFrames, duping the FDs is unnecessary.
+ // Consider handling that path separately.
+ gfx::GpuMemoryBufferHandle gmb_handle =
+ CreateGpuMemoryBufferHandle(video_frame.get());
+ if (gmb_handle.type != gfx::GpuMemoryBufferType::NATIVE_PIXMAP) {
+ VLOGF(1) << "Failed to create GpuMemoryBufferHandle for frame!";
+ return false;
+ }
+ const std::vector<gfx::NativePixmapPlane>& planes =
+ gmb_handle.native_pixmap_handle.planes;
+
+ if (!self.buffer_data_->CheckNumFDsForFormat(planes.size()))
+ return false;
+
+ size_t num_planes = self.PlanesCount();
+ for (size_t i = 0; i < num_planes; i++)
+ self.buffer_data_->v4l2_buffer_.m.planes[i].m.fd = planes[i].fd.get();
+
+ return std::move(self).DoQueue(request_ref, std::move(video_frame));
}
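
Closing the duped FDs is safe because VIDIOC_QBUF takes its own reference on the dma-bufs, but the frame's backing memory must still outlive the queue slot; that is what threading |video_frame| through DoQueue() into the queue's bookkeeping achieves (see the std::set to std::map change in v4l2_device.h below). A reduced sketch of that bookkeeping, with std::shared_ptr standing in for scoped_refptr:

    #include <cstddef>
    #include <map>
    #include <memory>
    #include <utility>

    struct Frame {};  // Stand-in for VideoFrame.

    class Queue {
     public:
      // Keeps |frame| (possibly null) alive while buffer |index| is queued.
      void QueueBuffer(std::size_t index, std::shared_ptr<Frame> frame) {
        queued_buffers_.emplace(index, std::move(frame));
      }

      // On dequeue the stored frame is handed to the readable buffer ref, so
      // it stays alive until every reader reference goes out of scope.
      std::shared_ptr<Frame> DequeueBuffer(std::size_t index) {
        auto it = queued_buffers_.find(index);
        if (it == queued_buffers_.end())
          return nullptr;
        std::shared_ptr<Frame> frame = std::move(it->second);
        queued_buffers_.erase(it);
        return frame;
      }

     private:
      std::map<std::size_t, std::shared_ptr<Frame>> queued_buffers_;
    };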
bool V4L2WritableBufferRef::QueueDMABuf(
@@ -587,7 +634,7 @@ bool V4L2WritableBufferRef::QueueDMABuf(
for (size_t i = 0; i < num_planes; i++)
self.buffer_data_->v4l2_buffer_.m.planes[i].m.fd = planes[i].fd.get();
- return std::move(self).DoQueue(request_ref);
+ return std::move(self).DoQueue(request_ref, nullptr);
}
size_t V4L2WritableBufferRef::PlanesCount() const {
@@ -709,14 +756,20 @@ void V4L2WritableBufferRef::SetConfigStore(uint32_t config_store) {
}
V4L2ReadableBuffer::V4L2ReadableBuffer(const struct v4l2_buffer& v4l2_buffer,
- base::WeakPtr<V4L2Queue> queue)
+ base::WeakPtr<V4L2Queue> queue,
+ scoped_refptr<VideoFrame> video_frame)
: buffer_data_(
- std::make_unique<V4L2BufferRefBase>(v4l2_buffer, std::move(queue))) {
+ std::make_unique<V4L2BufferRefBase>(v4l2_buffer, std::move(queue))),
+ video_frame_(std::move(video_frame)) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
}
scoped_refptr<VideoFrame> V4L2ReadableBuffer::GetVideoFrame() {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DCHECK(buffer_data_);
+
+ if (buffer_data_->v4l2_buffer_.memory == V4L2_MEMORY_DMABUF && video_frame_)
+ return video_frame_;
return buffer_data_->GetVideoFrame();
}
@@ -806,8 +859,10 @@ class V4L2BufferRefFactory {
static V4L2ReadableBufferRef CreateReadableRef(
const struct v4l2_buffer& v4l2_buffer,
- base::WeakPtr<V4L2Queue> queue) {
- return new V4L2ReadableBuffer(v4l2_buffer, std::move(queue));
+ base::WeakPtr<V4L2Queue> queue,
+ scoped_refptr<VideoFrame> video_frame) {
+ return new V4L2ReadableBuffer(v4l2_buffer, std::move(queue),
+ std::move(video_frame));
}
};
@@ -1070,7 +1125,8 @@ base::Optional<V4L2WritableBufferRef> V4L2Queue::GetFreeBuffer() {
weak_this_factory_.GetWeakPtr());
}
-bool V4L2Queue::QueueBuffer(struct v4l2_buffer* v4l2_buffer) {
+bool V4L2Queue::QueueBuffer(struct v4l2_buffer* v4l2_buffer,
+ scoped_refptr<VideoFrame> video_frame) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
int ret = device_->Ioctl(VIDIOC_QBUF, v4l2_buffer);
@@ -1079,7 +1135,8 @@ bool V4L2Queue::QueueBuffer(struct v4l2_buffer* v4l2_buffer) {
return false;
}
- auto inserted = queued_buffers_.emplace(v4l2_buffer->index);
+ auto inserted =
+ queued_buffers_.emplace(v4l2_buffer->index, std::move(video_frame));
DCHECK_EQ(inserted.second, true);
device_->SchedulePoll();
@@ -1127,15 +1184,16 @@ std::pair<bool, V4L2ReadableBufferRef> V4L2Queue::DequeueBuffer() {
auto it = queued_buffers_.find(v4l2_buffer.index);
DCHECK(it != queued_buffers_.end());
- queued_buffers_.erase(*it);
+ scoped_refptr<VideoFrame> queued_frame = std::move(it->second);
+ queued_buffers_.erase(it);
if (QueuedBuffersCount() > 0)
device_->SchedulePoll();
DCHECK(free_buffers_);
- return std::make_pair(true,
- V4L2BufferRefFactory::CreateReadableRef(
- v4l2_buffer, weak_this_factory_.GetWeakPtr()));
+ return std::make_pair(true, V4L2BufferRefFactory::CreateReadableRef(
+ v4l2_buffer, weak_this_factory_.GetWeakPtr(),
+ std::move(queued_frame)));
}
bool V4L2Queue::IsStreaming() const {
@@ -1176,9 +1234,9 @@ bool V4L2Queue::Streamoff() {
return false;
}
- for (const auto& buffer_id : queued_buffers_) {
+ for (const auto& it : queued_buffers_) {
DCHECK(free_buffers_);
- free_buffers_->ReturnBuffer(buffer_id);
+ free_buffers_->ReturnBuffer(it.first);
}
queued_buffers_.clear();
@@ -1332,6 +1390,10 @@ VideoCodecProfile V4L2Device::V4L2ProfileToVideoCodecProfile(VideoCodec codec,
return H264PROFILE_EXTENDED;
case V4L2_MPEG_VIDEO_H264_PROFILE_HIGH:
return H264PROFILE_HIGH;
+ case V4L2_MPEG_VIDEO_H264_PROFILE_STEREO_HIGH:
+ return H264PROFILE_STEREOHIGH;
+ case V4L2_MPEG_VIDEO_H264_PROFILE_MULTIVIEW_HIGH:
+ return H264PROFILE_MULTIVIEWHIGH;
}
break;
case kCodecVP8:
diff --git a/chromium/media/gpu/v4l2/v4l2_device.h b/chromium/media/gpu/v4l2/v4l2_device.h
index 310d4a4a1a5..bdd8585aacd 100644
--- a/chromium/media/gpu/v4l2/v4l2_device.h
+++ b/chromium/media/gpu/v4l2/v4l2_device.h
@@ -129,6 +129,15 @@ class MEDIA_GPU_EXPORT V4L2WritableBufferRef {
// list.
bool QueueDMABuf(const std::vector<gfx::NativePixmapPlane>& planes,
V4L2RequestRef* request_ref = nullptr) &&;
+ // Queue a |video_frame| using its file descriptors as DMABUFs. The VideoFrame
+ // must have been constructed from its file descriptors.
+  // Unlike the plain-FD overload above, this method keeps a reference to
+  // |video_frame| and makes it available again when the buffer is dequeued,
+  // through |V4L2ReadableBufferRef::GetVideoFrame()|. |video_frame| is thus
+  // guaranteed to remain alive until either all the |V4L2ReadableBufferRef|s
+  // for the dequeued buffer go out of scope, or |V4L2Queue::Streamoff()| is
+  // called.
+ bool QueueDMABuf(scoped_refptr<VideoFrame> video_frame,
+ V4L2RequestRef* request_ref = nullptr) &&;
// Returns the number of planes in this buffer.
size_t PlanesCount() const;
@@ -180,7 +189,8 @@ class MEDIA_GPU_EXPORT V4L2WritableBufferRef {
// filled.
// When requests are supported, a |request_ref| can be passed along this
// the buffer to be submitted.
- bool DoQueue(V4L2RequestRef* request_ref) &&;
+ bool DoQueue(V4L2RequestRef* request_ref,
+ scoped_refptr<VideoFrame> video_frame) &&;
V4L2WritableBufferRef(const struct v4l2_buffer& v4l2_buffer,
base::WeakPtr<V4L2Queue> queue);
@@ -245,9 +255,14 @@ class MEDIA_GPU_EXPORT V4L2ReadableBuffer
~V4L2ReadableBuffer();
V4L2ReadableBuffer(const struct v4l2_buffer& v4l2_buffer,
- base::WeakPtr<V4L2Queue> queue);
+ base::WeakPtr<V4L2Queue> queue,
+ scoped_refptr<VideoFrame> video_frame);
std::unique_ptr<V4L2BufferRefBase> buffer_data_;
+ // If this buffer was a DMABUF buffer queued with
+ // QueueDMABuf(scoped_refptr<VideoFrame>), then this will hold the VideoFrame
+ // that has been passed at the time of queueing.
+ scoped_refptr<VideoFrame> video_frame_;
SEQUENCE_CHECKER(sequence_checker_);
DISALLOW_COPY_AND_ASSIGN(V4L2ReadableBuffer);
@@ -386,7 +401,8 @@ class MEDIA_GPU_EXPORT V4L2Queue
~V4L2Queue();
// Called when clients request a buffer to be queued.
- bool QueueBuffer(struct v4l2_buffer* v4l2_buffer);
+ bool QueueBuffer(struct v4l2_buffer* v4l2_buffer,
+ scoped_refptr<VideoFrame> video_frame);
const enum v4l2_buf_type type_;
enum v4l2_memory memory_ = V4L2_MEMORY_MMAP;
@@ -402,8 +418,10 @@ class MEDIA_GPU_EXPORT V4L2Queue
  // Buffers that are available for the client to get and submit.
  // Buffers in this list are not referenced by anyone other than ourselves.
scoped_refptr<V4L2BuffersList> free_buffers_;
- // Buffers that have been queued by the client, and not dequeued yet.
- std::set<size_t> queued_buffers_;
+  // Buffers that have been queued by the client and not dequeued yet. The
+  // value is the VideoFrame that was passed when the buffer was queued, if
+  // any.
+ std::map<size_t, scoped_refptr<VideoFrame>> queued_buffers_;
scoped_refptr<V4L2Device> device_;
// Callback to call in this queue's destructor.
diff --git a/chromium/media/gpu/v4l2/v4l2_image_processor_backend.cc b/chromium/media/gpu/v4l2/v4l2_image_processor_backend.cc
index 6498537e426..2a062f8b1d4 100644
--- a/chromium/media/gpu/v4l2/v4l2_image_processor_backend.cc
+++ b/chromium/media/gpu/v4l2/v4l2_image_processor_backend.cc
@@ -126,11 +126,13 @@ V4L2ImageProcessorBackend::V4L2ImageProcessorBackend(
v4l2_memory input_memory_type,
v4l2_memory output_memory_type,
OutputMode output_mode,
+ VideoRotation relative_rotation,
size_t num_buffers,
ErrorCB error_cb)
: ImageProcessorBackend(input_config,
output_config,
output_mode,
+ relative_rotation,
std::move(error_cb),
std::move(backend_task_runner)),
input_memory_type_(input_memory_type),
@@ -228,12 +230,13 @@ std::unique_ptr<ImageProcessorBackend> V4L2ImageProcessorBackend::Create(
const PortConfig& input_config,
const PortConfig& output_config,
const std::vector<OutputMode>& preferred_output_modes,
+ VideoRotation relative_rotation,
ErrorCB error_cb,
scoped_refptr<base::SequencedTaskRunner> backend_task_runner) {
for (const auto& output_mode : preferred_output_modes) {
auto image_processor = V4L2ImageProcessorBackend::CreateWithOutputMode(
- device, num_buffers, input_config, output_config, output_mode, error_cb,
- backend_task_runner);
+ device, num_buffers, input_config, output_config, output_mode,
+ relative_rotation, error_cb, backend_task_runner);
if (image_processor)
return image_processor;
}
@@ -249,6 +252,7 @@ V4L2ImageProcessorBackend::CreateWithOutputMode(
const PortConfig& input_config,
const PortConfig& output_config,
const OutputMode& output_mode,
+ VideoRotation relative_rotation,
ErrorCB error_cb,
scoped_refptr<base::SequencedTaskRunner> backend_task_runner) {
VLOGF(2);
@@ -308,6 +312,12 @@ V4L2ImageProcessorBackend::CreateWithOutputMode(
return nullptr;
}
+  // The V4L2 image processor doesn't support rotation yet, so return nullptr.
+ if (relative_rotation != VIDEO_ROTATION_0) {
+ VLOGF(1) << "Currently V4L2IP doesn't support rotation";
+ return nullptr;
+ }
+
if (!device->Open(V4L2Device::Type::kImageProcessor,
input_config.fourcc.ToV4L2PixFmt())) {
VLOGF(1) << "Failed to open device with input fourcc: "
@@ -390,8 +400,8 @@ V4L2ImageProcessorBackend::CreateWithOutputMode(
PortConfig(output_config.fourcc, negotiated_output_size,
output_planes, output_config.visible_rect,
{output_storage_type}),
- input_memory_type, output_memory_type, output_mode, num_buffers,
- std::move(error_cb)));
+ input_memory_type, output_memory_type, output_mode, relative_rotation,
+ num_buffers, std::move(error_cb)));
// Initialize at |backend_task_runner_|.
bool success = false;
diff --git a/chromium/media/gpu/v4l2/v4l2_image_processor_backend.h b/chromium/media/gpu/v4l2/v4l2_image_processor_backend.h
index bd1c78ac4e9..4652bda62b7 100644
--- a/chromium/media/gpu/v4l2/v4l2_image_processor_backend.h
+++ b/chromium/media/gpu/v4l2/v4l2_image_processor_backend.h
@@ -49,6 +49,7 @@ class MEDIA_GPU_EXPORT V4L2ImageProcessorBackend
const PortConfig& input_config,
const PortConfig& output_config,
const std::vector<OutputMode>& preferred_output_modes,
+ VideoRotation relative_rotation,
ErrorCB error_cb,
scoped_refptr<base::SequencedTaskRunner> backend_task_runner);
@@ -104,6 +105,7 @@ class MEDIA_GPU_EXPORT V4L2ImageProcessorBackend
const PortConfig& input_config,
const PortConfig& output_config,
const OutputMode& preferred_output_modes,
+ VideoRotation relative_rotation,
ErrorCB error_cb,
scoped_refptr<base::SequencedTaskRunner> backend_task_runner);
@@ -115,6 +117,7 @@ class MEDIA_GPU_EXPORT V4L2ImageProcessorBackend
v4l2_memory input_memory_type,
v4l2_memory output_memory_type,
OutputMode output_mode,
+ VideoRotation relative_rotation,
size_t num_buffers,
ErrorCB error_cb);
~V4L2ImageProcessorBackend() override;
diff --git a/chromium/media/gpu/v4l2/v4l2_slice_video_decode_accelerator.cc b/chromium/media/gpu/v4l2/v4l2_slice_video_decode_accelerator.cc
index dd2c2e853eb..594081c44f8 100644
--- a/chromium/media/gpu/v4l2/v4l2_slice_video_decode_accelerator.cc
+++ b/chromium/media/gpu/v4l2/v4l2_slice_video_decode_accelerator.cc
@@ -592,8 +592,8 @@ bool V4L2SliceVideoDecodeAccelerator::CreateImageProcessor() {
image_processor_ = v4l2_vda_helpers::CreateImageProcessor(
*output_format_fourcc_, *gl_image_format_fourcc_, coded_size_,
gl_image_size_, GetRectSizeFromOrigin(decoder_->GetVisibleRect()),
- output_buffer_map_.size(), image_processor_device_,
- image_processor_output_mode,
+ VideoFrame::StorageType::STORAGE_DMABUFS, output_buffer_map_.size(),
+ image_processor_device_, image_processor_output_mode,
// Unretained(this) is safe for ErrorCB because |decoder_thread_| is owned
// by this V4L2VideoDecodeAccelerator and |this| must be valid when
// ErrorCB is executed.
diff --git a/chromium/media/gpu/v4l2/v4l2_vda_helpers.cc b/chromium/media/gpu/v4l2/v4l2_vda_helpers.cc
index f520d93be0f..558b694af86 100644
--- a/chromium/media/gpu/v4l2/v4l2_vda_helpers.cc
+++ b/chromium/media/gpu/v4l2/v4l2_vda_helpers.cc
@@ -71,6 +71,7 @@ std::unique_ptr<ImageProcessor> CreateImageProcessor(
const gfx::Size& vda_output_coded_size,
const gfx::Size& ip_output_coded_size,
const gfx::Size& visible_size,
+ VideoFrame::StorageType output_storage_type,
size_t nb_buffers,
scoped_refptr<V4L2Device> image_processor_device,
ImageProcessor::OutputMode image_processor_output_mode,
@@ -86,8 +87,8 @@ std::unique_ptr<ImageProcessor> CreateImageProcessor(
{VideoFrame::STORAGE_DMABUFS}),
ImageProcessor::PortConfig(ip_output_format, ip_output_coded_size, {},
gfx::Rect(visible_size),
- {VideoFrame::STORAGE_DMABUFS}),
- {image_processor_output_mode}, std::move(error_cb),
+ {output_storage_type}),
+ {image_processor_output_mode}, VIDEO_ROTATION_0, std::move(error_cb),
std::move(client_task_runner));
if (!image_processor)
return nullptr;
@@ -174,6 +175,8 @@ bool InputBufferFragmentSplitter::IsPartialFramePending() const {
H264InputBufferFragmentSplitter::H264InputBufferFragmentSplitter()
: h264_parser_(new H264Parser()) {}
+H264InputBufferFragmentSplitter::~H264InputBufferFragmentSplitter() = default;
+
bool H264InputBufferFragmentSplitter::AdvanceFrameFragment(const uint8_t* data,
size_t size,
size_t* endpos) {
diff --git a/chromium/media/gpu/v4l2/v4l2_vda_helpers.h b/chromium/media/gpu/v4l2/v4l2_vda_helpers.h
index b0c780cd734..05b74a3205d 100644
--- a/chromium/media/gpu/v4l2/v4l2_vda_helpers.h
+++ b/chromium/media/gpu/v4l2/v4l2_vda_helpers.h
@@ -41,6 +41,7 @@ base::Optional<Fourcc> FindImageProcessorOutputFormat(V4L2Device* ip_device);
// |ip_output_coded_size| is the coded size of the output buffers that the IP
// must produce.
// |visible_size| is the visible size of both the input and output buffers.
+// |output_storage_type| indicates what type of VideoFrame is used for output.
// |nb_buffers| is the exact number of output buffers that the IP must create.
// |image_processor_output_mode| specifies whether the IP must allocate its
// own buffers or rely on imported ones.
@@ -53,6 +54,7 @@ std::unique_ptr<ImageProcessor> CreateImageProcessor(
const gfx::Size& vda_output_coded_size,
const gfx::Size& ip_output_coded_size,
const gfx::Size& visible_size,
+ VideoFrame::StorageType output_storage_type,
size_t nb_buffers,
scoped_refptr<V4L2Device> image_processor_device,
ImageProcessor::OutputMode image_processor_output_mode,
@@ -97,6 +99,7 @@ class InputBufferFragmentSplitter {
class H264InputBufferFragmentSplitter : public InputBufferFragmentSplitter {
public:
explicit H264InputBufferFragmentSplitter();
+ ~H264InputBufferFragmentSplitter() override;
bool AdvanceFrameFragment(const uint8_t* data,
size_t size,
diff --git a/chromium/media/gpu/v4l2/v4l2_video_decode_accelerator.cc b/chromium/media/gpu/v4l2/v4l2_video_decode_accelerator.cc
index 4a581cab841..e844687937b 100644
--- a/chromium/media/gpu/v4l2/v4l2_video_decode_accelerator.cc
+++ b/chromium/media/gpu/v4l2/v4l2_video_decode_accelerator.cc
@@ -1523,8 +1523,7 @@ bool V4L2VideoDecodeAccelerator::EnqueueOutputRecord(
ret = std::move(buffer).QueueMMap();
break;
case V4L2_MEMORY_DMABUF:
- ret = std::move(buffer).QueueDMABuf(
- output_record.output_frame->DmabufFds());
+ ret = std::move(buffer).QueueDMABuf(output_record.output_frame);
break;
default:
NOTREACHED();
@@ -1880,6 +1879,10 @@ bool V4L2VideoDecodeAccelerator::StartDevicePoll() {
NOTIFY_ERROR(PLATFORM_FAILURE);
return false;
}
+ cancelable_service_device_task_.Reset(base::BindRepeating(
+ &V4L2VideoDecodeAccelerator::ServiceDeviceTask, base::Unretained(this)));
+ cancelable_service_device_task_callback_ =
+ cancelable_service_device_task_.callback();
device_poll_thread_.task_runner()->PostTask(
FROM_HERE, base::BindOnce(&V4L2VideoDecodeAccelerator::DevicePollTask,
base::Unretained(this), 0));
@@ -1901,6 +1904,10 @@ bool V4L2VideoDecodeAccelerator::StopDevicePoll() {
return false;
}
device_poll_thread_.Stop();
+  // Must be done after the Stop() above to ensure |device_poll_thread_| is
+  // no longer copying |cancelable_service_device_task_callback_|.
+ cancelable_service_device_task_.Cancel();
+ cancelable_service_device_task_callback_ = {};
// Clear the interrupt now, to be sure.
if (!device_->ClearDevicePollInterrupt()) {
PLOG(ERROR) << "ClearDevicePollInterrupt: failed";
@@ -2027,8 +2034,8 @@ void V4L2VideoDecodeAccelerator::DevicePollTask(bool poll_device) {
// All processing should happen on ServiceDeviceTask(), since we shouldn't
// touch decoder state from this thread.
decoder_thread_.task_runner()->PostTask(
- FROM_HERE, base::BindOnce(&V4L2VideoDecodeAccelerator::ServiceDeviceTask,
- base::Unretained(this), event_pending));
+ FROM_HERE,
+ base::BindOnce(cancelable_service_device_task_callback_, event_pending));
}
bool V4L2VideoDecodeAccelerator::IsDestroyPending() {
@@ -2314,9 +2321,9 @@ bool V4L2VideoDecodeAccelerator::CreateImageProcessor() {
image_processor_ = v4l2_vda_helpers::CreateImageProcessor(
*output_format_fourcc_, *egl_image_format_fourcc_, coded_size_,
- egl_image_size_, visible_size_, output_buffer_map_.size(),
- image_processor_device_, image_processor_output_mode,
- decoder_thread_.task_runner(),
+ egl_image_size_, visible_size_, VideoFrame::StorageType::STORAGE_DMABUFS,
+ output_buffer_map_.size(), image_processor_device_,
+ image_processor_output_mode, decoder_thread_.task_runner(),
// Unretained(this) is safe for ErrorCB because |decoder_thread_| is owned
// by this V4L2VideoDecodeAccelerator and |this| must be valid when
// ErrorCB is executed.
diff --git a/chromium/media/gpu/v4l2/v4l2_video_decode_accelerator.h b/chromium/media/gpu/v4l2/v4l2_video_decode_accelerator.h
index e4d27c1284b..96a23510f18 100644
--- a/chromium/media/gpu/v4l2/v4l2_video_decode_accelerator.h
+++ b/chromium/media/gpu/v4l2/v4l2_video_decode_accelerator.h
@@ -20,6 +20,7 @@
#include <vector>
#include "base/callback_forward.h"
+#include "base/cancelable_callback.h"
#include "base/containers/queue.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
@@ -465,6 +466,15 @@ class MEDIA_GPU_EXPORT V4L2VideoDecodeAccelerator
// Decoder state machine state.
State decoder_state_;
+ // Cancelable callback for running ServiceDeviceTask(). Must only be accessed
+ // on |decoder_thread_|.
+ base::CancelableRepeatingCallback<void(bool)> cancelable_service_device_task_;
+ // Concrete callback from |cancelable_service_device_task_| that can be copied
+ // on |device_poll_thread_|. This exists because
+ // CancelableRepeatingCallback::callback() creates a WeakPtr internally, which
+ // must be created/destroyed from the same thread.
+ base::RepeatingCallback<void(bool)> cancelable_service_device_task_callback_;
+
// Waitable event signaled when the decoder is destroying.
base::WaitableEvent destroy_pending_;
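The two members above implement a pattern worth spelling out. A minimal sketch follows, not taken from the patch, with hypothetical free functions standing in for the decoder; it assumes only the documented base/cancelable_callback.h behavior, namely that callback() creates a WeakPtr-backed callback bound to the owning thread, while plain RepeatingCallback copies are safe to make anywhere:

  #include "base/bind.h"
  #include "base/callback.h"
  #include "base/cancelable_callback.h"
  #include "base/single_thread_task_runner.h"

  void ServiceTask(bool event);  // Runs on the owner thread.

  base::CancelableRepeatingCallback<void(bool)> g_cancelable;  // Owner only.
  base::RepeatingCallback<void(bool)> g_copyable;              // Any thread.

  // Owner thread: arm the cancelable callback, cache a copyable handle.
  void Start() {
    g_cancelable.Reset(base::BindRepeating(&ServiceTask));
    g_copyable = g_cancelable.callback();  // The WeakPtr is created here.
  }

  // Poll thread: copying |g_copyable| does not touch any WeakPtr state.
  void PostFromPollThread(base::SingleThreadTaskRunner* owner, bool event) {
    owner->PostTask(FROM_HERE, base::BindOnce(g_copyable, event));
  }

  // Owner thread, after the poll thread has been stopped.
  void Stop() {
    g_cancelable.Cancel();  // Pending ServiceTask posts become no-ops.
    g_copyable = {};
  }

This mirrors the ordering enforced in StartDevicePoll()/StopDevicePoll() above: the poll thread is stopped before the cancelable callback is cancelled and the cached copy cleared.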
diff --git a/chromium/media/gpu/v4l2/v4l2_slice_video_decoder.cc b/chromium/media/gpu/v4l2/v4l2_video_decoder.cc
index 28e1b3b7e4a..4c747eb86f3 100644
--- a/chromium/media/gpu/v4l2/v4l2_slice_video_decoder.cc
+++ b/chromium/media/gpu/v4l2/v4l2_video_decoder.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/gpu/v4l2/v4l2_slice_video_decoder.h"
+#include "media/gpu/v4l2/v4l2_video_decoder.h"
#include <algorithm>
@@ -17,6 +17,7 @@
#include "media/gpu/chromeos/fourcc.h"
#include "media/gpu/gpu_video_decode_accelerator_helpers.h"
#include "media/gpu/macros.h"
+#include "media/gpu/v4l2/v4l2_video_decoder_backend_stateful.h"
#include "media/gpu/v4l2/v4l2_video_decoder_backend_stateless.h"
namespace media {
@@ -33,15 +34,14 @@ constexpr size_t kNumInputBuffers = 16;
// Input format V4L2 fourccs this class supports.
constexpr uint32_t kSupportedInputFourccs[] = {
- V4L2_PIX_FMT_H264_SLICE,
- V4L2_PIX_FMT_VP8_FRAME,
- V4L2_PIX_FMT_VP9_FRAME,
+ V4L2_PIX_FMT_H264_SLICE, V4L2_PIX_FMT_VP8_FRAME, V4L2_PIX_FMT_VP9_FRAME,
+ V4L2_PIX_FMT_H264, V4L2_PIX_FMT_VP8, V4L2_PIX_FMT_VP9,
};
} // namespace
// static
-std::unique_ptr<DecoderInterface> V4L2SliceVideoDecoder::Create(
+std::unique_ptr<DecoderInterface> V4L2VideoDecoder::Create(
scoped_refptr<base::SequencedTaskRunner> decoder_task_runner,
base::WeakPtr<DecoderInterface::Client> client) {
DCHECK(decoder_task_runner->RunsTasksInCurrentSequence());
@@ -53,12 +53,12 @@ std::unique_ptr<DecoderInterface> V4L2SliceVideoDecoder::Create(
return nullptr;
}
- return base::WrapUnique<DecoderInterface>(new V4L2SliceVideoDecoder(
+ return base::WrapUnique<DecoderInterface>(new V4L2VideoDecoder(
std::move(decoder_task_runner), std::move(client), std::move(device)));
}
// static
-SupportedVideoDecoderConfigs V4L2SliceVideoDecoder::GetSupportedConfigs() {
+SupportedVideoDecoderConfigs V4L2VideoDecoder::GetSupportedConfigs() {
scoped_refptr<V4L2Device> device = V4L2Device::Create();
if (!device)
return SupportedVideoDecoderConfigs();
@@ -69,7 +69,7 @@ SupportedVideoDecoderConfigs V4L2SliceVideoDecoder::GetSupportedConfigs() {
false);
}
-V4L2SliceVideoDecoder::V4L2SliceVideoDecoder(
+V4L2VideoDecoder::V4L2VideoDecoder(
scoped_refptr<base::SequencedTaskRunner> decoder_task_runner,
base::WeakPtr<DecoderInterface::Client> client,
scoped_refptr<V4L2Device> device)
@@ -82,7 +82,7 @@ V4L2SliceVideoDecoder::V4L2SliceVideoDecoder(
weak_this_ = weak_this_factory_.GetWeakPtr();
}
-V4L2SliceVideoDecoder::~V4L2SliceVideoDecoder() {
+V4L2VideoDecoder::~V4L2VideoDecoder() {
DCHECK_CALLED_ON_VALID_SEQUENCE(decoder_sequence_checker_);
DVLOGF(2);
@@ -93,7 +93,7 @@ V4L2SliceVideoDecoder::~V4L2SliceVideoDecoder() {
}
// Stop and Destroy device.
- StopStreamV4L2Queue();
+ StopStreamV4L2Queue(true);
if (input_queue_) {
input_queue_->DeallocateBuffers();
input_queue_ = nullptr;
@@ -106,9 +106,9 @@ V4L2SliceVideoDecoder::~V4L2SliceVideoDecoder() {
weak_this_factory_.InvalidateWeakPtrs();
}
-void V4L2SliceVideoDecoder::Initialize(const VideoDecoderConfig& config,
- InitCB init_cb,
- const OutputCB& output_cb) {
+void V4L2VideoDecoder::Initialize(const VideoDecoderConfig& config,
+ InitCB init_cb,
+ const OutputCB& output_cb) {
DCHECK_CALLED_ON_VALID_SEQUENCE(decoder_sequence_checker_);
DCHECK(config.IsValidConfig());
DCHECK(state_ == State::kUninitialized || state_ == State::kDecoding);
@@ -116,7 +116,7 @@ void V4L2SliceVideoDecoder::Initialize(const VideoDecoderConfig& config,
// Reset V4L2 device and queue if reinitializing decoder.
if (state_ != State::kUninitialized) {
- if (!StopStreamV4L2Queue()) {
+ if (!StopStreamV4L2Queue(true)) {
std::move(init_cb).Run(StatusCode::kV4l2FailedToStopStreamQueue);
return;
}
@@ -141,12 +141,33 @@ void V4L2SliceVideoDecoder::Initialize(const VideoDecoderConfig& config,
// Open V4L2 device.
VideoCodecProfile profile = config.profile();
- uint32_t input_format_fourcc =
+ uint32_t input_format_fourcc_stateless =
V4L2Device::VideoCodecProfileToV4L2PixFmt(profile, true);
- if (!input_format_fourcc ||
- !device_->Open(V4L2Device::Type::kDecoder, input_format_fourcc)) {
+ if (!input_format_fourcc_stateless ||
+ !device_->Open(V4L2Device::Type::kDecoder,
+ input_format_fourcc_stateless)) {
VLOGF(1) << "Failed to open device for profile: " << profile
- << " fourcc: " << FourccToString(input_format_fourcc);
+ << " fourcc: " << FourccToString(input_format_fourcc_stateless);
+ input_format_fourcc_stateless = 0;
+ } else {
+ VLOGF(1) << "Found V4L2 device capable of stateless decoding for "
+ << FourccToString(input_format_fourcc_stateless);
+ }
+
+ uint32_t input_format_fourcc_stateful =
+ V4L2Device::VideoCodecProfileToV4L2PixFmt(profile, false);
+ if (!input_format_fourcc_stateful ||
+ !device_->Open(V4L2Device::Type::kDecoder,
+ input_format_fourcc_stateful)) {
+ VLOGF(1) << "Failed to open device for profile: " << profile
+ << " fourcc: " << FourccToString(input_format_fourcc_stateful);
+ input_format_fourcc_stateful = 0;
+ } else {
+ VLOGF(1) << "Found V4L2 device capable of stateful decoding for "
+ << FourccToString(input_format_fourcc_stateful);
+ }
+
+ if (!input_format_fourcc_stateless && !input_format_fourcc_stateful) {
std::move(init_cb).Run(StatusCode::kV4l2NoDecoder);
return;
}
@@ -172,10 +193,23 @@ void V4L2SliceVideoDecoder::Initialize(const VideoDecoderConfig& config,
return;
}
- // Create the backend (only stateless API supported as of now).
- backend_ = std::make_unique<V4L2StatelessVideoDecoderBackend>(
- this, device_, profile, decoder_task_runner_);
+ uint32_t input_format_fourcc;
+ if (input_format_fourcc_stateful) {
+ backend_ = std::make_unique<V4L2StatefulVideoDecoderBackend>(
+ this, device_, profile, decoder_task_runner_);
+ input_format_fourcc = input_format_fourcc_stateful;
+ } else if (input_format_fourcc_stateless) {
+ backend_ = std::make_unique<V4L2StatelessVideoDecoderBackend>(
+ this, device_, profile, decoder_task_runner_);
+ input_format_fourcc = input_format_fourcc_stateless;
+ } else {
+ VLOGF(1) << "No backend capable of taking this profile.";
+ std::move(init_cb).Run(StatusCode::kV4l2FailedResourceAllocation);
+ return;
+ }
+
if (!backend_->Initialize()) {
+ VLOGF(1) << "Failed to initialize backend.";
std::move(init_cb).Run(StatusCode::kV4l2FailedResourceAllocation);
return;
}
@@ -193,13 +227,21 @@ void V4L2SliceVideoDecoder::Initialize(const VideoDecoderConfig& config,
return;
}
+ // Start streaming input queue and polling. This is required for the stateful
+ // decoder, and doesn't hurt for the stateless one.
+ if (!StartStreamV4L2Queue(false)) {
+ VLOGF(1) << "Failed to start streaming.";
+ std::move(init_cb).Run(StatusCode::kV4L2FailedToStartStreamQueue);
+ return;
+ }
+
// Call init_cb
output_cb_ = output_cb;
SetState(State::kDecoding);
std::move(init_cb).Run(::media::OkStatus());
}
-bool V4L2SliceVideoDecoder::SetupInputFormat(uint32_t input_format_fourcc) {
+bool V4L2VideoDecoder::SetupInputFormat(uint32_t input_format_fourcc) {
DCHECK_CALLED_ON_VALID_SEQUENCE(decoder_sequence_checker_);
DCHECK_EQ(state_, State::kUninitialized);
@@ -232,8 +274,8 @@ bool V4L2SliceVideoDecoder::SetupInputFormat(uint32_t input_format_fourcc) {
return true;
}
-bool V4L2SliceVideoDecoder::SetupOutputFormat(const gfx::Size& size,
- const gfx::Rect& visible_rect) {
+bool V4L2VideoDecoder::SetupOutputFormat(const gfx::Size& size,
+ const gfx::Rect& visible_rect) {
DCHECK_CALLED_ON_VALID_SEQUENCE(decoder_sequence_checker_);
DVLOGF(3) << "size: " << size.ToString()
<< ", visible_rect: " << visible_rect.ToString();
@@ -307,7 +349,7 @@ bool V4L2SliceVideoDecoder::SetupOutputFormat(const gfx::Size& size,
return true;
}
-void V4L2SliceVideoDecoder::Reset(base::OnceClosure closure) {
+void V4L2VideoDecoder::Reset(base::OnceClosure closure) {
DCHECK_CALLED_ON_VALID_SEQUENCE(decoder_sequence_checker_);
DVLOGF(3);
@@ -321,12 +363,13 @@ void V4L2SliceVideoDecoder::Reset(base::OnceClosure closure) {
// Streamoff V4L2 queues to drop input and output buffers.
// If the queues are streaming before reset, then we need to start streaming
// them after stopping.
- bool is_streaming = input_queue_->IsStreaming();
- if (!StopStreamV4L2Queue())
+ const bool is_input_streaming = input_queue_->IsStreaming();
+ const bool is_output_streaming = output_queue_->IsStreaming();
+ if (!StopStreamV4L2Queue(true))
return;
- if (is_streaming) {
- if (!StartStreamV4L2Queue())
+ if (is_input_streaming) {
+ if (!StartStreamV4L2Queue(is_output_streaming))
return;
}
@@ -337,8 +380,8 @@ void V4L2SliceVideoDecoder::Reset(base::OnceClosure closure) {
std::move(closure).Run();
}
-void V4L2SliceVideoDecoder::Decode(scoped_refptr<DecoderBuffer> buffer,
- DecodeCB decode_cb) {
+void V4L2VideoDecoder::Decode(scoped_refptr<DecoderBuffer> buffer,
+ DecodeCB decode_cb) {
DCHECK_CALLED_ON_VALID_SEQUENCE(decoder_sequence_checker_);
DCHECK_NE(state_, State::kUninitialized);
@@ -352,20 +395,20 @@ void V4L2SliceVideoDecoder::Decode(scoped_refptr<DecoderBuffer> buffer,
bitstream_id);
}
-bool V4L2SliceVideoDecoder::StartStreamV4L2Queue() {
+bool V4L2VideoDecoder::StartStreamV4L2Queue(bool start_output_queue) {
DCHECK_CALLED_ON_VALID_SEQUENCE(decoder_sequence_checker_);
DVLOGF(3);
- if (!input_queue_->Streamon() || !output_queue_->Streamon()) {
+ if (!input_queue_->Streamon() ||
+ (start_output_queue && !output_queue_->Streamon())) {
VLOGF(1) << "Failed to streamon V4L2 queue.";
SetState(State::kError);
return false;
}
if (!device_->StartPolling(
- base::BindRepeating(&V4L2SliceVideoDecoder::ServiceDeviceTask,
- weak_this_),
- base::BindRepeating(&V4L2SliceVideoDecoder::SetState, weak_this_,
+ base::BindRepeating(&V4L2VideoDecoder::ServiceDeviceTask, weak_this_),
+ base::BindRepeating(&V4L2VideoDecoder::SetState, weak_this_,
State::kError))) {
SetState(State::kError);
return false;
@@ -374,7 +417,7 @@ bool V4L2SliceVideoDecoder::StartStreamV4L2Queue() {
return true;
}
-bool V4L2SliceVideoDecoder::StopStreamV4L2Queue() {
+bool V4L2VideoDecoder::StopStreamV4L2Queue(bool stop_input_queue) {
DCHECK_CALLED_ON_VALID_SEQUENCE(decoder_sequence_checker_);
DVLOGF(3);
@@ -384,48 +427,48 @@ bool V4L2SliceVideoDecoder::StopStreamV4L2Queue() {
}
// Streamoff input and output queue.
- if (input_queue_)
+ if (input_queue_ && stop_input_queue)
input_queue_->Streamoff();
if (output_queue_)
output_queue_->Streamoff();
if (backend_)
- backend_->OnStreamStopped();
+ backend_->OnStreamStopped(stop_input_queue);
return true;
}
-void V4L2SliceVideoDecoder::InitiateFlush() {
+void V4L2VideoDecoder::InitiateFlush() {
DCHECK_CALLED_ON_VALID_SEQUENCE(decoder_sequence_checker_);
DVLOGF(3);
SetState(State::kFlushing);
}
-void V4L2SliceVideoDecoder::CompleteFlush() {
+void V4L2VideoDecoder::CompleteFlush() {
DCHECK_CALLED_ON_VALID_SEQUENCE(decoder_sequence_checker_);
DVLOGF(3);
SetState(State::kDecoding);
}
-void V4L2SliceVideoDecoder::ChangeResolution(gfx::Size pic_size,
- gfx::Rect visible_rect,
- size_t num_output_frames) {
+void V4L2VideoDecoder::ChangeResolution(gfx::Size pic_size,
+ gfx::Rect visible_rect,
+ size_t num_output_frames) {
DCHECK_CALLED_ON_VALID_SEQUENCE(decoder_sequence_checker_);
DVLOGF(3);
DCHECK(!continue_change_resolution_cb_);
// After the pipeline flushes all frames, we can start changing resolution.
continue_change_resolution_cb_ =
- base::BindOnce(&V4L2SliceVideoDecoder::ContinueChangeResolution,
- weak_this_, pic_size, visible_rect, num_output_frames);
+ base::BindOnce(&V4L2VideoDecoder::ContinueChangeResolution, weak_this_,
+ pic_size, visible_rect, num_output_frames);
DCHECK(client_);
client_->PrepareChangeResolution();
}
-void V4L2SliceVideoDecoder::ApplyResolutionChange() {
+void V4L2VideoDecoder::ApplyResolutionChange() {
DCHECK_CALLED_ON_VALID_SEQUENCE(decoder_sequence_checker_);
DVLOGF(3);
DCHECK(continue_change_resolution_cb_);
@@ -433,13 +476,12 @@ void V4L2SliceVideoDecoder::ApplyResolutionChange() {
std::move(continue_change_resolution_cb_).Run();
}
-void V4L2SliceVideoDecoder::ContinueChangeResolution(
+void V4L2VideoDecoder::ContinueChangeResolution(
const gfx::Size& pic_size,
const gfx::Rect& visible_rect,
const size_t num_output_frames) {
DCHECK_CALLED_ON_VALID_SEQUENCE(decoder_sequence_checker_);
DVLOGF(3);
- DCHECK_EQ(input_queue_->QueuedBuffersCount(), 0u);
DCHECK_EQ(output_queue_->QueuedBuffersCount(), 0u);
// If we already reset, then skip it.
@@ -455,7 +497,9 @@ void V4L2SliceVideoDecoder::ContinueChangeResolution(
num_output_frames_ = num_output_frames;
- if (!StopStreamV4L2Queue())
+ // Stateful decoders require the input queue to keep running during resolution
+ // changes, but stateless ones require it to be stopped.
+ if (!StopStreamV4L2Queue(backend_->StopInputQueueOnResChange()))
return;
if (!output_queue_->DeallocateBuffers()) {
@@ -488,7 +532,7 @@ void V4L2SliceVideoDecoder::ContinueChangeResolution(
return;
}
- if (!StartStreamV4L2Queue()) {
+ if (!StartStreamV4L2Queue(true)) {
SetState(State::kError);
return;
}
@@ -500,7 +544,7 @@ void V4L2SliceVideoDecoder::ContinueChangeResolution(
base::Unretained(backend_.get()), true));
}
-void V4L2SliceVideoDecoder::ServiceDeviceTask(bool /* event */) {
+void V4L2VideoDecoder::ServiceDeviceTask(bool event) {
DCHECK_CALLED_ON_VALID_SEQUENCE(decoder_sequence_checker_);
DVLOGF(3) << "Number of queued input buffers: "
<< input_queue_->QueuedBuffersCount()
@@ -509,8 +553,9 @@ void V4L2SliceVideoDecoder::ServiceDeviceTask(bool /* event */) {
// Dequeue V4L2 output buffer first to reduce output latency.
bool success;
- V4L2ReadableBufferRef dequeued_buffer;
while (output_queue_->QueuedBuffersCount() > 0) {
+ V4L2ReadableBufferRef dequeued_buffer;
+
std::tie(success, dequeued_buffer) = output_queue_->DequeueBuffer();
if (!success) {
SetState(State::kError);
@@ -524,6 +569,8 @@ void V4L2SliceVideoDecoder::ServiceDeviceTask(bool /* event */) {
// Dequeue V4L2 input buffer.
while (input_queue_->QueuedBuffersCount() > 0) {
+ V4L2ReadableBufferRef dequeued_buffer;
+
std::tie(success, dequeued_buffer) = input_queue_->DequeueBuffer();
if (!success) {
SetState(State::kError);
@@ -532,13 +579,15 @@ void V4L2SliceVideoDecoder::ServiceDeviceTask(bool /* event */) {
if (!dequeued_buffer)
break;
}
+
+ backend_->OnServiceDeviceTask(event);
}
-void V4L2SliceVideoDecoder::OutputFrame(scoped_refptr<VideoFrame> frame,
- const gfx::Rect& visible_rect,
- base::TimeDelta timestamp) {
+void V4L2VideoDecoder::OutputFrame(scoped_refptr<VideoFrame> frame,
+ const gfx::Rect& visible_rect,
+ base::TimeDelta timestamp) {
DCHECK_CALLED_ON_VALID_SEQUENCE(decoder_sequence_checker_);
- DVLOGF(4) << "timestamp: " << timestamp;
+ DVLOGF(4) << "timestamp: " << timestamp.InMilliseconds() << " msec";
// Set the timestamp at which the decode operation started on the
// |frame|. If the frame has been outputted before (e.g. because of VP9
@@ -561,14 +610,14 @@ void V4L2SliceVideoDecoder::OutputFrame(scoped_refptr<VideoFrame> frame,
output_cb_.Run(std::move(frame));
}
-DmabufVideoFramePool* V4L2SliceVideoDecoder::GetVideoFramePool() const {
+DmabufVideoFramePool* V4L2VideoDecoder::GetVideoFramePool() const {
DCHECK_CALLED_ON_VALID_SEQUENCE(decoder_sequence_checker_);
DVLOGF(4);
return client_->GetVideoFramePool();
}
-void V4L2SliceVideoDecoder::SetState(State new_state) {
+void V4L2VideoDecoder::SetState(State new_state) {
DCHECK_CALLED_ON_VALID_SEQUENCE(decoder_sequence_checker_);
DVLOGF(3) << "Change state from " << static_cast<int>(state_) << " to "
<< static_cast<int>(new_state);
@@ -613,14 +662,14 @@ void V4L2SliceVideoDecoder::SetState(State new_state) {
return;
}
-void V4L2SliceVideoDecoder::OnBackendError() {
+void V4L2VideoDecoder::OnBackendError() {
DCHECK_CALLED_ON_VALID_SEQUENCE(decoder_sequence_checker_);
DVLOGF(2);
SetState(State::kError);
}
-bool V4L2SliceVideoDecoder::IsDecoding() const {
+bool V4L2VideoDecoder::IsDecoding() const {
DCHECK_CALLED_ON_VALID_SEQUENCE(decoder_sequence_checker_);
DVLOGF(3);
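Initialize() above now probes the device once per API flavor and prefers the stateful backend. A condensed restatement, not from the patch, assuming only what is visible above: VideoCodecProfileToV4L2PixFmt(profile, true) yields the stateless fourcc (e.g. V4L2_PIX_FMT_H264_SLICE) and false yields the stateful one (e.g. V4L2_PIX_FMT_H264):

  // Returns the input fourcc to use, or 0 if no backend can handle
  // |profile|; the stateful API wins ties, matching the if/else above.
  uint32_t PickInputFourcc(V4L2Device* device,
                           VideoCodecProfile profile,
                           bool* use_stateful_backend) {
    const uint32_t stateless =
        V4L2Device::VideoCodecProfileToV4L2PixFmt(profile, true);
    const uint32_t stateful =
        V4L2Device::VideoCodecProfileToV4L2PixFmt(profile, false);
    const bool has_stateless =
        stateless && device->Open(V4L2Device::Type::kDecoder, stateless);
    const bool has_stateful =
        stateful && device->Open(V4L2Device::Type::kDecoder, stateful);
    *use_stateful_backend = has_stateful;
    if (has_stateful)
      return stateful;
    return has_stateless ? stateless : 0;
  }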
diff --git a/chromium/media/gpu/v4l2/v4l2_slice_video_decoder.h b/chromium/media/gpu/v4l2/v4l2_video_decoder.h
index d5b82bbf824..b046b17dbd7 100644
--- a/chromium/media/gpu/v4l2/v4l2_slice_video_decoder.h
+++ b/chromium/media/gpu/v4l2/v4l2_video_decoder.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_GPU_V4L2_V4L2_SLICE_VIDEO_DECODER_H_
-#define MEDIA_GPU_V4L2_V4L2_SLICE_VIDEO_DECODER_H_
+#ifndef MEDIA_GPU_V4L2_V4L2_VIDEO_DECODER_H_
+#define MEDIA_GPU_V4L2_V4L2_VIDEO_DECODER_H_
#include <linux/videodev2.h>
@@ -36,12 +36,12 @@ namespace media {
class DmabufVideoFramePool;
-class MEDIA_GPU_EXPORT V4L2SliceVideoDecoder
+class MEDIA_GPU_EXPORT V4L2VideoDecoder
: public DecoderInterface,
public V4L2VideoDecoderBackend::Client {
public:
- // Create V4L2SliceVideoDecoder instance. The success of the creation doesn't
- // ensure V4L2SliceVideoDecoder is available on the device. It will be
+ // Create V4L2VideoDecoder instance. The success of the creation doesn't
+ // ensure V4L2VideoDecoder is available on the device. It will be
// determined in Initialize().
static std::unique_ptr<DecoderInterface> Create(
scoped_refptr<base::SequencedTaskRunner> decoder_task_runner,
@@ -71,13 +71,12 @@ class MEDIA_GPU_EXPORT V4L2SliceVideoDecoder
DmabufVideoFramePool* GetVideoFramePool() const override;
private:
- friend class V4L2SliceVideoDecoderTest;
+ friend class V4L2VideoDecoderTest;
- V4L2SliceVideoDecoder(
- scoped_refptr<base::SequencedTaskRunner> decoder_task_runner,
- base::WeakPtr<DecoderInterface::Client> client,
- scoped_refptr<V4L2Device> device);
- ~V4L2SliceVideoDecoder() override;
+ V4L2VideoDecoder(scoped_refptr<base::SequencedTaskRunner> decoder_task_runner,
+ base::WeakPtr<DecoderInterface::Client> client,
+ scoped_refptr<V4L2Device> device);
+ ~V4L2VideoDecoder() override;
enum class State {
// Initial state. Transitions to |kDecoding| if Initialize() is successful,
@@ -116,12 +115,12 @@ class MEDIA_GPU_EXPORT V4L2SliceVideoDecoder
// in VideoFramePool. Return true if the setup is successful.
bool SetupOutputFormat(const gfx::Size& size, const gfx::Rect& visible_rect);
- // Start streaming V4L2 input and output queues. Attempt to start
- // |device_poll_thread_| before starting streaming.
- bool StartStreamV4L2Queue();
- // Stop streaming V4L2 input and output queues. Stop |device_poll_thread_|
- // before stopping streaming.
- bool StopStreamV4L2Queue();
+  // Start streaming V4L2 input and (if |start_output_queue| is true) output
+  // queues. Attempt to start device polling after streaming starts.
+  bool StartStreamV4L2Queue(bool start_output_queue);
+  // Stop streaming V4L2 output and (if |stop_input_queue| is true) input
+  // queues. Stop device polling before stopping streaming.
+ bool StopStreamV4L2Queue(bool stop_input_queue);
// Try to dequeue input and output buffers from device.
void ServiceDeviceTask(bool event);
@@ -167,10 +166,10 @@ class MEDIA_GPU_EXPORT V4L2SliceVideoDecoder
// |weak_this_| must be dereferenced and invalidated on
// |decoder_task_runner_|.
- base::WeakPtr<V4L2SliceVideoDecoder> weak_this_;
- base::WeakPtrFactory<V4L2SliceVideoDecoder> weak_this_factory_;
+ base::WeakPtr<V4L2VideoDecoder> weak_this_;
+ base::WeakPtrFactory<V4L2VideoDecoder> weak_this_factory_;
};
} // namespace media
-#endif // MEDIA_GPU_V4L2_V4L2_SLICE_VIDEO_DECODER_H_
+#endif // MEDIA_GPU_V4L2_V4L2_VIDEO_DECODER_H_
diff --git a/chromium/media/gpu/v4l2/v4l2_video_decoder_backend.h b/chromium/media/gpu/v4l2/v4l2_video_decoder_backend.h
index 093df178bb5..3c49de8f8dd 100644
--- a/chromium/media/gpu/v4l2/v4l2_video_decoder_backend.h
+++ b/chromium/media/gpu/v4l2/v4l2_video_decoder_backend.h
@@ -72,9 +72,13 @@ class V4L2VideoDecoderBackend {
int32_t bitstream_id) = 0;
// Called by the decoder when it has dequeued a buffer from the CAPTURE queue.
virtual void OnOutputBufferDequeued(V4L2ReadableBufferRef buf) = 0;
- // Called whenever the V4L2 stream is stopped (|Streamoff| called on both
- // |V4L2Queue|s).
- virtual void OnStreamStopped() = 0;
+  // Backends can override this method if they need to do specific work when
+  // the device task is called.
+ virtual void OnServiceDeviceTask(bool event) {}
+ // Called whenever the V4L2 stream is stopped (|Streamoff| called on either
+ // the CAPTURE queue alone or on both queues). |input_queue_stopped| is
+ // true if the input queue has been requested to stop.
+ virtual void OnStreamStopped(bool input_queue_stopped) = 0;
// Called when the resolution has been decided, in case the backend needs
// to do something specific beyond applying these parameters to the CAPTURE
// queue.
@@ -88,6 +92,12 @@ class V4L2VideoDecoderBackend {
// with |status| as argument.
virtual void ClearPendingRequests(DecodeStatus status) = 0;
+ // Whether we should stop the input queue when changing resolution. Stateless
+ // decoders require this, but stateful ones need the input queue to keep
+ // running. Although not super elegant, this is required to express that
+ // difference.
+ virtual bool StopInputQueueOnResChange() const = 0;
+
protected:
V4L2VideoDecoderBackend(Client* const client,
scoped_refptr<V4L2Device> device);
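For illustration, a hypothetical skeleton showing what the extended interface above asks of a subclass; the untouched pure virtuals are elided, and DrainEvents()/ResetParser() are stand-ins, not real helpers:

  class SketchBackend : public V4L2VideoDecoderBackend {
   public:
    using V4L2VideoDecoderBackend::V4L2VideoDecoderBackend;

    // New optional hook with a no-op default; the stateful backend uses
    // it to drain V4L2 events on every service cycle.
    void OnServiceDeviceTask(bool event) override {
      if (event)
        DrainEvents();
    }

    // The flag distinguishes a full stop (reset) from a CAPTURE-only one.
    void OnStreamStopped(bool input_queue_stopped) override {
      if (input_queue_stopped)
        ResetParser();
    }

    // True for stateless backends, false for stateful ones (see below).
    bool StopInputQueueOnResChange() const override { return true; }

    // ... remaining pure virtuals of V4L2VideoDecoderBackend elided ...

   private:
    void DrainEvents() {}
    void ResetParser() {}
  };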
diff --git a/chromium/media/gpu/v4l2/v4l2_video_decoder_backend_stateful.cc b/chromium/media/gpu/v4l2/v4l2_video_decoder_backend_stateful.cc
new file mode 100644
index 00000000000..417598f893c
--- /dev/null
+++ b/chromium/media/gpu/v4l2/v4l2_video_decoder_backend_stateful.cc
@@ -0,0 +1,608 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/gpu/v4l2/v4l2_video_decoder_backend_stateful.h"
+#include <cstddef>
+
+#include <memory>
+#include <tuple>
+#include <utility>
+
+#include "base/bind.h"
+#include "base/callback_forward.h"
+#include "base/logging.h"
+#include "base/optional.h"
+#include "base/sequence_checker.h"
+#include "base/sequenced_task_runner.h"
+#include "media/base/video_codecs.h"
+#include "media/gpu/chromeos/dmabuf_video_frame_pool.h"
+#include "media/gpu/macros.h"
+#include "media/gpu/v4l2/v4l2_device.h"
+#include "media/gpu/v4l2/v4l2_vda_helpers.h"
+#include "media/gpu/v4l2/v4l2_video_decoder_backend.h"
+
+namespace media {
+
+V4L2StatefulVideoDecoderBackend::DecodeRequest::DecodeRequest(
+ scoped_refptr<DecoderBuffer> buf,
+ VideoDecoder::DecodeCB cb,
+ int32_t id)
+ : buffer(std::move(buf)), decode_cb(std::move(cb)), bitstream_id(id) {}
+
+V4L2StatefulVideoDecoderBackend::DecodeRequest::DecodeRequest(DecodeRequest&&) =
+ default;
+V4L2StatefulVideoDecoderBackend::DecodeRequest&
+V4L2StatefulVideoDecoderBackend::DecodeRequest::operator=(DecodeRequest&&) =
+ default;
+
+V4L2StatefulVideoDecoderBackend::DecodeRequest::~DecodeRequest() = default;
+
+bool V4L2StatefulVideoDecoderBackend::DecodeRequest::IsCompleted() const {
+ return bytes_used == buffer->data_size();
+}
+
+V4L2StatefulVideoDecoderBackend::V4L2StatefulVideoDecoderBackend(
+ Client* const client,
+ scoped_refptr<V4L2Device> device,
+ VideoCodecProfile profile,
+ scoped_refptr<base::SequencedTaskRunner> task_runner)
+ : V4L2VideoDecoderBackend(client, std::move(device)),
+ profile_(profile),
+ task_runner_(task_runner) {
+ DVLOGF(3);
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ weak_this_ = weak_this_factory_.GetWeakPtr();
+}
+
+V4L2StatefulVideoDecoderBackend::~V4L2StatefulVideoDecoderBackend() {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DVLOGF(3);
+
+ if (flush_cb_ || current_decode_request_ || !decode_request_queue_.empty()) {
+ VLOGF(1) << "Should not destroy backend during pending decode!";
+ }
+
+ struct v4l2_event_subscription sub;
+ memset(&sub, 0, sizeof(sub));
+ sub.type = V4L2_EVENT_SOURCE_CHANGE;
+ if (device_->Ioctl(VIDIOC_UNSUBSCRIBE_EVENT, &sub) != 0) {
+ VLOGF(1) << "Cannot unsubscribe to event";
+ }
+}
+
+bool V4L2StatefulVideoDecoderBackend::Initialize() {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DVLOGF(3);
+
+ if (!IsSupportedProfile(profile_)) {
+ VLOGF(1) << "Unsupported profile " << GetProfileName(profile_);
+ return false;
+ }
+
+ frame_splitter_ =
+ v4l2_vda_helpers::InputBufferFragmentSplitter::CreateFromProfile(
+ profile_);
+ if (!frame_splitter_) {
+ VLOGF(1) << "Failed to create frame splitter";
+ return false;
+ }
+
+ struct v4l2_event_subscription sub;
+ memset(&sub, 0, sizeof(sub));
+ sub.type = V4L2_EVENT_SOURCE_CHANGE;
+ if (device_->Ioctl(VIDIOC_SUBSCRIBE_EVENT, &sub) != 0) {
+ VLOGF(1) << "Cannot subscribe to event";
+ return false;
+ }
+
+ return true;
+}
+
+void V4L2StatefulVideoDecoderBackend::EnqueueDecodeTask(
+ scoped_refptr<DecoderBuffer> buffer,
+ VideoDecoder::DecodeCB decode_cb,
+ int32_t bitstream_id) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DVLOGF(3);
+
+ decode_request_queue_.push(
+ DecodeRequest(std::move(buffer), std::move(decode_cb), bitstream_id));
+
+ DoDecodeWork();
+}
+
+void V4L2StatefulVideoDecoderBackend::DoDecodeWork() {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DVLOGF(3);
+
+ // Do not decode if a flush is in progress.
+ // This may actually be ok to do if we are changing resolution?
+ if (flush_cb_)
+ return;
+
+ // Get a new decode request if none is in progress.
+ if (!current_decode_request_) {
+ // No more decode request, nothing to do for now.
+ if (decode_request_queue_.empty())
+ return;
+
+ auto decode_request = std::move(decode_request_queue_.front());
+ decode_request_queue_.pop();
+
+ // Need to flush?
+ if (decode_request.buffer->end_of_stream()) {
+ InitiateFlush(std::move(decode_request.decode_cb));
+ return;
+ }
+
+ // This is our new decode request.
+ current_decode_request_ = std::move(decode_request);
+ DCHECK_EQ(current_decode_request_->bytes_used, 0u);
+ }
+
+ // Get a V4L2 buffer to copy the encoded data into.
+ if (!current_input_buffer_) {
+ current_input_buffer_ = input_queue_->GetFreeBuffer();
+ // We will be called again once an input buffer becomes available.
+ if (!current_input_buffer_)
+ return;
+
+ // Record timestamp of the input buffer so it propagates to the decoded
+ // frames.
+ const struct timespec timespec =
+ current_decode_request_->buffer->timestamp().ToTimeSpec();
+ struct timeval timestamp = {
+ .tv_sec = timespec.tv_sec,
+ .tv_usec = timespec.tv_nsec / 1000,
+ };
+ current_input_buffer_->SetTimeStamp(timestamp);
+ }
+
+ // From here on we have both a decode request and input buffer, so we can
+ // progress with decoding.
+ DCHECK(current_decode_request_.has_value());
+ DCHECK(current_input_buffer_.has_value());
+
+ const DecoderBuffer* current_buffer = current_decode_request_->buffer.get();
+ DCHECK_LT(current_decode_request_->bytes_used, current_buffer->data_size());
+ const uint8_t* const data =
+ current_buffer->data() + current_decode_request_->bytes_used;
+ const size_t data_size =
+ current_buffer->data_size() - current_decode_request_->bytes_used;
+ size_t bytes_to_copy = 0;
+
+ if (!frame_splitter_->AdvanceFrameFragment(data, data_size, &bytes_to_copy)) {
+ VLOGF(1) << "Invalid H.264 stream detected.";
+ std::move(current_decode_request_->decode_cb)
+ .Run(DecodeStatus::DECODE_ERROR);
+ current_decode_request_.reset();
+ current_input_buffer_.reset();
+ client_->OnBackendError();
+ return;
+ }
+
+ const size_t bytes_used = current_input_buffer_->GetPlaneBytesUsed(0);
+ if (bytes_used + bytes_to_copy > current_input_buffer_->GetPlaneSize(0)) {
+ VLOGF(1) << "V4L2 buffer size is too small to contain a whole frame.";
+ std::move(current_decode_request_->decode_cb)
+ .Run(DecodeStatus::DECODE_ERROR);
+ current_decode_request_.reset();
+ current_input_buffer_.reset();
+ client_->OnBackendError();
+ return;
+ }
+
+ uint8_t* dst =
+ static_cast<uint8_t*>(current_input_buffer_->GetPlaneMapping(0)) +
+ bytes_used;
+ memcpy(dst, data, bytes_to_copy);
+ current_input_buffer_->SetPlaneBytesUsed(0, bytes_used + bytes_to_copy);
+ current_decode_request_->bytes_used += bytes_to_copy;
+
+  // Release |current_decode_request_| if we have reached its end.
+ if (current_decode_request_->IsCompleted()) {
+ std::move(current_decode_request_->decode_cb).Run(DecodeStatus::OK);
+ current_decode_request_.reset();
+ }
+
+ // If we have a partial frame, wait before submitting it.
+ if (frame_splitter_->IsPartialFramePending()) {
+ VLOGF(4) << "Partial frame pending, not queueing any buffer now.";
+ return;
+ }
+
+  // The V4L2 input buffer contains a decodable entity; queue it.
+ std::move(*current_input_buffer_).QueueMMap();
+ current_input_buffer_.reset();
+
+ // If we can still progress on a decode request, do it.
+ if (current_decode_request_ || !decode_request_queue_.empty())
+ ScheduleDecodeWork();
+}
+
+void V4L2StatefulVideoDecoderBackend::ScheduleDecodeWork() {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DVLOGF(3);
+
+ task_runner_->PostTask(
+ FROM_HERE, base::BindOnce(&V4L2StatefulVideoDecoderBackend::DoDecodeWork,
+ weak_this_));
+}
+
+void V4L2StatefulVideoDecoderBackend::OnServiceDeviceTask(bool event) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DVLOGF(3);
+
+ if (event) {
+ while (base::Optional<struct v4l2_event> ev = device_->DequeueEvent()) {
+ if (ev->type == V4L2_EVENT_SOURCE_CHANGE &&
+ (ev->u.src_change.changes & V4L2_EVENT_SRC_CH_RESOLUTION)) {
+ ChangeResolution();
+ }
+ }
+ }
+
+ // We can enqueue dequeued output buffers immediately.
+ EnqueueOutputBuffers();
+
+ // Try to progress on our work since we may have dequeued input buffers.
+ DoDecodeWork();
+}
+
+void V4L2StatefulVideoDecoderBackend::EnqueueOutputBuffers() {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DVLOGF(3);
+ const v4l2_memory mem_type = output_queue_->GetMemoryType();
+
+ while (base::Optional<V4L2WritableBufferRef> buffer =
+ output_queue_->GetFreeBuffer()) {
+ bool ret = false;
+
+ switch (mem_type) {
+ case V4L2_MEMORY_MMAP:
+ ret = std::move(*buffer).QueueMMap();
+ break;
+ case V4L2_MEMORY_DMABUF: {
+ scoped_refptr<VideoFrame> video_frame = GetPoolVideoFrame();
+ // Running out of frame is not an error, we will be called again
+ // once frames are available.
+ if (!video_frame)
+ return;
+ ret = std::move(*buffer).QueueDMABuf(std::move(video_frame));
+ break;
+ }
+ default:
+ NOTREACHED();
+ }
+
+ if (!ret)
+ client_->OnBackendError();
+ }
+
+ DVLOGF(3) << output_queue_->QueuedBuffersCount() << "/"
+ << output_queue_->AllocatedBuffersCount()
+ << " output buffers queued";
+}
+
+scoped_refptr<VideoFrame> V4L2StatefulVideoDecoderBackend::GetPoolVideoFrame() {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DVLOGF(3);
+ DmabufVideoFramePool* pool = client_->GetVideoFramePool();
+ DCHECK_EQ(output_queue_->GetMemoryType(), V4L2_MEMORY_DMABUF);
+ DCHECK_NE(pool, nullptr);
+
+ scoped_refptr<VideoFrame> frame = pool->GetFrame();
+ if (!frame) {
+ DVLOGF(3) << "No available videoframe for now";
+ // We will try again once a frame becomes available.
+ pool->NotifyWhenFrameAvailable(base::BindOnce(
+ base::IgnoreResult(&base::SequencedTaskRunner::PostTask), task_runner_,
+ FROM_HERE,
+ base::BindOnce(
+ base::IgnoreResult(
+ &V4L2StatefulVideoDecoderBackend::EnqueueOutputBuffers),
+ weak_this_)));
+ }
+
+ return frame;
+}
+
+// static
+void V4L2StatefulVideoDecoderBackend::ReuseOutputBufferThunk(
+ scoped_refptr<base::SequencedTaskRunner> task_runner,
+ base::Optional<base::WeakPtr<V4L2StatefulVideoDecoderBackend>> weak_this,
+ V4L2ReadableBufferRef buffer) {
+ DVLOGF(3);
+ DCHECK(weak_this);
+
+ if (task_runner->RunsTasksInCurrentSequence()) {
+ if (*weak_this)
+ (*weak_this)->ReuseOutputBuffer(std::move(buffer));
+ } else {
+ task_runner->PostTask(
+ FROM_HERE,
+ base::BindOnce(&V4L2StatefulVideoDecoderBackend::ReuseOutputBuffer,
+ *weak_this, std::move(buffer)));
+ }
+}
+
+void V4L2StatefulVideoDecoderBackend::ReuseOutputBuffer(
+ V4L2ReadableBufferRef buffer) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DVLOGF(3) << "Reuse output buffer #" << buffer->BufferId();
+
+ // Lose reference to the buffer so it goes back to the free list.
+ buffer.reset();
+
+ // Enqueue the newly available buffer.
+ EnqueueOutputBuffers();
+}
+
+void V4L2StatefulVideoDecoderBackend::OnOutputBufferDequeued(
+ V4L2ReadableBufferRef buffer) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DVLOGF(3);
+
+  // Zero-byte buffers are returned as part of a flush and can be dismissed.
+ if (buffer->GetPlaneBytesUsed(0) > 0) {
+ const struct timeval timeval = buffer->GetTimeStamp();
+ const struct timespec timespec = {
+ .tv_sec = timeval.tv_sec,
+ .tv_nsec = timeval.tv_usec * 1000,
+ };
+ const base::TimeDelta timestamp = base::TimeDelta::FromTimeSpec(timespec);
+
+ scoped_refptr<VideoFrame> frame;
+
+ switch (output_queue_->GetMemoryType()) {
+ case V4L2_MEMORY_MMAP: {
+ // Wrap the videoframe into another one so we can be signaled when the
+ // consumer is done with it and reuse the V4L2 buffer.
+ scoped_refptr<VideoFrame> origin_frame = buffer->GetVideoFrame();
+ frame = VideoFrame::WrapVideoFrame(origin_frame, origin_frame->format(),
+ origin_frame->visible_rect(),
+ origin_frame->natural_size());
+ frame->AddDestructionObserver(base::BindOnce(
+ &V4L2StatefulVideoDecoderBackend::ReuseOutputBufferThunk,
+ task_runner_, weak_this_, buffer));
+ break;
+ }
+ case V4L2_MEMORY_DMABUF:
+ // The pool VideoFrame we passed to QueueDMABuf() has been decoded into,
+ // pass it as-is.
+ frame = buffer->GetVideoFrame();
+ break;
+ default:
+ NOTREACHED();
+ }
+
+ client_->OutputFrame(std::move(frame), *visible_rect_, timestamp);
+ }
+
+  // We were waiting for the last buffer before a resolution change.
+ // The order here is important! A flush event may come after a resolution
+ // change event (but not the opposite), so we must make sure both events
+ // are processed in the correct order.
+ if (buffer->IsLast() && resolution_change_cb_) {
+ std::move(resolution_change_cb_).Run();
+ } else if (buffer->IsLast() && flush_cb_) {
+ // We were waiting for a flush to complete, and received the last buffer.
+ CompleteFlush();
+ }
+
+ EnqueueOutputBuffers();
+}
+
+bool V4L2StatefulVideoDecoderBackend::InitiateFlush(
+ VideoDecoder::DecodeCB flush_cb) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DVLOGF(3);
+ DCHECK(!flush_cb_);
+
+ // Submit any pending input buffer at the time of flush.
+ if (current_input_buffer_) {
+ std::move(*current_input_buffer_).QueueMMap();
+ current_input_buffer_.reset();
+ }
+
+ client_->InitiateFlush();
+ flush_cb_ = std::move(flush_cb);
+
+ // Special case: if our CAPTURE queue is not streaming, we cannot receive
+ // the CAPTURE buffer with the LAST flag set that signals the end of flush.
+ // In this case, we should complete the flush immediately.
+ if (!output_queue_->IsStreaming())
+ return CompleteFlush();
+
+ // Send the STOP command to the V4L2 device. The device will let us know
+ // that the flush is completed by sending us a CAPTURE buffer with the LAST
+ // flag set.
+ struct v4l2_decoder_cmd cmd;
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.cmd = V4L2_DEC_CMD_STOP;
+ if (device_->Ioctl(VIDIOC_DECODER_CMD, &cmd) != 0) {
+ LOG(ERROR) << "Failed to issue STOP command";
+ client_->OnBackendError();
+ return false;
+ }
+
+ return true;
+}
+
+bool V4L2StatefulVideoDecoderBackend::CompleteFlush() {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DVLOGF(3);
+ DCHECK(flush_cb_);
+
+ // Signal that flush has properly been completed.
+ std::move(flush_cb_).Run(DecodeStatus::OK);
+
+ // If CAPTURE queue is streaming, send the START command to the V4L2 device
+ // to signal that we are resuming decoding with the same state.
+ if (output_queue_->IsStreaming()) {
+ struct v4l2_decoder_cmd cmd;
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.cmd = V4L2_DEC_CMD_START;
+ if (device_->Ioctl(VIDIOC_DECODER_CMD, &cmd) != 0) {
+ LOG(ERROR) << "Failed to issue START command";
+      // |flush_cb_| has already been run above; running the now-null
+      // callback again would crash, so only report the backend error.
+ client_->OnBackendError();
+ return false;
+ }
+ }
+
+ client_->CompleteFlush();
+
+ // Resume decoding if data is available.
+ ScheduleDecodeWork();
+
+ return true;
+}
+
+void V4L2StatefulVideoDecoderBackend::OnStreamStopped(bool stop_input_queue) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DVLOGF(3);
+
+ // If we are resetting, also reset the splitter.
+ if (stop_input_queue)
+ frame_splitter_->Reset();
+}
+
+void V4L2StatefulVideoDecoderBackend::ChangeResolution() {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DVLOGF(3);
+
+ // Here we just query the new resolution, visible rect, and number of output
+ // buffers before asking the client to update the resolution.
+
+ auto format = output_queue_->GetFormat().first;
+ if (!format) {
+ client_->OnBackendError();
+ return;
+ }
+ const gfx::Size pic_size(format->fmt.pix_mp.width, format->fmt.pix_mp.height);
+
+ auto visible_rect = output_queue_->GetVisibleRect();
+ if (!visible_rect) {
+ client_->OnBackendError();
+ return;
+ }
+
+ auto ctrl = device_->GetCtrl(V4L2_CID_MIN_BUFFERS_FOR_CAPTURE);
+  constexpr size_t kDefaultNumOutputBuffers = 12;
+  const size_t num_output_buffers =
+      ctrl ? ctrl->value : kDefaultNumOutputBuffers;
+ if (!ctrl)
+ VLOGF(1) << "Using default minimum number of CAPTURE buffers";
+
+ // Signal that we are flushing and initiate the resolution change.
+ // Our flush will be done when we receive a buffer with the LAST flag on the
+ // CAPTURE queue.
+ client_->InitiateFlush();
+ DCHECK(!resolution_change_cb_);
+ resolution_change_cb_ =
+ base::BindOnce(&V4L2StatefulVideoDecoderBackend::ContinueChangeResolution,
+ weak_this_, pic_size, *visible_rect, num_output_buffers);
+
+ // ...that is, unless we are not streaming yet, in which case the resolution
+ // change can take place immediately.
+ if (!output_queue_->IsStreaming())
+ std::move(resolution_change_cb_).Run();
+}
+
+void V4L2StatefulVideoDecoderBackend::ContinueChangeResolution(
+ const gfx::Size& pic_size,
+ const gfx::Rect& visible_rect,
+ const size_t num_output_buffers) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DVLOGF(3);
+
+ // Flush is done, but stay in flushing state and ask our client to set the new
+ // resolution.
+ client_->ChangeResolution(pic_size, visible_rect, num_output_buffers);
+}
+
+bool V4L2StatefulVideoDecoderBackend::ApplyResolution(
+ const gfx::Size& pic_size,
+ const gfx::Rect& visible_rect,
+ const size_t num_output_frames) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DVLOGF(3);
+
+ // Use the visible rect for all new frames.
+ visible_rect_ = visible_rect;
+
+ return true;
+}
+
+void V4L2StatefulVideoDecoderBackend::OnChangeResolutionDone(bool success) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DVLOGF(3);
+
+ if (!success) {
+ client_->OnBackendError();
+ return;
+ }
+
+ // Flush can be considered completed on the client side.
+ client_->CompleteFlush();
+
+ // Enqueue all available output buffers now that they are allocated.
+ EnqueueOutputBuffers();
+
+ // Also try to progress on our work.
+ DoDecodeWork();
+}
+
+void V4L2StatefulVideoDecoderBackend::ClearPendingRequests(
+ DecodeStatus status) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DVLOGF(3);
+
+ resolution_change_cb_.Reset();
+
+ if (flush_cb_) {
+ std::move(flush_cb_).Run(status);
+ }
+
+ current_input_buffer_.reset();
+
+ if (current_decode_request_) {
+ std::move(current_decode_request_->decode_cb).Run(status);
+ current_decode_request_.reset();
+ }
+
+ while (!decode_request_queue_.empty()) {
+ std::move(decode_request_queue_.front().decode_cb).Run(status);
+ decode_request_queue_.pop();
+ }
+}
+
+// TODO(b:149663704): move into helper function shared between both backends?
+bool V4L2StatefulVideoDecoderBackend::IsSupportedProfile(
+ VideoCodecProfile profile) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DCHECK(device_);
+ if (supported_profiles_.empty()) {
+ constexpr uint32_t kSupportedInputFourccs[] = {
+ V4L2_PIX_FMT_H264,
+ V4L2_PIX_FMT_VP8,
+ V4L2_PIX_FMT_VP9,
+ };
+ scoped_refptr<V4L2Device> device = V4L2Device::Create();
+ VideoDecodeAccelerator::SupportedProfiles profiles =
+ device->GetSupportedDecodeProfiles(base::size(kSupportedInputFourccs),
+ kSupportedInputFourccs);
+ for (const auto& profile : profiles)
+ supported_profiles_.push_back(profile.profile);
+ }
+ return std::find(supported_profiles_.begin(), supported_profiles_.end(),
+ profile) != supported_profiles_.end();
+}
+
+bool V4L2StatefulVideoDecoderBackend::StopInputQueueOnResChange() const {
+ return false;
+}
+
+} // namespace media
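Not part of the patch: the raw V4L2 handshake that InitiateFlush()/CompleteFlush() above drive through V4L2Device, sketched with plain ioctls per the kernel's stateful-decoder interface, error handling elided:

  #include <linux/videodev2.h>
  #include <sys/ioctl.h>
  #include <cstring>

  void DrainAndRestart(int fd) {
    // 1. Ask the decoder to drain everything it has buffered.
    struct v4l2_decoder_cmd cmd;
    memset(&cmd, 0, sizeof(cmd));
    cmd.cmd = V4L2_DEC_CMD_STOP;
    ioctl(fd, VIDIOC_DECODER_CMD, &cmd);

    // 2. Keep dequeuing CAPTURE buffers; the drain is complete when one
    //    comes back flagged V4L2_BUF_FLAG_LAST. It may hold zero bytes,
    //    which is why OnOutputBufferDequeued() skips empty buffers.
    //    ... VIDIOC_DQBUF loop until (buf.flags & V4L2_BUF_FLAG_LAST) ...

    // 3. Resume decoding with all decoder state preserved.
    memset(&cmd, 0, sizeof(cmd));
    cmd.cmd = V4L2_DEC_CMD_START;
    ioctl(fd, VIDIOC_DECODER_CMD, &cmd);
  }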
diff --git a/chromium/media/gpu/v4l2/v4l2_video_decoder_backend_stateful.h b/chromium/media/gpu/v4l2/v4l2_video_decoder_backend_stateful.h
new file mode 100644
index 00000000000..62d6d715f4b
--- /dev/null
+++ b/chromium/media/gpu/v4l2/v4l2_video_decoder_backend_stateful.h
@@ -0,0 +1,151 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_GPU_V4L2_V4L2_VIDEO_DECODER_BACKEND_STATEFUL_H_
+#define MEDIA_GPU_V4L2_V4L2_VIDEO_DECODER_BACKEND_STATEFUL_H_
+
+#include <memory>
+#include <vector>
+
+#include "base/containers/queue.h"
+#include "base/macros.h"
+#include "base/optional.h"
+#include "base/sequenced_task_runner.h"
+#include "media/base/video_codecs.h"
+#include "media/gpu/v4l2/v4l2_device.h"
+#include "media/gpu/v4l2/v4l2_video_decoder_backend.h"
+
+namespace media {
+
+namespace v4l2_vda_helpers {
+class InputBufferFragmentSplitter;
+}
+
+class V4L2StatefulVideoDecoderBackend : public V4L2VideoDecoderBackend {
+ public:
+ V4L2StatefulVideoDecoderBackend(
+ Client* const client,
+ scoped_refptr<V4L2Device> device,
+ VideoCodecProfile profile,
+ scoped_refptr<base::SequencedTaskRunner> task_runner);
+ ~V4L2StatefulVideoDecoderBackend() override;
+
+ // We don't ever want to copy or move this.
+ V4L2StatefulVideoDecoderBackend(const V4L2StatefulVideoDecoderBackend&) =
+ delete;
+ V4L2StatefulVideoDecoderBackend& operator=(
+ const V4L2StatefulVideoDecoderBackend&) = delete;
+
+ // V4L2VideoDecoderBackend implementation
+ bool Initialize() override;
+ void EnqueueDecodeTask(scoped_refptr<DecoderBuffer> buffer,
+ VideoDecoder::DecodeCB decode_cb,
+ int32_t bitstream_id) override;
+ void OnOutputBufferDequeued(V4L2ReadableBufferRef buffer) override;
+ void OnServiceDeviceTask(bool event) override;
+ void OnStreamStopped(bool stop_input_queue) override;
+ bool ApplyResolution(const gfx::Size& pic_size,
+ const gfx::Rect& visible_rect,
+ const size_t num_output_frames) override;
+ void OnChangeResolutionDone(bool success) override;
+ void ClearPendingRequests(DecodeStatus status) override;
+ bool StopInputQueueOnResChange() const override;
+
+ private:
+ // TODO(b:149663704): merge with stateless?
+ // Request for decoding buffer. Every EnqueueDecodeTask() call generates 1
+ // DecodeRequest.
+ struct DecodeRequest {
+ // The decode buffer passed to EnqueueDecodeTask().
+ scoped_refptr<DecoderBuffer> buffer;
+ // Number of bytes used so far from |buffer|.
+ size_t bytes_used = 0;
+ // The callback function passed to EnqueueDecodeTask().
+ VideoDecoder::DecodeCB decode_cb;
+ // Identifier for the decoder buffer.
+ int32_t bitstream_id;
+
+ DecodeRequest(scoped_refptr<DecoderBuffer> buf,
+ VideoDecoder::DecodeCB cb,
+ int32_t id);
+
+    // Allow move, but not copy.
+ DecodeRequest(DecodeRequest&&);
+ DecodeRequest& operator=(DecodeRequest&&);
+
+ ~DecodeRequest();
+
+ bool IsCompleted() const;
+
+ DISALLOW_COPY_AND_ASSIGN(DecodeRequest);
+ };
+
+ bool IsSupportedProfile(VideoCodecProfile profile);
+
+ void DoDecodeWork();
+
+ static void ReuseOutputBufferThunk(
+ scoped_refptr<base::SequencedTaskRunner> task_runner,
+ base::Optional<base::WeakPtr<V4L2StatefulVideoDecoderBackend>> weak_this,
+ V4L2ReadableBufferRef buffer);
+ void ReuseOutputBuffer(V4L2ReadableBufferRef buffer);
+
+ // Called when the format has changed, in order to reallocate the output
+ // buffers according to the new format.
+ void ChangeResolution();
+ // Called when the flush triggered by a resolution change has completed,
+ // to actually apply the resolution.
+ void ContinueChangeResolution(const gfx::Size& pic_size,
+ const gfx::Rect& visible_rect,
+ const size_t num_output_buffers);
+
+ // Enqueue all output buffers that are available.
+ void EnqueueOutputBuffers();
+ // When a video frame pool is in use, obtain a frame from the pool or, if
+ // none is available, schedule |EnqueueOutputBuffers()| to be called when one
+ // becomes available.
+ scoped_refptr<VideoFrame> GetPoolVideoFrame();
+
+ bool InitiateFlush(VideoDecoder::DecodeCB flush_cb);
+ bool CompleteFlush();
+
+ void ScheduleDecodeWork();
+
+ // Video profile we are decoding.
+ VideoCodecProfile profile_;
+
+ // The task runner we are running on, for convenience.
+ const scoped_refptr<base::SequencedTaskRunner> task_runner_;
+
+  // VideoCodecProfiles supported by a V4L2 stateful decoder driver.
+ std::vector<VideoCodecProfile> supported_profiles_;
+
+ // Queue of pending decode request.
+ base::queue<DecodeRequest> decode_request_queue_;
+
+ // The decode request which is currently processed.
+ base::Optional<DecodeRequest> current_decode_request_;
+ // V4L2 input buffer currently being prepared.
+ base::Optional<V4L2WritableBufferRef> current_input_buffer_;
+
+ std::unique_ptr<v4l2_vda_helpers::InputBufferFragmentSplitter>
+ frame_splitter_;
+
+ base::Optional<gfx::Rect> visible_rect_;
+
+ // Callback of the buffer that triggered a flush, to be called when the
+ // flush completes.
+ VideoDecoder::DecodeCB flush_cb_;
+ // Closure that will be called once the flush triggered by a resolution change
+ // event completes.
+ base::OnceClosure resolution_change_cb_;
+
+ base::WeakPtr<V4L2StatefulVideoDecoderBackend> weak_this_;
+ base::WeakPtrFactory<V4L2StatefulVideoDecoderBackend> weak_this_factory_{
+ this};
+};
+
+} // namespace media
+
+#endif // MEDIA_GPU_V4L2_V4L2_VIDEO_DECODER_BACKEND_STATEFUL_H_
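A round-trip sketch, not from the patch, of the timestamp plumbing this backend relies on: DoDecodeWork() folds the DecoderBuffer's TimeDelta into the OUTPUT buffer's timeval, and OnOutputBufferDequeued() recovers it from the CAPTURE buffer; precision below one microsecond is dropped at the timeval step:

  #include <sys/time.h>
  #include "base/time/time.h"

  struct timeval ToV4L2Timestamp(base::TimeDelta ts) {
    const struct timespec spec = ts.ToTimeSpec();
    struct timeval tv;
    tv.tv_sec = spec.tv_sec;
    tv.tv_usec = spec.tv_nsec / 1000;  // Sub-microsecond precision lost.
    return tv;
  }

  base::TimeDelta FromV4L2Timestamp(const struct timeval& tv) {
    const struct timespec spec = {.tv_sec = tv.tv_sec,
                                  .tv_nsec = tv.tv_usec * 1000};
    return base::TimeDelta::FromTimeSpec(spec);
  }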
diff --git a/chromium/media/gpu/v4l2/v4l2_video_decoder_backend_stateless.cc b/chromium/media/gpu/v4l2/v4l2_video_decoder_backend_stateless.cc
index b8c3400a990..b03846c0784 100644
--- a/chromium/media/gpu/v4l2/v4l2_video_decoder_backend_stateless.cc
+++ b/chromium/media/gpu/v4l2/v4l2_video_decoder_backend_stateless.cc
@@ -559,7 +559,7 @@ void V4L2StatelessVideoDecoderBackend::OnChangeResolutionDone(bool success) {
weak_this_));
}
-void V4L2StatelessVideoDecoderBackend::OnStreamStopped() {
+void V4L2StatelessVideoDecoderBackend::OnStreamStopped(bool stop_input_queue) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
DVLOGF(3);
@@ -603,6 +603,10 @@ void V4L2StatelessVideoDecoderBackend::ClearPendingRequests(
}
}
+bool V4L2StatelessVideoDecoderBackend::StopInputQueueOnResChange() const {
+ return true;
+}
+
bool V4L2StatelessVideoDecoderBackend::IsSupportedProfile(
VideoCodecProfile profile) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
diff --git a/chromium/media/gpu/v4l2/v4l2_video_decoder_backend_stateless.h b/chromium/media/gpu/v4l2/v4l2_video_decoder_backend_stateless.h
index 0dfa817309d..704d6171f7f 100644
--- a/chromium/media/gpu/v4l2/v4l2_video_decoder_backend_stateless.h
+++ b/chromium/media/gpu/v4l2/v4l2_video_decoder_backend_stateless.h
@@ -43,12 +43,13 @@ class V4L2StatelessVideoDecoderBackend : public V4L2VideoDecoderBackend,
VideoDecoder::DecodeCB decode_cb,
int32_t bitstream_id) override;
void OnOutputBufferDequeued(V4L2ReadableBufferRef buffer) override;
- void OnStreamStopped() override;
+ void OnStreamStopped(bool stop_input_queue) override;
bool ApplyResolution(const gfx::Size& pic_size,
const gfx::Rect& visible_rect,
const size_t num_output_frames) override;
void OnChangeResolutionDone(bool success) override;
void ClearPendingRequests(DecodeStatus status) override;
+ bool StopInputQueueOnResChange() const override;
// V4L2DecodeSurfaceHandler implementation.
scoped_refptr<V4L2DecodeSurface> CreateSurface() override;
diff --git a/chromium/media/gpu/v4l2/v4l2_video_encode_accelerator.cc b/chromium/media/gpu/v4l2/v4l2_video_encode_accelerator.cc
index 8c7ea443927..97ef7e2a648 100644
--- a/chromium/media/gpu/v4l2/v4l2_video_encode_accelerator.cc
+++ b/chromium/media/gpu/v4l2/v4l2_video_encode_accelerator.cc
@@ -381,7 +381,6 @@ bool V4L2VideoEncodeAccelerator::CreateImageProcessor(
const gfx::Rect& output_visible_rect) {
VLOGF(2);
DCHECK_CALLED_ON_VALID_SEQUENCE(encoder_sequence_checker_);
- DCHECK_NE(input_layout.format(), output_format);
auto ip_input_layout = AsMultiPlanarLayout(input_layout);
if (!ip_input_layout) {
@@ -432,7 +431,7 @@ bool V4L2VideoEncodeAccelerator::CreateImageProcessor(
image_processor_ = ImageProcessorFactory::Create(
*input_config, *output_config, {ImageProcessor::OutputMode::IMPORT},
- kImageProcBufferCount, encoder_task_runner_,
+ kImageProcBufferCount, VIDEO_ROTATION_0, encoder_task_runner_,
base::BindRepeating(&V4L2VideoEncodeAccelerator::ImageProcessorError,
weak_this_));
if (!image_processor_) {
@@ -750,6 +749,16 @@ void V4L2VideoEncodeAccelerator::EncodeTask(scoped_refptr<VideoFrame> frame,
return;
if (image_processor_) {
+ if (!frame) {
+ DCHECK(!flush_callback_.is_null());
+ NOTREACHED()
+ << "Flushing is not supported when using an image processor and this "
+ "situation should not happen for well behaved clients.";
+ NOTIFY_ERROR(kIllegalStateError);
+ child_task_runner_->PostTask(
+ FROM_HERE, base::BindOnce(std::move(flush_callback_), false));
+ return;
+ }
image_processor_input_queue_.emplace(std::move(frame), force_keyframe);
InputImageProcessorTask();
} else {
@@ -779,7 +788,7 @@ bool V4L2VideoEncodeAccelerator::ReconfigureFormatIfNeeded(
VLOGF(1) << "Encoder resolution is changed during encoding"
<< ", frame.natural_size()=" << frame.natural_size().ToString()
<< ", encoder_input_visible_rect_="
- << input_frame_size_.ToString();
+ << encoder_input_visible_rect_.ToString();
return false;
}
if (frame.coded_size() == input_frame_size_) {
diff --git a/chromium/media/gpu/vaapi/BUILD.gn b/chromium/media/gpu/vaapi/BUILD.gn
index 2524a1c31b5..98dbf6acad4 100644
--- a/chromium/media/gpu/vaapi/BUILD.gn
+++ b/chromium/media/gpu/vaapi/BUILD.gn
@@ -66,11 +66,16 @@ source_set("vaapi") {
"vp8_vaapi_video_decoder_delegate.h",
"vp9_encoder.cc",
"vp9_encoder.h",
+ "vp9_rate_control.cc",
+ "vp9_rate_control.h",
"vp9_vaapi_video_decoder_delegate.cc",
"vp9_vaapi_video_decoder_delegate.h",
]
- configs += [ "//build/config/linux/libva" ]
+ configs += [
+ "//build/config/linux/libva",
+ "//third_party/libvpx:libvpx_config",
+ ]
deps = [
":common",
@@ -83,6 +88,7 @@ source_set("vaapi") {
"//media/gpu/chromeos:common",
"//media/parsers",
"//mojo/public/cpp/bindings",
+ "//third_party/libvpx:libvp9rc",
"//third_party/libyuv",
"//ui/gfx",
"//ui/gfx/geometry",
@@ -116,12 +122,12 @@ source_set("vaapi") {
]
}
- if (ozone_platform_gbm || use_egl) {
+ if (use_ozone || use_egl) {
sources += [
"vaapi_picture_native_pixmap.cc",
"vaapi_picture_native_pixmap.h",
]
- if (ozone_platform_gbm) {
+ if (use_ozone) {
sources += [
"vaapi_picture_native_pixmap_ozone.cc",
"vaapi_picture_native_pixmap_ozone.h",
@@ -194,7 +200,9 @@ source_set("unit_test") {
"vaapi_image_decode_accelerator_worker_unittest.cc",
"vaapi_video_decode_accelerator_unittest.cc",
"vaapi_video_encode_accelerator_unittest.cc",
+ "vp9_encoder_unittest.cc",
]
+ configs += [ "//third_party/libvpx:libvpx_config" ]
deps = [
":common",
":vaapi",
@@ -206,6 +214,7 @@ source_set("unit_test") {
"//mojo/core/embedder",
"//testing/gmock",
"//testing/gtest",
+ "//third_party/libvpx:libvp9rc",
"//ui/gfx:test_support",
"//ui/gfx/geometry",
]
diff --git a/chromium/media/gpu/vaapi/accelerated_video_encoder.cc b/chromium/media/gpu/vaapi/accelerated_video_encoder.cc
index 71acd16c8aa..4bfdb0dc06c 100644
--- a/chromium/media/gpu/vaapi/accelerated_video_encoder.cc
+++ b/chromium/media/gpu/vaapi/accelerated_video_encoder.cc
@@ -40,6 +40,12 @@ void AcceleratedVideoEncoder::EncodeJob::AddSetupCallback(
setup_callbacks_.push(std::move(cb));
}
+void AcceleratedVideoEncoder::EncodeJob::AddPostExecuteCallback(
+ base::OnceClosure cb) {
+ DCHECK(!cb.is_null());
+ post_execute_callbacks_.push(std::move(cb));
+}
+
void AcceleratedVideoEncoder::EncodeJob::AddReferencePicture(
scoped_refptr<CodecPicture> ref_pic) {
DCHECK(ref_pic);
@@ -53,10 +59,21 @@ void AcceleratedVideoEncoder::EncodeJob::Execute() {
}
std::move(execute_callback_).Run();
+
+ while (!post_execute_callbacks_.empty()) {
+ std::move(post_execute_callbacks_.front()).Run();
+ post_execute_callbacks_.pop();
+ }
}
size_t AcceleratedVideoEncoder::GetBitstreamBufferSize() const {
return GetEncodeBitstreamBufferSize(GetCodedSize());
}
+void AcceleratedVideoEncoder::BitrateControlUpdate(
+ uint64_t encoded_chunk_size_bytes) {
+  NOTREACHED() << __func__ << "() is called on an "
+               << "AcceleratedVideoEncoder that doesn't support "
+               << "BitrateControl::kConstantQuantizationParameter";
+}
} // namespace media
diff --git a/chromium/media/gpu/vaapi/accelerated_video_encoder.h b/chromium/media/gpu/vaapi/accelerated_video_encoder.h
index f214831dd62..e5a51bd96ac 100644
--- a/chromium/media/gpu/vaapi/accelerated_video_encoder.h
+++ b/chromium/media/gpu/vaapi/accelerated_video_encoder.h
@@ -40,12 +40,23 @@ class AcceleratedVideoEncoder {
AcceleratedVideoEncoder() = default;
virtual ~AcceleratedVideoEncoder() = default;
+ enum class BitrateControl {
+ kConstantBitrate, // Constant Bitrate mode. This class relies on other
+ // parts (e.g. driver) to achieve the specified bitrate.
+ kConstantQuantizationParameter // Constant Quantization Parameter mode.
+ // This class needs to compute a proper
+ // quantization parameter and give other
+ // parts (e.g. the driver) the value.
+ };
+
struct Config {
// Maximum number of reference frames.
// For H.264 encoding, the value represents the maximum number of reference
// frames for both the reference picture list 0 (bottom 16 bits) and the
// reference picture list 1 (top 16 bits).
size_t max_num_ref_frames;
+
+ BitrateControl bitrate_control = BitrateControl::kConstantBitrate;
};
// An abstraction of an encode job for one frame. Parameters required for an
@@ -71,6 +82,12 @@ class AcceleratedVideoEncoder {
// is executed.
void AddSetupCallback(base::OnceClosure cb);
+ // Schedules a callback to be run immediately after this job is executed.
+ // Can be called multiple times to schedule multiple callbacks, and all
+ // of them will be run in the order they were added. Callbacks can be used
+ // e.g. to retrieve the encoded buffer's linear size.
+ void AddPostExecuteCallback(base::OnceClosure cb);
+
// Adds |ref_pic| to the list of pictures to be used as reference pictures
// for this frame, to ensure they remain valid until the job is executed
// (or discarded).
@@ -114,6 +131,10 @@ class AcceleratedVideoEncoder {
// calls) to set up the job.
base::queue<base::OnceClosure> setup_callbacks_;
+ // Callbacks to be run (in the order of the AddPostExecuteCallback() calls)
+ // to do post-processing after the job is executed.
+ base::queue<base::OnceClosure> post_execute_callbacks_;
+
// Callback to be run to execute this job.
base::OnceClosure execute_callback_;
@@ -153,6 +174,12 @@ class AcceleratedVideoEncoder {
// Prepares a new |encode_job| to be executed in Accelerator and returns true
// on success. The caller may then call Execute() on the job to run it.
virtual bool PrepareEncodeJob(EncodeJob* encode_job) = 0;
+
+ // Notifies AcceleratedVideoEncoder of the encoded chunk size in bytes so
+ // that its bitrate controller can be updated. This should be called only if
+ // AcceleratedVideoEncoder is configured with
+ // BitrateControl::kConstantQuantizationParameter.
+ virtual void BitrateControlUpdate(uint64_t encoded_chunk_size_bytes);
};
} // namespace media
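A hedged sketch of how a caller might key behavior off the new Config::bitrate_control knob; BitrateControl and Config below are trimmed stand-ins for the declarations above, not the real Chromium classes:

```cpp
#include <cstddef>
#include <iostream>

enum class BitrateControl { kConstantBitrate, kConstantQuantizationParameter };

struct Config {
  size_t max_num_ref_frames = 0;
  BitrateControl bitrate_control = BitrateControl::kConstantBitrate;
};

void ConfigureEncoder(const Config& config) {
  if (config.bitrate_control ==
      BitrateControl::kConstantQuantizationParameter) {
    // CQP mode: the encoder computes QP values itself and must be fed the
    // encoded chunk sizes back via BitrateControlUpdate().
    std::cout << "CQP: expect BitrateControlUpdate() after each frame\n";
  } else {
    // CBR mode: the driver's rate control is trusted to hit the target.
    std::cout << "CBR: driver-side rate control\n";
  }
}

int main() {
  Config config;
  config.bitrate_control = BitrateControl::kConstantQuantizationParameter;
  ConfigureEncoder(config);
}
```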
diff --git a/chromium/media/gpu/vaapi/test_utils.cc b/chromium/media/gpu/vaapi/test_utils.cc
index b534b297bd5..f578bae1071 100644
--- a/chromium/media/gpu/vaapi/test_utils.cc
+++ b/chromium/media/gpu/vaapi/test_utils.cc
@@ -54,9 +54,9 @@ bool CompareImages(const DecodedImage& reference_image,
// Uses the reference image's size as the ground truth.
const gfx::Size image_size = reference_image.size;
if (image_size != hw_decoded_image.size) {
- DLOG(ERROR) << "Wrong expected software decoded image size, "
- << image_size.ToString() << " versus VaAPI provided "
- << hw_decoded_image.size.ToString();
+ LOG(ERROR) << "Wrong expected software decoded image size, "
+ << image_size.ToString() << " versus VaAPI provided "
+ << hw_decoded_image.size.ToString();
return false;
}
@@ -100,7 +100,7 @@ bool CompareImages(const DecodedImage& reference_image,
image_size.width(), image_size.height());
}
if (conversion_result != 0) {
- DLOG(ERROR) << "libyuv conversion error";
+ LOG(ERROR) << "libyuv conversion error";
return false;
}
@@ -112,12 +112,12 @@ bool CompareImages(const DecodedImage& reference_image,
temp_v.get(), half_image_size.width(), image_size.width(),
image_size.height());
} else {
- DLOG(ERROR) << "HW FourCC not supported: " << FourccToString(hw_fourcc);
+ LOG(ERROR) << "HW FourCC not supported: " << FourccToString(hw_fourcc);
return false;
}
if (ssim < min_ssim) {
- DLOG(ERROR) << "SSIM too low: " << ssim << " < " << min_ssim;
+ LOG(ERROR) << "SSIM too low: " << ssim << " < " << min_ssim;
return false;
}
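The DLOG-to-LOG switch above matters because DLOG-style macros compile to nothing outside debug builds, so these comparison failures were silent in release test runs. A rough stand-in illustration (not Chromium's actual logging macros):

```cpp
#include <iostream>

// Rough model of the difference: debug-only logging disappears when NDEBUG
// is defined, while unconditional logging survives in release builds.
#ifdef NDEBUG
#define DLOG_ERROR(msg) \
  do {                  \
  } while (0)
#else
#define DLOG_ERROR(msg) std::cerr << "[debug] " << (msg) << '\n'
#endif
#define LOG_ERROR(msg) std::cerr << "[error] " << (msg) << '\n'

int main() {
  DLOG_ERROR("SSIM too low");  // Silent in release (NDEBUG) builds.
  LOG_ERROR("SSIM too low");   // Always emitted, as in the patched test code.
}
```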
diff --git a/chromium/media/gpu/vaapi/va.sigs b/chromium/media/gpu/vaapi/va.sigs
index f333cb33a7b..c24aad2c3e5 100644
--- a/chromium/media/gpu/vaapi/va.sigs
+++ b/chromium/media/gpu/vaapi/va.sigs
@@ -19,6 +19,7 @@ VAStatus vaDestroyImage(VADisplay dpy, VAImageID image);
VAStatus vaDestroySurfaces(VADisplay dpy, VASurfaceID *surfaces, int num_surfaces);
int vaDisplayIsValid(VADisplay dpy);
VAStatus vaEndPicture(VADisplay dpy, VAContextID context);
+const char *vaEntrypointStr(VAEntrypoint entrypoint);
const char *vaErrorStr(VAStatus error_status);
VAStatus vaExportSurfaceHandle(VADisplay dpy, VASurfaceID surface_id, uint32_t mem_type, uint32_t flags, void *descriptor);
VAStatus vaGetConfigAttributes(VADisplay dpy, VAProfile profile, VAEntrypoint entrypoint, VAConfigAttrib *attrib_list, int num_attribs);
@@ -29,6 +30,7 @@ int vaMaxNumConfigAttributes(VADisplay dpy);
int vaMaxNumEntrypoints(VADisplay dpy);
int vaMaxNumImageFormats(VADisplay dpy);
int vaMaxNumProfiles(VADisplay dpy);
+const char *vaProfileStr(VAProfile profile);
VAStatus vaPutImage (VADisplay dpy, VASurfaceID surface, VAImageID image, int src_x, int src_y, unsigned int src_width, unsigned int src_height, int dest_x, int dest_y, unsigned int dest_width, unsigned int dest_height);
VAStatus vaQueryConfigAttributes(VADisplay dpy, VAConfigID config_id, VAProfile *profile, VAEntrypoint *entrypoint, VAConfigAttrib *attrib_list, int *num_attribs);
VAStatus vaQueryConfigEntrypoints(VADisplay dpy, VAProfile profile, VAEntrypoint *entrypoint_list, int *num_entrypoints);
@@ -37,7 +39,6 @@ VAStatus vaQueryImageFormats(VADisplay dpy, VAImageFormat *format_list, int *num
VAStatus vaQuerySurfaceAttributes(VADisplay dpy, VAConfigID config, VASurfaceAttrib *attrib_list, unsigned int *num_attribs);
const char* vaQueryVendorString(VADisplay dpy);
VAStatus vaRenderPicture(VADisplay dpy, VAContextID context, VABufferID *buffers, int num_buffers);
-VAStatus vaSetDisplayAttributes(VADisplay dpy, VADisplayAttribute *attr_list, int num_attributes);
VAStatus vaSyncSurface(VADisplay dpy, VASurfaceID render_target);
VAStatus vaTerminate(VADisplay dpy);
VAStatus vaUnmapBuffer(VADisplay dpy, VABufferID buf_id);
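The two new va.sigs entries expose libva's string helpers from <va/va_str.h>. A small usage sketch, assuming a build with libva headers available:

```cpp
// Shows what the newly exposed helpers buy in log messages; assumes libva's
// <va/va.h> and <va/va_str.h> are available to the build.
#include <va/va.h>
#include <va/va_str.h>

#include <iostream>

int main() {
  const VAProfile profile = VAProfileVP9Profile2;
  const VAEntrypoint entrypoint = VAEntrypointEncSliceLP;
  // Prints "VAProfileVP9Profile2" / "VAEntrypointEncSliceLP" instead of the
  // bare integral values the old log statements produced.
  std::cout << vaProfileStr(profile) << " @ " << vaEntrypointStr(entrypoint)
            << '\n';
}
```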
diff --git a/chromium/media/gpu/vaapi/vaapi_image_processor_backend.cc b/chromium/media/gpu/vaapi/vaapi_image_processor_backend.cc
index 4cb6bceda56..3c72d13786b 100644
--- a/chromium/media/gpu/vaapi/vaapi_image_processor_backend.cc
+++ b/chromium/media/gpu/vaapi/vaapi_image_processor_backend.cc
@@ -75,6 +75,7 @@ std::unique_ptr<ImageProcessorBackend> VaapiImageProcessorBackend::Create(
const PortConfig& input_config,
const PortConfig& output_config,
const std::vector<OutputMode>& preferred_output_modes,
+ VideoRotation relative_rotation,
ErrorCB error_cb,
scoped_refptr<base::SequencedTaskRunner> backend_task_runner) {
// VaapiImageProcessorBackend supports ChromeOS only.
@@ -136,6 +137,13 @@ std::unique_ptr<ImageProcessorBackend> VaapiImageProcessorBackend::Create(
return nullptr;
}
+ // Checks if the VA-API driver supports rotation.
+ if (relative_rotation != VIDEO_ROTATION_0 &&
+ !vaapi_wrapper->IsRotationSupported()) {
+ VLOGF(1) << "VaapiIP doesn't support rotation";
+ return nullptr;
+ }
+
// We should restrict the acceptable PortConfig for input and output both to
// the one returned by GetPlatformVideoFrameLayout(). However,
// ImageProcessorFactory interface doesn't provide information about what
@@ -146,7 +154,7 @@ std::unique_ptr<ImageProcessorBackend> VaapiImageProcessorBackend::Create(
// scenario.
return base::WrapUnique<ImageProcessorBackend>(new VaapiImageProcessorBackend(
std::move(vaapi_wrapper), input_config, output_config, OutputMode::IMPORT,
- std::move(error_cb), std::move(backend_task_runner)));
+ relative_rotation, std::move(error_cb), std::move(backend_task_runner)));
#endif
}
@@ -155,11 +163,13 @@ VaapiImageProcessorBackend::VaapiImageProcessorBackend(
const PortConfig& input_config,
const PortConfig& output_config,
OutputMode output_mode,
+ VideoRotation relative_rotation,
ErrorCB error_cb,
scoped_refptr<base::SequencedTaskRunner> backend_task_runner)
: ImageProcessorBackend(input_config,
output_config,
output_mode,
+ relative_rotation,
std::move(error_cb),
std::move(backend_task_runner)),
vaapi_wrapper_(std::move(vaapi_wrapper)) {}
@@ -206,9 +216,9 @@ void VaapiImageProcessorBackend::Process(scoped_refptr<VideoFrame> input_frame,
return;
// VA-API performs pixel format conversion and scaling without any filters.
- if (!vaapi_wrapper_->BlitSurface(*src_va_surface, *dst_va_surface,
- input_frame->visible_rect(),
- output_frame->visible_rect())) {
+ if (!vaapi_wrapper_->BlitSurface(
+ *src_va_surface, *dst_va_surface, input_frame->visible_rect(),
+ output_frame->visible_rect(), relative_rotation_)) {
// Failed to execute BlitSurface(). Since VaapiWrapper has invoked
// ReportToUMA(), calling error_cb_ here is not needed.
return;
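A simplified sketch of the create-time gating added above; FakeVaapiWrapper, Backend, and VideoRotation here are hypothetical stand-ins that only model the refusal path when rotation is requested but unsupported:

```cpp
#include <iostream>
#include <memory>

enum class VideoRotation { k0, k90, k180, k270 };

struct FakeVaapiWrapper {
  bool rotation_supported = false;
  bool IsRotationSupported() const { return rotation_supported; }
};

struct Backend {};

std::unique_ptr<Backend> CreateBackend(const FakeVaapiWrapper& wrapper,
                                       VideoRotation relative_rotation) {
  // Mirrors the new check: only refuse when rotation is actually needed.
  if (relative_rotation != VideoRotation::k0 &&
      !wrapper.IsRotationSupported()) {
    std::cout << "rotation requested but unsupported; refusing to create\n";
    return nullptr;  // Caller falls back to another image processor.
  }
  return std::make_unique<Backend>();
}

int main() {
  FakeVaapiWrapper wrapper;  // rotation_supported defaults to false.
  auto backend = CreateBackend(wrapper, VideoRotation::k90);
  std::cout << (backend ? "created\n" : "not created\n");
}
```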
diff --git a/chromium/media/gpu/vaapi/vaapi_image_processor_backend.h b/chromium/media/gpu/vaapi/vaapi_image_processor_backend.h
index 8abbb323dd8..8d5da751214 100644
--- a/chromium/media/gpu/vaapi/vaapi_image_processor_backend.h
+++ b/chromium/media/gpu/vaapi/vaapi_image_processor_backend.h
@@ -28,6 +28,7 @@ class VaapiImageProcessorBackend : public ImageProcessorBackend {
const PortConfig& input_config,
const PortConfig& output_config,
const std::vector<OutputMode>& preferred_output_modes,
+ VideoRotation relative_rotation,
ErrorCB error_cb,
scoped_refptr<base::SequencedTaskRunner> backend_task_runner);
@@ -42,6 +43,7 @@ class VaapiImageProcessorBackend : public ImageProcessorBackend {
const PortConfig& input_config,
const PortConfig& output_config,
OutputMode output_mode,
+ VideoRotation relative_rotation,
ErrorCB error_cb,
scoped_refptr<base::SequencedTaskRunner> backend_task_runner);
~VaapiImageProcessorBackend() override;
diff --git a/chromium/media/gpu/vaapi/vaapi_unittest.cc b/chromium/media/gpu/vaapi/vaapi_unittest.cc
index d3d459fadf8..abb662d777a 100644
--- a/chromium/media/gpu/vaapi/vaapi_unittest.cc
+++ b/chromium/media/gpu/vaapi/vaapi_unittest.cc
@@ -11,9 +11,11 @@
#include <vector>
#include <va/va.h>
+#include <va/va_str.h>
#include "base/files/file.h"
#include "base/files/scoped_file.h"
+#include "base/logging.h"
#include "base/optional.h"
#include "base/process/launch.h"
#include "base/stl_util.h"
@@ -34,10 +36,8 @@ base::Optional<VAProfile> ConvertToVAProfile(VideoCodecProfile profile) {
{VP8PROFILE_ANY, VAProfileVP8Version0_3},
{VP9PROFILE_PROFILE0, VAProfileVP9Profile0},
{VP9PROFILE_PROFILE1, VAProfileVP9Profile1},
- // TODO(crbug.com/1011454, crbug.com/1011469): Reenable
- // VP9PROFILE_PROFILE2 and _PROFILE3 when P010 is completely supported.
- //{VP9PROFILE_PROFILE2, VAProfileVP9Profile2},
- //{VP9PROFILE_PROFILE3, VAProfileVP9Profile3},
+ {VP9PROFILE_PROFILE2, VAProfileVP9Profile2},
+ {VP9PROFILE_PROFILE3, VAProfileVP9Profile3},
};
auto it = kProfileMap.find(profile);
return it != kProfileMap.end() ? base::make_optional<VAProfile>(it->second)
@@ -56,10 +56,8 @@ base::Optional<VAProfile> StringToVAProfile(const std::string& va_profile) {
{"VAProfileVP8Version0_3", VAProfileVP8Version0_3},
{"VAProfileVP9Profile0", VAProfileVP9Profile0},
{"VAProfileVP9Profile1", VAProfileVP9Profile1},
- // TODO(crbug.com/1011454, crbug.com/1011469): Reenable
- // VP9PROFILE_PROFILE2 and _PROFILE3 when P010 is completely supported.
- // {"VAProfileVP9Profile2", VAProfileVP9Profile2},
- // {"VAProfileVP9Profile3", VAProfileVP9Profile3},
+ {"VAProfileVP9Profile2", VAProfileVP9Profile2},
+ {"VAProfileVP9Profile3", VAProfileVP9Profile3},
};
auto it = kStringToVAProfile.find(va_profile);
@@ -165,7 +163,8 @@ TEST_F(VaapiTest, VaapiProfiles) {
va_info[VAProfileH264ConstrainedBaseline], VAEntrypointVLD);
}
- EXPECT_TRUE(is_profile_supported) << " profile: " << profile.profile;
+ EXPECT_TRUE(is_profile_supported)
+ << " profile: " << GetProfileName(profile.profile);
}
for (const auto& profile : VaapiWrapper::GetSupportedEncodeProfiles()) {
@@ -184,7 +183,8 @@ TEST_F(VaapiTest, VaapiProfiles) {
VAEntrypointEncSliceLP);
}
- EXPECT_TRUE(is_profile_supported) << " profile: " << profile.profile;
+ EXPECT_TRUE(is_profile_supported)
+ << " profile: " << GetProfileName(profile.profile);
}
EXPECT_EQ(VaapiWrapper::IsDecodeSupported(VAProfileJPEGBaseline),
@@ -194,21 +194,24 @@ TEST_F(VaapiTest, VaapiProfiles) {
base::Contains(va_info[VAProfileJPEGBaseline], VAEntrypointEncPicture));
}
+// Verifies that the default VAEntrypoint as per VaapiWrapper is indeed among
+// the supported ones.
TEST_F(VaapiTest, DefaultEntrypointIsSupported) {
for (size_t i = 0; i < VaapiWrapper::kCodecModeMax; ++i) {
- const VaapiWrapper::CodecMode mode =
- static_cast<VaapiWrapper::CodecMode>(i);
+ const auto wrapper_mode = static_cast<VaapiWrapper::CodecMode>(i);
std::map<VAProfile, std::vector<VAEntrypoint>> configurations =
- VaapiWrapper::GetSupportedConfigurationsForCodecModeForTesting(mode);
+ VaapiWrapper::GetSupportedConfigurationsForCodecModeForTesting(
+ wrapper_mode);
for (const auto& profile_and_entrypoints : configurations) {
const VAEntrypoint default_entrypoint =
- VaapiWrapper::GetDefaultVaEntryPoint(mode,
+ VaapiWrapper::GetDefaultVaEntryPoint(wrapper_mode,
profile_and_entrypoints.first);
const auto& supported_entrypoints = profile_and_entrypoints.second;
EXPECT_TRUE(base::Contains(supported_entrypoints, default_entrypoint))
- << "Default VAEntrypoint " << default_entrypoint
- << " (mode = " << mode << ") is not supported for VAProfile = "
- << profile_and_entrypoints.first;
+ << "Default VAEntrypoint " << vaEntrypointStr(default_entrypoint)
+ << " (VaapiWrapper mode = " << wrapper_mode
+ << ") is not supported for "
+ << vaProfileStr(profile_and_entrypoints.first);
}
}
}
diff --git a/chromium/media/gpu/vaapi/vaapi_video_decode_accelerator.h b/chromium/media/gpu/vaapi/vaapi_video_decode_accelerator.h
index ed053f16ec5..62b90c85858 100644
--- a/chromium/media/gpu/vaapi/vaapi_video_decode_accelerator.h
+++ b/chromium/media/gpu/vaapi/vaapi_video_decode_accelerator.h
@@ -19,7 +19,6 @@
#include "base/containers/queue.h"
#include "base/containers/small_map.h"
-#include "base/logging.h"
#include "base/macros.h"
#include "base/memory/weak_ptr.h"
#include "base/single_thread_task_runner.h"
diff --git a/chromium/media/gpu/vaapi/vaapi_video_decoder.cc b/chromium/media/gpu/vaapi/vaapi_video_decoder.cc
index c97f1a06cd9..48b9092156b 100644
--- a/chromium/media/gpu/vaapi/vaapi_video_decoder.cc
+++ b/chromium/media/gpu/vaapi/vaapi_video_decoder.cc
@@ -33,17 +33,15 @@ constexpr size_t kTimestampCacheSize = 128;
// Returns the preferred VA_RT_FORMAT for the given |profile|.
unsigned int GetVaFormatForVideoCodecProfile(VideoCodecProfile profile) {
- switch (profile) {
- case VP9PROFILE_PROFILE2:
- case VP9PROFILE_PROFILE3:
- return VA_RT_FORMAT_YUV420_10BPP;
- default:
- return VA_RT_FORMAT_YUV420;
- }
+ if (profile == VP9PROFILE_PROFILE2 || profile == VP9PROFILE_PROFILE3)
+ return VA_RT_FORMAT_YUV420_10BPP;
+ return VA_RT_FORMAT_YUV420;
}
-gfx::BufferFormat GetBufferFormat() {
+gfx::BufferFormat GetBufferFormat(VideoCodecProfile profile) {
#if defined(USE_OZONE)
+ if (profile == VP9PROFILE_PROFILE2 || profile == VP9PROFILE_PROFILE3)
+ return gfx::BufferFormat::P010;
return gfx::BufferFormat::YUV_420_BIPLANAR;
#else
return gfx::BufferFormat::RGBX_8888;
@@ -341,7 +339,7 @@ scoped_refptr<VASurface> VaapiVideoDecoder::CreateSurface() {
void VaapiVideoDecoder::SurfaceReady(scoped_refptr<VASurface> va_surface,
int32_t buffer_id,
const gfx::Rect& visible_rect,
- const VideoColorSpace& /*color_space*/) {
+ const VideoColorSpace& color_space) {
DCHECK_CALLED_ON_VALID_SEQUENCE(decoder_sequence_checker_);
DCHECK_EQ(state_, State::kDecoding);
DVLOGF(3);
@@ -360,16 +358,7 @@ void VaapiVideoDecoder::SurfaceReady(scoped_refptr<VASurface> va_surface,
// Find the frame associated with the surface. We won't erase it from
// |output_frames_| yet, as the decoder might still be using it for reference.
DCHECK_EQ(output_frames_.count(va_surface->id()), 1u);
- OutputFrameTask(output_frames_[va_surface->id()], visible_rect, timestamp);
-}
-
-void VaapiVideoDecoder::OutputFrameTask(scoped_refptr<VideoFrame> video_frame,
- const gfx::Rect& visible_rect,
- base::TimeDelta timestamp) {
- DCHECK_CALLED_ON_VALID_SEQUENCE(decoder_sequence_checker_);
- DCHECK_EQ(state_, State::kDecoding);
- DCHECK(video_frame);
- DVLOGF(4);
+ scoped_refptr<VideoFrame> video_frame = output_frames_[va_surface->id()];
// Set the timestamp at which the decode operation started on the
// |video_frame|. If the frame has been outputted before (e.g. because of VP9
@@ -389,6 +378,10 @@ void VaapiVideoDecoder::OutputFrameTask(scoped_refptr<VideoFrame> video_frame,
video_frame = std::move(wrapped_frame);
}
+ const auto gfx_color_space = color_space.ToGfxColorSpace();
+ if (gfx_color_space.IsValid())
+ video_frame->set_color_space(gfx_color_space);
+
output_cb_.Run(std::move(video_frame));
}
@@ -403,12 +396,18 @@ void VaapiVideoDecoder::ApplyResolutionChange() {
gfx::Size natural_size = GetNaturalSize(visible_rect, pixel_aspect_ratio_);
pic_size_ = decoder_->GetPicSize();
const base::Optional<VideoPixelFormat> format =
- GfxBufferFormatToVideoPixelFormat(GetBufferFormat());
+ GfxBufferFormatToVideoPixelFormat(
+ GetBufferFormat(decoder_->GetProfile()));
CHECK(format);
auto format_fourcc = Fourcc::FromVideoPixelFormat(*format);
CHECK(format_fourcc);
- frame_pool_->Initialize(*format_fourcc, pic_size_, visible_rect, natural_size,
- decoder_->GetRequiredNumOfPictures());
+ if (!frame_pool_->Initialize(*format_fourcc, pic_size_, visible_rect,
+ natural_size,
+ decoder_->GetRequiredNumOfPictures())) {
+ DLOG(WARNING) << "Failed Initialize()ing the frame pool.";
+ SetState(State::kError);
+ return;
+ }
// All pending decode operations will be completed before triggering a
// resolution change, so we can safely destroy the context here.
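The decoder change keeps two mappings in sync: VP9 profiles 2/3 carry 10-bit content, so the VA render-target format and the graphics-buffer format must both switch to their 10-bit variants. A condensed, illustrative sketch (string constants stand in for the real enum values from <va/va.h> and gfx):

```cpp
#include <iostream>
#include <string>

enum class Profile { kVP9Profile0, kVP9Profile2, kVP9Profile3, kOther };

std::string VaRtFormatFor(Profile p) {
  if (p == Profile::kVP9Profile2 || p == Profile::kVP9Profile3)
    return "VA_RT_FORMAT_YUV420_10BPP";
  return "VA_RT_FORMAT_YUV420";
}

std::string BufferFormatFor(Profile p) {
  if (p == Profile::kVP9Profile2 || p == Profile::kVP9Profile3)
    return "gfx::BufferFormat::P010";  // 10-bit biplanar.
  return "gfx::BufferFormat::YUV_420_BIPLANAR";  // NV12.
}

int main() {
  for (Profile p : {Profile::kVP9Profile0, Profile::kVP9Profile2}) {
    std::cout << VaRtFormatFor(p) << " / " << BufferFormatFor(p) << '\n';
  }
}
```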
diff --git a/chromium/media/gpu/vaapi/vaapi_video_decoder.h b/chromium/media/gpu/vaapi/vaapi_video_decoder.h
index db186f14734..d7a4d3e18c0 100644
--- a/chromium/media/gpu/vaapi/vaapi_video_decoder.h
+++ b/chromium/media/gpu/vaapi/vaapi_video_decoder.h
@@ -102,10 +102,6 @@ class VaapiVideoDecoder : public DecoderInterface,
// resetting or destroying the decoder, or encountering an error.
void ClearDecodeTaskQueue(DecodeStatus status);
- // Output a single |video_frame| on the decoder thread.
- void OutputFrameTask(scoped_refptr<VideoFrame> video_frame,
- const gfx::Rect& visible_rect,
- base::TimeDelta timestamp);
// Release the video frame associated with the specified |surface_id| on the
// decoder thread. This is called when the last reference to the associated
// VASurface has been released, which happens when the decoder outputted the
diff --git a/chromium/media/gpu/vaapi/vaapi_video_encode_accelerator.cc b/chromium/media/gpu/vaapi/vaapi_video_encode_accelerator.cc
index c7ae04b8be9..528c424a1a8 100644
--- a/chromium/media/gpu/vaapi/vaapi_video_encode_accelerator.cc
+++ b/chromium/media/gpu/vaapi/vaapi_video_encode_accelerator.cc
@@ -18,6 +18,7 @@
#include "base/bind_helpers.h"
#include "base/bits.h"
#include "base/callback.h"
+#include "base/callback_helpers.h"
#include "base/macros.h"
#include "base/memory/ptr_util.h"
#include "base/metrics/histogram_macros.h"
@@ -77,12 +78,6 @@ static void ReportToUMA(VAVEAEncoderFailure failure) {
// requirements.
gfx::Size GetInputFrameSize(VideoPixelFormat format,
const gfx::Size& visible_size) {
- if (format == PIXEL_FORMAT_I420) {
- // Since we don't have gfx::BufferFormat for I420, replace I420 with YV12.
- // Remove this workaround once crrev.com/c/1573718 is landed.
- format = PIXEL_FORMAT_YV12;
- }
-
std::unique_ptr<::gpu::GpuMemoryBufferFactory> gpu_memory_buffer_factory =
::gpu::GpuMemoryBufferFactory::CreateNativeType(nullptr);
// Get a VideoFrameLayout of a graphic buffer with the same gfx::BufferUsage
@@ -168,6 +163,8 @@ struct VaapiVideoEncodeAccelerator::BitstreamBufferRef {
VideoEncodeAccelerator::SupportedProfiles
VaapiVideoEncodeAccelerator::GetSupportedProfiles() {
+ if (IsConfiguredForTesting())
+ return supported_profiles_for_testing_;
return VaapiWrapper::GetSupportedEncodeProfiles();
}
@@ -341,13 +338,18 @@ bool VaapiVideoEncodeAccelerator::Initialize(const Config& config,
return false;
}
- vaapi_wrapper_ = VaapiWrapper::CreateForVideoCodec(
- VaapiWrapper::kEncode, config.output_profile,
- base::Bind(&ReportToUMA, VAAPI_ERROR));
- if (!vaapi_wrapper_) {
- VLOGF(1) << "Failed initializing VAAPI for profile "
- << GetProfileName(config.output_profile);
- return false;
+ DCHECK_EQ(IsConfiguredForTesting(), !!vaapi_wrapper_);
+ if (!IsConfiguredForTesting()) {
+ VaapiWrapper::CodecMode mode =
+ codec == kCodecVP9 ? VaapiWrapper::kEncodeConstantQuantizationParameter
+ : VaapiWrapper::kEncode;
+ vaapi_wrapper_ = VaapiWrapper::CreateForVideoCodec(
+ mode, config.output_profile, base::Bind(&ReportToUMA, VAAPI_ERROR));
+ if (!vaapi_wrapper_) {
+ VLOGF(1) << "Failed initializing VAAPI for profile "
+ << GetProfileName(config.output_profile);
+ return false;
+ }
}
// Finish remaining initialization on the encoder thread.
@@ -363,33 +365,46 @@ void VaapiVideoEncodeAccelerator::InitializeTask(const Config& config) {
VLOGF(2);
VideoCodec codec = VideoCodecProfileToVideoCodec(config.output_profile);
+ AcceleratedVideoEncoder::Config ave_config{};
+ DCHECK_EQ(IsConfiguredForTesting(), !!encoder_);
switch (codec) {
case kCodecH264:
- encoder_ = std::make_unique<H264Encoder>(
- std::make_unique<H264Accelerator>(this));
+ if (!IsConfiguredForTesting()) {
+ encoder_ = std::make_unique<H264Encoder>(
+ std::make_unique<H264Accelerator>(this));
+ }
+ DCHECK_EQ(ave_config.bitrate_control,
+ AcceleratedVideoEncoder::BitrateControl::kConstantBitrate);
break;
-
case kCodecVP8:
- encoder_ =
- std::make_unique<VP8Encoder>(std::make_unique<VP8Accelerator>(this));
+ if (!IsConfiguredForTesting()) {
+ encoder_ = std::make_unique<VP8Encoder>(
+ std::make_unique<VP8Accelerator>(this));
+ }
+ DCHECK_EQ(ave_config.bitrate_control,
+ AcceleratedVideoEncoder::BitrateControl::kConstantBitrate);
break;
-
case kCodecVP9:
- encoder_ =
- std::make_unique<VP9Encoder>(std::make_unique<VP9Accelerator>(this));
+ if (!IsConfiguredForTesting()) {
+ encoder_ = std::make_unique<VP9Encoder>(
+ std::make_unique<VP9Accelerator>(this));
+ }
+ ave_config.bitrate_control = AcceleratedVideoEncoder::BitrateControl::
+ kConstantQuantizationParameter;
break;
-
default:
NOTREACHED() << "Unsupported codec type " << GetCodecName(codec);
return;
}
- AcceleratedVideoEncoder::Config ave_config;
if (!vaapi_wrapper_->GetVAEncMaxNumOfRefFrames(
- config.output_profile, &ave_config.max_num_ref_frames))
+ config.output_profile, &ave_config.max_num_ref_frames)) {
+ NOTIFY_ERROR(kPlatformFailureError,
+ "Failed getting max number of reference frames"
+ "supported by the driver");
return;
+ }
DCHECK_GT(ave_config.max_num_ref_frames, 0u);
-
if (!encoder_->Initialize(config, ave_config)) {
NOTIFY_ERROR(kInvalidArgumentError, "Failed initializing encoder");
return;
@@ -409,13 +424,17 @@ void VaapiVideoEncodeAccelerator::InitializeTask(const Config& config) {
expected_input_coded_size_.width() <= encoder_->GetCodedSize().width() &&
expected_input_coded_size_.height() <= encoder_->GetCodedSize().height());
- // The aligned surface size must be the same as a size of a native graphic
- // buffer.
- aligned_va_surface_size_ =
- GetInputFrameSize(config.input_format, config.input_visible_size);
- if (aligned_va_surface_size_.IsEmpty()) {
- NOTIFY_ERROR(kPlatformFailureError, "Failed to get frame size");
- return;
+ DCHECK_EQ(IsConfiguredForTesting(), !aligned_va_surface_size_.IsEmpty());
+ if (!IsConfiguredForTesting()) {
+ // The aligned VA surface size must be the same as the size of a native
+ // graphics buffer. Since the VA surface's format is NV12, we specify NV12
+ // to query the size of the native graphics buffer.
+ aligned_va_surface_size_ =
+ GetInputFrameSize(PIXEL_FORMAT_NV12, config.input_visible_size);
+ if (aligned_va_surface_size_.IsEmpty()) {
+ NOTIFY_ERROR(kPlatformFailureError, "Failed to get frame size");
+ return;
+ }
}
va_surfaces_per_video_frame_ =
@@ -536,6 +555,19 @@ void VaapiVideoEncodeAccelerator::SubmitH264BitstreamBuffer(
}
}
+void VaapiVideoEncodeAccelerator::NotifyEncodedChunkSize(
+ VABufferID buffer_id,
+ VASurfaceID sync_surface_id) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(encoder_sequence_checker_);
+ const uint64_t encoded_chunk_size =
+ vaapi_wrapper_->GetEncodedChunkSize(buffer_id, sync_surface_id);
+ if (encoded_chunk_size == 0)
+ NOTIFY_ERROR(kPlatformFailureError, "Failed getting an encoded chunksize");
+
+ DCHECK(encoder_);
+ encoder_->BitrateControlUpdate(encoded_chunk_size);
+}
+
void VaapiVideoEncodeAccelerator::TryToReturnBitstreamBuffer() {
DCHECK_CALLED_ON_VALID_SEQUENCE(encoder_sequence_checker_);
@@ -567,26 +599,27 @@ void VaapiVideoEncodeAccelerator::ReturnBitstreamBuffer(
std::unique_ptr<VaapiEncodeJob> encode_job,
std::unique_ptr<BitstreamBufferRef> buffer) {
DCHECK_CALLED_ON_VALID_SEQUENCE(encoder_sequence_checker_);
+ const VABufferID coded_buffer_id = encode_job->coded_buffer_id();
+ base::ScopedClosureRunner scoped_buffer(
+ base::BindOnce(&VaapiWrapper::DestroyVABuffer,
+ base::Unretained(vaapi_wrapper_.get()), coded_buffer_id));
uint8_t* target_data = static_cast<uint8_t*>(buffer->shm->memory());
size_t data_size = 0;
-
if (!vaapi_wrapper_->DownloadFromVABuffer(
encode_job->coded_buffer_id(), encode_job->input_surface()->id(),
target_data, buffer->shm->size(), &data_size)) {
NOTIFY_ERROR(kPlatformFailureError, "Failed downloading coded buffer");
return;
}
-
DVLOGF(4) << "Returning bitstream buffer "
<< (encode_job->IsKeyframeRequested() ? "(keyframe)" : "")
<< " id: " << buffer->id << " size: " << data_size;
+ scoped_buffer.RunAndReset();
child_task_runner_->PostTask(
FROM_HERE, base::BindOnce(&Client::BitstreamBufferReady, client_,
buffer->id, encode_job->Metadata(data_size)));
-
- vaapi_wrapper_->DestroyVABuffer(encode_job->coded_buffer_id());
}
void VaapiVideoEncodeAccelerator::Encode(scoped_refptr<VideoFrame> frame,
@@ -1476,6 +1509,25 @@ bool VaapiVideoEncodeAccelerator::VP9Accelerator::SubmitFrameParameters(
pic_param.log2_tile_rows = frame_header->tile_rows_log2;
pic_param.log2_tile_columns = frame_header->tile_cols_log2;
+ job->AddSetupCallback(
+ base::BindOnce(&VaapiVideoEncodeAccelerator::SubmitBuffer,
+ base::Unretained(vea_), VAEncSequenceParameterBufferType,
+ MakeRefCountedBytes(&seq_param, sizeof(seq_param))));
+
+ job->AddSetupCallback(
+ base::BindOnce(&VaapiVideoEncodeAccelerator::SubmitBuffer,
+ base::Unretained(vea_), VAEncPictureParameterBufferType,
+ MakeRefCountedBytes(&pic_param, sizeof(pic_param))));
+
+ if (bitrate_control_ ==
+ AcceleratedVideoEncoder::BitrateControl::kConstantQuantizationParameter) {
+ job->AddPostExecuteCallback(base::BindOnce(
+ &VaapiVideoEncodeAccelerator::NotifyEncodedChunkSize,
+ base::Unretained(vea_), job->AsVaapiEncodeJob()->coded_buffer_id(),
+ job->AsVaapiEncodeJob()->input_surface()->id()));
+ return true;
+ }
+
VAEncMiscParameterRateControl rate_control_param = {};
rate_control_param.bits_per_second =
encode_params.bitrate_allocation.GetSumBps();
@@ -1493,16 +1545,6 @@ bool VaapiVideoEncodeAccelerator::VP9Accelerator::SubmitFrameParameters(
hrd_param.buffer_size = encode_params.cpb_size_bits;
hrd_param.initial_buffer_fullness = hrd_param.buffer_size / 2;
- job->AddSetupCallback(
- base::BindOnce(&VaapiVideoEncodeAccelerator::SubmitBuffer,
- base::Unretained(vea_), VAEncSequenceParameterBufferType,
- MakeRefCountedBytes(&seq_param, sizeof(seq_param))));
-
- job->AddSetupCallback(
- base::BindOnce(&VaapiVideoEncodeAccelerator::SubmitBuffer,
- base::Unretained(vea_), VAEncPictureParameterBufferType,
- MakeRefCountedBytes(&pic_param, sizeof(pic_param))));
-
job->AddSetupCallback(base::BindOnce(
&VaapiVideoEncodeAccelerator::SubmitVAEncMiscParamBuffer,
base::Unretained(vea_), VAEncMiscParameterTypeRateControl,
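Putting the pieces together, a minimal sketch of the constant-QP feedback loop this file wires up: a post-execute callback reads the encoded chunk size back and feeds it to the encoder's rate controller. All types below are stand-ins for the real classes:

```cpp
#include <cstdint>
#include <iostream>

struct FakeRateController {
  void BitrateControlUpdate(uint64_t encoded_chunk_size_bytes) {
    // A real controller (e.g. libvpx's VP9 RC) would adjust the next QP here.
    std::cout << "observed chunk of " << encoded_chunk_size_bytes << " bytes\n";
  }
};

uint64_t GetEncodedChunkSize(/*VABufferID, VASurfaceID omitted*/) {
  return 1234;  // Pretend the driver reported this many coded bytes.
}

void NotifyEncodedChunkSize(FakeRateController& encoder) {
  const uint64_t encoded_chunk_size = GetEncodedChunkSize();
  if (encoded_chunk_size == 0) {
    std::cout << "platform failure: empty coded buffer\n";
    return;
  }
  encoder.BitrateControlUpdate(encoded_chunk_size);
}

int main() {
  FakeRateController encoder;
  NotifyEncodedChunkSize(encoder);  // Runs as a post-execute callback.
}
```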
diff --git a/chromium/media/gpu/vaapi/vaapi_video_encode_accelerator.h b/chromium/media/gpu/vaapi/vaapi_video_encode_accelerator.h
index cdb90fd455d..ec0f1ca5860 100644
--- a/chromium/media/gpu/vaapi/vaapi_video_encode_accelerator.h
+++ b/chromium/media/gpu/vaapi/vaapi_video_encode_accelerator.h
@@ -35,7 +35,7 @@ class MEDIA_GPU_EXPORT VaapiVideoEncodeAccelerator
~VaapiVideoEncodeAccelerator() override;
// VideoEncodeAccelerator implementation.
- VideoEncodeAccelerator::SupportedProfiles GetSupportedProfiles() override;
+ SupportedProfiles GetSupportedProfiles() override;
bool Initialize(const Config& config, Client* client) override;
void Encode(scoped_refptr<VideoFrame> frame, bool force_keyframe) override;
void UseOutputBitstreamBuffer(BitstreamBuffer buffer) override;
@@ -49,6 +49,7 @@ class MEDIA_GPU_EXPORT VaapiVideoEncodeAccelerator
bool IsFlushSupported() override;
private:
+ friend class VaapiVideoEncodeAcceleratorTest;
class H264Accelerator;
class VP8Accelerator;
class VP9Accelerator;
@@ -148,6 +149,15 @@ class MEDIA_GPU_EXPORT VaapiVideoEncodeAccelerator
// Submits a H264BitstreamBuffer |buffer| to the driver.
void SubmitH264BitstreamBuffer(scoped_refptr<H264BitstreamBuffer> buffer);
+ // Gets the size of the encoded chunk in the buffer whose id is |buffer_id|
+ // and notifies |encoder_| of that size.
+ void NotifyEncodedChunkSize(VABufferID buffer_id,
+ VASurfaceID sync_surface_id);
+
+ bool IsConfiguredForTesting() const {
+ return !supported_profiles_for_testing_.empty();
+ }
+
// The unchanged values are filled upon the construction. The varied values
// (e.g. ScalingSettings) are filled properly during encoding.
VideoEncoderInfo encoder_info_;
@@ -240,6 +250,9 @@ class MEDIA_GPU_EXPORT VaapiVideoEncodeAccelerator
// The completion callback of the Flush() function.
FlushCallback flush_callback_;
+ // Supported profiles that are filled in if and only if running in a unit
+ // test.
+ SupportedProfiles supported_profiles_for_testing_;
+
// WeakPtr of this, bound to |child_task_runner_|.
base::WeakPtr<VaapiVideoEncodeAccelerator> child_weak_this_;
// WeakPtr of this, bound to |encoder_task_runner_|.
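A hedged sketch of the test seam this header introduces: a member that is non-empty only when injected by a unit test doubles as the am-I-under-test flag, letting initialization skip creating real VA-API objects. The Encoder class below is a hypothetical reduction:

```cpp
#include <iostream>
#include <vector>

class Encoder {
 public:
  bool Initialize() {
    if (!IsConfiguredForTesting()) {
      // Production: create the real VA-API wrapper and codec encoder here.
      std::cout << "creating real VA-API objects\n";
    } else {
      // Tests pre-injected mocks, so creation is skipped.
      std::cout << "using injected mocks\n";
    }
    return true;
  }

  // The test fixture (a friend in the real class) fills this in.
  std::vector<int> supported_profiles_for_testing_;

 private:
  bool IsConfiguredForTesting() const {
    return !supported_profiles_for_testing_.empty();
  }
};

int main() {
  Encoder production;
  production.Initialize();

  Encoder under_test;
  under_test.supported_profiles_for_testing_.push_back(1);
  under_test.Initialize();
}
```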
diff --git a/chromium/media/gpu/vaapi/vaapi_video_encode_accelerator_unittest.cc b/chromium/media/gpu/vaapi/vaapi_video_encode_accelerator_unittest.cc
index 01bfbb3a6e0..896a7251dbc 100644
--- a/chromium/media/gpu/vaapi/vaapi_video_encode_accelerator_unittest.cc
+++ b/chromium/media/gpu/vaapi/vaapi_video_encode_accelerator_unittest.cc
@@ -4,22 +4,48 @@
#include "media/gpu/vaapi/vaapi_video_encode_accelerator.h"
+#include <memory>
+#include <numeric>
+#include <vector>
+
+#include "base/run_loop.h"
+#include "base/test/gmock_callback_support.h"
#include "base/test/task_environment.h"
#include "media/video/video_encode_accelerator.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
+using base::test::RunClosure;
+using ::testing::_;
+using ::testing::Return;
+using ::testing::WithArgs;
+
namespace media {
namespace {
constexpr gfx::Size kDefaultEncodeSize(1280, 720);
constexpr uint32_t kDefaultBitrateBps = 4 * 1000 * 1000;
constexpr uint32_t kDefaultFramerate = 30;
-const VideoEncodeAccelerator::Config kDefaultVEAConfig(PIXEL_FORMAT_I420,
- kDefaultEncodeSize,
- VP8PROFILE_ANY,
- kDefaultBitrateBps,
- kDefaultFramerate);
+constexpr size_t kMaxNumOfRefFrames = 3u;
+const VideoEncodeAccelerator::Config kDefaultVideoEncodeAcceleratorConfig(
+ PIXEL_FORMAT_I420,
+ kDefaultEncodeSize,
+ VP9PROFILE_PROFILE0,
+ kDefaultBitrateBps,
+ kDefaultFramerate);
+
+MATCHER_P2(MatchesAcceleratedVideoEncoderConfig,
+ max_ref_frames,
+ bitrate_control,
+ "") {
+ return arg.max_num_ref_frames == max_ref_frames &&
+ arg.bitrate_control == bitrate_control;
+}
+
+MATCHER_P2(MatchesBitstreamBufferMetadata, payload_size_bytes, key_frame, "") {
+ return arg.payload_size_bytes == payload_size_bytes &&
+ arg.key_frame == key_frame;
+}
class MockVideoEncodeAcceleratorClient : public VideoEncodeAccelerator::Client {
public:
@@ -27,35 +53,239 @@ class MockVideoEncodeAcceleratorClient : public VideoEncodeAccelerator::Client {
virtual ~MockVideoEncodeAcceleratorClient() = default;
MOCK_METHOD3(RequireBitstreamBuffers,
- void(unsigned int, const gfx::Size&, size_t output_buffer_size));
+ void(unsigned int, const gfx::Size&, size_t));
MOCK_METHOD2(BitstreamBufferReady,
void(int32_t, const BitstreamBufferMetadata&));
MOCK_METHOD1(NotifyError, void(VideoEncodeAccelerator::Error));
- MOCK_METHOD1(NotifyEncoderInfoChange, void(const VideoEncoderInfo& info));
+ MOCK_METHOD1(NotifyEncoderInfoChange, void(const VideoEncoderInfo&));
};
-struct VaapiVEAInitializeTestParam {
- uint8_t num_of_temporal_layers = 0;
- uint8_t num_of_spatial_layers = 0;
- bool expected_result;
+class MockVaapiWrapper : public VaapiWrapper {
+ public:
+ MockVaapiWrapper(CodecMode mode) : VaapiWrapper(mode) {}
+ MOCK_METHOD2(GetVAEncMaxNumOfRefFrames, bool(VideoCodecProfile, size_t*));
+ MOCK_METHOD5(CreateContextAndSurfaces,
+ bool(unsigned int,
+ const gfx::Size&,
+ SurfaceUsageHint,
+ size_t,
+ std::vector<VASurfaceID>*));
+ MOCK_METHOD2(CreateVABuffer, bool(size_t, VABufferID*));
+ MOCK_METHOD2(GetEncodedChunkSize, uint64_t(VABufferID, VASurfaceID));
+ MOCK_METHOD5(DownloadFromVABuffer,
+ bool(VABufferID, VASurfaceID, uint8_t*, size_t, size_t*));
+ MOCK_METHOD3(UploadVideoFrameToSurface,
+ bool(const VideoFrame&, VASurfaceID, const gfx::Size&));
+ MOCK_METHOD1(ExecuteAndDestroyPendingBuffers, bool(VASurfaceID));
+ MOCK_METHOD1(DestroyVABuffer, void(VABufferID));
+ MOCK_METHOD0(DestroyContext, void());
+ MOCK_METHOD1(DestroySurfaces, void(std::vector<VASurfaceID> va_surface_ids));
+
+ private:
+ ~MockVaapiWrapper() override = default;
+};
+
+class MockAcceleratedVideoEncoder : public AcceleratedVideoEncoder {
+ public:
+ MOCK_METHOD2(Initialize,
+ bool(const VideoEncodeAccelerator::Config&,
+ const AcceleratedVideoEncoder::Config&));
+ MOCK_CONST_METHOD0(GetCodedSize, gfx::Size());
+ MOCK_CONST_METHOD0(GetBitstreamBufferSize, size_t());
+ MOCK_CONST_METHOD0(GetMaxNumOfRefFrames, size_t());
+ MOCK_METHOD1(PrepareEncodeJob, bool(EncodeJob*));
+ MOCK_METHOD1(BitrateControlUpdate, void(uint64_t));
+ bool UpdateRates(const VideoBitrateAllocation&, uint32_t) override {
+ return false;
+ }
+ ScalingSettings GetScalingSettings() const override {
+ return ScalingSettings();
+ }
};
+} // namespace
+
+struct VaapiVideoEncodeAcceleratorTestParam;
-class VaapiVEAInitializeTest
- : public ::testing::TestWithParam<VaapiVEAInitializeTestParam> {
+class VaapiVideoEncodeAcceleratorTest
+ : public ::testing::TestWithParam<VaapiVideoEncodeAcceleratorTestParam> {
protected:
- VaapiVEAInitializeTest() = default;
- ~VaapiVEAInitializeTest() override = default;
+ VaapiVideoEncodeAcceleratorTest() = default;
+ ~VaapiVideoEncodeAcceleratorTest() override = default;
+
+ void SetUp() override {
+ mock_vaapi_wrapper_ =
+ base::MakeRefCounted<MockVaapiWrapper>(VaapiWrapper::kEncode);
+ encoder_.reset(new VaapiVideoEncodeAccelerator);
+ auto* vaapi_encoder =
+ reinterpret_cast<VaapiVideoEncodeAccelerator*>(encoder_.get());
+ vaapi_encoder->vaapi_wrapper_ = mock_vaapi_wrapper_;
+ vaapi_encoder->encoder_ = std::make_unique<MockAcceleratedVideoEncoder>();
+ mock_encoder_ = reinterpret_cast<MockAcceleratedVideoEncoder*>(
+ vaapi_encoder->encoder_.get());
+ }
+
+ void SetDefaultMocksBehavior(const VideoEncodeAccelerator::Config& config) {
+ ASSERT_TRUE(mock_vaapi_wrapper_);
+ ASSERT_TRUE(mock_encoder_);
+
+ ON_CALL(*mock_vaapi_wrapper_, GetVAEncMaxNumOfRefFrames)
+ .WillByDefault(WithArgs<1>([](size_t* max_ref_frames) {
+ *max_ref_frames = kMaxNumOfRefFrames;
+ return true;
+ }));
+
+ ON_CALL(*mock_encoder_, GetBitstreamBufferSize)
+ .WillByDefault(Return(config.input_visible_size.GetArea()));
+ ON_CALL(*mock_encoder_, GetCodedSize())
+ .WillByDefault(Return(config.input_visible_size));
+ ON_CALL(*mock_encoder_, GetMaxNumOfRefFrames())
+ .WillByDefault(Return(kMaxNumOfRefFrames));
+ }
+
+ bool InitializeVideoEncodeAccelerator(
+ const VideoEncodeAccelerator::Config& config) {
+ VideoEncodeAccelerator::SupportedProfile profile(config.output_profile,
+ config.input_visible_size);
+ auto* vaapi_encoder =
+ reinterpret_cast<VaapiVideoEncodeAccelerator*>(encoder_.get());
+ vaapi_encoder->supported_profiles_for_testing_.push_back(profile);
+ vaapi_encoder->aligned_va_surface_size_ = config.input_visible_size;
+ if (config.input_visible_size.IsEmpty())
+ return false;
+ return encoder_->Initialize(config, &client_);
+ }
+
+ void InitializeSequenceForVP9(const VideoEncodeAccelerator::Config& config) {
+ base::RunLoop run_loop;
+ base::Closure quit_closure = run_loop.QuitClosure();
+ ::testing::InSequence s;
+ constexpr auto kBitrateControl =
+ AcceleratedVideoEncoder::BitrateControl::kConstantQuantizationParameter;
+ EXPECT_CALL(*mock_encoder_,
+ Initialize(_, MatchesAcceleratedVideoEncoderConfig(
+ kMaxNumOfRefFrames, kBitrateControl)))
+ .WillOnce(Return(true));
+ EXPECT_CALL(*mock_vaapi_wrapper_,
+ CreateContextAndSurfaces(
+ _, kDefaultEncodeSize,
+ VaapiWrapper::SurfaceUsageHint::kVideoEncoder, _, _))
+ .WillOnce(WithArgs<3, 4>(
+ [&surfaces = this->va_surfaces_](
+ size_t num_surfaces, std::vector<VASurfaceID>* va_surface_ids) {
+ surfaces.resize(num_surfaces);
+ std::iota(surfaces.begin(), surfaces.end(), 0);
+ *va_surface_ids = surfaces;
+ return true;
+ }));
+ EXPECT_CALL(client_, RequireBitstreamBuffers(_, kDefaultEncodeSize, _))
+ .WillOnce(WithArgs<2>([this, &quit_closure](size_t output_buffer_size) {
+ this->output_buffer_size_ = output_buffer_size;
+ quit_closure.Run();
+ }));
+ ASSERT_TRUE(InitializeVideoEncodeAccelerator(config));
+ run_loop.Run();
+ }
+
+ void EncodeSequenceForVP9() {
+ base::RunLoop run_loop;
+ base::Closure quit_closure = run_loop.QuitClosure();
+ ::testing::InSequence s;
+
+ constexpr VABufferID kCodedBufferId = 123;
+ EXPECT_CALL(*mock_vaapi_wrapper_, CreateVABuffer(output_buffer_size_, _))
+ .WillOnce(WithArgs<1>([](VABufferID* va_buffer_id) {
+ *va_buffer_id = kCodedBufferId;
+ return true;
+ }));
+
+ ASSERT_FALSE(va_surfaces_.empty());
+ const VASurfaceID kInputSurfaceId = va_surfaces_.back();
+ EXPECT_CALL(*mock_encoder_, PrepareEncodeJob(_))
+ .WillOnce(WithArgs<0>(
+ [encoder = encoder_.get(), kCodedBufferId,
+ kInputSurfaceId](AcceleratedVideoEncoder::EncodeJob* job) {
+ job->AddPostExecuteCallback(base::BindOnce(
+ &VaapiVideoEncodeAccelerator::NotifyEncodedChunkSize,
+ base::Unretained(
+ reinterpret_cast<VaapiVideoEncodeAccelerator*>(encoder)),
+ kCodedBufferId, kInputSurfaceId));
+ return true;
+ }));
+ EXPECT_CALL(
+ *mock_vaapi_wrapper_,
+ UploadVideoFrameToSurface(_, kInputSurfaceId, kDefaultEncodeSize))
+ .WillOnce(Return(true));
+ EXPECT_CALL(*mock_vaapi_wrapper_,
+ ExecuteAndDestroyPendingBuffers(kInputSurfaceId))
+ .WillOnce(Return(true));
+
+ constexpr uint64_t kEncodedChunkSize = 1234;
+ ASSERT_LE(kEncodedChunkSize, output_buffer_size_);
+ EXPECT_CALL(*mock_vaapi_wrapper_,
+ GetEncodedChunkSize(kCodedBufferId, kInputSurfaceId))
+ .WillOnce(Return(kEncodedChunkSize));
+ EXPECT_CALL(*mock_encoder_, BitrateControlUpdate(kEncodedChunkSize))
+ .WillOnce(Return());
+ EXPECT_CALL(*mock_vaapi_wrapper_,
+ DownloadFromVABuffer(kCodedBufferId, kInputSurfaceId, _,
+ output_buffer_size_, _))
+ .WillOnce(WithArgs<4>([](size_t* coded_data_size) {
+ *coded_data_size = kEncodedChunkSize;
+ return true;
+ }));
+ EXPECT_CALL(*mock_vaapi_wrapper_, DestroyVABuffer(kCodedBufferId))
+ .WillOnce(Return());
+
+ constexpr int32_t kBitstreamId = 12;
+ EXPECT_CALL(client_, BitstreamBufferReady(kBitstreamId,
+ MatchesBitstreamBufferMetadata(
+ kEncodedChunkSize, false)))
+ .WillOnce(RunClosure(quit_closure));
+
+ auto region = base::UnsafeSharedMemoryRegion::Create(output_buffer_size_);
+ ASSERT_TRUE(region.IsValid());
+ encoder_->UseOutputBitstreamBuffer(
+ BitstreamBuffer(kBitstreamId, std::move(region), output_buffer_size_));
+
+ auto frame = VideoFrame::CreateFrame(PIXEL_FORMAT_I420, kDefaultEncodeSize,
+ gfx::Rect(kDefaultEncodeSize),
+ kDefaultEncodeSize, base::TimeDelta());
+ ASSERT_TRUE(frame);
+ encoder_->Encode(std::move(frame), false /* force_keyframe */);
+ run_loop.Run();
+ }
+
+ size_t output_buffer_size_ = 0;
+ std::vector<VASurfaceID> va_surfaces_;
base::test::TaskEnvironment task_environment_;
+ MockVideoEncodeAcceleratorClient client_;
+ std::unique_ptr<VideoEncodeAccelerator> encoder_;
+ scoped_refptr<MockVaapiWrapper> mock_vaapi_wrapper_;
+ MockAcceleratedVideoEncoder* mock_encoder_ = nullptr;
};
-TEST_P(VaapiVEAInitializeTest, SpatialLayerAndTemporalLayerEncoding) {
- VideoEncodeAccelerator::Config config = kDefaultVEAConfig;
- const uint8_t num_of_temporal_layers = GetParam().num_of_temporal_layers;
+struct VaapiVideoEncodeAcceleratorTestParam {
+ uint8_t num_of_spatial_layers = 0;
+ uint8_t num_of_temporal_layers = 0;
+} kTestCases[]{
+ {1u, 1u}, // Single spatial layer, single temporal layer.
+ {1u, 3u}, // Single spatial layer, multiple temporal layers.
+ {3u, 1u}, // Multiple spatial layers, single temporal layer.
+ {3u, 3u}, // Multiple spatial layers, multiple temporal layers.
+};
+
+TEST_P(VaapiVideoEncodeAcceleratorTest,
+ InitializeVP9WithMultipleSpatialLayers) {
const uint8_t num_of_spatial_layers = GetParam().num_of_spatial_layers;
+ if (num_of_spatial_layers <= 1)
+ GTEST_SKIP() << "Test only meant for multiple spatial layers configuration";
+
+ VideoEncodeAccelerator::Config config = kDefaultVideoEncodeAcceleratorConfig;
+ const uint8_t num_of_temporal_layers = GetParam().num_of_temporal_layers;
constexpr int kDenom[] = {4, 2, 1};
for (uint8_t i = 0; i < num_of_spatial_layers; ++i) {
VideoEncodeAccelerator::Config::SpatialLayer spatial_layer;
- int denom = kDenom[i];
+ const int denom = kDenom[i];
spatial_layer.width = kDefaultEncodeSize.width() / denom;
spatial_layer.height = kDefaultEncodeSize.height() / denom;
spatial_layer.bitrate_bps = kDefaultBitrateBps / denom;
@@ -65,18 +295,29 @@ TEST_P(VaapiVEAInitializeTest, SpatialLayerAndTemporalLayerEncoding) {
config.spatial_layers.push_back(spatial_layer);
}
- VaapiVideoEncodeAccelerator vea;
- MockVideoEncodeAcceleratorClient client;
- EXPECT_EQ(vea.Initialize(config, &client), GetParam().expected_result);
+ EXPECT_FALSE(InitializeVideoEncodeAccelerator(config));
}
-constexpr VaapiVEAInitializeTestParam kTestCases[] = {
- {1u, 3u, false}, // Spatial Layer only.
- {3u, 3u, false}, // Temporal + Spatial Layer.
-};
+TEST_P(VaapiVideoEncodeAcceleratorTest, EncodeVP9WithSingleSpatialLayer) {
+ if (GetParam().num_of_spatial_layers > 1u)
+ GTEST_SKIP() << "Test only meant for single spatial layer";
+
+ VideoEncodeAccelerator::Config config = kDefaultVideoEncodeAcceleratorConfig;
+ VideoEncodeAccelerator::Config::SpatialLayer spatial_layer;
+ spatial_layer.width = kDefaultEncodeSize.width();
+ spatial_layer.height = kDefaultEncodeSize.height();
+ spatial_layer.bitrate_bps = kDefaultBitrateBps;
+ spatial_layer.framerate = kDefaultFramerate;
+ spatial_layer.max_qp = 30;
+ spatial_layer.num_of_temporal_layers = GetParam().num_of_temporal_layers;
+ config.spatial_layers.push_back(spatial_layer);
+ SetDefaultMocksBehavior(config);
-INSTANTIATE_TEST_SUITE_P(SpatialLayerAndTemporalLayerEncoding,
- VaapiVEAInitializeTest,
+ InitializeSequenceForVP9(config);
+ EncodeSequenceForVP9();
+}
+
+INSTANTIATE_TEST_SUITE_P(,
+ VaapiVideoEncodeAcceleratorTest,
::testing::ValuesIn(kTestCases));
-} // namespace
} // namespace media
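The rewritten unit test leans on gMock's WithArgs<N> to fill out-parameters from a lambda, mimicking driver behavior. A self-contained sketch of that idiom (requires GoogleTest/GoogleMock; Driver and MockDriver are made up for illustration):

```cpp
#include "gmock/gmock.h"
#include "gtest/gtest.h"

using ::testing::WithArgs;

class Driver {
 public:
  virtual ~Driver() = default;
  virtual bool CreateBuffer(size_t size, int* buffer_id) = 0;
};

class MockDriver : public Driver {
 public:
  MOCK_METHOD2(CreateBuffer, bool(size_t, int*));
};

TEST(OutParamSketch, FillsBufferId) {
  MockDriver driver;
  // Like the CreateVABuffer expectation above: grab argument 1 (the out
  // pointer) and write the fake id the rest of the test asserts on.
  EXPECT_CALL(driver, CreateBuffer(4096, ::testing::_))
      .WillOnce(WithArgs<1>([](int* buffer_id) {
        *buffer_id = 123;
        return true;
      }));

  int id = 0;
  ASSERT_TRUE(driver.CreateBuffer(4096, &id));
  EXPECT_EQ(123, id);
}
```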
diff --git a/chromium/media/gpu/vaapi/vaapi_wrapper.cc b/chromium/media/gpu/vaapi/vaapi_wrapper.cc
index f238e6f0851..ad898555fe7 100644
--- a/chromium/media/gpu/vaapi/vaapi_wrapper.cc
+++ b/chromium/media/gpu/vaapi/vaapi_wrapper.cc
@@ -10,6 +10,7 @@
#include <va/va.h>
#include <va/va_drm.h>
#include <va/va_drmcommon.h>
+#include <va/va_str.h>
#include <va/va_version.h>
#include <algorithm>
@@ -115,8 +116,10 @@ uint32_t BufferFormatToVAFourCC(gfx::BufferFormat fmt) {
return VA_FOURCC_YV12;
case gfx::BufferFormat::YUV_420_BIPLANAR:
return VA_FOURCC_NV12;
+ case gfx::BufferFormat::P010:
+ return VA_FOURCC_P010;
default:
- NOTREACHED();
+ NOTREACHED() << gfx::BufferFormatToString(fmt);
return 0;
}
}
@@ -142,6 +145,21 @@ namespace media {
namespace {
+// Returns true if the SoC has a Gen9 GPU. CPU model IDs are referenced from
+// the following file in the kernel source: arch/x86/include/asm/intel-family.h.
+bool IsGen9Gpu() {
+ constexpr int kPentiumAndLaterFamily = 0x06;
+ constexpr int kSkyLakeModelId = 0x5E;
+ constexpr int kSkyLake_LModelId = 0x4E;
+ constexpr int kApolloLakeModelId = 0x5c;
+ static base::NoDestructor<base::CPU> cpuid;
+ static const bool is_gen9_gpu = cpuid->family() == kPentiumAndLaterFamily &&
+ (cpuid->model() == kSkyLakeModelId ||
+ cpuid->model() == kSkyLake_LModelId ||
+ cpuid->model() == kApolloLakeModelId);
+ return is_gen9_gpu;
+}
+
// Returns true if the SoC has a 9.5 GPU. CPU model IDs are referenced from the
// following file in the kernel source: arch/x86/include/asm/intel-family.h.
bool IsGen95Gpu() {
@@ -246,111 +264,18 @@ static const struct {
{H264PROFILE_HIGH, VAProfileH264High},
{VP8PROFILE_ANY, VAProfileVP8Version0_3},
{VP9PROFILE_PROFILE0, VAProfileVP9Profile0},
- // VP9 hw encode/decode on profile 1 is not enabled on chromium-vaapi.
+ // Chrome does not support VP9 Profile 1, see b/153680337.
// {VP9PROFILE_PROFILE1, VAProfileVP9Profile1},
- // TODO(crbug.com/1011454, crbug.com/1011469): Reenable VP9PROFILE_PROFILE2
- // and _PROFILE3 when P010 is completely supported.
- //{VP9PROFILE_PROFILE2, VAProfileVP9Profile2},
- //{VP9PROFILE_PROFILE3, VAProfileVP9Profile3},
+ {VP9PROFILE_PROFILE2, VAProfileVP9Profile2},
+ {VP9PROFILE_PROFILE3, VAProfileVP9Profile3},
};
-// Converts the given |va_profile| to the corresponding string.
-// See: http://go/gh/intel/libva/blob/master/va/va.h#L359
-std::string VAProfileToString(VAProfile va_profile) {
- switch (va_profile) {
- case VAProfileNone:
- return "VAProfileNone";
- case VAProfileMPEG2Simple:
- return "VAProfileMPEG2Simple";
- case VAProfileMPEG2Main:
- return "VAProfileMPEG2Main";
- case VAProfileMPEG4Simple:
- return "VAProfileMPEG4Simple";
- case VAProfileMPEG4AdvancedSimple:
- return "VAProfileMPEG4AdvancedSimple";
- case VAProfileMPEG4Main:
- return "VAProfileMPEG4Main";
- case VAProfileH264Baseline:
- return "VAProfileH264Baseline";
- case VAProfileH264Main:
- return "VAProfileH264Main";
- case VAProfileH264High:
- return "VAProfileH264High";
- case VAProfileVC1Simple:
- return "VAProfileVC1Simple";
- case VAProfileVC1Main:
- return "VAProfileVC1Main";
- case VAProfileVC1Advanced:
- return "VAProfileVC1Advanced";
- case VAProfileH263Baseline:
- return "VAProfileH263Baseline";
- case VAProfileJPEGBaseline:
- return "VAProfileJPEGBaseline";
- case VAProfileH264ConstrainedBaseline:
- return "VAProfileH264ConstrainedBaseline";
- case VAProfileVP8Version0_3:
- return "VAProfileVP8Version0_3";
- case VAProfileH264MultiviewHigh:
- return "VAProfileH264MultiviewHigh";
- case VAProfileH264StereoHigh:
- return "VAProfileH264StereoHigh";
- case VAProfileHEVCMain:
- return "VAProfileHEVCMain";
- case VAProfileHEVCMain10:
- return "VAProfileHEVCMain10";
- case VAProfileVP9Profile0:
- return "VAProfileVP9Profile0";
- case VAProfileVP9Profile1:
- return "VAProfileVP9Profile1";
- case VAProfileVP9Profile2:
- return "VAProfileVP9Profile2";
- case VAProfileVP9Profile3:
- return "VAProfileVP9Profile3";
-#if VA_MAJOR_VERSION >= 2 || (VA_MAJOR_VERSION == 1 && VA_MINOR_VERSION >= 2)
- case VAProfileHEVCMain12:
- return "VAProfileHEVCMain12";
- case VAProfileHEVCMain422_10:
- return "VAProfileHEVCMain422_10";
- case VAProfileHEVCMain422_12:
- return "VAProfileHEVCMain422_12";
- case VAProfileHEVCMain444:
- return "VAProfileHEVCMain444";
- case VAProfileHEVCMain444_10:
- return "VAProfileHEVCMain444_10";
- case VAProfileHEVCMain444_12:
- return "VAProfileHEVCMain444_12";
- case VAProfileHEVCSccMain:
- return "VAProfileHEVCSccMain";
- case VAProfileHEVCSccMain10:
- return "VAProfileHEVCSccMain10";
- case VAProfileHEVCSccMain444:
- return "VAProfileHEVCSccMain444";
-#endif
- default:
- NOTREACHED();
- return "";
- }
-}
-
bool IsBlackListedDriver(const std::string& va_vendor_string,
VaapiWrapper::CodecMode mode,
VAProfile va_profile) {
if (!IsModeEncoding(mode))
return false;
- // TODO(crbug.com/828482): Remove once H264 encoder on AMD is enabled by
- // default.
- if (VendorStringToImplementationType(va_vendor_string) ==
- VAImplementation::kMesaGallium &&
- base::Contains(va_vendor_string, "AMD STONEY") &&
- !base::FeatureList::IsEnabled(kVaapiH264AMDEncoder)) {
- constexpr VAProfile kH264Profiles[] = {VAProfileH264Baseline,
- VAProfileH264Main, VAProfileH264High,
- VAProfileH264ConstrainedBaseline};
- if (base::Contains(kH264Profiles, va_profile))
- return true;
- }
-
// TODO(posciak): Remove once VP8 encoding is to be enabled by default.
if (va_profile == VAProfileVP8Version0_3 &&
!base::FeatureList::IsEnabled(kVaapiVP8Encoder)) {
@@ -639,8 +564,8 @@ static bool GetRequiredAttribs(const base::Lock* va_lock,
VAStatus va_res =
vaGetConfigAttributes(va_display, profile, entrypoint, &attrib, 1);
if (va_res != VA_STATUS_SUCCESS) {
- LOG(ERROR) << "GetConfigAttributes failed for va_profile "
- << VAProfileToString(profile);
+ LOG(ERROR) << "vaGetConfigAttributes failed for "
+ << vaProfileStr(profile);
return false;
}
@@ -761,7 +686,7 @@ VASupportedProfiles::VASupportedProfiles()
static_assert(std::extent<decltype(supported_profiles_)>() ==
VaapiWrapper::kCodecModeMax,
- "The array size of supported profile is incorrect.");
+ "|supported_profiles_| size is incorrect.");
if (!display_state->Initialize())
return;
@@ -793,6 +718,18 @@ VASupportedProfiles::GetSupportedProfileInfosForCodecModeInternal(
VaapiWrapper::CodecMode mode) const {
std::vector<ProfileInfo> supported_profile_infos;
std::vector<VAProfile> va_profiles;
+ // VAProfiles supported by VaapiWrapper.
+ constexpr VAProfile kSupportedVaProfiles[] = {
+ VAProfileH264ConstrainedBaseline,
+ VAProfileH264Main,
+ VAProfileH264High,
+ VAProfileJPEGBaseline,
+ VAProfileVP8Version0_3,
+ VAProfileVP9Profile0,
+ // Chrome does not support VP9 Profile 1, see b/153680337.
+ // VAProfileVP9Profile1,
+ VAProfileVP9Profile2,
+ VAProfileVP9Profile3};
if (!GetSupportedVAProfiles(&va_profiles))
return supported_profile_infos;
@@ -802,6 +739,10 @@ VASupportedProfiles::GetSupportedProfileInfosForCodecModeInternal(
VADisplayState::Get()->va_vendor_string();
for (const auto& va_profile : va_profiles) {
+ if ((mode != VaapiWrapper::CodecMode::kVideoProcess) &&
+ !base::Contains(kSupportedVaProfiles, va_profile)) {
+ continue;
+ }
const std::vector<VAEntrypoint> supported_entrypoints =
GetEntryPointsForProfile(va_lock_, va_display_, mode, va_profile);
if (supported_entrypoints.empty())
@@ -824,8 +765,8 @@ VASupportedProfiles::GetSupportedProfileInfosForCodecModeInternal(
if (!FillProfileInfo_Locked(va_profile, entrypoint, required_attribs,
&profile_info)) {
LOG(ERROR) << "FillProfileInfo_Locked failed for va_profile "
- << VAProfileToString(va_profile) << " and entrypoint "
- << entrypoint;
+ << vaProfileStr(va_profile) << " and entrypoint "
+ << vaEntrypointStr(entrypoint);
continue;
}
supported_profile_infos.push_back(profile_info);
@@ -1195,6 +1136,37 @@ bool VASupportedImageFormats::InitSupportedImageFormats_Locked() {
return true;
}
+bool IsLowPowerEncSupported(VAProfile va_profile) {
+ constexpr VAProfile kSupportedLowPowerEncodeProfiles[] = {
+ VAProfileH264ConstrainedBaseline,
+ VAProfileH264Main,
+ VAProfileH264High,
+ VAProfileVP9Profile0,
+ VAProfileVP9Profile1,
+ VAProfileVP9Profile2,
+ VAProfileVP9Profile3};
+ if (!base::Contains(kSupportedLowPowerEncodeProfiles, va_profile))
+ return false;
+
+ if ((IsGen95Gpu() || IsGen9Gpu()) &&
+ !base::FeatureList::IsEnabled(kVaapiLowPowerEncoderGen9x)) {
+ return false;
+ }
+
+ const std::vector<VASupportedProfiles::ProfileInfo>& encode_profile_infos =
+ VASupportedProfiles::Get().GetSupportedProfileInfosForCodecMode(
+ VaapiWrapper::kEncode);
+
+ for (const auto& profile_info : encode_profile_infos) {
+ if (profile_info.va_profile == va_profile &&
+ profile_info.va_entrypoint == VAEntrypointEncSliceLP) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
} // namespace
NativePixmapAndSizeInfo::NativePixmapAndSizeInfo() = default;
@@ -1212,7 +1184,7 @@ scoped_refptr<VaapiWrapper> VaapiWrapper::Create(
VAProfile va_profile,
const base::Closure& report_error_to_uma_cb) {
if (!VASupportedProfiles::Get().IsProfileSupported(mode, va_profile)) {
- DVLOG(1) << "Unsupported va_profile: " << va_profile;
+ DVLOG(1) << "Unsupported va_profile: " << vaProfileStr(va_profile);
return nullptr;
}
@@ -1222,7 +1194,7 @@ scoped_refptr<VaapiWrapper> VaapiWrapper::Create(
return vaapi_wrapper;
}
LOG(ERROR) << "Failed to create VaapiWrapper for va_profile: "
- << VAProfileToString(va_profile);
+ << vaProfileStr(va_profile);
return nullptr;
}
@@ -1494,8 +1466,10 @@ VAEntrypoint VaapiWrapper::GetDefaultVaEntryPoint(CodecMode mode,
case VaapiWrapper::kEncodeConstantQuantizationParameter:
if (profile == VAProfileJPEGBaseline)
return VAEntrypointEncPicture;
- else
- return VAEntrypointEncSlice;
+ DCHECK(IsModeEncoding(mode));
+ if (IsLowPowerEncSupported(profile))
+ return VAEntrypointEncSliceLP;
+ return VAEntrypointEncSlice;
case VaapiWrapper::kVideoProcess:
return VAEntrypointVideoProc;
case VaapiWrapper::kCodecModeMax:
@@ -1515,8 +1489,10 @@ uint32_t VaapiWrapper::BufferFormatToVARTFormat(gfx::BufferFormat fmt) {
case gfx::BufferFormat::YVU_420:
case gfx::BufferFormat::YUV_420_BIPLANAR:
return VA_RT_FORMAT_YUV420;
+ case gfx::BufferFormat::P010:
+ return VA_RT_FORMAT_YUV420_10BPP;
default:
- NOTREACHED();
+ NOTREACHED() << gfx::BufferFormatToString(fmt);
return 0;
}
}
@@ -2028,6 +2004,28 @@ bool VaapiWrapper::CreateVABuffer(size_t size, VABufferID* buffer_id) {
return true;
}
+uint64_t VaapiWrapper::GetEncodedChunkSize(VABufferID buffer_id,
+ VASurfaceID sync_surface_id) {
+ TRACE_EVENT0("media,gpu", "VaapiWrapper::GetEncodedChunkSize");
+ base::AutoLock auto_lock(*va_lock_);
+ TRACE_EVENT0("media,gpu", "VaapiWrapper::GetEncodedChunkSizeLocked");
+ VAStatus va_res = vaSyncSurface(va_display_, sync_surface_id);
+ VA_SUCCESS_OR_RETURN(va_res, "vaSyncSurface", 0u);
+
+ ScopedVABufferMapping mapping(va_lock_, va_display_, buffer_id);
+ if (!mapping.IsValid())
+ return 0u;
+
+ uint64_t coded_data_size = 0;
+ for (auto* buffer_segment =
+ reinterpret_cast<VACodedBufferSegment*>(mapping.data());
+ buffer_segment; buffer_segment = reinterpret_cast<VACodedBufferSegment*>(
+ buffer_segment->next)) {
+ coded_data_size += buffer_segment->size;
+ }
+ return coded_data_size;
+}
+
bool VaapiWrapper::DownloadFromVABuffer(VABufferID buffer_id,
VASurfaceID sync_surface_id,
uint8_t* target_ptr,
@@ -2062,13 +2060,11 @@ bool VaapiWrapper::DownloadFromVABuffer(VABufferID buffer_id,
<< ", the buffer segment size: " << buffer_segment->size;
break;
}
-
memcpy(target_ptr, buffer_segment->buf, buffer_segment->size);
target_ptr += buffer_segment->size;
- *coded_data_size += buffer_segment->size;
target_size -= buffer_segment->size;
-
+ *coded_data_size += buffer_segment->size;
buffer_segment =
reinterpret_cast<VACodedBufferSegment*>(buffer_segment->next);
}
@@ -2113,10 +2109,28 @@ void VaapiWrapper::DestroyVABuffers() {
va_buffers_.clear();
}
+bool VaapiWrapper::IsRotationSupported() {
+ base::AutoLock auto_lock(*va_lock_);
+ VAProcPipelineCaps pipeline_caps;
+ memset(&pipeline_caps, 0, sizeof(pipeline_caps));
+ VAStatus va_res = vaQueryVideoProcPipelineCaps(va_display_, va_context_id_,
+ nullptr, 0, &pipeline_caps);
+ if (va_res != VA_STATUS_SUCCESS) {
+ LOG_VA_ERROR_AND_REPORT(va_res, "vaQueryVideoProcPipelineCaps failed");
+ return false;
+ }
+ if (!pipeline_caps.rotation_flags) {
+ DVLOG(2) << "VA-API driver doesn't support any rotation";
+ return false;
+ }
+ return true;
+}
+
bool VaapiWrapper::BlitSurface(const VASurface& va_surface_src,
const VASurface& va_surface_dest,
base::Optional<gfx::Rect> src_rect,
- base::Optional<gfx::Rect> dest_rect) {
+ base::Optional<gfx::Rect> dest_rect,
+ VideoRotation rotation) {
base::AutoLock auto_lock(*va_lock_);
if (va_buffers_.empty()) {
@@ -2165,6 +2179,21 @@ bool VaapiWrapper::BlitSurface(const VASurface& va_surface_src,
pipeline_param->output_color_standard = VAProcColorStandardNone;
pipeline_param->filter_flags = VA_FILTER_SCALING_DEFAULT;
+ switch (rotation) {
+ case VIDEO_ROTATION_0:
+ pipeline_param->rotation_state = VA_ROTATION_NONE;
+ break;
+ case VIDEO_ROTATION_90:
+ pipeline_param->rotation_state = VA_ROTATION_90;
+ break;
+ case VIDEO_ROTATION_180:
+ pipeline_param->rotation_state = VA_ROTATION_180;
+ break;
+ case VIDEO_ROTATION_270:
+ pipeline_param->rotation_state = VA_ROTATION_270;
+ break;
+ }
+
VA_SUCCESS_OR_RETURN(mapping.Unmap(), "Vpp Buffer unmapping", false);
}
@@ -2241,15 +2270,7 @@ bool VaapiWrapper::Initialize(CodecMode mode, VAProfile va_profile) {
}
#endif // DCHECK_IS_ON()
- if (mode != kVideoProcess)
- TryToSetVADisplayAttributeToLocalGPU();
-
- VAEntrypoint entrypoint = GetDefaultVaEntryPoint(mode, va_profile);
- if (IsModeEncoding(mode) && IsLowPowerEncSupported(va_profile, mode) &&
- base::FeatureList::IsEnabled(kVaapiLowPowerEncoder)) {
- entrypoint = VAEntrypointEncSliceLP;
- DVLOG(2) << "Enable VA-API Low-Power Encode Entrypoint";
- }
+ const VAEntrypoint entrypoint = GetDefaultVaEntryPoint(mode, va_profile);
base::AutoLock auto_lock(*va_lock_);
std::vector<VAConfigAttrib> required_attribs;
@@ -2258,7 +2279,7 @@ bool VaapiWrapper::Initialize(CodecMode mode, VAProfile va_profile) {
return false;
}
- VAStatus va_res =
+ const VAStatus va_res =
vaCreateConfig(va_display_, va_profile, entrypoint,
required_attribs.empty() ? nullptr : &required_attribs[0],
required_attribs.size(), &va_config_id_);
@@ -2456,54 +2477,4 @@ bool VaapiWrapper::Execute_Locked(VASurfaceID va_surface_id) {
return true;
}
-void VaapiWrapper::TryToSetVADisplayAttributeToLocalGPU() {
- base::AutoLock auto_lock(*va_lock_);
- VADisplayAttribute item = {VADisplayAttribRenderMode,
- 1, // At least support '_LOCAL_OVERLAY'.
- -1, // The maximum possible support 'ALL'.
- VA_RENDER_MODE_LOCAL_GPU,
- VA_DISPLAY_ATTRIB_SETTABLE};
-
- VAStatus va_res = vaSetDisplayAttributes(va_display_, &item, 1);
- if (va_res != VA_STATUS_SUCCESS)
- DVLOG(2) << "vaSetDisplayAttributes unsupported, ignoring by default.";
-}
-
-// Check the support for low-power encode
-bool VaapiWrapper::IsLowPowerEncSupported(VAProfile va_profile,
- CodecMode mode) const {
- // Enabled only for H264/AVC & VP9 Encoders
- if (va_profile != VAProfileH264ConstrainedBaseline &&
- va_profile != VAProfileH264Main && va_profile != VAProfileH264High &&
- va_profile != VAProfileVP9Profile0 && va_profile != VAProfileVP9Profile1)
- return false;
-
- constexpr VAEntrypoint kLowPowerEncEntryPoint = VAEntrypointEncSliceLP;
- std::vector<VAConfigAttrib> required_attribs;
-
- base::AutoLock auto_lock(*va_lock_);
- GetRequiredAttribs(va_lock_, va_display_, mode, va_profile,
- kLowPowerEncEntryPoint, &required_attribs);
- // Query the driver for required attributes.
- std::vector<VAConfigAttrib> attribs = required_attribs;
- for (size_t i = 0; i < required_attribs.size(); ++i)
- attribs[i].value = 0;
-
- VAStatus va_res =
- vaGetConfigAttributes(va_display_, va_profile, kLowPowerEncEntryPoint,
- &attribs[0], attribs.size());
- VA_SUCCESS_OR_RETURN(va_res, "vaGetConfigAttributes", false);
-
- for (size_t i = 0; i < required_attribs.size(); ++i) {
- if (attribs[i].type != required_attribs[i].type ||
- (attribs[i].value & required_attribs[i].value) !=
- required_attribs[i].value) {
- DVLOG(1) << "Unsupported value " << required_attribs[i].value
- << " for attribute type " << required_attribs[i].type;
- return false;
- }
- }
- return true;
-}
-
} // namespace media
diff --git a/chromium/media/gpu/vaapi/vaapi_wrapper.h b/chromium/media/gpu/vaapi/vaapi_wrapper.h
index 7f087039c58..c4d005ba456 100644
--- a/chromium/media/gpu/vaapi/vaapi_wrapper.h
+++ b/chromium/media/gpu/vaapi/vaapi_wrapper.h
@@ -322,9 +322,9 @@ class MEDIA_GPU_EXPORT VaapiWrapper
// Useful when a pending job is to be cancelled (on reset or error).
void DestroyPendingBuffers();
- // Execute job in hardware on target |va_surface_id| and destroy pending
- // buffers. Return false if Execute() fails.
- bool ExecuteAndDestroyPendingBuffers(VASurfaceID va_surface_id);
+ // Executes job in hardware on target |va_surface_id| and destroys pending
+ // buffers. Returns false if Execute() fails.
+ virtual bool ExecuteAndDestroyPendingBuffers(VASurfaceID va_surface_id);
#if defined(USE_X11)
// Put data from |va_surface_id| into |x_pixmap| of size
@@ -343,28 +343,37 @@ class MEDIA_GPU_EXPORT VaapiWrapper
VAImageFormat* format,
const gfx::Size& size);
- // Upload contents of |frame| into |va_surface_id| for encode.
- bool UploadVideoFrameToSurface(const VideoFrame& frame,
- VASurfaceID va_surface_id,
- const gfx::Size& va_surface_size);
+ // Uploads contents of |frame| into |va_surface_id| for encode.
+ virtual bool UploadVideoFrameToSurface(const VideoFrame& frame,
+ VASurfaceID va_surface_id,
+ const gfx::Size& va_surface_size);
- // Create a buffer of |size| bytes to be used as encode output.
- bool CreateVABuffer(size_t size, VABufferID* buffer_id);
+ // Creates a buffer of |size| bytes to be used as encode output.
+ virtual bool CreateVABuffer(size_t size, VABufferID* buffer_id);
- // Download the contents of the buffer with given |buffer_id| into a buffer of
- // size |target_size|, pointed to by |target_ptr|. The number of bytes
+  // Gets the linear size of the encoded frame in the buffer with the given
+  // |buffer_id|. |sync_surface_id| will be used as a sync point, i.e. it will
+  // have to become idle before the size is read. |sync_surface_id| should be
+  // the source surface passed to the encode job. Returns 0 if it fails for
+  // any reason.
+ virtual uint64_t GetEncodedChunkSize(VABufferID buffer_id,
+ VASurfaceID sync_surface_id);
+
+ // Downloads the contents of the buffer with given |buffer_id| into a buffer
+ // of size |target_size|, pointed to by |target_ptr|. The number of bytes
// downloaded will be returned in |coded_data_size|. |sync_surface_id| will
// be used as a sync point, i.e. it will have to become idle before starting
// the download. |sync_surface_id| should be the source surface passed
- // to the encode job.
- bool DownloadFromVABuffer(VABufferID buffer_id,
- VASurfaceID sync_surface_id,
- uint8_t* target_ptr,
- size_t target_size,
- size_t* coded_data_size);
+  // to the encode job. Returns false if it fails for any reason, e.g. when the
+  // linear size of the resulting encoded frame is larger than |target_size|.
+ virtual bool DownloadFromVABuffer(VABufferID buffer_id,
+ VASurfaceID sync_surface_id,
+ uint8_t* target_ptr,
+ size_t target_size,
+ size_t* coded_data_size);
// Deletes the VA buffer identified by |buffer_id|.
- void DestroyVABuffer(VABufferID buffer_id);
+ virtual void DestroyVABuffer(VABufferID buffer_id);
// Destroy all previously-allocated (and not yet destroyed) buffers.
void DestroyVABuffers();
@@ -374,23 +383,27 @@ class MEDIA_GPU_EXPORT VaapiWrapper
// For H.264 encoding, the value represents the maximum number of reference
// frames for both the reference picture list 0 (bottom 16 bits) and the
// reference picture list 1 (top 16 bits).
- bool GetVAEncMaxNumOfRefFrames(VideoCodecProfile profile,
- size_t* max_ref_frames);
+ virtual bool GetVAEncMaxNumOfRefFrames(VideoCodecProfile profile,
+ size_t* max_ref_frames);
+
+ // Checks if the driver supports frame rotation.
+ bool IsRotationSupported();
// Blits a VASurface |va_surface_src| into another VASurface
- // |va_surface_dest| applying pixel format conversion, cropping and scaling
- // if needed. |src_rect| and |dest_rect| are optional. They can be used to
- // specify the area used in the blit.
+ // |va_surface_dest| applying pixel format conversion, rotation, cropping
+ // and scaling if needed. |src_rect| and |dest_rect| are optional. They can
+ // be used to specify the area used in the blit.
bool BlitSurface(const VASurface& va_surface_src,
const VASurface& va_surface_dest,
base::Optional<gfx::Rect> src_rect = base::nullopt,
- base::Optional<gfx::Rect> dest_rect = base::nullopt);
+ base::Optional<gfx::Rect> dest_rect = base::nullopt,
+ VideoRotation rotation = VIDEO_ROTATION_0);
// Initialize static data before sandbox is enabled.
static void PreSandboxInitialization();
// vaDestroySurfaces() a vector or a single VASurfaceID.
- void DestroySurfaces(std::vector<VASurfaceID> va_surfaces);
+ virtual void DestroySurfaces(std::vector<VASurfaceID> va_surfaces);
virtual void DestroySurface(VASurfaceID va_surface_id);
protected:
@@ -425,12 +438,6 @@ class MEDIA_GPU_EXPORT VaapiWrapper
void DestroyPendingBuffers_Locked() EXCLUSIVE_LOCKS_REQUIRED(va_lock_);
- // Attempt to set render mode to "render to texture.". Failure is non-fatal.
- void TryToSetVADisplayAttributeToLocalGPU();
-
- // Check low-power encode support for |profile| and |mode|.
- bool IsLowPowerEncSupported(VAProfile va_profile, CodecMode mode) const;
-
const CodecMode mode_;
// Pointer to VADisplayState's member |va_lock_|. Guaranteed to be valid for
diff --git a/chromium/media/gpu/vaapi/vp9_encoder.cc b/chromium/media/gpu/vaapi/vp9_encoder.cc
index 140ac37af4c..0c125f02b36 100644
--- a/chromium/media/gpu/vaapi/vp9_encoder.cc
+++ b/chromium/media/gpu/vaapi/vp9_encoder.cc
@@ -4,8 +4,12 @@
#include "media/gpu/vaapi/vp9_encoder.h"
+#include <algorithm>
+
#include "base/bits.h"
#include "media/gpu/macros.h"
+#include "media/gpu/vaapi/vp9_rate_control.h"
+#include "third_party/libvpx/source/libvpx/vp9/ratectrl_rtc.h"
namespace media {
@@ -19,6 +23,9 @@ constexpr int kCPBWindowSizeMs = 500;
// Quantization parameter. They are vp9 ac/dc indices and their ranges are
// 0-255. Based on WebRTC's defaults.
constexpr int kMinQP = 4;
+// TODO(crbug.com/1060775): Relax this max quantization parameter upper bound
+// so that our encoder and bitrate controller can select a higher value in the
+// case a requested bitrate is small.
constexpr int kMaxQP = 112;
// This stands for 31 as a real ac value (see rfc 8.6.1 table
// ac_qlookup[3][256]). Note: This needs to be revisited once we have 10&12 bit
@@ -29,6 +36,84 @@ constexpr int kDefaultQP = 24;
// we set a constant value (== 10) which is what other VA-API
// implementations like libyami and gstreamer-vaapi are using.
constexpr uint8_t kDefaultLfLevel = 10;
+
+// Convert Qindex, whose range is 0-255, to the quantizer parameter used in
+// libvpx vp9 rate control, whose range is 0-63.
+// Cited from //third_party/libvpx/source/libvpx/vp9/encoder/vp9_quantize.cc.
+int QindexToQuantizer(int q_index) {
+ constexpr int kQuantizerToQindex[] = {
+ 0, 4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48,
+ 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92, 96, 100,
+ 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144, 148, 152,
+ 156, 160, 164, 168, 172, 176, 180, 184, 188, 192, 196, 200, 204,
+ 208, 212, 216, 220, 224, 228, 232, 236, 240, 244, 249, 255,
+ };
+
+ for (size_t q = 0; q < base::size(kQuantizerToQindex); ++q) {
+ if (kQuantizerToQindex[q] >= q_index)
+ return q;
+ }
+ return base::size(kQuantizerToQindex) - 1;
+}
+
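A quick spot check of the mapping, assuming the table above is copied verbatim (needs <cassert>; base::size can be swapped for std::size outside Chromium):

    assert(QindexToQuantizer(0) == 0);
    assert(QindexToQuantizer(24) == 6);    // kDefaultQP above.
    assert(QindexToQuantizer(112) == 28);  // kMaxQP above.
    assert(QindexToQuantizer(255) == 63);  // Last table entry.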
+// The return value is expressed as a percentage of the average per-frame
+// bitrate. For example, to allocate no more than 4.5 frames' worth of bitrate
+// to a keyframe, the return value is 450.
+uint32_t MaxSizeOfKeyframeAsPercentage(uint32_t optimal_buffer_size,
+ uint32_t max_framerate) {
+ // Set max to the optimal buffer level (normalized by target BR),
+ // and scaled by a scale_par.
+ // Max target size = scale_par * optimal_buffer_size * targetBR[Kbps].
+ // This value is presented in percentage of perFrameBw:
+ // perFrameBw = targetBR[Kbps] * 1000 / framerate.
+ // The target in % is as follows:
+ const double target_size_byte_per_frame = optimal_buffer_size * 0.5;
+ const uint32_t target_size_kbyte =
+ target_size_byte_per_frame * max_framerate / 1000;
+ const uint32_t target_size_kbyte_as_percent = target_size_kbyte * 100;
+
+ // Don't go below 3 times the per frame bandwidth.
+ constexpr uint32_t kMinIntraSizePercentage = 300u;
+ return std::max(kMinIntraSizePercentage, target_size_kbyte_as_percent);
+}
+
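To make the arithmetic concrete, a worked example under the buffer defaults that CreateRCConfig() sets below (buf_optimal_sz == 600):

    // At 30 fps: 600 * 0.5 == 300; 300 * 30 / 1000 == 9; 9 * 100 == 900,
    // i.e. a keyframe may take up to 9x the average per-frame bandwidth.
    // Below 10 fps the integer result drops under 300, so the
    // kMinIntraSizePercentage floor takes over.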
+libvpx::VP9RateControlRtcConfig CreateRCConfig(
+ const gfx::Size& encode_size,
+ const VP9Encoder::EncodeParams& encode_params) {
+ libvpx::VP9RateControlRtcConfig rc_cfg{};
+ rc_cfg.width = encode_size.width();
+ rc_cfg.height = encode_size.height();
+ rc_cfg.max_quantizer =
+ QindexToQuantizer(encode_params.scaling_settings.max_qp);
+ rc_cfg.min_quantizer =
+ QindexToQuantizer(encode_params.scaling_settings.min_qp);
+  // libvpx::VP9RateControlRtcConfig expects the target bandwidth in kbps.
+ rc_cfg.target_bandwidth =
+ encode_params.bitrate_allocation.GetSumBps() / 1000.0;
+ // These default values come from
+ // //third_party/webrtc/modules/video_coding/codecs/vp9/vp9_impl.cc.
+ rc_cfg.buf_initial_sz = 500;
+ rc_cfg.buf_optimal_sz = 600;
+ rc_cfg.buf_sz = 1000;
+ rc_cfg.undershoot_pct = 50;
+ rc_cfg.overshoot_pct = 50;
+ rc_cfg.max_intra_bitrate_pct = MaxSizeOfKeyframeAsPercentage(
+ rc_cfg.buf_optimal_sz, encode_params.framerate);
+ rc_cfg.framerate = encode_params.framerate;
+
+ // Spatial layer variables.
+ rc_cfg.ss_number_layers = 1;
+ rc_cfg.max_quantizers[0] = rc_cfg.max_quantizer;
+ rc_cfg.min_quantizers[0] = rc_cfg.min_quantizer;
+ // TODO(crbug.com/1030199): Fill multiple temporal layers variables.
+ // Temporal layer variables.
+ rc_cfg.ts_number_layers = 1;
+ rc_cfg.scaling_factor_num[0] = 1;
+ rc_cfg.scaling_factor_den[0] = 1;
+ rc_cfg.layer_target_bitrate[0] = rc_cfg.target_bandwidth;
+ rc_cfg.ts_rate_decimator[0] = 1;
+ return rc_cfg;
+}
} // namespace
VP9Encoder::EncodeParams::EncodeParams()
@@ -40,6 +125,11 @@ VP9Encoder::EncodeParams::EncodeParams()
scaling_settings(kMinQP, kMaxQP),
error_resilient_mode(false) {}
+void VP9Encoder::set_rate_ctrl_for_testing(
+ std::unique_ptr<VP9RateControl> rate_ctrl) {
+ rate_ctrl_ = std::move(rate_ctrl);
+}
+
void VP9Encoder::Reset() {
current_params_ = EncodeParams();
reference_frames_.Clear();
@@ -66,20 +156,27 @@ bool VP9Encoder::Initialize(const VideoEncodeAccelerator::Config& config,
    DVLOGF(1) << "Input visible size cannot be empty";
return false;
}
- // 4:2:0 format has to be 2-aligned.
- if ((config.input_visible_size.width() % 2 != 0) ||
- (config.input_visible_size.height() % 2 != 0)) {
- DVLOGF(1) << "The pixel sizes are not even: "
- << config.input_visible_size.ToString();
- return false;
- }
+ accelerator_->set_bitrate_control(ave_config.bitrate_control);
visible_size_ = config.input_visible_size;
coded_size_ = gfx::Size(base::bits::Align(visible_size_.width(), 16),
base::bits::Align(visible_size_.height(), 16));
-
Reset();
+ if (ave_config.bitrate_control ==
+ BitrateControl::kConstantQuantizationParameter) {
+ // |rate_ctrl_| might be injected for tests.
+ if (!rate_ctrl_) {
+ rate_ctrl_ = VP9RateControl::Create(
+ CreateRCConfig(visible_size_, current_params_));
+ }
+ if (!rate_ctrl_)
+ return false;
+ } else {
+ DCHECK(!rate_ctrl_) << "|rate_ctrl_| should only be configured when in "
+ "kConstantQuantizationParameter";
+ }
+
VideoBitrateAllocation initial_bitrate_allocation;
initial_bitrate_allocation.SetBitrate(0, 0, config.initial_bitrate);
return UpdateRates(initial_bitrate_allocation,
@@ -121,13 +218,14 @@ bool VP9Encoder::PrepareEncodeJob(EncodeJob* encode_job) {
scoped_refptr<VP9Picture> picture = accelerator_->GetPicture(encode_job);
DCHECK(picture);
- UpdateFrameHeader(encode_job->IsKeyframeRequested());
+ const bool keyframe = encode_job->IsKeyframeRequested();
+ UpdateFrameHeader(keyframe);
*picture->frame_hdr = current_frame_hdr_;
// Use last, golden and altref for references.
- constexpr std::array<bool, kVp9NumRefsPerFrame> ref_frames_used = {true, true,
- true};
+ const std::array<bool, kVp9NumRefsPerFrame> ref_frames_used = {
+ !keyframe, !keyframe, !keyframe};
if (!accelerator_->SubmitFrameParameters(encode_job, current_params_, picture,
reference_frames_,
ref_frames_used)) {
@@ -139,6 +237,18 @@ bool VP9Encoder::PrepareEncodeJob(EncodeJob* encode_job) {
return true;
}
+void VP9Encoder::BitrateControlUpdate(uint64_t encoded_chunk_size_bytes) {
+ if (accelerator_->bitrate_control() !=
+ BitrateControl::kConstantQuantizationParameter ||
+ !rate_ctrl_) {
+ DLOG(ERROR) << __func__ << "() is called when no bitrate controller exists";
+ return;
+ }
+
+ DVLOGF(4) << "|encoded_chunk_size_bytes|=" << encoded_chunk_size_bytes;
+ rate_ctrl_->PostEncodeUpdate(encoded_chunk_size_bytes);
+}
+
bool VP9Encoder::UpdateRates(const VideoBitrateAllocation& bitrate_allocation,
uint32_t framerate) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
@@ -160,6 +270,10 @@ bool VP9Encoder::UpdateRates(const VideoBitrateAllocation& bitrate_allocation,
current_params_.bitrate_allocation.GetSumBps() *
current_params_.cpb_window_size_ms / 1000;
+ if (!rate_ctrl_)
+ return true;
+
+ rate_ctrl_->UpdateRateControl(CreateRCConfig(visible_size_, current_params_));
return true;
}
@@ -181,8 +295,6 @@ void VP9Encoder::UpdateFrameHeader(bool keyframe) {
current_frame_hdr_.refresh_frame_flags = 0xff;
ref_frame_index_ = 0;
} else {
- // TODO(crbug.com/811912): Add temporal layer support when there is a driver
- // support. Use the last three frames for reference.
current_frame_hdr_.frame_type = Vp9FrameHeader::INTERFRAME;
current_frame_hdr_.ref_frame_idx[0] = ref_frame_index_;
current_frame_hdr_.ref_frame_idx[1] =
@@ -192,6 +304,19 @@ void VP9Encoder::UpdateFrameHeader(bool keyframe) {
ref_frame_index_ = (ref_frame_index_ + 1) % kVp9NumRefFrames;
current_frame_hdr_.refresh_frame_flags = 1 << ref_frame_index_;
}
+
+ if (!rate_ctrl_)
+ return;
+
+ libvpx::VP9FrameParamsQpRTC frame_params{};
+ frame_params.frame_type =
+ keyframe ? FRAME_TYPE::KEY_FRAME : FRAME_TYPE::INTER_FRAME;
+ rate_ctrl_->ComputeQP(frame_params);
+ // TODO(crbug.com/1030199): Fill temporal layer id.
+ current_frame_hdr_.quant_params.base_q_idx = rate_ctrl_->GetQP();
+ current_frame_hdr_.loop_filter.level = rate_ctrl_->GetLoopfilterLevel();
+ DVLOGF(4) << "|qp|=" << rate_ctrl_->GetQP()
+ << ", |filter_level|=" << rate_ctrl_->GetLoopfilterLevel();
}
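Putting the pieces together, the constant-QP path added in this patch amounts to the following per-frame loop; SubmitToHardware() and the coded-size variable are stand-ins for the accelerator plumbing, only the VP9RateControl calls are literal:

    libvpx::VP9FrameParamsQpRTC frame_params{};
    frame_params.frame_type =
        keyframe ? FRAME_TYPE::KEY_FRAME : FRAME_TYPE::INTER_FRAME;
    rate_ctrl_->ComputeQP(frame_params);  // Decide QP before submission.
    hdr.quant_params.base_q_idx = rate_ctrl_->GetQP();
    hdr.loop_filter.level = rate_ctrl_->GetLoopfilterLevel();
    SubmitToHardware(hdr);                             // Hypothetical.
    rate_ctrl_->PostEncodeUpdate(encoded_chunk_size);  // Close the loop.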
void VP9Encoder::UpdateReferenceFrames(scoped_refptr<VP9Picture> picture) {
diff --git a/chromium/media/gpu/vaapi/vp9_encoder.h b/chromium/media/gpu/vaapi/vp9_encoder.h
index 2f3eda4b440..9c0ad1cb9f3 100644
--- a/chromium/media/gpu/vaapi/vp9_encoder.h
+++ b/chromium/media/gpu/vaapi/vp9_encoder.h
@@ -19,6 +19,7 @@
#include "media/gpu/vp9_reference_frame_vector.h"
namespace media {
+class VP9RateControl;
class VP9Encoder : public AcceleratedVideoEncoder {
public:
@@ -71,6 +72,12 @@ class VP9Encoder : public AcceleratedVideoEncoder {
const Vp9ReferenceFrameVector& ref_frames,
const std::array<bool, kVp9NumRefsPerFrame>& ref_frames_used) = 0;
+ void set_bitrate_control(BitrateControl bc) { bitrate_control_ = bc; }
+ BitrateControl bitrate_control() { return bitrate_control_; }
+
+ protected:
+ BitrateControl bitrate_control_ = BitrateControl::kConstantBitrate;
+
DISALLOW_COPY_AND_ASSIGN(Accelerator);
};
@@ -86,8 +93,13 @@ class VP9Encoder : public AcceleratedVideoEncoder {
size_t GetMaxNumOfRefFrames() const override;
ScalingSettings GetScalingSettings() const override;
bool PrepareEncodeJob(EncodeJob* encode_job) override;
+ void BitrateControlUpdate(uint64_t encoded_chunk_size_bytes) override;
private:
+ friend class VP9EncoderTest;
+
+ void set_rate_ctrl_for_testing(std::unique_ptr<VP9RateControl> rate_ctrl);
+
void InitializeFrameHeader();
void UpdateFrameHeader(bool keyframe);
void UpdateReferenceFrames(scoped_refptr<VP9Picture> picture);
@@ -105,6 +117,7 @@ class VP9Encoder : public AcceleratedVideoEncoder {
Vp9FrameHeader current_frame_hdr_;
Vp9ReferenceFrameVector reference_frames_;
+ std::unique_ptr<VP9RateControl> rate_ctrl_;
const std::unique_ptr<Accelerator> accelerator_;
SEQUENCE_CHECKER(sequence_checker_);
diff --git a/chromium/media/gpu/vaapi/vp9_encoder_unittest.cc b/chromium/media/gpu/vaapi/vp9_encoder_unittest.cc
new file mode 100644
index 00000000000..fa0f8b53d3c
--- /dev/null
+++ b/chromium/media/gpu/vaapi/vp9_encoder_unittest.cc
@@ -0,0 +1,381 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/gpu/vaapi/vp9_encoder.h"
+
+#include <memory>
+#include <numeric>
+
+#include "base/bind_helpers.h"
+#include "base/callback.h"
+#include "base/logging.h"
+#include "base/optional.h"
+#include "media/filters/vp9_parser.h"
+#include "media/gpu/vaapi/vp9_rate_control.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "third_party/libvpx/source/libvpx/vp9/common/vp9_blockd.h"
+#include "third_party/libvpx/source/libvpx/vp9/ratectrl_rtc.h"
+
+using ::testing::_;
+using ::testing::InSequence;
+using ::testing::Invoke;
+using ::testing::Return;
+
+namespace media {
+namespace {
+
+constexpr size_t kDefaultMaxNumRefFrames = kVp9NumRefsPerFrame;
+
+AcceleratedVideoEncoder::Config kDefaultAcceleratedVideoEncoderConfig{
+ kDefaultMaxNumRefFrames,
+ AcceleratedVideoEncoder::BitrateControl::kConstantBitrate};
+
+VideoEncodeAccelerator::Config kDefaultVideoEncodeAcceleratorConfig(
+ PIXEL_FORMAT_I420,
+ gfx::Size(1280, 720),
+ VP9PROFILE_PROFILE0,
+ 14000000 /* = maximum bitrate in bits per second for level 3.1 */,
+ VideoEncodeAccelerator::kDefaultFramerate,
+ base::nullopt /* gop_length */,
+    base::nullopt /* h264 output level */,
+ VideoEncodeAccelerator::Config::StorageType::kShmem);
+
+const std::vector<bool> kRefFramesUsedForKeyFrame = {false, false, false};
+const std::vector<bool> kRefFramesUsedForInterFrame = {true, true, true};
+
+MATCHER_P4(MatchRtcConfigWithRates,
+ size,
+ bitrate_allocation,
+ framerate,
+ num_temporal_layers,
+ "") {
+ if (arg.target_bandwidth !=
+ static_cast<int64_t>(bitrate_allocation.GetSumBps() / 1000.0)) {
+ return false;
+ }
+
+ if (arg.framerate != static_cast<double>(framerate))
+ return false;
+
+ for (size_t i = 0; i < num_temporal_layers; i++) {
+ if (arg.layer_target_bitrate[i] !=
+ static_cast<int>(bitrate_allocation.GetBitrateBps(0, i) / 1000.0)) {
+ return false;
+ }
+ if (arg.ts_rate_decimator[i] != (1 << i))
+ return false;
+ }
+
+ return arg.width == size.width() && arg.height == size.height() &&
+ base::checked_cast<size_t>(arg.ts_number_layers) ==
+ num_temporal_layers &&
+ arg.ss_number_layers == 1 && arg.scaling_factor_num[0] == 1 &&
+ arg.scaling_factor_den[0] == 1;
+}
+
+MATCHER_P2(MatchFrameParam, frame_type, temporal_idx, "") {
+ return arg.frame_type == frame_type &&
+ (!temporal_idx || arg.temporal_layer_id == *temporal_idx);
+}
+
+class MockVP9Accelerator : public VP9Encoder::Accelerator {
+ public:
+ MockVP9Accelerator() = default;
+ ~MockVP9Accelerator() override = default;
+ MOCK_METHOD1(GetPicture,
+ scoped_refptr<VP9Picture>(AcceleratedVideoEncoder::EncodeJob*));
+
+ MOCK_METHOD5(SubmitFrameParameters,
+ bool(AcceleratedVideoEncoder::EncodeJob*,
+ const VP9Encoder::EncodeParams&,
+ scoped_refptr<VP9Picture>,
+ const Vp9ReferenceFrameVector&,
+ const std::array<bool, kVp9NumRefsPerFrame>&));
+};
+
+class MockVP9RateControl : public VP9RateControl {
+ public:
+ MockVP9RateControl() = default;
+ ~MockVP9RateControl() override = default;
+
+ MOCK_METHOD1(UpdateRateControl, void(const libvpx::VP9RateControlRtcConfig&));
+ MOCK_CONST_METHOD0(GetQP, int());
+ MOCK_CONST_METHOD0(GetLoopfilterLevel, int());
+ MOCK_METHOD1(ComputeQP, void(const libvpx::VP9FrameParamsQpRTC&));
+ MOCK_METHOD1(PostEncodeUpdate, void(uint64_t));
+};
+} // namespace
+
+struct VP9EncoderTestParam;
+
+class VP9EncoderTest : public ::testing::TestWithParam<VP9EncoderTestParam> {
+ public:
+ using BitrateControl = AcceleratedVideoEncoder::BitrateControl;
+
+ VP9EncoderTest() = default;
+ ~VP9EncoderTest() override = default;
+
+ void SetUp() override;
+
+ protected:
+ using FrameType = Vp9FrameHeader::FrameType;
+
+ void InitializeVP9Encoder(BitrateControl bitrate_control);
+ void EncodeSequence(FrameType frame_type);
+ void EncodeConstantQuantizationParameterSequence(
+ FrameType frame_type,
+ const std::vector<bool>& expected_ref_frames_used,
+ base::Optional<uint8_t> expected_temporal_idx = base::nullopt);
+ void UpdateRatesTest(BitrateControl bitrate_control,
+ size_t num_temporal_layers);
+
+ private:
+ std::unique_ptr<AcceleratedVideoEncoder::EncodeJob> CreateEncodeJob(
+ bool keyframe);
+ void UpdateRatesSequence(const VideoBitrateAllocation& bitrate_allocation,
+ uint32_t framerate,
+ BitrateControl bitrate_control);
+
+ std::unique_ptr<VP9Encoder> encoder_;
+ MockVP9Accelerator* mock_accelerator_ = nullptr;
+ MockVP9RateControl* mock_rate_ctrl_ = nullptr;
+};
+
+void VP9EncoderTest::SetUp() {
+ auto mock_accelerator = std::make_unique<MockVP9Accelerator>();
+ mock_accelerator_ = mock_accelerator.get();
+ auto rate_ctrl = std::make_unique<MockVP9RateControl>();
+ mock_rate_ctrl_ = rate_ctrl.get();
+
+ encoder_ = std::make_unique<VP9Encoder>(std::move(mock_accelerator));
+ encoder_->set_rate_ctrl_for_testing(std::move(rate_ctrl));
+}
+
+std::unique_ptr<AcceleratedVideoEncoder::EncodeJob>
+VP9EncoderTest::CreateEncodeJob(bool keyframe) {
+ auto input_frame = VideoFrame::CreateFrame(
+ kDefaultVideoEncodeAcceleratorConfig.input_format,
+ kDefaultVideoEncodeAcceleratorConfig.input_visible_size,
+ gfx::Rect(kDefaultVideoEncodeAcceleratorConfig.input_visible_size),
+ kDefaultVideoEncodeAcceleratorConfig.input_visible_size,
+ base::TimeDelta());
+ LOG_ASSERT(input_frame) << " Failed to create VideoFrame";
+ return std::make_unique<AcceleratedVideoEncoder::EncodeJob>(
+ input_frame, keyframe, base::DoNothing());
+}
+
+void VP9EncoderTest::InitializeVP9Encoder(BitrateControl bitrate_control) {
+ auto ave_config = kDefaultAcceleratedVideoEncoderConfig;
+ ave_config.bitrate_control = bitrate_control;
+ if (bitrate_control == BitrateControl::kConstantQuantizationParameter) {
+ constexpr size_t kNumTemporalLayers = 1u;
+ VideoBitrateAllocation initial_bitrate_allocation;
+ initial_bitrate_allocation.SetBitrate(
+ 0, 0, kDefaultVideoEncodeAcceleratorConfig.initial_bitrate);
+
+ EXPECT_CALL(
+ *mock_rate_ctrl_,
+ UpdateRateControl(MatchRtcConfigWithRates(
+ kDefaultVideoEncodeAcceleratorConfig.input_visible_size,
+ initial_bitrate_allocation,
+ VideoEncodeAccelerator::kDefaultFramerate, kNumTemporalLayers)))
+ .Times(1)
+ .WillOnce(Return());
+ }
+ EXPECT_TRUE(
+ encoder_->Initialize(kDefaultVideoEncodeAcceleratorConfig, ave_config));
+}
+
+void VP9EncoderTest::EncodeSequence(FrameType frame_type) {
+ InSequence seq;
+ const bool keyframe = frame_type == FrameType::KEYFRAME;
+ auto encode_job = CreateEncodeJob(keyframe);
+ scoped_refptr<VP9Picture> picture(new VP9Picture);
+ EXPECT_CALL(*mock_accelerator_, GetPicture(encode_job.get()))
+ .WillOnce(Invoke(
+ [picture](AcceleratedVideoEncoder::EncodeJob*) { return picture; }));
+ const auto& expected_ref_frames_used =
+ keyframe ? kRefFramesUsedForKeyFrame : kRefFramesUsedForInterFrame;
+ EXPECT_CALL(*mock_accelerator_,
+ SubmitFrameParameters(
+ encode_job.get(), _, _, _,
+ ::testing::ElementsAreArray(expected_ref_frames_used)))
+ .WillOnce(Return(true));
+ EXPECT_TRUE(encoder_->PrepareEncodeJob(encode_job.get()));
+ // TODO(hiroh): Test for encoder_->reference_frames_.
+}
+
+void VP9EncoderTest::EncodeConstantQuantizationParameterSequence(
+ FrameType frame_type,
+ const std::vector<bool>& expected_ref_frames_used,
+ base::Optional<uint8_t> expected_temporal_idx) {
+ const bool keyframe = frame_type == FrameType::KEYFRAME;
+ InSequence seq;
+ auto encode_job = CreateEncodeJob(keyframe);
+ scoped_refptr<VP9Picture> picture(new VP9Picture);
+ EXPECT_CALL(*mock_accelerator_, GetPicture(encode_job.get()))
+ .WillOnce(Invoke(
+ [picture](AcceleratedVideoEncoder::EncodeJob*) { return picture; }));
+
+ FRAME_TYPE libvpx_frame_type =
+ keyframe ? FRAME_TYPE::KEY_FRAME : FRAME_TYPE::INTER_FRAME;
+ EXPECT_CALL(*mock_rate_ctrl_, ComputeQP(MatchFrameParam(
+ libvpx_frame_type, expected_temporal_idx)))
+ .WillOnce(Return());
+ constexpr int kDefaultQP = 34;
+ constexpr int kDefaultLoopFilterLevel = 8;
+ EXPECT_CALL(*mock_rate_ctrl_, GetQP()).WillOnce(Return(kDefaultQP));
+ EXPECT_CALL(*mock_rate_ctrl_, GetLoopfilterLevel())
+ .WillOnce(Return(kDefaultLoopFilterLevel));
+ if (!expected_ref_frames_used.empty()) {
+ EXPECT_CALL(*mock_accelerator_,
+ SubmitFrameParameters(
+ encode_job.get(), _, _, _,
+ ::testing::ElementsAreArray(expected_ref_frames_used)))
+ .WillOnce(Return(true));
+ } else {
+ EXPECT_CALL(*mock_accelerator_,
+ SubmitFrameParameters(encode_job.get(), _, _, _, _))
+ .WillOnce(Return(true));
+ }
+ EXPECT_TRUE(encoder_->PrepareEncodeJob(encode_job.get()));
+
+ // TODO(hiroh): Test for encoder_->reference_frames_.
+
+ constexpr size_t kDefaultEncodedFrameSize = 123456;
+ // For BitrateControlUpdate sequence.
+ EXPECT_CALL(*mock_rate_ctrl_, PostEncodeUpdate(kDefaultEncodedFrameSize))
+ .WillOnce(Return());
+ encoder_->BitrateControlUpdate(kDefaultEncodedFrameSize);
+}
+
+void VP9EncoderTest::UpdateRatesSequence(
+ const VideoBitrateAllocation& bitrate_allocation,
+ uint32_t framerate,
+ BitrateControl bitrate_control) {
+ ASSERT_TRUE(encoder_->current_params_.bitrate_allocation !=
+ bitrate_allocation ||
+ encoder_->current_params_.framerate != framerate);
+
+ if (bitrate_control == BitrateControl::kConstantQuantizationParameter) {
+ constexpr size_t kNumTemporalLayers = 1u;
+ EXPECT_CALL(*mock_rate_ctrl_,
+ UpdateRateControl(MatchRtcConfigWithRates(
+ encoder_->visible_size_, bitrate_allocation, framerate,
+ kNumTemporalLayers)))
+ .Times(1)
+ .WillOnce(Return());
+ }
+
+ EXPECT_TRUE(encoder_->UpdateRates(bitrate_allocation, framerate));
+ EXPECT_EQ(encoder_->current_params_.bitrate_allocation, bitrate_allocation);
+ EXPECT_EQ(encoder_->current_params_.framerate, framerate);
+}
+
+void VP9EncoderTest::UpdateRatesTest(BitrateControl bitrate_control,
+ size_t num_temporal_layers) {
+ ASSERT_LE(num_temporal_layers, 3u);
+ auto create_allocation =
+ [num_temporal_layers](uint32_t bitrate) -> VideoBitrateAllocation {
+ VideoBitrateAllocation bitrate_allocation;
+ constexpr int kTemporalLayerBitrateScaleFactor[] = {1, 2, 4};
+ const int kScaleFactors =
+ std::accumulate(std::cbegin(kTemporalLayerBitrateScaleFactor),
+ std::cend(kTemporalLayerBitrateScaleFactor), 0);
+ for (size_t ti = 0; ti < num_temporal_layers; ti++) {
+ bitrate_allocation.SetBitrate(
+ 0, ti,
+ bitrate * kTemporalLayerBitrateScaleFactor[ti] / kScaleFactors);
+ }
+ return bitrate_allocation;
+ };
+
+ const auto update_rates_and_encode =
+ [this, bitrate_control](FrameType frame_type,
+ const VideoBitrateAllocation& bitrate_allocation,
+ uint32_t framerate) {
+ UpdateRatesSequence(bitrate_allocation, framerate, bitrate_control);
+ if (bitrate_control == BitrateControl::kConstantQuantizationParameter) {
+ EncodeConstantQuantizationParameterSequence(frame_type, {},
+ base::nullopt);
+ } else {
+ EncodeSequence(frame_type);
+ }
+ };
+
+ const uint32_t kBitrate =
+ kDefaultVideoEncodeAcceleratorConfig.initial_bitrate;
+ const uint32_t kFramerate =
+ *kDefaultVideoEncodeAcceleratorConfig.initial_framerate;
+ // Call UpdateRates before Encode.
+ update_rates_and_encode(FrameType::KEYFRAME, create_allocation(kBitrate / 2),
+ kFramerate);
+ // Bitrate change only.
+ update_rates_and_encode(FrameType::INTERFRAME, create_allocation(kBitrate),
+ kFramerate);
+ // Framerate change only.
+ update_rates_and_encode(FrameType::INTERFRAME, create_allocation(kBitrate),
+ kFramerate + 2);
+  // Bitrate + framerate changes.
+ update_rates_and_encode(FrameType::INTERFRAME,
+ create_allocation(kBitrate * 3 / 4), kFramerate - 5);
+}
+
+struct VP9EncoderTestParam {
+ VP9EncoderTest::BitrateControl bitrate_control;
+} kTestCasesForVP9EncoderTest[] = {
+ {VP9EncoderTest::BitrateControl::kConstantBitrate},
+ {VP9EncoderTest::BitrateControl::kConstantQuantizationParameter},
+};
+
+TEST_P(VP9EncoderTest, Initialize) {
+ InitializeVP9Encoder(GetParam().bitrate_control);
+}
+
+TEST_P(VP9EncoderTest, Encode) {
+ const auto& bitrate_control = GetParam().bitrate_control;
+ InitializeVP9Encoder(bitrate_control);
+ if (bitrate_control == BitrateControl::kConstantBitrate) {
+ EncodeSequence(FrameType::KEYFRAME);
+ EncodeSequence(FrameType::INTERFRAME);
+ } else {
+ EncodeConstantQuantizationParameterSequence(FrameType::KEYFRAME,
+ kRefFramesUsedForKeyFrame);
+ EncodeConstantQuantizationParameterSequence(FrameType::INTERFRAME,
+ kRefFramesUsedForInterFrame);
+ }
+}
+
+TEST_P(VP9EncoderTest, UpdateRates) {
+ const auto& bitrate_control = GetParam().bitrate_control;
+ InitializeVP9Encoder(bitrate_control);
+ constexpr size_t kNumTemporalLayers = 1u;
+ UpdateRatesTest(bitrate_control, kNumTemporalLayers);
+}
+
+TEST_P(VP9EncoderTest, ForceKeyFrame) {
+ const auto& bitrate_control = GetParam().bitrate_control;
+  InitializeVP9Encoder(bitrate_control);
+ if (bitrate_control == BitrateControl::kConstantBitrate) {
+ EncodeSequence(FrameType::KEYFRAME);
+ EncodeSequence(FrameType::INTERFRAME);
+ EncodeSequence(FrameType::KEYFRAME);
+ EncodeSequence(FrameType::INTERFRAME);
+ } else {
+ EncodeConstantQuantizationParameterSequence(FrameType::KEYFRAME,
+ kRefFramesUsedForKeyFrame);
+ EncodeConstantQuantizationParameterSequence(FrameType::INTERFRAME,
+ kRefFramesUsedForInterFrame);
+ EncodeConstantQuantizationParameterSequence(FrameType::KEYFRAME,
+ kRefFramesUsedForKeyFrame);
+ EncodeConstantQuantizationParameterSequence(FrameType::INTERFRAME,
+ kRefFramesUsedForInterFrame);
+ }
+}
+
+INSTANTIATE_TEST_SUITE_P(,
+ VP9EncoderTest,
+ ::testing::ValuesIn(kTestCasesForVP9EncoderTest));
+} // namespace media
diff --git a/chromium/media/gpu/vaapi/vp9_rate_control.cc b/chromium/media/gpu/vaapi/vp9_rate_control.cc
new file mode 100644
index 00000000000..f4d6beb6129
--- /dev/null
+++ b/chromium/media/gpu/vaapi/vp9_rate_control.cc
@@ -0,0 +1,53 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/gpu/vaapi/vp9_rate_control.h"
+
+#include "base/logging.h"
+#include "base/memory/ptr_util.h"
+#include "third_party/libvpx/source/libvpx/vp9/ratectrl_rtc.h"
+
+namespace media {
+namespace {
+class LibvpxVP9RateControl : public VP9RateControl {
+ public:
+ explicit LibvpxVP9RateControl(std::unique_ptr<libvpx::VP9RateControlRTC> impl)
+ : impl_(std::move(impl)) {}
+
+ ~LibvpxVP9RateControl() override = default;
+ LibvpxVP9RateControl(const LibvpxVP9RateControl&) = delete;
+ LibvpxVP9RateControl& operator=(const LibvpxVP9RateControl&) = delete;
+
+ void UpdateRateControl(
+ const libvpx::VP9RateControlRtcConfig& rate_control_config) override {
+ impl_->UpdateRateControl(rate_control_config);
+ }
+ int GetQP() const override { return impl_->GetQP(); }
+ int GetLoopfilterLevel() const override {
+ return impl_->GetLoopfilterLevel();
+ }
+ void ComputeQP(const libvpx::VP9FrameParamsQpRTC& frame_params) override {
+ impl_->ComputeQP(frame_params);
+ }
+ void PostEncodeUpdate(uint64_t encoded_frame_size) override {
+ impl_->PostEncodeUpdate(encoded_frame_size);
+ }
+
+ private:
+ const std::unique_ptr<libvpx::VP9RateControlRTC> impl_;
+};
+
+} // namespace
+
+// static
+std::unique_ptr<VP9RateControl> VP9RateControl::Create(
+ const libvpx::VP9RateControlRtcConfig& config) {
+ auto impl = libvpx::VP9RateControlRTC::Create(config);
+ if (!impl) {
+ DLOG(ERROR) << "Failed creating libvpx::VP9RateControlRTC";
+ return nullptr;
+ }
+ return std::make_unique<LibvpxVP9RateControl>(std::move(impl));
+}
+} // namespace media
diff --git a/chromium/media/gpu/vaapi/vp9_rate_control.h b/chromium/media/gpu/vaapi/vp9_rate_control.h
new file mode 100644
index 00000000000..116f47f5895
--- /dev/null
+++ b/chromium/media/gpu/vaapi/vp9_rate_control.h
@@ -0,0 +1,38 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#ifndef MEDIA_GPU_VAAPI_VP9_RATE_CONTROL_H_
+#define MEDIA_GPU_VAAPI_VP9_RATE_CONTROL_H_
+
+#include <memory>
+
+#include "base/callback.h"
+#include "base/optional.h"
+
+namespace libvpx {
+struct VP9FrameParamsQpRTC;
+struct VP9RateControlRtcConfig;
+} // namespace libvpx
+
+namespace media {
+// VP9RateControl is an interface for computing the quantization parameter
+// and loop filter level for VP9.
+class VP9RateControl {
+ public:
+ // Creates VP9RateControl using libvpx implementation.
+ static std::unique_ptr<VP9RateControl> Create(
+ const libvpx::VP9RateControlRtcConfig& config);
+
+ virtual ~VP9RateControl() = default;
+
+ virtual void UpdateRateControl(
+ const libvpx::VP9RateControlRtcConfig& rate_control_config) = 0;
+ // libvpx::VP9FrameParamsQpRTC takes 0-63 quantization parameter.
+ virtual void ComputeQP(const libvpx::VP9FrameParamsQpRTC& frame_params) = 0;
+ // GetQP() returns vp9 ac/dc table index. The range is 0-255.
+ virtual int GetQP() const = 0;
+ virtual int GetLoopfilterLevel() const = 0;
+ virtual void PostEncodeUpdate(uint64_t encoded_frame_size) = 0;
+};
+} // namespace media
+#endif // MEDIA_GPU_VAAPI_VP9_RATE_CONTROL_H_
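Because the encoder only sees these five virtuals, alternative controllers are easy to slot in for tests; a minimal hypothetical example (the class name and the returned numbers are made up for illustration):

    class FixedQPRateControl : public media::VP9RateControl {
     public:
      void UpdateRateControl(
          const libvpx::VP9RateControlRtcConfig&) override {}
      void ComputeQP(const libvpx::VP9FrameParamsQpRTC&) override {}
      int GetQP() const override { return 100; }  // Fixed vp9 ac/dc index.
      int GetLoopfilterLevel() const override { return 10; }
      void PostEncodeUpdate(uint64_t) override {}
    };
    // Injected via VP9Encoder::set_rate_ctrl_for_testing() in unit tests.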
diff --git a/chromium/media/gpu/video_encode_accelerator_perf_tests.cc b/chromium/media/gpu/video_encode_accelerator_perf_tests.cc
index 24eb0e26467..f86d3824973 100644
--- a/chromium/media/gpu/video_encode_accelerator_perf_tests.cc
+++ b/chromium/media/gpu/video_encode_accelerator_perf_tests.cc
@@ -29,6 +29,7 @@ namespace {
// TODO(dstaessens): Add video_encoder_perf_test_usage.md
constexpr const char* usage_msg =
"usage: video_encode_accelerator_perf_tests\n"
+ " [--codec=<codec>]\n"
" [-v=<level>] [--vmodule=<config>] [--output_folder]\n"
" [--gtest_help] [--help]\n"
" [<video path>] [<video metadata path>]\n";
@@ -42,6 +43,8 @@ constexpr const char* help_msg =
"containing the video's metadata. By default <video path>.json will be\n"
"used.\n"
"\nThe following arguments are supported:\n"
+ " --codec codec profile to encode, \"h264 (baseline)\",\n"
+ " \"h264main, \"h264high\", \"vp8\" and \"vp9\"\n"
" -v enable verbose mode, e.g. -v=2.\n"
" --vmodule enable verbose mode for the specified module,\n"
" --output_folder overwrite the output folder used to store\n"
@@ -115,11 +118,8 @@ class PerformanceEvaluator : public BitstreamProcessor {
// Create a new performance evaluator.
PerformanceEvaluator() {}
- // Interface BitstreamProcessor
- void ProcessBitstreamBuffer(
- int32_t bitstream_buffer_id,
- const BitstreamBufferMetadata& metadata,
- const base::UnsafeSharedMemoryRegion* shm) override;
+ void ProcessBitstream(scoped_refptr<BitstreamRef> bitstream,
+ size_t frame_index) override;
bool WaitUntilDone() override { return true; }
// Start/Stop collecting performance metrics.
@@ -141,10 +141,9 @@ class PerformanceEvaluator : public BitstreamProcessor {
PerformanceMetrics perf_metrics_;
};
-void PerformanceEvaluator::ProcessBitstreamBuffer(
- int32_t bitstream_buffer_id,
- const BitstreamBufferMetadata& metadata,
- const base::UnsafeSharedMemoryRegion* shm) {
+void PerformanceEvaluator::ProcessBitstream(
+ scoped_refptr<BitstreamRef> bitstream,
+ size_t frame_index) {
base::TimeTicks now = base::TimeTicks::Now();
base::TimeDelta delivery_time = (now - prev_bitstream_delivery_time_);
@@ -152,7 +151,8 @@ void PerformanceEvaluator::ProcessBitstreamBuffer(
delivery_time.InMillisecondsF());
prev_bitstream_delivery_time_ = now;
- base::TimeDelta encode_time = now.since_origin() - metadata.timestamp;
+ base::TimeDelta encode_time =
+ now.since_origin() - bitstream->metadata.timestamp;
perf_metrics_.bitstream_encode_times_.push_back(
encode_time.InMillisecondsF());
}
@@ -270,7 +270,8 @@ void PerformanceMetrics::WriteToFile() const {
class VideoEncoderTest : public ::testing::Test {
public:
// Create a new video encoder instance.
- std::unique_ptr<VideoEncoder> CreateVideoEncoder(const Video* video) {
+ std::unique_ptr<VideoEncoder> CreateVideoEncoder(const Video* video,
+ VideoCodecProfile profile) {
LOG_ASSERT(video);
std::vector<std::unique_ptr<BitstreamProcessor>> bitstream_processors;
@@ -280,6 +281,7 @@ class VideoEncoderTest : public ::testing::Test {
VideoEncoderClientConfig config;
config.framerate = video->FrameRate();
+ config.output_profile = profile;
auto video_encoder =
VideoEncoder::Create(config, std::move(bitstream_processors));
LOG_ASSERT(video_encoder);
@@ -297,7 +299,7 @@ class VideoEncoderTest : public ::testing::Test {
// test will encode a video as fast as possible, and gives an idea about the
// maximum output of the encoder.
TEST_F(VideoEncoderTest, MeasureUncappedPerformance) {
- auto encoder = CreateVideoEncoder(g_env->Video());
+ auto encoder = CreateVideoEncoder(g_env->Video(), g_env->Profile());
performance_evaluator_->StartMeasuring();
encoder->Encode();
@@ -336,6 +338,7 @@ int main(int argc, char** argv) {
: base::FilePath(media::test::kDefaultTestVideoPath);
base::FilePath video_metadata_path =
(args.size() >= 2) ? base::FilePath(args[1]) : base::FilePath();
+ std::string codec = "h264";
// Parse command line arguments.
base::FilePath::StringType output_folder = media::test::kDefaultOutputFolder;
@@ -349,6 +352,8 @@ int main(int argc, char** argv) {
if (it->first == "output_folder") {
output_folder = it->second;
+ } else if (it->first == "codec") {
+ codec = it->second;
} else {
std::cout << "unknown option: --" << it->first << "\n"
<< media::test::usage_msg;
@@ -361,7 +366,8 @@ int main(int argc, char** argv) {
// Set up our test environment.
media::test::VideoEncoderTestEnvironment* test_environment =
media::test::VideoEncoderTestEnvironment::Create(
- video_path, video_metadata_path, base::FilePath(output_folder));
+ video_path, video_metadata_path, false, base::FilePath(output_folder),
+ codec, false /* output_bitstream */);
if (!test_environment)
return EXIT_FAILURE;
diff --git a/chromium/media/gpu/video_encode_accelerator_tests.cc b/chromium/media/gpu/video_encode_accelerator_tests.cc
index 155a602be67..1531e9ba965 100644
--- a/chromium/media/gpu/video_encode_accelerator_tests.cc
+++ b/chromium/media/gpu/video_encode_accelerator_tests.cc
@@ -2,15 +2,26 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include <algorithm>
#include <limits>
#include "base/command_line.h"
#include "base/files/file_path.h"
+#include "base/files/file_util.h"
+#include "media/base/media_util.h"
#include "media/base/test_data_util.h"
+#include "media/base/video_bitrate_allocation.h"
+#include "media/base/video_decoder_config.h"
#include "media/gpu/test/video.h"
+#include "media/gpu/test/video_encoder/bitstream_file_writer.h"
+#include "media/gpu/test/video_encoder/bitstream_validator.h"
+#include "media/gpu/test/video_encoder/decoder_buffer_validator.h"
#include "media/gpu/test/video_encoder/video_encoder.h"
#include "media/gpu/test/video_encoder/video_encoder_client.h"
#include "media/gpu/test/video_encoder/video_encoder_test_environment.h"
+#include "media/gpu/test/video_frame_helpers.h"
+#include "media/gpu/test/video_frame_validator.h"
+#include "media/gpu/test/video_test_helpers.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace media {
@@ -23,6 +34,8 @@ namespace {
// TODO(dstaessens): Add video_encoder_test_usage.md
constexpr const char* usage_msg =
"usage: video_encode_accelerator_tests\n"
+ " [--codec=<codec>] [--disable_validator]\n"
+ " [--output_bitstream] [--output_folder=<filepath>]\n"
" [-v=<level>] [--vmodule=<config>] [--gtest_help] [--help]\n"
" [<video path>] [<video metadata path>]\n";
@@ -35,6 +48,16 @@ constexpr const char* help_msg =
"containing the video's metadata, such as frame checksums. By default\n"
"<video path>.json will be used.\n"
"\nThe following arguments are supported:\n"
+ " --codec codec profile to encode, \"h264\" (baseline),\n"
+ " \"h264main, \"h264high\", \"vp8\" and \"vp9\".\n"
+ " H264 Baseline is selected if unspecified.\n"
+ " --disable_validator disable validation of encoded bitstream.\n\n"
+ " --output_bitstream save the output bitstream in either H264 AnnexB\n"
+ " format (for H264) or IVF format (for vp8 and vp9)\n"
+ " to <output_folder>/<testname>/<filename> +\n"
+ " .(h264|ivf).\n"
+ " --output_folder set the basic folder used to store the output\n"
+ " stream. The default is the current directory.\n"
" -v enable verbose mode, e.g. -v=2.\n"
" --vmodule enable verbose mode for the specified module,\n"
" e.g. --vmodule=*media/gpu*=2.\n\n"
@@ -45,31 +68,115 @@ constexpr const char* help_msg =
constexpr base::FilePath::CharType kDefaultTestVideoPath[] =
FILE_PATH_LITERAL("bear_320x192_40frames.yuv.webm");
+// The number of frames to encode for bitrate check test cases.
+// TODO(hiroh): Decrease this value to make the test faster.
+constexpr size_t kNumFramesToEncodeForBitrateCheck = 300;
+// Tolerance factor for how encoded bitrate can differ from requested bitrate.
+constexpr double kBitrateTolerance = 0.1;
+
media::test::VideoEncoderTestEnvironment* g_env;
// Video encode test class. Performs setup and teardown for each single test.
class VideoEncoderTest : public ::testing::Test {
public:
std::unique_ptr<VideoEncoder> CreateVideoEncoder(
- const Video* video,
- VideoEncoderClientConfig config = VideoEncoderClientConfig()) {
+ Video* video,
+ VideoEncoderClientConfig config) {
LOG_ASSERT(video);
- auto video_encoder = VideoEncoder::Create(config);
-
+ auto video_encoder =
+ VideoEncoder::Create(config, CreateBitstreamProcessors(video, config));
LOG_ASSERT(video_encoder);
- LOG_ASSERT(video_encoder->Initialize(video));
+
+ if (!video_encoder->Initialize(video))
+ ADD_FAILURE();
return video_encoder;
}
+
+ private:
+ std::vector<std::unique_ptr<BitstreamProcessor>> CreateBitstreamProcessors(
+ Video* video,
+ VideoEncoderClientConfig config) {
+ std::vector<std::unique_ptr<BitstreamProcessor>> bitstream_processors;
+ if (!g_env->IsBitstreamValidatorEnabled()) {
+ return bitstream_processors;
+ }
+
+ const gfx::Rect visible_rect(video->Resolution());
+ VideoCodec codec = VideoCodecProfileToVideoCodec(config.output_profile);
+ switch (codec) {
+ case kCodecH264:
+ bitstream_processors.emplace_back(
+ new H264Validator(config.output_profile, visible_rect));
+ break;
+ case kCodecVP8:
+ bitstream_processors.emplace_back(new VP8Validator(visible_rect));
+ break;
+ case kCodecVP9:
+ bitstream_processors.emplace_back(
+ new VP9Validator(config.output_profile, visible_rect));
+ break;
+ default:
+ LOG(ERROR) << "Unsupported profile: "
+ << GetProfileName(config.output_profile);
+ break;
+ }
+
+ // Attach a bitstream validator to validate all encoded video frames. The
+ // bitstream validator uses a software video decoder to validate the
+ // encoded buffers by decoding them. Metrics such as the image's SSIM can
+ // be calculated for additional quality checks.
+ VideoDecoderConfig decoder_config(
+ codec, config.output_profile, VideoDecoderConfig::AlphaMode::kIsOpaque,
+ VideoColorSpace(), kNoTransformation, visible_rect.size(), visible_rect,
+ visible_rect.size(), EmptyExtraData(), EncryptionScheme::kUnencrypted);
+ std::vector<std::unique_ptr<VideoFrameProcessor>> video_frame_processors;
+
+ raw_data_helper_ = RawDataHelper::Create(video);
+ if (!raw_data_helper_) {
+ LOG(ERROR) << "Failed to create raw data helper";
+ return bitstream_processors;
+ }
+
+ // TODO(hiroh): Add corrupt frame processors.
+ VideoFrameValidator::GetModelFrameCB get_model_frame_cb =
+ base::BindRepeating(&VideoEncoderTest::GetModelFrame,
+ base::Unretained(this));
+ auto psnr_validator = PSNRVideoFrameValidator::Create(get_model_frame_cb);
+ auto ssim_validator = SSIMVideoFrameValidator::Create(get_model_frame_cb);
+ video_frame_processors.push_back(std::move(psnr_validator));
+ video_frame_processors.push_back(std::move(ssim_validator));
+ auto bitstream_validator = BitstreamValidator::Create(
+ decoder_config, config.num_frames_to_encode - 1,
+ std::move(video_frame_processors));
+ LOG_ASSERT(bitstream_validator);
+ bitstream_processors.emplace_back(std::move(bitstream_validator));
+
+ auto output_bitstream_filepath = g_env->OutputBitstreamFilePath();
+ if (output_bitstream_filepath) {
+ auto bitstream_writer = BitstreamFileWriter::Create(
+ *output_bitstream_filepath, codec, visible_rect.size(),
+ config.framerate, config.num_frames_to_encode);
+ LOG_ASSERT(bitstream_writer);
+ bitstream_processors.emplace_back(std::move(bitstream_writer));
+ }
+
+ return bitstream_processors;
+ }
+
+ scoped_refptr<const VideoFrame> GetModelFrame(size_t frame_index) {
+ LOG_ASSERT(raw_data_helper_);
+ return raw_data_helper_->GetFrame(frame_index %
+ g_env->Video()->NumFrames());
+ }
+
+ std::unique_ptr<RawDataHelper> raw_data_helper_;
};
} // namespace
// TODO(dstaessens): Add more test scenarios:
-// - Vary framerate
-// - Vary bitrate
-// - Flush midstream
// - Forcing key frames
// Encode video from start to end. Wait for the kFlushDone event at the end of
@@ -77,6 +184,8 @@ class VideoEncoderTest : public ::testing::Test {
TEST_F(VideoEncoderTest, FlushAtEndOfStream) {
VideoEncoderClientConfig config = VideoEncoderClientConfig();
config.framerate = g_env->Video()->FrameRate();
+ config.output_profile = g_env->Profile();
+ config.num_frames_to_encode = g_env->Video()->NumFrames();
auto encoder = CreateVideoEncoder(g_env->Video(), config);
encoder->Encode();
@@ -87,6 +196,132 @@ TEST_F(VideoEncoderTest, FlushAtEndOfStream) {
EXPECT_TRUE(encoder->WaitForBitstreamProcessors());
}
+// Test initializing the video encoder. The test will be successful if the
+// encoder can be set up for the specified codec and resolution. The test only
+// verifies initialization and doesn't do any encoding.
+TEST_F(VideoEncoderTest, Initialize) {
+ VideoEncoderClientConfig config = VideoEncoderClientConfig();
+ auto encoder = CreateVideoEncoder(g_env->Video(), config);
+
+ EXPECT_EQ(encoder->GetEventCount(VideoEncoder::kInitialized), 1u);
+}
+
+// Create a video encoder and immediately destroy it without initializing. The
+// video encoder will be destroyed automatically when it goes out of scope at
+// the end of the test. The test will pass if no asserts or crashes are
+// triggered upon destroying.
+TEST_F(VideoEncoderTest, DestroyBeforeInitialize) {
+ VideoEncoderClientConfig config = VideoEncoderClientConfig();
+ auto video_encoder = VideoEncoder::Create(config);
+
+ EXPECT_NE(video_encoder, nullptr);
+}
+
+// Encode multiple videos simultaneously from start to finish.
+TEST_F(VideoEncoderTest, FlushAtEndOfStream_MultipleConcurrentEncodes) {
+ // The minimal number of concurrent encoders we expect to be supported.
+ constexpr size_t kMinSupportedConcurrentEncoders = 3;
+
+ VideoEncoderClientConfig config = VideoEncoderClientConfig();
+ config.framerate = g_env->Video()->FrameRate();
+ config.output_profile = g_env->Profile();
+ config.num_frames_to_encode = g_env->Video()->NumFrames();
+
+ std::vector<std::unique_ptr<VideoEncoder>> encoders(
+ kMinSupportedConcurrentEncoders);
+ for (size_t i = 0; i < kMinSupportedConcurrentEncoders; ++i)
+ encoders[i] = CreateVideoEncoder(g_env->Video(), config);
+
+ for (size_t i = 0; i < kMinSupportedConcurrentEncoders; ++i)
+ encoders[i]->Encode();
+
+ for (size_t i = 0; i < kMinSupportedConcurrentEncoders; ++i) {
+ EXPECT_TRUE(encoders[i]->WaitForFlushDone());
+ EXPECT_EQ(encoders[i]->GetFlushDoneCount(), 1u);
+ EXPECT_EQ(encoders[i]->GetFrameReleasedCount(),
+ g_env->Video()->NumFrames());
+ EXPECT_TRUE(encoders[i]->WaitForBitstreamProcessors());
+ }
+}
+
+TEST_F(VideoEncoderTest, BitrateCheck) {
+ VideoEncoderClientConfig config = VideoEncoderClientConfig();
+ config.framerate = g_env->Video()->FrameRate();
+ config.output_profile = g_env->Profile();
+ config.num_frames_to_encode = kNumFramesToEncodeForBitrateCheck;
+ auto encoder = CreateVideoEncoder(g_env->Video(), config);
+
+ encoder->Encode();
+ EXPECT_TRUE(encoder->WaitForFlushDone());
+
+ EXPECT_EQ(encoder->GetFlushDoneCount(), 1u);
+ EXPECT_EQ(encoder->GetFrameReleasedCount(), config.num_frames_to_encode);
+ EXPECT_TRUE(encoder->WaitForBitstreamProcessors());
+ EXPECT_NEAR(encoder->GetStats().Bitrate(), config.bitrate,
+ kBitrateTolerance * config.bitrate);
+}
+
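With kBitrateTolerance at 0.1, the EXPECT_NEAR above amounts to ±10% of the requested bitrate; for example, a 2 Mbps target accepts any measured bitrate between 1.8 and 2.2 Mbps.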
+TEST_F(VideoEncoderTest, DynamicBitrateChange) {
+ VideoEncoderClientConfig config;
+ config.framerate = g_env->Video()->FrameRate();
+ config.output_profile = g_env->Profile();
+ config.num_frames_to_encode = kNumFramesToEncodeForBitrateCheck * 2;
+ auto encoder = CreateVideoEncoder(g_env->Video(), config);
+
+ // Encode the video with the first bitrate.
+ const uint32_t first_bitrate = config.bitrate;
+ encoder->EncodeUntil(VideoEncoder::kFrameReleased,
+ kNumFramesToEncodeForBitrateCheck);
+ encoder->WaitForEvent(VideoEncoder::kFrameReleased,
+ kNumFramesToEncodeForBitrateCheck);
+ EXPECT_NEAR(encoder->GetStats().Bitrate(), first_bitrate,
+ kBitrateTolerance * first_bitrate);
+
+ // Encode the video with the second bitrate.
+ const uint32_t second_bitrate = first_bitrate * 3 / 2;
+ encoder->ResetStats();
+ encoder->UpdateBitrate(second_bitrate, config.framerate);
+ encoder->Encode();
+ EXPECT_TRUE(encoder->WaitForFlushDone());
+ EXPECT_NEAR(encoder->GetStats().Bitrate(), second_bitrate,
+ kBitrateTolerance * second_bitrate);
+
+ EXPECT_EQ(encoder->GetFlushDoneCount(), 1u);
+ EXPECT_EQ(encoder->GetFrameReleasedCount(), config.num_frames_to_encode);
+ EXPECT_TRUE(encoder->WaitForBitstreamProcessors());
+}
+
+TEST_F(VideoEncoderTest, DynamicFramerateChange) {
+ VideoEncoderClientConfig config;
+ config.framerate = g_env->Video()->FrameRate();
+ config.output_profile = g_env->Profile();
+ config.num_frames_to_encode = kNumFramesToEncodeForBitrateCheck * 2;
+ auto encoder = CreateVideoEncoder(g_env->Video(), config);
+
+ // Encode the video with the first framerate.
+ const uint32_t first_framerate = config.framerate;
+
+ encoder->EncodeUntil(VideoEncoder::kFrameReleased,
+ kNumFramesToEncodeForBitrateCheck);
+ encoder->WaitForEvent(VideoEncoder::kFrameReleased,
+ kNumFramesToEncodeForBitrateCheck);
+ EXPECT_NEAR(encoder->GetStats().Bitrate(), config.bitrate,
+ kBitrateTolerance * config.bitrate);
+
+ // Encode the video with the second framerate.
+ const uint32_t second_framerate = first_framerate * 3 / 2;
+ encoder->ResetStats();
+ encoder->UpdateBitrate(config.bitrate, second_framerate);
+ encoder->Encode();
+ EXPECT_TRUE(encoder->WaitForFlushDone());
+ EXPECT_NEAR(encoder->GetStats().Bitrate(), config.bitrate,
+ kBitrateTolerance * config.bitrate);
+
+ EXPECT_EQ(encoder->GetFlushDoneCount(), 1u);
+ EXPECT_EQ(encoder->GetFrameReleasedCount(), config.num_frames_to_encode);
+ EXPECT_TRUE(encoder->WaitForBitstreamProcessors());
+}
} // namespace test
} // namespace media
@@ -111,8 +346,13 @@ int main(int argc, char** argv) {
: base::FilePath(media::test::kDefaultTestVideoPath);
base::FilePath video_metadata_path =
(args.size() >= 2) ? base::FilePath(args[1]) : base::FilePath();
+ std::string codec = "h264";
+ bool output_bitstream = false;
+ base::FilePath output_folder =
+ base::FilePath(base::FilePath::kCurrentDirectory);
// Parse command line arguments.
+ bool enable_bitstream_validator = true;
base::CommandLine::SwitchMap switches = cmd_line->GetSwitches();
for (base::CommandLine::SwitchMap::const_iterator it = switches.begin();
it != switches.end(); ++it) {
@@ -121,9 +361,19 @@ int main(int argc, char** argv) {
continue;
}
- std::cout << "unknown option: --" << it->first << "\n"
- << media::test::usage_msg;
- return EXIT_FAILURE;
+ if (it->first == "codec") {
+ codec = it->second;
+ } else if (it->first == "disable_validator") {
+ enable_bitstream_validator = false;
+ } else if (it->first == "output_bitstream") {
+ output_bitstream = true;
+ } else if (it->first == "output_folder") {
+ output_folder = base::FilePath(it->second);
+ } else {
+ std::cout << "unknown option: --" << it->first << "\n"
+ << media::test::usage_msg;
+ return EXIT_FAILURE;
+ }
}
testing::InitGoogleTest(&argc, argv);
@@ -131,7 +381,8 @@ int main(int argc, char** argv) {
// Set up our test environment.
media::test::VideoEncoderTestEnvironment* test_environment =
media::test::VideoEncoderTestEnvironment::Create(
- video_path, video_metadata_path, base::FilePath());
+ video_path, video_metadata_path, enable_bitstream_validator,
+ output_folder, codec, output_bitstream);
if (!test_environment)
return EXIT_FAILURE;
diff --git a/chromium/media/gpu/video_encode_accelerator_unittest.cc b/chromium/media/gpu/video_encode_accelerator_unittest.cc
index 25b8342343c..002cd3bea2a 100644
--- a/chromium/media/gpu/video_encode_accelerator_unittest.cc
+++ b/chromium/media/gpu/video_encode_accelerator_unittest.cc
@@ -20,6 +20,7 @@
#include "base/containers/queue.h"
#include "base/files/file_util.h"
#include "base/macros.h"
+#include "base/memory/aligned_memory.h"
#include "base/memory/ref_counted.h"
#include "base/memory/unsafe_shared_memory_region.h"
#include "base/memory/weak_ptr.h"
@@ -216,6 +217,21 @@ bool g_native_input = false;
class VideoEncodeAcceleratorTestEnvironment;
VideoEncodeAcceleratorTestEnvironment* g_env;
+std::unique_ptr<base::test::ScopedFeatureList> CreateScopedFeatureList() {
+#if BUILDFLAG(USE_VAAPI)
+ auto scoped_feature_list = std::make_unique<base::test::ScopedFeatureList>();
+ std::vector<base::Feature> enabled_features = {
+ // TODO(crbug.com/828482): remove once enabled by default.
+ media::kVaapiLowPowerEncoderGen9x,
+ // TODO(crbug.com/811912): remove once enabled by default.
+ media::kVaapiVP9Encoder};
+ scoped_feature_list->InitWithFeatures(enabled_features, {});
+ return scoped_feature_list;
+#else
+ return nullptr;
+#endif // BUILDFLAG(USE_VAAPI)
+}
+
// The number of frames to be encoded. This variable is set by the switch
// "--num_frames_to_encode". Ignored if 0.
int g_num_frames_to_encode = 0;
@@ -541,7 +557,7 @@ static void CreateAlignedInputStreamFile(const gfx::Size& coded_size,
const char* src_ptr = &src_data[0];
for (size_t i = 0; i < num_planes; i++) {
// Assert that each plane of frame starts at required byte boundary.
- ASSERT_EQ(0u, dest_offset & (test::kPlatformBufferAlignment - 1))
+ ASSERT_TRUE(base::IsAligned(dest_offset, test::kPlatformBufferAlignment))
<< "Planes of frame should be mapped per platform requirements";
char* dst_ptr = &test_stream->aligned_in_file_data[dest_offset];
for (size_t j = 0; j < visible_plane_rows[i]; j++) {
@@ -1492,10 +1508,15 @@ class VEAClient : public VEAClientBase {
bool mid_stream_framerate_switch,
bool verify_output,
bool verify_output_timestamp,
- bool force_level);
+ bool force_level,
+ bool scale);
void CreateEncoder();
void DestroyEncoder();
+ bool requested_scaling() const {
+ return encoded_visible_size_ != test_stream_->visible_size;
+ }
+
// VideoDecodeAccelerator::Client implementation.
void RequireBitstreamBuffers(unsigned int input_count,
const gfx::Size& input_coded_size,
@@ -1653,6 +1674,11 @@ class VEAClient : public VEAClientBase {
// Check whether the output timestamps match input timestamps.
bool verify_output_timestamp_;
+ // The visible size we want the encoded stream to have. This can be different
+ // than the visible size of the |test_stream_| when doing scaling in native
+ // input mode.
+ gfx::Size encoded_visible_size_;
+
// Used to perform codec-specific sanity checks on the stream.
std::unique_ptr<StreamValidator> stream_validator_;
@@ -1704,7 +1730,8 @@ VEAClient::VEAClient(TestStream* test_stream,
bool mid_stream_framerate_switch,
bool verify_output,
bool verify_output_timestamp,
- bool force_level)
+ bool force_level,
+ bool scale)
: VEAClientBase(note),
state_(CS_CREATED),
test_stream_(test_stream),
@@ -1756,6 +1783,18 @@ VEAClient::VEAClient(TestStream* test_stream,
}
}
+ encoded_visible_size_ = test_stream_->visible_size;
+ if (scale) {
+ LOG_ASSERT(g_native_input)
+ << "Scaling is only supported on native input mode";
+ // Scale to 3/4 of the original size. The reason we don't go smaller is that
+ // we don't want to go below the minimum supported resolution of the
+ // hardware encoder, and 3/4 works across all boards with the current test
+ // videos.
+ encoded_visible_size_.set_width(3 * encoded_visible_size_.width() / 4);
+ encoded_visible_size_.set_height(3 * encoded_visible_size_.height() / 4);
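+ // Illustrative numbers (assumed): a 640x360 test stream would be encoded
+ // at 480x270.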
+ }
+
if (save_to_file_) {
LOG_ASSERT(!test_stream_->out_filename.empty());
#if defined(OS_POSIX)
@@ -1805,7 +1844,7 @@ void VEAClient::CreateEncoder() {
? VideoEncodeAccelerator::Config::StorageType::kDmabuf
: VideoEncodeAccelerator::Config::StorageType::kShmem;
const VideoEncodeAccelerator::Config config(
- test_stream_->pixel_format, test_stream_->visible_size,
+ test_stream_->pixel_format, encoded_visible_size_,
test_stream_->requested_profile, requested_bitrate_, requested_framerate_,
keyframe_period_, test_stream_->requested_level, storage_type);
encoder_ = CreateVideoEncodeAccelerator(config, this, gpu::GpuPreferences());
@@ -1892,9 +1931,16 @@ void VEAClient::RequireBitstreamBuffers(unsigned int input_count,
if (quality_validator_)
quality_validator_->Initialize(input_coded_size,
- gfx::Rect(test_stream_->visible_size));
+ gfx::Rect(encoded_visible_size_));
- CreateAlignedInputStreamFile(input_coded_size, test_stream_);
+ // When scaling is requested in native input mode, |input_coded_size| is not
+ // useful for building the input video frames because the encoder's image
+ // processor will be the one responsible for building the video frames that
+ // are fed to the hardware encoder. Instead, we can just use the unscaled
+ // visible size as the coded size.
+ const gfx::Size coded_size_to_use =
+ requested_scaling() ? test_stream_->visible_size : input_coded_size;
+ CreateAlignedInputStreamFile(coded_size_to_use, test_stream_);
num_frames_to_encode_ = test_stream_->num_frames;
if (g_num_frames_to_encode > 0)
@@ -1916,7 +1962,7 @@ void VEAClient::RequireBitstreamBuffers(unsigned int input_count,
}
}
- input_coded_size_ = input_coded_size;
+ input_coded_size_ = coded_size_to_use;
num_required_input_buffers_ = input_count;
ASSERT_GT(num_required_input_buffers_, 0UL);
@@ -1989,9 +2035,9 @@ void VEAClient::BitstreamBufferReady(
stream_validator_->ProcessStreamBuffer(stream_ptr,
metadata.payload_size_bytes);
} else {
- // We don't know the visible size of without stream validator, just
- // send the expected value to pass the check.
- HandleEncodedFrame(metadata.key_frame, test_stream_->visible_size);
+ // We don't know the visible size of the encoded stream without the stream
+ // validator, so just send the expected value to pass the check.
+ HandleEncodedFrame(metadata.key_frame, encoded_visible_size_);
}
if (quality_validator_) {
@@ -2001,8 +2047,9 @@ void VEAClient::BitstreamBufferReady(
quality_validator_->AddDecodeBuffer(buffer);
}
// If the encoder does not support flush, pretend flush is done when all
- // frames are received.
- if (!encoder_->IsFlushSupported() &&
+ // frames are received. We also do this when scaling is requested (because a
+ // well-behaved client should not request a flush in this situation).
+ if ((!encoder_->IsFlushSupported() || requested_scaling()) &&
num_encoded_frames_ == num_frames_to_encode_) {
FlushEncoderDone(true);
}
@@ -2098,7 +2145,7 @@ scoped_refptr<VideoFrame> VEAClient::CreateFrame(off_t position) {
scoped_refptr<VideoFrame> video_frame =
VideoFrame::WrapExternalYuvDataWithLayout(
*layout, gfx::Rect(test_stream_->visible_size),
- test_stream_->visible_size, frame_data[0], frame_data[1],
+ /*natural_size=*/encoded_visible_size_, frame_data[0], frame_data[1],
frame_data[2],
// Timestamp needs to avoid starting from 0.
base::TimeDelta().FromMilliseconds(
@@ -2201,7 +2248,12 @@ void VEAClient::FeedEncoderWithOneInput() {
}
encoder_->Encode(video_frame, force_keyframe);
++num_frames_submitted_to_encoder_;
- if (num_frames_submitted_to_encoder_ == num_frames_to_encode_) {
+
+ // If scaling was requested, we don't need to flush: the only use case for
+ // Flush() is ARC++, where pixel format conversion and/or scaling are not
+ // used.
+ if (!requested_scaling() &&
+ num_frames_submitted_to_encoder_ == num_frames_to_encode_) {
FlushEncoder();
}
}
@@ -2266,7 +2318,7 @@ bool VEAClient::HandleEncodedFrame(bool keyframe,
}
}
- EXPECT_EQ(test_stream_->visible_size, visible_size);
+ EXPECT_EQ(encoded_visible_size_, visible_size);
if (num_encoded_frames_ == num_frames_to_encode_ / 2) {
VerifyStreamProperties();
@@ -2398,10 +2450,8 @@ void VEAClient::WriteIvfFileHeader(uint32_t fourcc) {
header.version = 0;
header.header_size = sizeof(header);
header.fourcc = fourcc; // VP80 or VP90
- header.width =
- base::checked_cast<uint16_t>(test_stream_->visible_size.width());
- header.height =
- base::checked_cast<uint16_t>(test_stream_->visible_size.height());
+ header.width = base::checked_cast<uint16_t>(encoded_visible_size_.width());
+ header.height = base::checked_cast<uint16_t>(encoded_visible_size_.height());
header.timebase_denum = requested_framerate_;
header.timebase_num = 1;
header.num_frames = num_frames_to_encode_;
@@ -2681,11 +2731,32 @@ void VEACacheLineUnalignedInputClient::FeedEncoderWithOneInput(
// - If true, verify the timestamps of output frames.
// - If true, verify the output level is as provided in input stream. Only
// available for H264 encoder for now.
+// - If true, request that the encoder scales the input stream to 75% of the
+// original size prior to encoding. This is only applicable when
+// |g_native_input| is true. Otherwise, the test is skipped. This is because
+// the intention is to exercise the image processor path inside the encoder,
+// and in non-native input mode, the scaling is done by the client instead of
+// the encoder (and we're not interested in testing that).
+// Note: we don't go smaller than 75% because we don't want to go below the
+// minimum resolution supported by the encoder (75% happens to work across all
+// devices with the current test videos).
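+// As a sketch of the parameter tuple (the scale flag is std::get<9> in
+// SetUp() below), the SimpleEncodeWithScaling instantiation passes
+//   std::make_tuple(1, true, 0, false, false, false, false, false, false,
+//                   true)
+// i.e. one encoder, saving output to file, with scaling requested.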
class VideoEncodeAcceleratorTest
: public ::testing::TestWithParam<
- std::tuple<int, bool, int, bool, bool, bool, bool, bool, bool>> {};
+ std::
+ tuple<int, bool, int, bool, bool, bool, bool, bool, bool, bool>> {
+ public:
+ void SetUp() override {
+ const bool scale = std::get<9>(GetParam());
+ if (scale && !g_native_input)
+ GTEST_SKIP();
+ }
+};
TEST_P(VideoEncodeAcceleratorTest, TestSimpleEncode) {
+ // Workaround: TestSuite::Initialize() overwrites specified features.
+ // Re-enable our required features here so that they are enabled in encoding.
+ auto scoped_feature_list = CreateScopedFeatureList();
+
size_t num_concurrent_encoders = std::get<0>(GetParam());
const bool save_to_file = std::get<1>(GetParam());
const unsigned int keyframe_period = std::get<2>(GetParam());
@@ -2696,6 +2767,7 @@ TEST_P(VideoEncodeAcceleratorTest, TestSimpleEncode) {
std::get<6>(GetParam()) || g_env->verify_all_output();
const bool verify_output_timestamp = std::get<7>(GetParam());
const bool force_level = std::get<8>(GetParam());
+ const bool scale = std::get<9>(GetParam());
#if defined(OS_CHROMEOS)
if (ShouldSkipTest(g_env->test_streams_[0]->pixel_format))
@@ -2749,7 +2821,7 @@ TEST_P(VideoEncodeAcceleratorTest, TestSimpleEncode) {
g_env->test_streams_[test_stream_index].get(), notes.back().get(),
encoder_save_to_file, keyframe_period, force_bitrate,
mid_stream_bitrate_switch, mid_stream_framerate_switch, verify_output,
- verify_output_timestamp, force_level));
+ verify_output_timestamp, force_level, scale));
g_env->GetRenderingTaskRunner()->PostTask(
FROM_HERE, base::BindOnce(&VEAClient::CreateEncoder,
@@ -2825,6 +2897,10 @@ void SimpleTestFunc() {
}
TEST_P(VideoEncodeAcceleratorSimpleTest, TestSimpleEncode) {
+ // Workaround: TestSuite::Initialize() overwrites specified features.
+ // Re-enable our required features here so that they are enabled in encoding.
+ auto scoped_feature_list = CreateScopedFeatureList();
+
const int test_type = GetParam();
ASSERT_LT(test_type, 2) << "Invalid test type=" << test_type;
@@ -2852,8 +2928,22 @@ INSTANTIATE_TEST_SUITE_P(SimpleEncode,
false,
false,
false,
+ false,
false)));
+INSTANTIATE_TEST_SUITE_P(SimpleEncodeWithScaling,
+ VideoEncodeAcceleratorTest,
+ ::testing::Values(std::make_tuple(1,
+ true,
+ 0,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ true)));
+
INSTANTIATE_TEST_SUITE_P(EncoderPerf,
VideoEncodeAcceleratorTest,
::testing::Values(std::make_tuple(1,
@@ -2864,6 +2954,7 @@ INSTANTIATE_TEST_SUITE_P(EncoderPerf,
false,
false,
false,
+ false,
false)));
INSTANTIATE_TEST_SUITE_P(ForceKeyframes,
@@ -2876,6 +2967,7 @@ INSTANTIATE_TEST_SUITE_P(ForceKeyframes,
false,
false,
false,
+ false,
false)));
INSTANTIATE_TEST_SUITE_P(ForceBitrate,
@@ -2888,6 +2980,7 @@ INSTANTIATE_TEST_SUITE_P(ForceBitrate,
false,
false,
false,
+ false,
false)));
INSTANTIATE_TEST_SUITE_P(MidStreamParamSwitchBitrate,
@@ -2900,6 +2993,7 @@ INSTANTIATE_TEST_SUITE_P(MidStreamParamSwitchBitrate,
false,
false,
false,
+ false,
false)));
// TODO(kcwu): add back bitrate test after https://crbug.com/693336 fixed.
@@ -2913,6 +3007,7 @@ INSTANTIATE_TEST_SUITE_P(DISABLED_MidStreamParamSwitchFPS,
true,
false,
false,
+ false,
false)));
INSTANTIATE_TEST_SUITE_P(MultipleEncoders,
@@ -2925,6 +3020,7 @@ INSTANTIATE_TEST_SUITE_P(MultipleEncoders,
false,
false,
false,
+ false,
false),
std::make_tuple(3,
false,
@@ -2934,6 +3030,7 @@ INSTANTIATE_TEST_SUITE_P(MultipleEncoders,
false,
false,
false,
+ false,
false)));
INSTANTIATE_TEST_SUITE_P(VerifyTimestamp,
@@ -2946,6 +3043,7 @@ INSTANTIATE_TEST_SUITE_P(VerifyTimestamp,
false,
false,
true,
+ false,
false)));
INSTANTIATE_TEST_SUITE_P(ForceLevel,
@@ -2958,7 +3056,8 @@ INSTANTIATE_TEST_SUITE_P(ForceLevel,
false,
false,
false,
- true)));
+ true,
+ false)));
INSTANTIATE_TEST_SUITE_P(NoInputTest,
VideoEncodeAcceleratorSimpleTest,
@@ -2979,6 +3078,7 @@ INSTANTIATE_TEST_SUITE_P(SimpleEncode,
false,
false,
false,
+ false,
false),
std::make_tuple(1,
true,
@@ -2988,6 +3088,7 @@ INSTANTIATE_TEST_SUITE_P(SimpleEncode,
false,
true,
false,
+ false,
false)));
INSTANTIATE_TEST_SUITE_P(EncoderPerf,
@@ -3000,6 +3101,7 @@ INSTANTIATE_TEST_SUITE_P(EncoderPerf,
false,
false,
false,
+ false,
false)));
INSTANTIATE_TEST_SUITE_P(MultipleEncoders,
@@ -3012,6 +3114,7 @@ INSTANTIATE_TEST_SUITE_P(MultipleEncoders,
false,
false,
false,
+ false,
false)));
INSTANTIATE_TEST_SUITE_P(VerifyTimestamp,
@@ -3024,6 +3127,7 @@ INSTANTIATE_TEST_SUITE_P(VerifyTimestamp,
false,
false,
true,
+ false,
false)));
#if defined(OS_WIN)
@@ -3037,6 +3141,7 @@ INSTANTIATE_TEST_SUITE_P(ForceBitrate,
false,
false,
false,
+ false,
false)));
#endif // defined(OS_WIN)
@@ -3072,14 +3177,7 @@ class VEATestSuite : public base::TestSuite {
media::g_verify_all_output)));
#if BUILDFLAG(USE_VAAPI)
- base::test::ScopedFeatureList scoped_feature_list;
- std::vector<base::Feature> enabled_features = {
- // TODO(crbug.com/811912): remove once enabled by default.
- media::kVaapiVP9Encoder,
- // TODO(crbug.com/828482): Remove once H264 encoder on AMD is enabled by
- // default.
- media::kVaapiH264AMDEncoder};
- scoped_feature_list.InitWithFeatures(enabled_features, {});
+ auto scoped_feature_list = CreateScopedFeatureList();
media::VaapiWrapper::PreSandboxInitialization();
#elif defined(OS_WIN)
media::MediaFoundationVideoEncodeAccelerator::PreSandboxInitialization();
diff --git a/chromium/media/gpu/vp8_decoder.cc b/chromium/media/gpu/vp8_decoder.cc
index 9b6767284df..23a88b6696b 100644
--- a/chromium/media/gpu/vp8_decoder.cc
+++ b/chromium/media/gpu/vp8_decoder.cc
@@ -3,6 +3,9 @@
// found in the LICENSE file.
#include "media/gpu/vp8_decoder.h"
+
+#include "base/logging.h"
+#include "base/notreached.h"
#include "media/base/limits.h"
namespace media {
diff --git a/chromium/media/gpu/vp9_reference_frame_vector.cc b/chromium/media/gpu/vp9_reference_frame_vector.cc
index 94627f23ecc..f4541bf53bd 100644
--- a/chromium/media/gpu/vp9_reference_frame_vector.cc
+++ b/chromium/media/gpu/vp9_reference_frame_vector.cc
@@ -4,6 +4,8 @@
#include "media/gpu/vp9_reference_frame_vector.h"
+#include <bitset>
+
#include "media/gpu/vp9_picture.h"
namespace media {
@@ -12,32 +14,26 @@ Vp9ReferenceFrameVector::Vp9ReferenceFrameVector() {
DETACH_FROM_SEQUENCE(sequence_checker_);
}
-Vp9ReferenceFrameVector::~Vp9ReferenceFrameVector() {}
+Vp9ReferenceFrameVector::~Vp9ReferenceFrameVector() = default;
-// Refresh the reference frame buffer slots with current frame
-// based on refresh_frame_flags set in the frame_hdr.
+// Refreshes the |reference_frames_| slots with |pic|, as indicated by the
+// refresh_frame_flags in its frame header.
void Vp9ReferenceFrameVector::Refresh(scoped_refptr<VP9Picture> pic) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
DCHECK(pic);
- const auto& frame_hdr = pic->frame_hdr;
+ const std::bitset<kVp9NumRefFrames> refresh_frame_flags(
+ pic->frame_hdr->refresh_frame_flags);
- for (size_t i = 0, mask = frame_hdr->refresh_frame_flags; mask;
- mask >>= 1, ++i) {
- if (mask & 1)
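+ // Illustrative only: refresh_frame_flags is a bitmask over the
+ // kVp9NumRefFrames (8) slots, so e.g. a header with refresh_frame_flags ==
+ // 0b00000101 makes the loop below replace slots 0 and 2 with |pic|.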
+ for (size_t i = 0; i < kVp9NumRefFrames; ++i) {
+ if (refresh_frame_flags[i])
reference_frames_[i] = pic;
}
}
void Vp9ReferenceFrameVector::Clear() {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
-
- for (auto& f : reference_frames_)
- f = nullptr;
+ reference_frames_.fill(nullptr);
}
-// VP9 can maintains up to eight active reference frames and each
-// frame can use up to three reference frames from this list.
-// GetFrame will return the reference frame placed in reference_frames_[index]
scoped_refptr<VP9Picture> Vp9ReferenceFrameVector::GetFrame(
size_t index) const {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
diff --git a/chromium/media/gpu/vp9_reference_frame_vector.h b/chromium/media/gpu/vp9_reference_frame_vector.h
index ea9b2b8bdee..bf91596b320 100644
--- a/chromium/media/gpu/vp9_reference_frame_vector.h
+++ b/chromium/media/gpu/vp9_reference_frame_vector.h
@@ -15,9 +15,8 @@ namespace media {
class VP9Picture;
-// class to share reference frame management code
-// between encoder and decoder classes.
-// TODO(crbug.com/924804): Add the support in Decoder class.
+// This class encapsulates VP9-specific reference frame management code. This
+// class is thread-affine.
class Vp9ReferenceFrameVector {
public:
Vp9ReferenceFrameVector();
diff --git a/chromium/media/gpu/windows/av1_guids.h b/chromium/media/gpu/windows/av1_guids.h
new file mode 100644
index 00000000000..c5e4e5d314c
--- /dev/null
+++ b/chromium/media/gpu/windows/av1_guids.h
@@ -0,0 +1,52 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_GPU_WINDOWS_AV1_GUIDS_H_
+#define MEDIA_GPU_WINDOWS_AV1_GUIDS_H_
+
+#include <dxva.h>
+#include <initguid.h>
+
+#if WDK_NTDDI_VERSION <= NTDDI_WIN10_19H1
+DEFINE_GUID(DXVA_ModeAV1_VLD_Profile0,
+ 0xb8be4ccb,
+ 0xcf53,
+ 0x46ba,
+ 0x8d,
+ 0x59,
+ 0xd6,
+ 0xb8,
+ 0xa6,
+ 0xda,
+ 0x5d,
+ 0x2a);
+
+DEFINE_GUID(DXVA_ModeAV1_VLD_Profile1,
+ 0x6936ff0f,
+ 0x45b1,
+ 0x4163,
+ 0x9c,
+ 0xc1,
+ 0x64,
+ 0x6e,
+ 0xf6,
+ 0x94,
+ 0x61,
+ 0x08);
+
+DEFINE_GUID(DXVA_ModeAV1_VLD_Profile2,
+ 0x0c5f2aa1,
+ 0xe541,
+ 0x4089,
+ 0xbb,
+ 0x7b,
+ 0x98,
+ 0x11,
+ 0x0a,
+ 0x19,
+ 0xd7,
+ 0xc8);
+#endif // WDK_NTDDI_VERSION <= NTDDI_WIN10_19H1
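+
+// Usage sketch (hypothetical caller, not part of this header): since
+// <initguid.h> is included above, these macros emit GUID definitions that can
+// be compared against decode profiles reported by ID3D11VideoDevice, e.g.:
+//   if (profile_guid == DXVA_ModeAV1_VLD_Profile0) { /* AV1 profile 0 */ }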
+
+#endif // MEDIA_GPU_WINDOWS_AV1_GUIDS_H_
diff --git a/chromium/media/gpu/windows/d3d11_decoder_configurator.cc b/chromium/media/gpu/windows/d3d11_decoder_configurator.cc
index a4a42eabf51..869106f87e9 100644
--- a/chromium/media/gpu/windows/d3d11_decoder_configurator.cc
+++ b/chromium/media/gpu/windows/d3d11_decoder_configurator.cc
@@ -73,9 +73,11 @@ bool D3D11DecoderConfigurator::SupportsDevice(
ComD3D11Texture2D D3D11DecoderConfigurator::CreateOutputTexture(
ComD3D11Device device,
- gfx::Size size) {
+ gfx::Size size,
+ uint32_t array_size) {
output_texture_desc_.Width = size.width();
output_texture_desc_.Height = size.height();
+ output_texture_desc_.ArraySize = array_size;
ComD3D11Texture2D result;
if (!SUCCEEDED(
@@ -100,7 +102,6 @@ void D3D11DecoderConfigurator::SetUpTextureDescriptor(bool supports_swap_chain,
bool is_encrypted) {
output_texture_desc_ = {};
output_texture_desc_.MipLevels = 1;
- output_texture_desc_.ArraySize = D3D11DecoderConfigurator::BUFFER_COUNT;
output_texture_desc_.Format = dxgi_format_;
output_texture_desc_.SampleDesc.Count = 1;
output_texture_desc_.Usage = D3D11_USAGE_DEFAULT;
diff --git a/chromium/media/gpu/windows/d3d11_decoder_configurator.h b/chromium/media/gpu/windows/d3d11_decoder_configurator.h
index a23535bc615..3d6cd49e90c 100644
--- a/chromium/media/gpu/windows/d3d11_decoder_configurator.h
+++ b/chromium/media/gpu/windows/d3d11_decoder_configurator.h
@@ -40,7 +40,9 @@ class MEDIA_GPU_EXPORT D3D11DecoderConfigurator {
bool SupportsDevice(ComD3D11VideoDevice video_device);
// Create the decoder's output texture.
- ComD3D11Texture2D CreateOutputTexture(ComD3D11Device device, gfx::Size size);
+ ComD3D11Texture2D CreateOutputTexture(ComD3D11Device device,
+ gfx::Size size,
+ uint32_t array_size);
const D3D11_VIDEO_DECODER_DESC* DecoderDescriptor() const {
return &decoder_desc_;
diff --git a/chromium/media/gpu/windows/d3d11_h264_accelerator.cc b/chromium/media/gpu/windows/d3d11_h264_accelerator.cc
index df549d3a380..e87c1ece44f 100644
--- a/chromium/media/gpu/windows/d3d11_h264_accelerator.cc
+++ b/chromium/media/gpu/windows/d3d11_h264_accelerator.cc
@@ -45,12 +45,12 @@ void AppendSubsamples(
class D3D11H264Picture : public H264Picture {
public:
D3D11H264Picture(D3D11PictureBuffer* picture)
- : picture(picture), level_(picture->level()) {
+ : picture(picture), picture_index_(picture->picture_index()) {
picture->set_in_picture_use(true);
}
D3D11PictureBuffer* picture;
- size_t level_;
+ size_t picture_index_;
protected:
~D3D11H264Picture() override;
@@ -63,16 +63,16 @@ D3D11H264Picture::~D3D11H264Picture() {
D3D11H264Accelerator::D3D11H264Accelerator(
D3D11VideoDecoderClient* client,
MediaLog* media_log,
- ComD3D11VideoDecoder video_decoder,
ComD3D11VideoDevice video_device,
std::unique_ptr<VideoContextWrapper> video_context)
: client_(client),
media_log_(media_log),
- video_decoder_(video_decoder),
video_device_(video_device),
video_context_(std::move(video_context)) {
DCHECK(client);
DCHECK(media_log_);
+ client->SetDecoderCB(base::BindRepeating(
+ &D3D11H264Accelerator::SetVideoDecoder, base::Unretained(this)));
}
D3D11H264Accelerator::~D3D11H264Accelerator() {}
@@ -135,7 +135,7 @@ DecoderStatus D3D11H264Accelerator::SubmitFrameMetadata(
D3D11H264Picture* our_ref_pic = static_cast<D3D11H264Picture*>(it->get());
if (!our_ref_pic->ref)
continue;
- ref_frame_list_[i].Index7Bits = our_ref_pic->level_;
+ ref_frame_list_[i].Index7Bits = our_ref_pic->picture_index_;
ref_frame_list_[i].AssociatedFlag = our_ref_pic->long_term;
field_order_cnt_list_[i][0] = our_ref_pic->top_field_order_cnt;
field_order_cnt_list_[i][1] = our_ref_pic->bottom_field_order_cnt;
@@ -281,7 +281,7 @@ void D3D11H264Accelerator::PicParamsFromSliceHeader(
void D3D11H264Accelerator::PicParamsFromPic(DXVA_PicParams_H264* pic_param,
scoped_refptr<H264Picture> pic) {
pic_param->CurrPic.Index7Bits =
- static_cast<D3D11H264Picture*>(pic.get())->level_;
+ static_cast<D3D11H264Picture*>(pic.get())->picture_index_;
pic_param->RefPicFlag = pic->ref;
pic_param->frame_num = pic->frame_num;
@@ -588,4 +588,8 @@ void D3D11H264Accelerator::RecordFailure(const std::string& reason,
MEDIA_LOG(ERROR, media_log_) << hr_string << ": " << reason;
}
+void D3D11H264Accelerator::SetVideoDecoder(ComD3D11VideoDecoder video_decoder) {
+ video_decoder_ = std::move(video_decoder);
+}
+
} // namespace media
diff --git a/chromium/media/gpu/windows/d3d11_h264_accelerator.h b/chromium/media/gpu/windows/d3d11_h264_accelerator.h
index cd9dd468755..00e2bd5cecd 100644
--- a/chromium/media/gpu/windows/d3d11_h264_accelerator.h
+++ b/chromium/media/gpu/windows/d3d11_h264_accelerator.h
@@ -34,7 +34,6 @@ class D3D11H264Accelerator : public H264Decoder::H264Accelerator {
public:
D3D11H264Accelerator(D3D11VideoDecoderClient* client,
MediaLog* media_log,
- ComD3D11VideoDecoder video_decoder,
ComD3D11VideoDevice video_device,
std::unique_ptr<VideoContextWrapper> video_context);
~D3D11H264Accelerator() override;
@@ -78,6 +77,8 @@ class D3D11H264Accelerator : public H264Decoder::H264Accelerator {
void PicParamsFromPic(DXVA_PicParams_H264* pic_param,
scoped_refptr<H264Picture> pic);
+ void SetVideoDecoder(ComD3D11VideoDecoder video_decoder);
+
private:
bool SubmitSliceData();
bool RetrieveBitstreamBuffer();
diff --git a/chromium/media/gpu/windows/d3d11_picture_buffer.cc b/chromium/media/gpu/windows/d3d11_picture_buffer.cc
index 7c0278b690e..60d1720e92f 100644
--- a/chromium/media/gpu/windows/d3d11_picture_buffer.cc
+++ b/chromium/media/gpu/windows/d3d11_picture_buffer.cc
@@ -24,15 +24,17 @@ namespace media {
D3D11PictureBuffer::D3D11PictureBuffer(
scoped_refptr<base::SequencedTaskRunner> delete_task_runner,
ComD3D11Texture2D texture,
+ size_t array_slice,
std::unique_ptr<Texture2DWrapper> texture_wrapper,
gfx::Size size,
- size_t level)
+ size_t picture_index)
: RefCountedDeleteOnSequence<D3D11PictureBuffer>(
std::move(delete_task_runner)),
texture_(std::move(texture)),
+ array_slice_(array_slice),
texture_wrapper_(std::move(texture_wrapper)),
size_(size),
- level_(level) {}
+ picture_index_(picture_index) {}
D3D11PictureBuffer::~D3D11PictureBuffer() {
}
@@ -46,7 +48,7 @@ bool D3D11PictureBuffer::Init(
D3D11_VIDEO_DECODER_OUTPUT_VIEW_DESC view_desc = {};
view_desc.DecodeProfile = decoder_guid;
view_desc.ViewDimension = D3D11_VDOV_DIMENSION_TEXTURE2D;
- view_desc.Texture2D.ArraySlice = (UINT)level_;
+ view_desc.Texture2D.ArraySlice = array_slice_;
if (!texture_wrapper_->Init(std::move(gpu_task_runner),
std::move(get_helper_cb))) {
@@ -69,8 +71,9 @@ bool D3D11PictureBuffer::ProcessTexture(
const gfx::ColorSpace& input_color_space,
MailboxHolderArray* mailbox_dest,
gfx::ColorSpace* output_color_space) {
- return texture_wrapper_->ProcessTexture(Texture(), level_, input_color_space,
- mailbox_dest, output_color_space);
+ return texture_wrapper_->ProcessTexture(Texture(), array_slice_,
+ input_color_space, mailbox_dest,
+ output_color_space);
}
ComD3D11Texture2D D3D11PictureBuffer::Texture() const {
diff --git a/chromium/media/gpu/windows/d3d11_picture_buffer.h b/chromium/media/gpu/windows/d3d11_picture_buffer.h
index d605772d147..08e2c307725 100644
--- a/chromium/media/gpu/windows/d3d11_picture_buffer.h
+++ b/chromium/media/gpu/windows/d3d11_picture_buffer.h
@@ -47,13 +47,18 @@ class MEDIA_GPU_EXPORT D3D11PictureBuffer
public:
// |texture_wrapper| is responsible for controlling mailbox access to
// the ID3D11Texture2D,
- // |level| is the picturebuffer index inside the Array-type ID3D11Texture2D.
+ // |array_slice| is the picture buffer index inside the Array-type
+ // ID3D11Texture2D. |picture_index| is a unique id used to identify this
+ // picture to the decoder. If a texture array is used, then it might as well
+ // be equal to the texture array index. Otherwise, any 0-based index is
+ // probably okay, though sequential makes sense.
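+ // For illustration: D3D11VideoDecoder::CreatePictureBuffers() passes
+ // array_slice == picture_index == i for buffer |i| in array-texture mode,
+ // and array_slice == 0 with picture_index == i in single-texture mode.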
D3D11PictureBuffer(
scoped_refptr<base::SequencedTaskRunner> delete_task_runner,
ComD3D11Texture2D texture,
+ size_t array_slice,
std::unique_ptr<Texture2DWrapper> texture_wrapper,
gfx::Size size,
- size_t level);
+ size_t picture_index);
bool Init(scoped_refptr<base::SingleThreadTaskRunner> gpu_task_runner,
GetCommandBufferHelperCB get_helper_cb,
@@ -71,7 +76,7 @@ class MEDIA_GPU_EXPORT D3D11PictureBuffer
ComD3D11Texture2D Texture() const;
const gfx::Size& size() const { return size_; }
- size_t level() const { return level_; }
+ size_t picture_index() const { return picture_index_; }
// Is this PictureBuffer backing a VideoFrame right now?
bool in_client_use() const { return in_client_use_; }
@@ -97,11 +102,13 @@ class MEDIA_GPU_EXPORT D3D11PictureBuffer
friend class base::DeleteHelper<D3D11PictureBuffer>;
ComD3D11Texture2D texture_;
+ uint32_t array_slice_;
+
std::unique_ptr<Texture2DWrapper> texture_wrapper_;
gfx::Size size_;
bool in_picture_use_ = false;
bool in_client_use_ = false;
- size_t level_;
+ size_t picture_index_;
ComD3D11VideoDecoderOutputView output_view_;
diff --git a/chromium/media/gpu/windows/d3d11_texture_selector.cc b/chromium/media/gpu/windows/d3d11_texture_selector.cc
index b3f0c78377d..dd3b88544ac 100644
--- a/chromium/media/gpu/windows/d3d11_texture_selector.cc
+++ b/chromium/media/gpu/windows/d3d11_texture_selector.cc
@@ -152,7 +152,7 @@ std::unique_ptr<TextureSelector> TextureSelector::Create(
// If we're trying to produce an output texture that's different from what
// the decoder is providing, then we need to copy it.
- needs_texture_copy = (decoder_output_format != output_dxgi_format);
+ needs_texture_copy |= (decoder_output_format != output_dxgi_format);
// Force texture copy on if requested for debugging.
if (base::FeatureList::IsEnabled(kD3D11VideoDecoderAlwaysCopy))
diff --git a/chromium/media/gpu/windows/d3d11_texture_wrapper.cc b/chromium/media/gpu/windows/d3d11_texture_wrapper.cc
index ab7ea22a87f..58f36986b9b 100644
--- a/chromium/media/gpu/windows/d3d11_texture_wrapper.cc
+++ b/chromium/media/gpu/windows/d3d11_texture_wrapper.cc
@@ -74,6 +74,9 @@ bool DefaultTexture2DWrapper::ProcessTexture(
if (received_error_)
return false;
+ // Temporary check to track down https://crbug.com/1077645
+ CHECK(texture);
+
// It's okay to post and forget this call, since it'll be ordered correctly
// with respect to any access on the gpu main thread.
gpu_resources_.Post(FROM_HERE, &GpuResources::PushNewTexture,
@@ -259,14 +262,24 @@ void DefaultTexture2DWrapper::GpuResources::Init(
void DefaultTexture2DWrapper::GpuResources::PushNewTexture(
ComD3D11Texture2D texture,
size_t array_slice) {
- if (!helper_ || !helper_->MakeContextCurrent()) {
- NotifyError(StatusCode::kCantMakeContextCurrent);
+ // If init didn't complete, then signal (another) error that will probably be
+ // ignored in favor of whatever we signalled earlier.
+ if (!gl_image_ || !stream_) {
+ NotifyError(StatusCode::kDecoderInitializeNeverCompleted);
return;
}
- // Notify |gl_image_| that it has a new texture.
+ // Notify |gl_image_| that it has a new texture. Do this unconditionally, so
+ // that we can guarantee that the image isn't null. Nobody expects it to be,
+ // and failures will be noticed only asynchronously.
+ // https://crbug.com/1077645
gl_image_->SetTexture(texture, array_slice);
+ if (!helper_ || !helper_->MakeContextCurrent()) {
+ NotifyError(StatusCode::kCantMakeContextCurrent);
+ return;
+ }
+
// Notify angle that it has a new texture.
EGLAttrib frame_attributes[] = {
EGL_D3D_TEXTURE_SUBRESOURCE_ID_ANGLE,
diff --git a/chromium/media/gpu/windows/d3d11_video_decoder.cc b/chromium/media/gpu/windows/d3d11_video_decoder.cc
index 3ba6d9b3225..a98753cb255 100644
--- a/chromium/media/gpu/windows/d3d11_video_decoder.cc
+++ b/chromium/media/gpu/windows/d3d11_video_decoder.cc
@@ -9,6 +9,7 @@
#include <utility>
#include "base/bind.h"
+#include "base/bind_helpers.h"
#include "base/callback.h"
#include "base/debug/crash_logging.h"
#include "base/debug/dump_without_crashing.h"
@@ -26,6 +27,7 @@
#include "media/base/video_decoder_config.h"
#include "media/base/video_frame.h"
#include "media/base/video_util.h"
+#include "media/base/win/hresult_status_helper.h"
#include "media/gpu/windows/d3d11_picture_buffer.h"
#include "media/gpu/windows/d3d11_video_context_wrapper.h"
#include "media/gpu/windows/d3d11_video_decoder_impl.h"
@@ -166,23 +168,125 @@ HRESULT D3D11VideoDecoder::InitializeAcceleratedDecoder(
profile_ = config.profile();
if (config.codec() == kCodecVP9) {
accelerated_video_decoder_ = std::make_unique<VP9Decoder>(
- std::make_unique<D3D11VP9Accelerator>(this, media_log_.get(),
- video_decoder, video_device_,
- std::move(video_context)),
+ std::make_unique<D3D11VP9Accelerator>(
+ this, media_log_.get(), video_device_, std::move(video_context)),
profile_, config.color_space_info());
- return hr;
- }
-
- if (config.codec() == kCodecH264) {
+ } else if (config.codec() == kCodecH264) {
accelerated_video_decoder_ = std::make_unique<H264Decoder>(
- std::make_unique<D3D11H264Accelerator>(this, media_log_.get(),
- video_decoder, video_device_,
- std::move(video_context)),
+ std::make_unique<D3D11H264Accelerator>(
+ this, media_log_.get(), video_device_, std::move(video_context)),
profile_, config.color_space_info());
- return hr;
+ } else {
+ return E_FAIL;
}
- return E_FAIL;
+ // Provide the initial video decoder object.
+ DCHECK(set_accelerator_decoder_cb_);
+ set_accelerator_decoder_cb_.Run(std::move(video_decoder));
+
+ return hr;
+}
+
+ErrorOr<std::tuple<ComD3D11VideoDecoder>>
+D3D11VideoDecoder::CreateD3D11Decoder() {
+ HRESULT hr;
+
+ // TODO: supported check?
+
+ decoder_configurator_ = D3D11DecoderConfigurator::Create(
+ gpu_preferences_, gpu_workarounds_, config_, media_log_.get());
+ if (!decoder_configurator_)
+ return StatusCode::kDecoderUnsupportedProfile;
+
+ if (!decoder_configurator_->SupportsDevice(video_device_))
+ return StatusCode::kDecoderUnsupportedCodec;
+
+ FormatSupportChecker format_checker(device_);
+ if (!format_checker.Initialize()) {
+ // Don't fail; it'll just return no support a lot.
+ MEDIA_LOG(WARNING, media_log_)
+ << "Could not create format checker, continuing";
+ }
+
+ // Use IsHDRSupported to guess whether the compositor can output HDR textures.
+ // See TextureSelector for notes about why the decoder should not care.
+ texture_selector_ = TextureSelector::Create(
+ gpu_preferences_, gpu_workarounds_,
+ decoder_configurator_->TextureFormat(),
+ is_hdr_supported_ ? TextureSelector::HDRMode::kSDROrHDR
+ : TextureSelector::HDRMode::kSDROnly,
+ &format_checker, media_log_.get());
+ if (!texture_selector_)
+ return StatusCode::kCannotCreateTextureSelector;
+
+ UINT config_count = 0;
+ hr = video_device_->GetVideoDecoderConfigCount(
+ decoder_configurator_->DecoderDescriptor(), &config_count);
+
+ if (FAILED(hr)) {
+ return Status(StatusCode::kCannotGetDecoderConfigCount)
+ .AddCause(HresultToStatus(hr));
+ }
+
+ if (config_count == 0)
+ return Status(StatusCode::kCannotGetDecoderConfigCount);
+
+ D3D11_VIDEO_DECODER_CONFIG dec_config = {};
+ bool found = false;
+
+ for (UINT i = 0; i < config_count; i++) {
+ hr = video_device_->GetVideoDecoderConfig(
+ decoder_configurator_->DecoderDescriptor(), i, &dec_config);
+ if (FAILED(hr)) {
+ return Status(StatusCode::kCannotGetDecoderConfig)
+ .AddCause(HresultToStatus(hr));
+ }
+
+ if (config_.codec() == kCodecVP9 && dec_config.ConfigBitstreamRaw == 1) {
+ // DXVA VP9 specification mentions ConfigBitstreamRaw "shall be 1".
+ found = true;
+ break;
+ }
+
+ if (config_.codec() == kCodecH264 && dec_config.ConfigBitstreamRaw == 2) {
+ // ConfigBitstreamRaw == 2 means the decoder uses DXVA_Slice_H264_Short.
+ found = true;
+ break;
+ }
+ }
+ if (!found)
+ return StatusCode::kDecoderUnsupportedConfig;
+
+ // Prefer whatever the config tells us about whether to use one Texture2D with
+ // multiple array slices, or multiple Texture2Ds with one slice each. If bit
+ // 14 is clear, then it's the former, else it's the latter.
+ //
+ // Let the workaround override array texture mode, if enabled.
+ //
+ // For more information, please see:
+ // https://download.microsoft.com/download/9/2/A/92A4E198-67E0-4ABD-9DB7-635D711C2752/DXVA_VPx.pdf
+ // https://download.microsoft.com/download/5/f/c/5fc4ec5c-bd8c-4624-8034-319c1bab7671/DXVA_H264.pdf
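+ //
+ // For example (illustrative values): ConfigDecoderSpecific == 0x4000 has
+ // bit 14 set, so single textures are used below; 0x0000 selects one array
+ // texture instead.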
+ use_single_video_decoder_texture_ =
+ !!(dec_config.ConfigDecoderSpecific & (1 << 14)) ||
+ gpu_workarounds_.use_single_video_decoder_texture;
+ if (use_single_video_decoder_texture_)
+ MEDIA_LOG(INFO, media_log_) << "D3D11VideoDecoder is using single textures";
+ else
+ MEDIA_LOG(INFO, media_log_) << "D3D11VideoDecoder is using array texture";
+
+ Microsoft::WRL::ComPtr<ID3D11VideoDecoder> video_decoder;
+ hr = video_device_->CreateVideoDecoder(
+ decoder_configurator_->DecoderDescriptor(), &dec_config, &video_decoder);
+
+ if (!video_decoder.Get())
+ return Status(StatusCode::kDecoderFailedCreation);
+
+ if (FAILED(hr)) {
+ return Status(StatusCode::kDecoderFailedCreation)
+ .AddCause(HresultToStatus(hr));
+ }
+
+ return {std::move(video_decoder)};
}
void D3D11VideoDecoder::Initialize(const VideoDecoderConfig& config,
@@ -257,57 +361,20 @@ void D3D11VideoDecoder::Initialize(const VideoDecoderConfig& config,
return;
}
- device_->GetImmediateContext(device_context_.ReleaseAndGetAddressOf());
+ device_->GetImmediateContext(&device_context_);
HRESULT hr;
// TODO(liberato): Handle cleanup better. Also consider being less chatty in
// the logs, since this will fall back.
- hr = device_.CopyTo(video_device_.ReleaseAndGetAddressOf());
- if (!SUCCEEDED(hr)) {
- NotifyError("Failed to get video device");
- return;
- }
-
- decoder_configurator_ = D3D11DecoderConfigurator::Create(
- gpu_preferences_, gpu_workarounds_, config, media_log_.get());
- if (!decoder_configurator_) {
- NotifyError("D3DD11: Config provided unsupported profile");
- return;
- }
-
- if (!decoder_configurator_->SupportsDevice(video_device_)) {
- NotifyError("D3D11: Device does not support decoder GUID");
- return;
- }
-
- FormatSupportChecker format_checker(device_);
- if (!format_checker.Initialize()) {
- // Don't fail; it'll just return no support a lot.
- MEDIA_LOG(WARNING, media_log_)
- << "Could not create format checker, continuing";
- }
-
- // Use IsHDRSupported to guess whether the compositor can output HDR textures.
- // See TextureSelector for notes about why the decoder should not care.
- texture_selector_ = TextureSelector::Create(
- gpu_preferences_, gpu_workarounds_,
- decoder_configurator_->TextureFormat(),
- is_hdr_supported_ ? TextureSelector::HDRMode::kSDROrHDR
- : TextureSelector::HDRMode::kSDROnly,
- &format_checker, media_log_.get());
- if (!texture_selector_) {
- NotifyError("D3DD11: Cannot get TextureSelector for format");
- return;
- }
-
// TODO(liberato): dxva does this. don't know if we need to.
if (!base::FeatureList::IsEnabled(kD3D11VideoDecoderSkipMultithreaded)) {
ComD3D11Multithread multi_threaded;
hr = device_->QueryInterface(IID_PPV_ARGS(&multi_threaded));
if (!SUCCEEDED(hr)) {
- NotifyError("Failed to query ID3D11Multithread");
+ NotifyError(Status(StatusCode::kCannotQueryID3D11Multithread)
+ .AddCause(HresultToStatus(hr)));
return;
}
// TODO(liberato): This is a hack, since the unittest returns
@@ -316,51 +383,20 @@ void D3D11VideoDecoder::Initialize(const VideoDecoderConfig& config,
multi_threaded->SetMultithreadProtected(TRUE);
}
- UINT config_count = 0;
- hr = video_device_->GetVideoDecoderConfigCount(
- decoder_configurator_->DecoderDescriptor(), &config_count);
- if (FAILED(hr) || config_count == 0) {
- NotifyError("Failed to get video decoder config count");
- return;
- }
-
- D3D11_VIDEO_DECODER_CONFIG dec_config = {};
- bool found = false;
-
- for (UINT i = 0; i < config_count; i++) {
- hr = video_device_->GetVideoDecoderConfig(
- decoder_configurator_->DecoderDescriptor(), i, &dec_config);
- if (FAILED(hr)) {
- NotifyError("Failed to get decoder config");
- return;
- }
-
- if (config.codec() == kCodecVP9 && dec_config.ConfigBitstreamRaw == 1) {
- // DXVA VP9 specification mentions ConfigBitstreamRaw "shall be 1".
- found = true;
- break;
- }
-
- if (config.codec() == kCodecH264 && dec_config.ConfigBitstreamRaw == 2) {
- // ConfigBitstreamRaw == 2 means the decoder uses DXVA_Slice_H264_Short.
- found = true;
- break;
- }
- }
- if (!found) {
- NotifyError("Failed to find decoder config");
+ hr = device_.As(&video_device_);
+ if (!SUCCEEDED(hr)) {
+ NotifyError("Failed to get video device");
return;
}
- Microsoft::WRL::ComPtr<ID3D11VideoDecoder> video_decoder;
- hr = video_device_->CreateVideoDecoder(
- decoder_configurator_->DecoderDescriptor(), &dec_config, &video_decoder);
- if (!video_decoder.Get()) {
- NotifyError("Failed to create a video decoder");
+ auto video_decoder_or_error = CreateD3D11Decoder();
+ if (video_decoder_or_error.has_error()) {
+ NotifyError(video_decoder_or_error.error());
return;
}
- hr = InitializeAcceleratedDecoder(config, video_decoder);
+ hr = InitializeAcceleratedDecoder(
+ config, std::move(std::get<0>(video_decoder_or_error.value())));
if (!SUCCEEDED(hr)) {
NotifyError("Failed to get device context");
@@ -556,13 +592,35 @@ void D3D11VideoDecoder::DoDecode() {
return;
CreatePictureBuffers();
} else if (result == media::AcceleratedVideoDecoder::kConfigChange) {
+ // TODO(liberato): I think we support this now, as long as it's the same
+ // decoder. Should update |config_| though.
if (profile_ != accelerated_video_decoder_->GetProfile()) {
// TODO(crbug.com/1022246): Handle profile change.
LOG(ERROR) << "Profile change is not supported";
NotifyError("Profile change is not supported");
return;
}
- CreatePictureBuffers();
+ // Before the first frame, we get a config change that we should ignore.
+ // We only want to take action if this is a mid-stream config change. We
+ // could wait until now to allocate the first D3D11VideoDecoder, but we
+ // don't, so that a creation failure surfaces at initialization time rather
+ // than during decoding. If there's a config change at the start of the
+ // stream, then this might not work.
+ if (!picture_buffers_.size())
+ continue;
+
+ // Update the config.
+ const auto new_coded_size = accelerated_video_decoder_->GetPicSize();
+ config_.set_coded_size(new_coded_size);
+ auto video_decoder_or_error = CreateD3D11Decoder();
+ if (video_decoder_or_error.has_error()) {
+ NotifyError(video_decoder_or_error.error());
+ return;
+ }
+ DCHECK(set_accelerator_decoder_cb_);
+ set_accelerator_decoder_cb_.Run(
+ std::move(std::get<0>(video_decoder_or_error.value())));
+ picture_buffers_.clear();
} else if (result == media::AcceleratedVideoDecoder::kTryAgain) {
LOG(ERROR) << "Try again is not supported";
NotifyError("Try again is not supported");
@@ -627,14 +685,6 @@ void D3D11VideoDecoder::CreatePictureBuffers() {
DCHECK(texture_selector_);
gfx::Size size = accelerated_video_decoder_->GetPicSize();
- // Create an input texture array.
- ComD3D11Texture2D in_texture =
- decoder_configurator_->CreateOutputTexture(device_, size);
- if (!in_texture) {
- NotifyError("Failed to create a Texture2D for PictureBuffers");
- return;
- }
-
HDRMetadata stream_metadata;
if (config_.hdr_metadata())
stream_metadata = *config_.hdr_metadata();
@@ -653,8 +703,24 @@ void D3D11VideoDecoder::CreatePictureBuffers() {
DCHECK(!buffer->in_picture_use());
picture_buffers_.clear();
+ ComD3D11Texture2D in_texture;
+
// Create each picture buffer.
for (size_t i = 0; i < D3D11DecoderConfigurator::BUFFER_COUNT; i++) {
+ // Create an input texture / texture array if we haven't already.
+ if (!in_texture) {
+ in_texture = decoder_configurator_->CreateOutputTexture(
+ device_, size,
+ use_single_video_decoder_texture_
+ ? 1
+ : D3D11DecoderConfigurator::BUFFER_COUNT);
+ }
+
+ if (!in_texture) {
+ NotifyError("Failed to create a Texture2D for PictureBuffers");
+ return;
+ }
+
auto tex_wrapper = texture_selector_->CreateTextureWrapper(
device_, video_device_, device_context_, size);
if (!tex_wrapper) {
@@ -662,8 +728,10 @@ void D3D11VideoDecoder::CreatePictureBuffers() {
return;
}
- picture_buffers_.push_back(new D3D11PictureBuffer(
- decoder_task_runner_, in_texture, std::move(tex_wrapper), size, i));
+ const size_t array_slice = use_single_video_decoder_texture_ ? 0 : i;
+ picture_buffers_.push_back(
+ new D3D11PictureBuffer(decoder_task_runner_, in_texture, array_slice,
+ std::move(tex_wrapper), size, i /* level */));
if (!picture_buffers_[i]->Init(
gpu_task_runner_, get_helper_cb_, video_device_,
decoder_configurator_->DecoderGuid(), media_log_->Clone())) {
@@ -671,6 +739,11 @@ void D3D11VideoDecoder::CreatePictureBuffers() {
return;
}
+ // If we're using one texture per buffer, rather than an array, then clear
+ // the ref to it so that we allocate a new one above.
+ if (use_single_video_decoder_texture_)
+ in_texture = nullptr;
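+ // Net effect (illustrative): array-texture mode allocates one texture with
+ // D3D11DecoderConfigurator::BUFFER_COUNT slices shared by every picture
+ // buffer, while single-texture mode allocates BUFFER_COUNT one-slice
+ // textures.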
+
// If we have display metadata, then tell the processor. Note that the
// order of these calls is important, and we must set the display metadata
// if we set the stream metadata, else it can crash on some AMD cards.
@@ -750,7 +823,7 @@ bool D3D11VideoDecoder::OutputResult(const CodecPicture* picture,
frame->SetReleaseMailboxCB(
base::BindOnce(release_mailbox_cb_, std::move(wait_complete_cb)));
- frame->metadata()->SetBoolean(VideoFrameMetadata::POWER_EFFICIENT, true);
+ frame->metadata()->power_efficient = true;
// For NV12, overlay is allowed by default. If the decoder is going to support
// non-NV12 textures, then this may have to be conditionally set. Also note
// that ALLOW_OVERLAY is required for encrypted video path.
@@ -765,28 +838,33 @@ bool D3D11VideoDecoder::OutputResult(const CodecPicture* picture,
// presenter decide if it wants to.
const bool allow_overlay =
base::FeatureList::IsEnabled(kD3D11VideoDecoderAllowOverlay);
- frame->metadata()->SetBoolean(VideoFrameMetadata::ALLOW_OVERLAY,
- allow_overlay);
+ frame->metadata()->allow_overlay = allow_overlay;
frame->set_color_space(output_color_space);
output_cb_.Run(frame);
return true;
}
-// TODO(tmathmeyer) eventually have this take a Status and pass it through
-// to each of the callbacks.
+void D3D11VideoDecoder::SetDecoderCB(const SetAcceleratorDecoderCB& cb) {
+ set_accelerator_decoder_cb_ = cb;
+}
+
+// TODO(tmathmeyer): Please don't add new uses of this overload.
void D3D11VideoDecoder::NotifyError(const char* reason) {
+ NotifyError(Status(StatusCode::kDecoderInitializeNeverCompleted, reason));
+}
+
+void D3D11VideoDecoder::NotifyError(const Status& reason) {
TRACE_EVENT0("gpu", "D3D11VideoDecoder::NotifyError");
state_ = State::kError;
- DLOG(ERROR) << reason;
// TODO(tmathmeyer) - Remove this after plumbing Status through the
// decode_cb and input_buffer_queue cb's.
- MEDIA_LOG(ERROR, media_log_) << reason;
+ MEDIA_LOG(ERROR, media_log_)
+ << "D3D11VideoDecoder error: " << std::hex << reason.code();
if (init_cb_)
- std::move(init_cb_).Run(
- Status(StatusCode::kDecoderInitializeNeverCompleted, reason));
+ std::move(init_cb_).Run(reason);
current_buffer_ = nullptr;
if (current_decode_cb_)
@@ -876,97 +954,35 @@ D3D11VideoDecoder::GetSupportedVideoDecoderConfigs(
return {};
}
+ const auto supported_resolutions =
+ GetSupportedD3D11VideoDecoderResolutions(d3d11_device, gpu_workarounds);
+
std::vector<SupportedVideoDecoderConfig> configs;
- // VP9 has no default resolutions since it may not even be supported.
- ResolutionPair max_h264_resolutions(gfx::Size(1920, 1088), gfx::Size());
- ResolutionPair max_vp8_resolutions;
- ResolutionPair max_vp9_profile0_resolutions;
- ResolutionPair max_vp9_profile2_resolutions;
- const gfx::Size min_resolution(64, 64);
-
- GetResolutionsForDecoders(
- {D3D11_DECODER_PROFILE_H264_VLD_NOFGT}, d3d11_device, gpu_workarounds,
- &max_h264_resolutions, &max_vp8_resolutions,
- &max_vp9_profile0_resolutions, &max_vp9_profile2_resolutions);
-
- if (max_h264_resolutions.first.width() > 0) {
- // Push H264 configs, except HIGH10.
- // landscape
- configs.push_back(SupportedVideoDecoderConfig(
- H264PROFILE_MIN, // profile_min
- static_cast<VideoCodecProfile>(H264PROFILE_HIGH10PROFILE -
- 1), // profile_max
- min_resolution, // coded_size_min
- max_h264_resolutions.first, // coded_size_max
- false, // allow_encrypted
- false)); // require_encrypted
- configs.push_back(SupportedVideoDecoderConfig(
- static_cast<VideoCodecProfile>(H264PROFILE_HIGH10PROFILE +
- 1), // profile_min
- H264PROFILE_MAX, // profile_max
- min_resolution, // coded_size_min
- max_h264_resolutions.first, // coded_size_max
- false, // allow_encrypted
- false)); // require_encrypted
-
- // portrait
- configs.push_back(SupportedVideoDecoderConfig(
- H264PROFILE_MIN, // profile_min
- static_cast<VideoCodecProfile>(H264PROFILE_HIGH10PROFILE -
- 1), // profile_max
- min_resolution, // coded_size_min
- max_h264_resolutions.second, // coded_size_max
- false, // allow_encrypted
- false)); // require_encrypted
- configs.push_back(SupportedVideoDecoderConfig(
- static_cast<VideoCodecProfile>(H264PROFILE_HIGH10PROFILE +
- 1), // profile_min
- H264PROFILE_MAX, // profile_max
- min_resolution, // coded_size_min
- max_h264_resolutions.second, // coded_size_max
- false, // allow_encrypted
- false)); // require_encrypted
- }
-
- // TODO(liberato): Fill this in for VP8.
-
- if (max_vp9_profile0_resolutions.first.width()) {
- // landscape
- configs.push_back(SupportedVideoDecoderConfig(
- VP9PROFILE_PROFILE0, // profile_min
- VP9PROFILE_PROFILE0, // profile_max
- min_resolution, // coded_size_min
- max_vp9_profile0_resolutions.first, // coded_size_max
- false, // allow_encrypted
- false)); // require_encrypted
- // portrait
- configs.push_back(SupportedVideoDecoderConfig(
- VP9PROFILE_PROFILE0, // profile_min
- VP9PROFILE_PROFILE0, // profile_max
- min_resolution, // coded_size_min
- max_vp9_profile0_resolutions.second, // coded_size_max
- false, // allow_encrypted
- false)); // require_encrypted
- }
-
- if (base::FeatureList::IsEnabled(kD3D11VideoDecoderVP9Profile2)) {
- if (max_vp9_profile2_resolutions.first.width()) {
- // landscape
- configs.push_back(SupportedVideoDecoderConfig(
- VP9PROFILE_PROFILE2, // profile_min
- VP9PROFILE_PROFILE2, // profile_max
- min_resolution, // coded_size_min
- max_vp9_profile2_resolutions.first, // coded_size_max
- false, // allow_encrypted
- false)); // require_encrypted
- // portrait
- configs.push_back(SupportedVideoDecoderConfig(
- VP9PROFILE_PROFILE2, // profile_min
- VP9PROFILE_PROFILE2, // profile_max
- min_resolution, // coded_size_min
- max_vp9_profile2_resolutions.second, // coded_size_max
- false, // allow_encrypted
- false)); // require_encrypted
+ for (const auto& kv : supported_resolutions) {
+ const auto profile = kv.first;
+ if (profile == VP9PROFILE_PROFILE2 &&
+ !base::FeatureList::IsEnabled(kD3D11VideoDecoderVP9Profile2)) {
+ continue;
+ }
+
+ // TODO(liberato): Add VP8 and AV1 support to D3D11VideoDecoder.
+ if (profile == VP8PROFILE_ANY ||
+ (profile >= AV1PROFILE_MIN && profile <= AV1PROFILE_MAX)) {
+ continue;
+ }
+
+ const auto& resolution_range = kv.second;
+ configs.emplace_back(profile, profile, resolution_range.min_resolution,
+ resolution_range.max_landscape_resolution,
+ /*allow_encrypted=*/false,
+ /*require_encrypted=*/false);
+ if (!resolution_range.max_portrait_resolution.IsEmpty() &&
+ resolution_range.max_portrait_resolution !=
+ resolution_range.max_landscape_resolution) {
+ configs.emplace_back(profile, profile, resolution_range.min_resolution,
+ resolution_range.max_portrait_resolution,
+ /*allow_encrypted=*/false,
+ /*require_encrypted=*/false);
}
}
diff --git a/chromium/media/gpu/windows/d3d11_video_decoder.h b/chromium/media/gpu/windows/d3d11_video_decoder.h
index d9ba26ab254..70e07b81300 100644
--- a/chromium/media/gpu/windows/d3d11_video_decoder.h
+++ b/chromium/media/gpu/windows/d3d11_video_decoder.h
@@ -85,6 +85,7 @@ class MEDIA_GPU_EXPORT D3D11VideoDecoder : public VideoDecoder,
D3D11PictureBuffer* GetPicture() override;
bool OutputResult(const CodecPicture* picture,
D3D11PictureBuffer* picture_buffer) override;
+ void SetDecoderCB(const SetAcceleratorDecoderCB&) override;
static bool GetD3D11FeatureLevel(
ComD3D11Device dev,
@@ -142,6 +143,12 @@ class MEDIA_GPU_EXPORT D3D11VideoDecoder : public VideoDecoder,
// gpu main thread.
void CreatePictureBuffers();
+ // Create a D3D11VideoDecoder, if possible, based on the current config.
+ // TODO(liberato): we use a tuple only because ErrorOr<ComD3D11VideoDecoder>
+ // doesn't work. Something about base::Optional trying to convert to void*,
+ // but the conversion is ambiguous.
+ ErrorOr<std::tuple<ComD3D11VideoDecoder>> CreateD3D11Decoder();
+
enum class NotSupportedReason {
kVideoIsSupported = 0,
@@ -205,8 +212,10 @@ class MEDIA_GPU_EXPORT D3D11VideoDecoder : public VideoDecoder,
};
// Enter the kError state. This will fail any pending |init_cb_| and / or
- // pending decode as well.
+ // pending decode as well. Do not add new uses of the char* overload; send a
+ // Status instead.
void NotifyError(const char* reason);
+ void NotifyError(const Status& reason);
// The implementation, which lives on the GPU main thread.
base::SequenceBound<D3D11VideoDecoderImpl> impl_;
@@ -281,7 +290,15 @@ class MEDIA_GPU_EXPORT D3D11VideoDecoder : public VideoDecoder,
SupportedConfigs supported_configs_;
// Should we assume that we're outputting to an HDR display?
- bool is_hdr_supported_;
+ bool is_hdr_supported_ = false;
+
+ // Should we use multiple single textures for the decoder output (true) or one
+ // texture with multiple array slices (false)?
+ bool use_single_video_decoder_texture_ = false;
+
+ // Callback used to hand the current ComD3D11VideoDecoder to the
+ // accelerator, and to update it on a config change.
+ SetAcceleratorDecoderCB set_accelerator_decoder_cb_;
base::WeakPtrFactory<D3D11VideoDecoder> weak_factory_{this};
diff --git a/chromium/media/gpu/windows/d3d11_video_decoder_client.h b/chromium/media/gpu/windows/d3d11_video_decoder_client.h
index a80e8430c7f..0286ad41ade 100644
--- a/chromium/media/gpu/windows/d3d11_video_decoder_client.h
+++ b/chromium/media/gpu/windows/d3d11_video_decoder_client.h
@@ -5,7 +5,9 @@
#ifndef MEDIA_GPU_WINDOWS_D3D11_VIDEO_DECODER_CLIENT_H_
#define MEDIA_GPU_WINDOWS_D3D11_VIDEO_DECODER_CLIENT_H_
+#include "base/callback.h"
#include "media/base/video_color_space.h"
+#include "media/gpu/windows/d3d11_com_defs.h"
namespace media {
@@ -16,10 +18,18 @@ class D3D11PictureBuffer;
// required methods to D3D11VideoAccelerators.
class D3D11VideoDecoderClient {
public:
+ using SetAcceleratorDecoderCB =
+ base::RepeatingCallback<void(ComD3D11VideoDecoder)>;
+
virtual D3D11PictureBuffer* GetPicture() = 0;
virtual bool OutputResult(const CodecPicture* picture,
D3D11PictureBuffer* picture_buffer) = 0;
+ // Called by the accelerator to provide a callback that can be used to give
+ // the accelerator a D3D11VideoDecoder object. Must be called during
+ // construction of the accelerator.
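+ //
+ // A typical registration, as a sketch (|MyAccelerator| is hypothetical):
+ //   client->SetDecoderCB(base::BindRepeating(
+ //       &MyAccelerator::SetVideoDecoder, base::Unretained(this)));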
+ virtual void SetDecoderCB(const SetAcceleratorDecoderCB&) = 0;
+
protected:
virtual ~D3D11VideoDecoderClient() = default;
};
diff --git a/chromium/media/gpu/windows/d3d11_video_decoder_unittest.cc b/chromium/media/gpu/windows/d3d11_video_decoder_unittest.cc
index 91908445262..bb950b7717c 100644
--- a/chromium/media/gpu/windows/d3d11_video_decoder_unittest.cc
+++ b/chromium/media/gpu/windows/d3d11_video_decoder_unittest.cc
@@ -283,7 +283,7 @@ TEST_F(D3D11VideoDecoderTest, DoesNotSupportVP9WithLegacyGPU) {
}
TEST_F(D3D11VideoDecoderTest, DoesNotSupportVP9WithGPUWorkaroundDisableVPX) {
- gpu_workarounds_.disable_accelerated_vpx_decode = true;
+ gpu_workarounds_.disable_accelerated_vp9_decode = true;
VideoDecoderConfig configuration =
TestVideoConfig::NormalCodecProfile(kCodecVP9, VP9PROFILE_PROFILE0);
diff --git a/chromium/media/gpu/windows/d3d11_vp9_accelerator.cc b/chromium/media/gpu/windows/d3d11_vp9_accelerator.cc
index eeec7896bde..7fe0f7f7eca 100644
--- a/chromium/media/gpu/windows/d3d11_vp9_accelerator.cc
+++ b/chromium/media/gpu/windows/d3d11_vp9_accelerator.cc
@@ -36,17 +36,17 @@ CreateSubsampleMappingBlock(const std::vector<SubsampleEntry>& from) {
D3D11VP9Accelerator::D3D11VP9Accelerator(
D3D11VideoDecoderClient* client,
MediaLog* media_log,
- ComD3D11VideoDecoder video_decoder,
ComD3D11VideoDevice video_device,
std::unique_ptr<VideoContextWrapper> video_context)
: client_(client),
media_log_(media_log),
status_feedback_(0),
- video_decoder_(std::move(video_decoder)),
video_device_(std::move(video_device)),
video_context_(std::move(video_context)) {
DCHECK(client);
DCHECK(media_log_);
+ client->SetDecoderCB(base::BindRepeating(
+ &D3D11VP9Accelerator::SetVideoDecoder, base::Unretained(this)));
}
D3D11VP9Accelerator::~D3D11VP9Accelerator() {}
@@ -111,7 +111,7 @@ void D3D11VP9Accelerator::CopyFrameParams(const D3D11VP9Picture& pic,
pic_params->BitDepthMinus8Luma = pic_params->BitDepthMinus8Chroma =
pic.frame_hdr->bit_depth - 8;
- pic_params->CurrPic.Index7Bits = pic.level();
+ pic_params->CurrPic.Index7Bits = pic.picture_index();
pic_params->frame_type = !pic.frame_hdr->IsKeyframe();
COPY_PARAM(subsampling_x);
@@ -150,7 +150,7 @@ void D3D11VP9Accelerator::CopyReferenceFrames(
if (ref_pic) {
scoped_refptr<D3D11VP9Picture> our_ref_pic(
static_cast<D3D11VP9Picture*>(ref_pic.get()));
- pic_params->ref_frame_map[i].Index7Bits = our_ref_pic->level();
+ pic_params->ref_frame_map[i].Index7Bits = our_ref_pic->picture_index();
pic_params->ref_frame_coded_width[i] = texture_descriptor.Width;
pic_params->ref_frame_coded_height[i] = texture_descriptor.Height;
} else {
@@ -185,19 +185,16 @@ void D3D11VP9Accelerator::CopyLoopFilterParams(
// base::size(...) doesn't work well in an array initializer.
DCHECK_EQ(4lu, base::size(pic_params->ref_deltas));
- int ref_deltas[4] = {0};
for (size_t i = 0; i < base::size(pic_params->ref_deltas); i++) {
- if (loop_filter_params.update_ref_deltas[i])
- ref_deltas[i] = loop_filter_params.ref_deltas[i];
- pic_params->ref_deltas[i] = ref_deltas[i];
+ // update_ref_deltas[i] is _only_ for parsing! It allows omission of the
+ // bits that would otherwise be needed for a new value to overwrite the
+ // global one. It has nothing to do with setting the ref_deltas here.
+ pic_params->ref_deltas[i] = loop_filter_params.ref_deltas[i];
}
- int mode_deltas[2] = {0};
DCHECK_EQ(2lu, base::size(pic_params->mode_deltas));
for (size_t i = 0; i < base::size(pic_params->mode_deltas); i++) {
- if (loop_filter_params.update_mode_deltas[i])
- mode_deltas[i] = loop_filter_params.mode_deltas[i];
- pic_params->mode_deltas[i] = mode_deltas[i];
+ pic_params->mode_deltas[i] = loop_filter_params.mode_deltas[i];
}
}
@@ -381,4 +378,8 @@ bool D3D11VP9Accelerator::GetFrameContext(scoped_refptr<VP9Picture> picture,
return false;
}
+void D3D11VP9Accelerator::SetVideoDecoder(ComD3D11VideoDecoder video_decoder) {
+ video_decoder_ = std::move(video_decoder);
+}
+
} // namespace media
diff --git a/chromium/media/gpu/windows/d3d11_vp9_accelerator.h b/chromium/media/gpu/windows/d3d11_vp9_accelerator.h
index dc262d68d26..43c2c26e595 100644
--- a/chromium/media/gpu/windows/d3d11_vp9_accelerator.h
+++ b/chromium/media/gpu/windows/d3d11_vp9_accelerator.h
@@ -24,7 +24,6 @@ class D3D11VP9Accelerator : public VP9Decoder::VP9Accelerator {
public:
D3D11VP9Accelerator(D3D11VideoDecoderClient* client,
MediaLog* media_log,
- ComD3D11VideoDecoder video_decoder,
ComD3D11VideoDevice video_device,
std::unique_ptr<VideoContextWrapper> video_context);
~D3D11VP9Accelerator() override;
@@ -69,6 +68,8 @@ class D3D11VP9Accelerator : public VP9Decoder::VP9Accelerator {
void RecordFailure(const std::string& fail_type, const std::string& reason);
+ void SetVideoDecoder(ComD3D11VideoDecoder video_decoder);
+
D3D11VideoDecoderClient* client_;
MediaLog* const media_log_;
UINT status_feedback_;
diff --git a/chromium/media/gpu/windows/d3d11_vp9_picture.cc b/chromium/media/gpu/windows/d3d11_vp9_picture.cc
index 24ae6033294..5efa82b5be0 100644
--- a/chromium/media/gpu/windows/d3d11_vp9_picture.cc
+++ b/chromium/media/gpu/windows/d3d11_vp9_picture.cc
@@ -7,7 +7,8 @@
namespace media {
D3D11VP9Picture::D3D11VP9Picture(D3D11PictureBuffer* picture_buffer)
- : picture_buffer_(picture_buffer), level_(picture_buffer_->level()) {
+ : picture_buffer_(picture_buffer),
+ picture_index_(picture_buffer_->picture_index()) {
picture_buffer_->set_in_picture_use(true);
}
diff --git a/chromium/media/gpu/windows/d3d11_vp9_picture.h b/chromium/media/gpu/windows/d3d11_vp9_picture.h
index 3d3bcbbb3f9..27b144402cc 100644
--- a/chromium/media/gpu/windows/d3d11_vp9_picture.h
+++ b/chromium/media/gpu/windows/d3d11_vp9_picture.h
@@ -19,14 +19,14 @@ class D3D11VP9Picture : public VP9Picture {
D3D11PictureBuffer* picture_buffer() const { return picture_buffer_; }
- size_t level() const { return level_; }
+ size_t picture_index() const { return picture_index_; }
protected:
~D3D11VP9Picture() override;
private:
D3D11PictureBuffer* picture_buffer_;
- size_t level_;
+ size_t picture_index_;
};
} // namespace media
diff --git a/chromium/media/gpu/windows/dxva_video_decode_accelerator_win.cc b/chromium/media/gpu/windows/dxva_video_decode_accelerator_win.cc
index ff451f0bb17..350458a8598 100644
--- a/chromium/media/gpu/windows/dxva_video_decode_accelerator_win.cc
+++ b/chromium/media/gpu/windows/dxva_video_decode_accelerator_win.cc
@@ -7,10 +7,6 @@
#include <algorithm>
#include <memory>
-#if !defined(OS_WIN)
-#error This file should only be built on Windows.
-#endif // !defined(OS_WIN)
-
#include <codecapi.h>
#include <dxgi1_2.h>
#include <ks.h>
@@ -121,13 +117,6 @@ DEFINE_GUID(MF_XVP_PLAYBACK_MODE,
0xcc,
0xe9);
-// Defines the GUID for the Intel H264 DXVA device.
-static const GUID DXVA2_Intel_ModeH264_E = {
- 0x604F8E68,
- 0x4951,
- 0x4c54,
- {0x88, 0xFE, 0xAB, 0xD2, 0x5C, 0x15, 0xB3, 0xD6}};
-
static const CLSID CLSID_CAV1DecoderMFT = {
0xC843981A,
0x3359,
@@ -184,7 +173,7 @@ HRESULT g_last_device_removed_reason;
namespace media {
-static const VideoCodecProfile kSupportedProfiles[] = {
+constexpr VideoCodecProfile kSupportedProfiles[] = {
H264PROFILE_BASELINE, H264PROFILE_MAIN, H264PROFILE_HIGH,
VP8PROFILE_ANY, VP9PROFILE_PROFILE0, VP9PROFILE_PROFILE2,
AV1PROFILE_PROFILE_MAIN, AV1PROFILE_PROFILE_HIGH, AV1PROFILE_PROFILE_PRO};
@@ -606,7 +595,11 @@ DXVAVideoDecodeAccelerator::DXVAVideoDecodeAccelerator(
enable_low_latency_(gpu_preferences.enable_low_latency_dxva),
support_share_nv12_textures_(
gpu_preferences.enable_zero_copy_dxgi_video &&
- !workarounds.disable_dxgi_zero_copy_video),
+ !workarounds.disable_dxgi_zero_copy_video &&
+ /* Sharing will use an array texture, so avoid it if arrays are being
* worked around. https://crbug.com/971952.
+ */
+ !workarounds.use_single_video_decoder_texture),
num_picture_buffers_requested_(support_share_nv12_textures_
? kNumPictureBuffersForZeroCopy
: kNumPictureBuffers),
@@ -619,8 +612,12 @@ DXVAVideoDecodeAccelerator::DXVAVideoDecodeAccelerator(
use_keyed_mutex_(false),
using_angle_device_(false),
using_debug_device_(false),
- enable_accelerated_vpx_decode_(
- !workarounds.disable_accelerated_vpx_decode),
+ enable_accelerated_av1_decode_(
+ !workarounds.disable_accelerated_av1_decode),
+ enable_accelerated_vp8_decode_(
+ !workarounds.disable_accelerated_vp8_decode),
+ enable_accelerated_vp9_decode_(
+ !workarounds.disable_accelerated_vp9_decode),
processing_config_changed_(false),
use_empty_video_hdr_metadata_(workarounds.use_empty_video_hdr_metadata) {
weak_ptr_ = weak_this_factory_.GetWeakPtr();
@@ -1343,84 +1340,28 @@ DXVAVideoDecodeAccelerator::GetSupportedProfiles(
}
}
- // On Windows 7 the maximum resolution supported by media foundation is
- // 1920 x 1088. We use 1088 to account for 16x16 macroblocks.
- ResolutionPair max_h264_resolutions(gfx::Size(1920, 1088), gfx::Size());
-
- // VP8/VP9 has no default resolutions since it may not even be supported.
- ResolutionPair max_vp8_resolutions;
- ResolutionPair max_vp9_profile0_resolutions;
- ResolutionPair max_vp9_profile2_resolutions;
-
- GetResolutionsForDecoders({DXVA2_ModeH264_E, DXVA2_Intel_ModeH264_E},
- gl::QueryD3D11DeviceObjectFromANGLE(), workarounds,
- &max_h264_resolutions, &max_vp8_resolutions,
- &max_vp9_profile0_resolutions,
- &max_vp9_profile2_resolutions);
-
- for (const auto& supported_profile : kSupportedProfiles) {
- const bool is_h264 = supported_profile >= H264PROFILE_MIN &&
- supported_profile <= H264PROFILE_MAX;
- const bool is_vp9 = supported_profile >= VP9PROFILE_MIN &&
- supported_profile <= VP9PROFILE_MAX;
- const bool is_vp8 = supported_profile == VP8PROFILE_ANY;
- const bool is_av1 = supported_profile >= AV1PROFILE_MIN &&
- supported_profile <= AV1PROFILE_MAX;
- DCHECK(is_h264 || is_vp9 || is_vp8 || is_av1);
-
- ResolutionPair max_resolutions;
- if (is_h264) {
- max_resolutions = max_h264_resolutions;
- } else if (supported_profile == VP9PROFILE_PROFILE0) {
- max_resolutions = max_vp9_profile0_resolutions;
- } else if (supported_profile == VP9PROFILE_PROFILE2) {
- max_resolutions = max_vp9_profile2_resolutions;
- } else if (is_vp8) {
- max_resolutions = max_vp8_resolutions;
- } else if (is_av1) {
- if (!base::FeatureList::IsEnabled(kMediaFoundationAV1Decoding))
- continue;
-
- // TODO(dalecurtis): Update GetResolutionsForDecoders() to support AV1.
- SupportedProfile profile;
- profile.profile = supported_profile;
- profile.min_resolution = gfx::Size();
- profile.max_resolution = gfx::Size(8192, 8192);
- profiles.push_back(profile);
- continue;
- }
-
- // Skip adding VPx profiles if it's not supported or disabled.
- if ((is_vp9 || is_vp8) && max_resolutions.first.IsEmpty())
- continue;
-
- // Windows Media Foundation H.264 decoding does not support decoding videos
- // with any dimension smaller than 48 pixels:
- // http://msdn.microsoft.com/en-us/library/windows/desktop/dd797815
- //
- // TODO(dalecurtis): These values are too low. We should only be using
- // hardware decode for videos above ~360p, see http://crbug.com/684792.
- const gfx::Size min_resolution =
- is_h264 ? gfx::Size(48, 48) : gfx::Size(16, 16);
-
+ const auto supported_resolutions = GetSupportedD3D11VideoDecoderResolutions(
+ gl::QueryD3D11DeviceObjectFromANGLE(), workarounds);
+ for (const auto& kv : supported_resolutions) {
+ const auto& resolution_range = kv.second;
{
SupportedProfile profile;
- profile.profile = supported_profile;
- profile.min_resolution = min_resolution;
- profile.max_resolution = max_resolutions.first;
+ profile.profile = kv.first;
+ profile.min_resolution = resolution_range.min_resolution;
+ profile.max_resolution = resolution_range.max_landscape_resolution;
profiles.push_back(profile);
}
- const gfx::Size portrait_max_resolution = max_resolutions.second;
- if (!portrait_max_resolution.IsEmpty()) {
+ if (!resolution_range.max_portrait_resolution.IsEmpty() &&
+ resolution_range.max_portrait_resolution !=
+ resolution_range.max_landscape_resolution) {
SupportedProfile profile;
- profile.profile = supported_profile;
- profile.min_resolution = min_resolution;
- profile.max_resolution = portrait_max_resolution;
+ profile.profile = kv.first;
+ profile.min_resolution = resolution_range.min_resolution;
+ profile.max_resolution = resolution_range.max_portrait_resolution;
profiles.push_back(profile);
}
}
-
return profiles;
}
@@ -1475,18 +1416,21 @@ bool DXVAVideoDecodeAccelerator::InitDecoder(VideoCodecProfile profile) {
"blacklisted version of msmpeg2vdec.dll 6.1.7140", false);
codec_ = kCodecH264;
clsid = __uuidof(CMSH264DecoderMFT);
- } else if (enable_accelerated_vpx_decode_ &&
- ((profile >= VP9PROFILE_PROFILE0 &&
- profile <= VP9PROFILE_PROFILE3) ||
- profile == VP8PROFILE_ANY)) {
+ } else if ((profile >= VP9PROFILE_PROFILE0 &&
+ profile <= VP9PROFILE_PROFILE3) ||
+ profile == VP8PROFILE_ANY) {
codec_ = profile == VP8PROFILE_ANY ? kCodecVP8 : kCodecVP9;
- clsid = CLSID_MSVPxDecoder;
- decoder_dll = ::LoadLibrary(kMSVPxDecoderDLLName);
- if (decoder_dll)
- using_ms_vpx_mft_ = true;
+ if ((codec_ == kCodecVP8 && enable_accelerated_vp8_decode_) ||
+ (codec_ == kCodecVP9 && enable_accelerated_vp9_decode_)) {
+ clsid = CLSID_MSVPxDecoder;
+ decoder_dll = ::LoadLibrary(kMSVPxDecoderDLLName);
+ if (decoder_dll)
+ using_ms_vpx_mft_ = true;
+ }
}
- if (base::FeatureList::IsEnabled(kMediaFoundationAV1Decoding) &&
+ if (enable_accelerated_av1_decode_ &&
+ base::FeatureList::IsEnabled(kMediaFoundationAV1Decoding) &&
(profile >= AV1PROFILE_MIN && profile <= AV1PROFILE_MAX)) {
codec_ = kCodecAV1;
clsid = CLSID_CAV1DecoderMFT;
@@ -1512,17 +1456,15 @@ bool DXVAVideoDecodeAccelerator::InitDecoder(VideoCodecProfile profile) {
CHECK(create_dxgi_device_manager_);
if (media_log_)
MEDIA_LOG(INFO, media_log_) << "Using D3D11 device for DXVA";
- RETURN_AND_NOTIFY_ON_FAILURE(CreateDX11DevManager(),
- "Failed to initialize DX11 device and manager",
- PLATFORM_FAILURE, false);
+ RETURN_ON_FAILURE(CreateDX11DevManager(),
+ "Failed to initialize DX11 device and manager", false);
device_manager_to_use =
reinterpret_cast<ULONG_PTR>(d3d11_device_manager_.Get());
} else {
if (media_log_)
MEDIA_LOG(INFO, media_log_) << "Using D3D9 device for DXVA";
- RETURN_AND_NOTIFY_ON_FAILURE(CreateD3DDevManager(),
- "Failed to initialize D3D device and manager",
- PLATFORM_FAILURE, false);
+ RETURN_ON_FAILURE(CreateD3DDevManager(),
+ "Failed to initialize D3D device and manager", false);
device_manager_to_use = reinterpret_cast<ULONG_PTR>(device_manager_.Get());
}
diff --git a/chromium/media/gpu/windows/dxva_video_decode_accelerator_win.h b/chromium/media/gpu/windows/dxva_video_decode_accelerator_win.h
index 4377744a8fd..21d3ec0d3fb 100644
--- a/chromium/media/gpu/windows/dxva_video_decode_accelerator_win.h
+++ b/chromium/media/gpu/windows/dxva_video_decode_accelerator_win.h
@@ -7,18 +7,12 @@
#include <d3d11_1.h>
#include <d3d9.h>
+#include <dxva2api.h>
#include <initguid.h>
+#include <mfidl.h>
#include <stdint.h>
#include <wrl/client.h>
-// Work around bug in this header by disabling the relevant warning for it.
-// https://connect.microsoft.com/VisualStudio/feedback/details/911260/dxva2api-h-in-win8-sdk-triggers-c4201-with-w4
-#pragma warning(push)
-#pragma warning(disable : 4201)
-#include <dxva2api.h>
-#pragma warning(pop)
-#include <mfidl.h>
-
#include <list>
#include <map>
#include <memory>
@@ -588,8 +582,12 @@ class MEDIA_GPU_EXPORT DXVAVideoDecodeAccelerator
bool using_angle_device_;
bool using_debug_device_;
- // Enables hardware acceleration for VP9 video decoding.
- const bool enable_accelerated_vpx_decode_;
+ // Enables hardware acceleration for AV1 video decoding.
+ const bool enable_accelerated_av1_decode_;
+
+ // Enables hardware acceleration for VP8/VP9 video decoding.
+ const bool enable_accelerated_vp8_decode_;
+ const bool enable_accelerated_vp9_decode_;
// The media foundation H.264 decoder has problems handling changes like
// resolution change, bitrate change etc. If we reinitialize the decoder
diff --git a/chromium/media/gpu/windows/supported_profile_helpers.cc b/chromium/media/gpu/windows/supported_profile_helpers.cc
index 5004c5799b7..7d8622286f1 100644
--- a/chromium/media/gpu/windows/supported_profile_helpers.cc
+++ b/chromium/media/gpu/windows/supported_profile_helpers.cc
@@ -9,16 +9,14 @@
#include <memory>
#include <utility>
+#include <d3d9.h>
+#include <dxva2api.h>
+
#include "base/feature_list.h"
#include "base/trace_event/trace_event.h"
#include "base/win/windows_version.h"
-#include "build/build_config.h"
-#include "gpu/config/gpu_driver_bug_workarounds.h"
#include "media/base/media_switches.h"
-
-#if !defined(OS_WIN)
-#error This file should only be built on Windows.
-#endif // !defined(OS_WIN)
+#include "media/gpu/windows/av1_guids.h"
namespace {
@@ -26,7 +24,7 @@ namespace {
// or earlier, and don't handle resolutions higher than 1920 x 1088 well.
//
// NOTE: This list must be kept in sorted order.
-static const uint16_t kLegacyAmdGpuList[] = {
+constexpr uint16_t kLegacyAmdGpuList[] = {
0x130f, 0x6700, 0x6701, 0x6702, 0x6703, 0x6704, 0x6705, 0x6706, 0x6707,
0x6708, 0x6709, 0x6718, 0x6719, 0x671c, 0x671d, 0x671f, 0x6720, 0x6721,
0x6722, 0x6723, 0x6724, 0x6725, 0x6726, 0x6727, 0x6728, 0x6729, 0x6738,
@@ -67,14 +65,18 @@ static const uint16_t kLegacyAmdGpuList[] = {
// 1920 x 1088 are supported. Updated based on crash reports.
//
// NOTE: This list must be kept in sorted order.
-static const uint16_t kLegacyIntelGpuList[] = {
+constexpr uint16_t kLegacyIntelGpuList[] = {
0x102, 0x106, 0x116, 0x126, 0x152, 0x156, 0x166,
0x402, 0x406, 0x416, 0x41e, 0xa06, 0xa16, 0xf31,
};
-} // namespace
-
-namespace media {
+// Windows Media Foundation H.264 decoding does not support decoding videos
+// with any dimension smaller than 48 pixels:
+// http://msdn.microsoft.com/en-us/library/windows/desktop/dd797815
+//
+// TODO(dalecurtis): These values are too low. We should only be using
+// hardware decode for videos above ~360p, see http://crbug.com/684792.
+constexpr gfx::Size kMinResolution(64, 64);
// Certain AMD GPU drivers like R600, R700, Evergreen and Cayman and some second
// generation Intel GPU drivers crash if we create a video device with a
@@ -157,32 +159,12 @@ bool IsResolutionSupportedForDevice(const gfx::Size& resolution_to_test,
config_count > 0;
}
-// Returns a tuple of (LandscapeMax, PortraitMax). If landscape maximum can not
-// be computed, the value of |default_max| is returned for the landscape maximum
-// and a zero size value is returned for portrait max (erring conservatively).
-ResolutionPair GetMaxResolutionsForGUIDs(
- const gfx::Size& default_max,
+media::SupportedResolutionRange GetResolutionsForGUID(
ID3D11VideoDevice* video_device,
- const std::vector<GUID>& valid_guids,
+ const GUID& decoder_guid,
const std::vector<gfx::Size>& resolutions_to_test,
- DXGI_FORMAT format) {
- ResolutionPair result(default_max, gfx::Size());
-
- // Enumerate supported video profiles and look for the profile.
- GUID decoder_guid = GUID_NULL;
- UINT profile_count = video_device->GetVideoDecoderProfileCount();
- for (UINT profile_idx = 0; profile_idx < profile_count; profile_idx++) {
- GUID profile_id = {};
- if (SUCCEEDED(
- video_device->GetVideoDecoderProfile(profile_idx, &profile_id)) &&
- std::find(valid_guids.begin(), valid_guids.end(), profile_id) !=
- valid_guids.end()) {
- decoder_guid = profile_id;
- break;
- }
- }
- if (decoder_guid == GUID_NULL)
- return result;
+ DXGI_FORMAT format = DXGI_FORMAT_NV12) {
+ media::SupportedResolutionRange result;
// Verify input is in ascending order by height.
DCHECK(std::is_sorted(resolutions_to_test.begin(), resolutions_to_test.end(),
@@ -195,32 +177,54 @@ ResolutionPair GetMaxResolutionsForGUIDs(
format)) {
break;
}
- result.first = res;
+ result.max_landscape_resolution = res;
}
  // The max supported portrait resolution should just be a w/h flip of the
// max supported landscape resolution.
- gfx::Size flipped(result.first.height(), result.first.width());
- if (IsResolutionSupportedForDevice(flipped, decoder_guid, video_device,
+ const gfx::Size flipped(result.max_landscape_resolution.height(),
+ result.max_landscape_resolution.width());
+ if (flipped == result.max_landscape_resolution ||
+ IsResolutionSupportedForDevice(flipped, decoder_guid, video_device,
format)) {
- result.second = flipped;
+ result.max_portrait_resolution = flipped;
}
+ if (!result.max_landscape_resolution.IsEmpty())
+ result.min_resolution = kMinResolution;
+
return result;
}
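The loop above relies on a monotonicity assumption: if the device rejects one size, it will reject every larger one, so probing stops at the first failure and the portrait maximum is derived by flipping the best landscape result. A distilled, self-contained version (probe stands in for IsResolutionSupportedForDevice(); the real code also skips re-probing when the flip equals the landscape size):

#include <utility>
#include <vector>

struct Size {
  int w = 0;
  int h = 0;
  bool empty() const { return w == 0 || h == 0; }
};

template <typename Probe>
std::pair<Size, Size> FindMaxResolutions(const std::vector<Size>& ascending,
                                         Probe probe) {
  Size landscape;
  Size portrait;
  for (const Size& s : ascending) {
    if (!probe(s))
      break;  // Assume all larger sizes fail too.
    landscape = s;
  }
  // Portrait support is just the w/h flip of the landscape maximum.
  const Size flipped{landscape.h, landscape.w};
  if (!landscape.empty() && probe(flipped))
    portrait = flipped;
  return {landscape, portrait};
}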
-// TODO(tmathmeyer) refactor this so that we don'ty call
-// GetMaxResolutionsForGUIDS so many times.
-void GetResolutionsForDecoders(std::vector<GUID> h264_guids,
- ComD3D11Device device,
- const gpu::GpuDriverBugWorkarounds& workarounds,
- ResolutionPair* h264_resolutions,
- ResolutionPair* vp8_resolutions,
- ResolutionPair* vp9_0_resolutions,
- ResolutionPair* vp9_2_resolutions) {
- TRACE_EVENT0("gpu,startup", "GetResolutionsForDecoders");
+} // namespace
+
+namespace media {
+
+SupportedResolutionRangeMap GetSupportedD3D11VideoDecoderResolutions(
+ ComD3D11Device device,
+ const gpu::GpuDriverBugWorkarounds& workarounds) {
+ TRACE_EVENT0("gpu,startup", "GetSupportedD3D11VideoDecoderResolutions");
+ SupportedResolutionRangeMap supported_resolutions;
+
+ // We always insert support for H.264 regardless of the tests below. It's old
+ // enough to be ubiquitous.
+ //
+ // On Windows 7 the maximum resolution supported by media foundation is
+ // 1920 x 1088. We use 1088 to account for 16x16 macroblocks.
+ constexpr gfx::Size kDefaultMaxH264Resolution(1920, 1088);
+ SupportedResolutionRange h264_profile;
+ h264_profile.min_resolution = kMinResolution;
+ h264_profile.max_landscape_resolution = kDefaultMaxH264Resolution;
+
+ // We don't have a way to map DXVA support to specific H.264 profiles, so just
+ // mark all the common ones with the same level of support.
+ constexpr VideoCodecProfile kSupportedH264Profiles[] = {
+ H264PROFILE_BASELINE, H264PROFILE_MAIN, H264PROFILE_HIGH};
+ for (const auto profile : kSupportedH264Profiles)
+ supported_resolutions[profile] = h264_profile;
+
if (base::win::GetVersion() <= base::win::Version::WIN7)
- return;
+ return supported_resolutions;
// To detect if a driver supports the desired resolutions, we try to create
// a DXVA decoder instance for that resolution and profile. If that succeeds
@@ -228,43 +232,99 @@ void GetResolutionsForDecoders(std::vector<GUID> h264_guids,
// Legacy AMD drivers with UVD3 or earlier and some Intel GPUs crash while
// creating surfaces larger than 1920 x 1088.
if (!device || IsLegacyGPU(device.Get()))
- return;
+ return supported_resolutions;
ComD3D11VideoDevice video_device;
if (FAILED(device.As(&video_device)))
- return;
+ return supported_resolutions;
- *h264_resolutions = GetMaxResolutionsForGUIDs(
- h264_resolutions->first, video_device.Get(), h264_guids,
- {gfx::Size(2560, 1440), gfx::Size(3840, 2160), gfx::Size(4096, 2160),
- gfx::Size(4096, 2304)});
+ const std::vector<gfx::Size> kModernResolutions = {
+ gfx::Size(4096, 2160), gfx::Size(4096, 2304), gfx::Size(7680, 4320),
+ gfx::Size(8192, 4320), gfx::Size(8192, 8192)};
- if (workarounds.disable_accelerated_vpx_decode)
- return;
+ const bool should_test_for_av1_support =
+ base::FeatureList::IsEnabled(kMediaFoundationAV1Decoding) &&
+ !workarounds.disable_accelerated_av1_decode;
- if (base::FeatureList::IsEnabled(kMediaFoundationVP8Decoding)) {
- *vp8_resolutions = GetMaxResolutionsForGUIDs(
- vp8_resolutions->first, video_device.Get(),
- {D3D11_DECODER_PROFILE_VP8_VLD},
- {gfx::Size(4096, 2160), gfx::Size(4096, 2304)});
+ // Enumerate supported video profiles and look for the known profile for each
+ // codec. We first look through the decoder profiles so we don't run N
+ // resolution tests for a profile that's unsupported.
+ UINT profile_count = video_device->GetVideoDecoderProfileCount();
+ for (UINT i = 0; i < profile_count; i++) {
+ GUID profile_id;
+ if (FAILED(video_device->GetVideoDecoderProfile(i, &profile_id)))
+ continue;
+
+ if (profile_id == D3D11_DECODER_PROFILE_H264_VLD_NOFGT) {
+ const auto result = GetResolutionsForGUID(
+ video_device.Get(), profile_id,
+ {gfx::Size(2560, 1440), gfx::Size(3840, 2160), gfx::Size(4096, 2160),
+ gfx::Size(4096, 2304), gfx::Size(4096, 4096)});
+
+ // Unlike the other codecs, H.264 support is assumed up to 1080p even if
+ // these initial queries fail; in that case we keep the defaults set above.
+ if (!result.max_landscape_resolution.IsEmpty()) {
+ for (const auto profile : kSupportedH264Profiles)
+ supported_resolutions[profile] = result;
+ }
+ continue;
+ }
+
+ // Note: Each bit depth of AV1 uses a different DXGI_FORMAT; here we only
+ // test for the 8-bit one (NV12).
+ if (should_test_for_av1_support) {
+ if (profile_id == DXVA_ModeAV1_VLD_Profile0) {
+ supported_resolutions[AV1PROFILE_PROFILE_MAIN] = GetResolutionsForGUID(
+ video_device.Get(), profile_id, kModernResolutions);
+ continue;
+ }
+ if (profile_id == DXVA_ModeAV1_VLD_Profile1) {
+ supported_resolutions[AV1PROFILE_PROFILE_HIGH] = GetResolutionsForGUID(
+ video_device.Get(), profile_id, kModernResolutions);
+ continue;
+ }
+ if (profile_id == DXVA_ModeAV1_VLD_Profile2) {
+ // TODO(dalecurtis): 12-bit profile 2 support is complicated. Ideally,
+ // we should test DXVA_ModeAV1_VLD_12bit_Profile2 and
+ // DXVA_ModeAV1_VLD_12bit_Profile2_420 when the bit depth of the content
+ // is 12-bit. However, we don't know the bit depth or pixel format until
+ // it's too late. In these cases we'll end up initializing the decoder and
+ // failing on the first decode (which will trigger software fallback).
+ supported_resolutions[AV1PROFILE_PROFILE_PRO] = GetResolutionsForGUID(
+ video_device.Get(), profile_id, kModernResolutions);
+ continue;
+ }
+ }
+
+ if (!workarounds.disable_accelerated_vp8_decode &&
+ profile_id == D3D11_DECODER_PROFILE_VP8_VLD &&
+ base::FeatureList::IsEnabled(kMediaFoundationVP8Decoding)) {
+ supported_resolutions[VP8PROFILE_ANY] =
+ GetResolutionsForGUID(video_device.Get(), profile_id,
+ {gfx::Size(4096, 2160), gfx::Size(4096, 2304),
+ gfx::Size(4096, 4096)});
+ continue;
+ }
+
+ if (workarounds.disable_accelerated_vp9_decode)
+ continue;
+
+ if (profile_id == D3D11_DECODER_PROFILE_VP9_VLD_PROFILE0) {
+ supported_resolutions[VP9PROFILE_PROFILE0] = GetResolutionsForGUID(
+ video_device.Get(), profile_id, kModernResolutions);
+ continue;
+ }
+
+ // RS3 has issues with VP9.2 decoding. See https://crbug.com/937108.
+ if (profile_id == D3D11_DECODER_PROFILE_VP9_VLD_10BIT_PROFILE2 &&
+ base::win::GetVersion() != base::win::Version::WIN10_RS3) {
+ supported_resolutions[VP9PROFILE_PROFILE2] = GetResolutionsForGUID(
+ video_device.Get(), profile_id, kModernResolutions, DXGI_FORMAT_P010);
+ continue;
+ }
}
- *vp9_0_resolutions = GetMaxResolutionsForGUIDs(
- vp9_0_resolutions->first, video_device.Get(),
- {D3D11_DECODER_PROFILE_VP9_VLD_PROFILE0},
- {gfx::Size(4096, 2160), gfx::Size(4096, 2304), gfx::Size(7680, 4320),
- gfx::Size(8192, 4320), gfx::Size(8192, 8192)});
-
- // RS3 has issues with VP9.2 decoding. See https://crbug.com/937108.
- if (base::win::GetVersion() == base::win::Version::WIN10_RS3)
- return;
-
- *vp9_2_resolutions = GetMaxResolutionsForGUIDs(
- vp9_2_resolutions->first, video_device.Get(),
- {D3D11_DECODER_PROFILE_VP9_VLD_10BIT_PROFILE2},
- {gfx::Size(4096, 2160), gfx::Size(4096, 2304), gfx::Size(7680, 4320),
- gfx::Size(8192, 4320), gfx::Size(8192, 8192)},
- DXGI_FORMAT_P010);
+ return supported_resolutions;
}
} // namespace media
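A structural note on the function above: instead of probing every known GUID, it walks the driver's own profile list and only runs the expensive resolution probes for GUIDs it recognizes, so an unsupported codec costs one comparison rather than N decoder-creation attempts. Schematically (all types and names here are illustrative stand-ins):

#include <cstring>
#include <vector>

struct Guid { unsigned char bytes[16]; };  // Stand-in for GUID.

inline bool operator==(const Guid& a, const Guid& b) {
  return std::memcmp(a.bytes, b.bytes, sizeof(a.bytes)) == 0;
}

struct VideoDevice {  // Stand-in for ID3D11VideoDevice enumeration.
  std::vector<Guid> driver_profiles;
};

void EnumerateSupport(const VideoDevice& device,
                      const std::vector<Guid>& known_guids,
                      void (*run_resolution_probes)(const Guid&)) {
  // Iterate what the driver reports, not what we know about.
  for (const Guid& profile : device.driver_profiles) {
    for (const Guid& known : known_guids) {
      if (profile == known) {
        run_resolution_probes(profile);
        break;
      }
    }
  }
}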
diff --git a/chromium/media/gpu/windows/supported_profile_helpers.h b/chromium/media/gpu/windows/supported_profile_helpers.h
index 1834f0ba62a..6e521d08ff6 100644
--- a/chromium/media/gpu/windows/supported_profile_helpers.h
+++ b/chromium/media/gpu/windows/supported_profile_helpers.h
@@ -5,47 +5,35 @@
#ifndef MEDIA_GPU_WINDOWS_SUPPORTED_PROFILE_HELPERS_H_
#define MEDIA_GPU_WINDOWS_SUPPORTED_PROFILE_HELPERS_H_
-#include <d3d11_1.h>
-#include <wrl/client.h>
-#include <memory>
-#include <utility>
-#include <vector>
-
+#include "base/containers/flat_map.h"
#include "gpu/config/gpu_driver_bug_workarounds.h"
+#include "media/base/video_codecs.h"
#include "media/gpu/media_gpu_export.h"
#include "media/gpu/windows/d3d11_com_defs.h"
-#include "ui/gfx/geometry/rect.h"
-
+#include "ui/gfx/geometry/size.h"
namespace media {
-using ResolutionPair = std::pair<gfx::Size, gfx::Size>;
-
-bool IsLegacyGPU(ID3D11Device* device);
-
-// Returns true if a ID3D11VideoDecoder can be created for |resolution_to_test|
-// on the given |video_device|.
-bool IsResolutionSupportedForDevice(const gfx::Size& resolution_to_test,
- const GUID& decoder_guid,
- ID3D11VideoDevice* video_device,
- DXGI_FORMAT format);
-
-ResolutionPair GetMaxResolutionsForGUIDs(
- const gfx::Size& default_max,
- ID3D11VideoDevice* video_device,
- const std::vector<GUID>& valid_guids,
- const std::vector<gfx::Size>& resolutions_to_test,
- DXGI_FORMAT format = DXGI_FORMAT_NV12);
-
-// TODO(dalecurtis): This function should be changed to use return values.
+struct SupportedResolutionRange {
+ gfx::Size min_resolution;
+ gfx::Size max_landscape_resolution;
+ gfx::Size max_portrait_resolution;
+};
+
+using SupportedResolutionRangeMap =
+ base::flat_map<VideoCodecProfile, SupportedResolutionRange>;
+
+// Enumerates the extent of hardware decoding support for H.264, VP8, VP9, and
+// AV1. If a codec is supported, its minimum and maximum supported resolutions
+// are returned under the appropriate VideoCodecProfile entry.
+//
+// Notes:
+// - VP8 and AV1 are only tested if their base::Feature entries are enabled.
+// - Only baseline, main, and high H.264 profiles are supported.
MEDIA_GPU_EXPORT
-void GetResolutionsForDecoders(std::vector<GUID> h264_guids,
- ComD3D11Device device,
- const gpu::GpuDriverBugWorkarounds& workarounds,
- ResolutionPair* h264_resolutions,
- ResolutionPair* vp8_resolutions,
- ResolutionPair* vp9_0_resolutions,
- ResolutionPair* vp9_2_resolutions);
+SupportedResolutionRangeMap GetSupportedD3D11VideoDecoderResolutions(
+ ComD3D11Device device,
+ const gpu::GpuDriverBugWorkarounds& workarounds);
} // namespace media
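A sketch of how a caller might consume this map; SupportsProfileAtSize is a hypothetical helper, not part of the header:

#include "media/gpu/windows/supported_profile_helpers.h"

// Profiles absent from the map are unsupported; an empty
// max_portrait_resolution means portrait orientation was never verified.
bool SupportsProfileAtSize(const media::SupportedResolutionRangeMap& map,
                           media::VideoCodecProfile profile,
                           const gfx::Size& coded_size) {
  const auto it = map.find(profile);
  if (it == map.end())
    return false;
  const media::SupportedResolutionRange& range = it->second;
  const bool portrait = coded_size.height() > coded_size.width();
  const gfx::Size& max = portrait ? range.max_portrait_resolution
                                  : range.max_landscape_resolution;
  return !max.IsEmpty() &&
         coded_size.width() >= range.min_resolution.width() &&
         coded_size.height() >= range.min_resolution.height() &&
         coded_size.width() <= max.width() &&
         coded_size.height() <= max.height();
}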
diff --git a/chromium/media/gpu/windows/supported_profile_helpers_unittest.cc b/chromium/media/gpu/windows/supported_profile_helpers_unittest.cc
index 67aeb7d45b8..0d5da5b6e47 100644
--- a/chromium/media/gpu/windows/supported_profile_helpers_unittest.cc
+++ b/chromium/media/gpu/windows/supported_profile_helpers_unittest.cc
@@ -15,6 +15,7 @@
#include "media/base/media_switches.h"
#include "media/base/test_helpers.h"
#include "media/base/win/d3d11_mocks.h"
+#include "media/gpu/windows/av1_guids.h"
#include "testing/gtest/include/gtest/gtest.h"
using ::testing::_;
@@ -31,25 +32,28 @@ using ::testing::WithArgs;
return; \
} while (0)
-HRESULT SetIfSizeLessThan(D3D11_VIDEO_DECODER_DESC* desc, UINT* count) {
- *count = 1;
- return S_OK;
-}
+namespace {
+
+using PciId = std::pair<uint16_t, uint16_t>;
+constexpr PciId kLegacyIntelGpu = {0x8086, 0x102};
+constexpr PciId kRecentIntelGpu = {0x8086, 0x100};
+constexpr PciId kLegacyAmdGpu = {0x1022, 0x130f};
+constexpr PciId kRecentAmdGpu = {0x1022, 0x130e};
+
+constexpr gfx::Size kMinResolution(64, 64);
+constexpr gfx::Size kFullHd(1920, 1088);
+constexpr gfx::Size kSquare4k(4096, 4096);
+constexpr gfx::Size kSquare8k(8192, 8192);
+
+} // namespace
namespace media {
+constexpr VideoCodecProfile kSupportedH264Profiles[] = {
+ H264PROFILE_BASELINE, H264PROFILE_MAIN, H264PROFILE_HIGH};
+
class SupportedResolutionResolverTest : public ::testing::Test {
public:
- const std::pair<uint16_t, uint16_t> LegacyIntelGPU = {0x8086, 0x102};
- const std::pair<uint16_t, uint16_t> RecentIntelGPU = {0x8086, 0x100};
- const std::pair<uint16_t, uint16_t> LegacyAMDGPU = {0x1022, 0x130f};
- const std::pair<uint16_t, uint16_t> RecentAMDGPU = {0x1022, 0x130e};
-
- const ResolutionPair ten_eighty = {{1920, 1080}, {1080, 1920}};
- const ResolutionPair zero = {{0, 0}, {0, 0}};
- const ResolutionPair tall4k = {{4096, 2304}, {2304, 4096}};
- const ResolutionPair eightKsquare = {{8192, 8192}, {8192, 8192}};
-
void SetUp() override {
gpu_workarounds_.disable_dxgi_zero_copy_video = false;
mock_d3d11_device_ = CreateD3D11Mock<NiceMock<D3D11DeviceMock>>();
@@ -68,11 +72,11 @@ class SupportedResolutionResolverTest : public ::testing::Test {
ON_CALL(*mock_dxgi_device_.Get(), GetAdapter(_))
.WillByDefault(SetComPointeeAndReturnOk<0>(mock_dxgi_adapter_.Get()));
- SetGPUProfile(RecentIntelGPU);
- SetMaxResolutionForGUID(D3D11_DECODER_PROFILE_H264_VLD_NOFGT, {4096, 4096});
+ SetGpuProfile(kRecentIntelGpu);
+ SetMaxResolution(D3D11_DECODER_PROFILE_H264_VLD_NOFGT, kSquare4k);
}
- void SetMaxResolutionForGUID(const GUID& g, const gfx::Size& max_res) {
+ void SetMaxResolution(const GUID& g, const gfx::Size& max_res) {
max_size_for_guids_[g] = max_res;
ON_CALL(*mock_d3d11_video_device_.Get(), GetVideoDecoderConfigCount(_, _))
.WillByDefault(
@@ -110,7 +114,7 @@ class SupportedResolutionResolverTest : public ::testing::Test {
})));
}
- void SetGPUProfile(std::pair<uint16_t, uint16_t> vendor_and_gpu) {
+ void SetGpuProfile(std::pair<uint16_t, uint16_t> vendor_and_gpu) {
mock_adapter_desc_.DeviceId = static_cast<UINT>(vendor_and_gpu.second);
mock_adapter_desc_.VendorId = static_cast<UINT>(vendor_and_gpu.first);
@@ -119,6 +123,39 @@ class SupportedResolutionResolverTest : public ::testing::Test {
DoAll(SetArgPointee<0>(mock_adapter_desc_), Return(S_OK)));
}
+ void AssertDefaultSupport(
+ const SupportedResolutionRangeMap& supported_resolutions,
+ size_t expected_size = 3u) {
+ ASSERT_EQ(expected_size, supported_resolutions.size());
+ for (const auto profile : kSupportedH264Profiles) {
+ auto it = supported_resolutions.find(profile);
+ ASSERT_NE(it, supported_resolutions.end());
+ EXPECT_EQ(kMinResolution, it->second.min_resolution);
+ EXPECT_EQ(kFullHd, it->second.max_landscape_resolution);
+ EXPECT_EQ(gfx::Size(), it->second.max_portrait_resolution);
+ }
+ }
+
+ void TestDecoderSupport(const GUID& decoder,
+ VideoCodecProfile profile,
+ const gfx::Size& max_res = kSquare4k,
+ const gfx::Size& max_landscape_res = kSquare4k,
+ const gfx::Size& max_portrait_res = kSquare4k) {
+ EnableDecoders({decoder});
+ SetMaxResolution(decoder, max_res);
+
+ const auto supported_resolutions = GetSupportedD3D11VideoDecoderResolutions(
+ mock_d3d11_device_, gpu_workarounds_);
+ AssertDefaultSupport(supported_resolutions,
+ base::size(kSupportedH264Profiles) + 1);
+
+ auto it = supported_resolutions.find(profile);
+ ASSERT_NE(it, supported_resolutions.end());
+ EXPECT_EQ(kMinResolution, it->second.min_resolution);
+ EXPECT_EQ(max_landscape_res, it->second.max_landscape_resolution);
+ EXPECT_EQ(max_portrait_res, it->second.max_portrait_resolution);
+ }
+
Microsoft::WRL::ComPtr<D3D11DeviceMock> mock_d3d11_device_;
Microsoft::WRL::ComPtr<DXGIAdapterMock> mock_dxgi_adapter_;
Microsoft::WRL::ComPtr<DXGIDeviceMock> mock_dxgi_device_;
@@ -131,144 +168,128 @@ class SupportedResolutionResolverTest : public ::testing::Test {
return memcmp(&a, &b, sizeof(GUID)) < 0;
}
};
- std::map<GUID, gfx::Size, GUIDComparison> max_size_for_guids_;
+ base::flat_map<GUID, gfx::Size, GUIDComparison> max_size_for_guids_;
};
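Since GUID defines no operator<, the memcmp-based comparator above supplies the strict weak ordering that base::flat_map requires; any deterministic total order over the raw bytes works, even though it carries no semantic meaning. A standalone equivalent:

#include <cstring>
#include <map>

struct Guid16 { unsigned char bytes[16]; };  // Stand-in for GUID.

struct GuidLess {
  bool operator()(const Guid16& a, const Guid16& b) const {
    // Byte-wise comparison yields a consistent, if arbitrary, ordering.
    return std::memcmp(a.bytes, b.bytes, sizeof(a.bytes)) < 0;
  }
};

using GuidSizeMap = std::map<Guid16, int, GuidLess>;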
-TEST_F(SupportedResolutionResolverTest, NoDeviceAllDefault) {
+TEST_F(SupportedResolutionResolverTest, HasH264SupportByDefault) {
DONT_RUN_ON_WIN_7();
+ AssertDefaultSupport(
+ GetSupportedD3D11VideoDecoderResolutions(nullptr, gpu_workarounds_));
- ResolutionPair h264_res_expected = {{1, 2}, {3, 4}};
- ResolutionPair h264_res = {{1, 2}, {3, 4}};
- ResolutionPair vp8_res;
- ResolutionPair vp9_0_res;
- ResolutionPair vp9_2_res;
- GetResolutionsForDecoders({D3D11_DECODER_PROFILE_H264_VLD_NOFGT}, nullptr,
- gpu_workarounds_, &h264_res, &vp8_res, &vp9_0_res,
- &vp9_2_res);
-
- ASSERT_EQ(h264_res, h264_res_expected);
- ASSERT_EQ(vp8_res, zero);
- ASSERT_EQ(vp9_0_res, zero);
- ASSERT_EQ(vp9_0_res, zero);
-}
-
-TEST_F(SupportedResolutionResolverTest, LegacyGPUAllDefault) {
- DONT_RUN_ON_WIN_7();
+ SetGpuProfile(kLegacyIntelGpu);
+ AssertDefaultSupport(GetSupportedD3D11VideoDecoderResolutions(
+ mock_d3d11_device_, gpu_workarounds_));
- SetGPUProfile(LegacyIntelGPU);
-
- ResolutionPair h264_res_expected = {{1, 2}, {3, 4}};
- ResolutionPair h264_res = {{1, 2}, {3, 4}};
- ResolutionPair vp8_res;
- ResolutionPair vp9_0_res;
- ResolutionPair vp9_2_res;
- GetResolutionsForDecoders({D3D11_DECODER_PROFILE_H264_VLD_NOFGT},
- mock_d3d11_device_, gpu_workarounds_, &h264_res,
- &vp8_res, &vp9_0_res, &vp9_2_res);
-
- ASSERT_EQ(h264_res, h264_res_expected);
- ASSERT_EQ(vp8_res, zero);
- ASSERT_EQ(vp9_2_res, zero);
- ASSERT_EQ(vp9_0_res, zero);
+ SetGpuProfile(kLegacyAmdGpu);
+ AssertDefaultSupport(GetSupportedD3D11VideoDecoderResolutions(
+ mock_d3d11_device_, gpu_workarounds_));
}
TEST_F(SupportedResolutionResolverTest, WorkaroundsDisableVpx) {
DONT_RUN_ON_WIN_7();
- gpu_workarounds_.disable_dxgi_zero_copy_video = true;
- EnableDecoders({D3D11_DECODER_PROFILE_H264_VLD_NOFGT});
+ gpu_workarounds_.disable_accelerated_vp8_decode = true;
+ gpu_workarounds_.disable_accelerated_vp9_decode = true;
+ EnableDecoders({D3D11_DECODER_PROFILE_VP8_VLD,
+ D3D11_DECODER_PROFILE_VP9_VLD_PROFILE0,
+ D3D11_DECODER_PROFILE_VP9_VLD_10BIT_PROFILE2});
- ResolutionPair h264_res;
- ResolutionPair vp8_res;
- ResolutionPair vp9_0_res;
- ResolutionPair vp9_2_res;
- GetResolutionsForDecoders({D3D11_DECODER_PROFILE_H264_VLD_NOFGT},
- mock_d3d11_device_, gpu_workarounds_, &h264_res,
- &vp8_res, &vp9_0_res, &vp9_2_res);
+ AssertDefaultSupport(GetSupportedD3D11VideoDecoderResolutions(
+ mock_d3d11_device_, gpu_workarounds_));
+}
- ASSERT_EQ(h264_res, tall4k);
+TEST_F(SupportedResolutionResolverTest, H264Supports4k) {
+ DONT_RUN_ON_WIN_7();
- ASSERT_EQ(vp8_res, zero);
- ASSERT_EQ(vp9_0_res, zero);
- ASSERT_EQ(vp9_2_res, zero);
+ EnableDecoders({D3D11_DECODER_PROFILE_H264_VLD_NOFGT});
+ const auto supported_resolutions = GetSupportedD3D11VideoDecoderResolutions(
+ mock_d3d11_device_, gpu_workarounds_);
+
+ ASSERT_EQ(3u, supported_resolutions.size());
+ for (const auto profile : kSupportedH264Profiles) {
+ auto it = supported_resolutions.find(profile);
+ ASSERT_NE(it, supported_resolutions.end());
+ EXPECT_EQ(kMinResolution, it->second.min_resolution);
+ EXPECT_EQ(kSquare4k, it->second.max_landscape_resolution);
+ EXPECT_EQ(kSquare4k, it->second.max_portrait_resolution);
+ }
}
-TEST_F(SupportedResolutionResolverTest, VP8_Supports4k) {
+TEST_F(SupportedResolutionResolverTest, VP8Supports4k) {
DONT_RUN_ON_WIN_7();
base::test::ScopedFeatureList scoped_feature_list;
scoped_feature_list.InitAndEnableFeature(kMediaFoundationVP8Decoding);
+ TestDecoderSupport(D3D11_DECODER_PROFILE_VP8_VLD, VP8PROFILE_ANY);
+}
- EnableDecoders(
- {D3D11_DECODER_PROFILE_H264_VLD_NOFGT, D3D11_DECODER_PROFILE_VP8_VLD});
- SetMaxResolutionForGUID(D3D11_DECODER_PROFILE_VP8_VLD, {4096, 4096});
-
- ResolutionPair h264_res;
- ResolutionPair vp8_res;
- ResolutionPair vp9_0_res;
- ResolutionPair vp9_2_res;
- GetResolutionsForDecoders({D3D11_DECODER_PROFILE_H264_VLD_NOFGT},
- mock_d3d11_device_, gpu_workarounds_, &h264_res,
- &vp8_res, &vp9_0_res, &vp9_2_res);
-
- ASSERT_EQ(h264_res, tall4k);
-
- ASSERT_EQ(vp8_res, tall4k);
-
- ASSERT_EQ(vp9_0_res, zero);
+TEST_F(SupportedResolutionResolverTest, VP9Profile0Supports8k) {
+ DONT_RUN_ON_WIN_7();
+ TestDecoderSupport(D3D11_DECODER_PROFILE_VP9_VLD_PROFILE0,
+ VP9PROFILE_PROFILE0, kSquare8k, kSquare8k, kSquare8k);
+}
- ASSERT_EQ(vp9_2_res, zero);
+TEST_F(SupportedResolutionResolverTest, VP9Profile2Supports8k) {
+ DONT_RUN_ON_WIN_7();
+ TestDecoderSupport(D3D11_DECODER_PROFILE_VP9_VLD_10BIT_PROFILE2,
+ VP9PROFILE_PROFILE2, kSquare8k, kSquare8k, kSquare8k);
}
-TEST_F(SupportedResolutionResolverTest, VP9_0Supports8k) {
+TEST_F(SupportedResolutionResolverTest, MultipleCodecs) {
DONT_RUN_ON_WIN_7();
+ SetGpuProfile(kRecentAmdGpu);
+
+ // H.264 and VP9.0 are the most common supported codecs.
EnableDecoders({D3D11_DECODER_PROFILE_H264_VLD_NOFGT,
D3D11_DECODER_PROFILE_VP9_VLD_PROFILE0});
- SetMaxResolutionForGUID(D3D11_DECODER_PROFILE_VP9_VLD_PROFILE0, {8192, 8192});
-
- ResolutionPair h264_res;
- ResolutionPair vp8_res;
- ResolutionPair vp9_0_res;
- ResolutionPair vp9_2_res;
- GetResolutionsForDecoders({D3D11_DECODER_PROFILE_H264_VLD_NOFGT},
- mock_d3d11_device_, gpu_workarounds_, &h264_res,
- &vp8_res, &vp9_0_res, &vp9_2_res);
-
- ASSERT_EQ(h264_res, tall4k);
-
- ASSERT_EQ(vp8_res, zero);
-
- ASSERT_EQ(vp9_0_res, eightKsquare);
+ SetMaxResolution(D3D11_DECODER_PROFILE_VP9_VLD_PROFILE0, kSquare8k);
+
+ const auto supported_resolutions = GetSupportedD3D11VideoDecoderResolutions(
+ mock_d3d11_device_, gpu_workarounds_);
+
+ ASSERT_EQ(base::size(kSupportedH264Profiles) + 1,
+ supported_resolutions.size());
+ for (const auto profile : kSupportedH264Profiles) {
+ auto it = supported_resolutions.find(profile);
+ ASSERT_NE(it, supported_resolutions.end());
+ EXPECT_EQ(kMinResolution, it->second.min_resolution);
+ EXPECT_EQ(kSquare4k, it->second.max_landscape_resolution);
+ EXPECT_EQ(kSquare4k, it->second.max_portrait_resolution);
+ }
- ASSERT_EQ(vp9_2_res, zero);
+ auto it = supported_resolutions.find(VP9PROFILE_PROFILE0);
+ ASSERT_NE(it, supported_resolutions.end());
+ EXPECT_EQ(kMinResolution, it->second.min_resolution);
+ EXPECT_EQ(kSquare8k, it->second.max_landscape_resolution);
+ EXPECT_EQ(kSquare8k, it->second.max_portrait_resolution);
}
-TEST_F(SupportedResolutionResolverTest, BothVP9ProfilesSupported) {
+TEST_F(SupportedResolutionResolverTest, AV1ProfileMainSupports8k) {
DONT_RUN_ON_WIN_7();
- EnableDecoders({D3D11_DECODER_PROFILE_H264_VLD_NOFGT,
- D3D11_DECODER_PROFILE_VP9_VLD_PROFILE0,
- D3D11_DECODER_PROFILE_VP9_VLD_10BIT_PROFILE2});
- SetMaxResolutionForGUID(D3D11_DECODER_PROFILE_VP9_VLD_PROFILE0, {8192, 8192});
- SetMaxResolutionForGUID(D3D11_DECODER_PROFILE_VP9_VLD_10BIT_PROFILE2,
- {8192, 8192});
-
- ResolutionPair h264_res;
- ResolutionPair vp8_res;
- ResolutionPair vp9_0_res;
- ResolutionPair vp9_2_res;
- GetResolutionsForDecoders({D3D11_DECODER_PROFILE_H264_VLD_NOFGT},
- mock_d3d11_device_, gpu_workarounds_, &h264_res,
- &vp8_res, &vp9_0_res, &vp9_2_res);
+ base::test::ScopedFeatureList scoped_feature_list;
+ scoped_feature_list.InitAndEnableFeature(kMediaFoundationAV1Decoding);
+ TestDecoderSupport(DXVA_ModeAV1_VLD_Profile0, AV1PROFILE_PROFILE_MAIN,
+ kSquare8k, kSquare8k, kSquare8k);
+}
- ASSERT_EQ(h264_res, tall4k);
+TEST_F(SupportedResolutionResolverTest, AV1ProfileHighSupports8k) {
+ DONT_RUN_ON_WIN_7();
- ASSERT_EQ(vp8_res, zero);
+ base::test::ScopedFeatureList scoped_feature_list;
+ scoped_feature_list.InitAndEnableFeature(kMediaFoundationAV1Decoding);
+ TestDecoderSupport(DXVA_ModeAV1_VLD_Profile1, AV1PROFILE_PROFILE_HIGH,
+ kSquare8k, kSquare8k, kSquare8k);
+}
- ASSERT_EQ(vp9_0_res, eightKsquare);
+TEST_F(SupportedResolutionResolverTest, AV1ProfileProSupports8k) {
+ DONT_RUN_ON_WIN_7();
- ASSERT_EQ(vp9_2_res, eightKsquare);
+ base::test::ScopedFeatureList scoped_feature_list;
+ scoped_feature_list.InitAndEnableFeature(kMediaFoundationAV1Decoding);
+ TestDecoderSupport(DXVA_ModeAV1_VLD_Profile2, AV1PROFILE_PROFILE_PRO,
+ kSquare8k, kSquare8k, kSquare8k);
}
} // namespace media