Diffstat (limited to 'chromium/media/gpu/chromeos')
25 files changed, 613 insertions, 234 deletions
diff --git a/chromium/media/gpu/chromeos/BUILD.gn b/chromium/media/gpu/chromeos/BUILD.gn index a209dbf6652..ce07f94b380 100644 --- a/chromium/media/gpu/chromeos/BUILD.gn +++ b/chromium/media/gpu/chromeos/BUILD.gn @@ -149,6 +149,7 @@ source_set("unit_tests") { "mailbox_video_frame_converter_unittest.cc", "platform_video_frame_pool_unittest.cc", "platform_video_frame_utils_unittest.cc", + "video_decoder_pipeline_unittest.cc", ] } diff --git a/chromium/media/gpu/chromeos/chromeos_video_decoder_factory.cc b/chromium/media/gpu/chromeos/chromeos_video_decoder_factory.cc index b0c1595e6b5..9e2367128f3 100644 --- a/chromium/media/gpu/chromeos/chromeos_video_decoder_factory.cc +++ b/chromium/media/gpu/chromeos/chromeos_video_decoder_factory.cc @@ -7,6 +7,7 @@ #include <utility> #include "base/sequenced_task_runner.h" +#include "media/base/media_log.h" #include "media/base/video_decoder.h" #include "media/gpu/buildflags.h" #include "media/gpu/chromeos/mailbox_video_frame_converter.h" @@ -18,32 +19,27 @@ #endif #if BUILDFLAG(USE_V4L2_CODEC) -#include "media/gpu/v4l2/v4l2_slice_video_decoder.h" +#include "media/gpu/v4l2/v4l2_video_decoder.h" #endif namespace media { namespace { -// Get a list of the available functions for creating VideoDeocoder. -base::queue<VideoDecoderPipeline::CreateVDFunc> GetCreateVDFunctions( - VideoDecoderPipeline::CreateVDFunc cur_create_vd_func) { - static constexpr VideoDecoderPipeline::CreateVDFunc kCreateVDFuncs[] = { +// Gets a list of the available functions for creating VideoDecoders. +VideoDecoderPipeline::CreateDecoderFunctions GetCreateDecoderFunctions() { + constexpr VideoDecoderPipeline::CreateDecoderFunction kCreateVDFuncs[] = { #if BUILDFLAG(USE_VAAPI) &VaapiVideoDecoder::Create, #endif // BUILDFLAG(USE_VAAPI) #if BUILDFLAG(USE_V4L2_CODEC) - &V4L2SliceVideoDecoder::Create, + &V4L2VideoDecoder::Create, #endif // BUILDFLAG(USE_V4L2_CODEC) }; - base::queue<VideoDecoderPipeline::CreateVDFunc> ret; - for (const auto& func : kCreateVDFuncs) { - if (func != cur_create_vd_func) - ret.push(func); - } - return ret; + return VideoDecoderPipeline::CreateDecoderFunctions( + kCreateVDFuncs, kCreateVDFuncs + base::size(kCreateVDFuncs)); } } // namespace @@ -61,7 +57,7 @@ ChromeosVideoDecoderFactory::GetSupportedConfigs() { #endif // BUILDFLAG(USE_VAAPI) #if BUILDFLAG(USE_V4L2_CODEC) - configs = V4L2SliceVideoDecoder::GetSupportedConfigs(); + configs = V4L2VideoDecoder::GetSupportedConfigs(); supported_configs.insert(supported_configs.end(), configs.begin(), configs.end()); #endif // BUILDFLAG(USE_V4L2_CODEC) @@ -74,11 +70,11 @@ std::unique_ptr<VideoDecoder> ChromeosVideoDecoderFactory::Create( scoped_refptr<base::SequencedTaskRunner> client_task_runner, std::unique_ptr<DmabufVideoFramePool> frame_pool, std::unique_ptr<VideoFrameConverter> frame_converter, - gpu::GpuMemoryBufferFactory* const gpu_memory_buffer_factory) { + std::unique_ptr<MediaLog> media_log) { return VideoDecoderPipeline::Create( std::move(client_task_runner), std::move(frame_pool), - std::move(frame_converter), gpu_memory_buffer_factory, - base::BindRepeating(&GetCreateVDFunctions)); + std::move(frame_converter), std::move(media_log), + base::BindRepeating(&GetCreateDecoderFunctions)); } } // namespace media diff --git a/chromium/media/gpu/chromeos/chromeos_video_decoder_factory.h b/chromium/media/gpu/chromeos/chromeos_video_decoder_factory.h index 15d4e5830c9..ee61ce2a5db 100644 --- a/chromium/media/gpu/chromeos/chromeos_video_decoder_factory.h +++ 
b/chromium/media/gpu/chromeos/chromeos_video_decoder_factory.h @@ -15,13 +15,10 @@ namespace base { class SequencedTaskRunner; } // namespace base -namespace gpu { -class GpuMemoryBufferFactory; -} // namespace gpu - namespace media { class DmabufVideoFramePool; +class MediaLog; class VideoDecoder; class VideoFrameConverter; @@ -31,13 +28,11 @@ class MEDIA_GPU_EXPORT ChromeosVideoDecoderFactory { // Create VideoDecoder instance that allocates VideoFrame from |frame_pool| // and converts the output VideoFrame |frame_converter|. - // Note the caller is responsible for keeping |gpu_memory_buffer_factory| - // alive during the returned VideoDecoder lifetime. static std::unique_ptr<VideoDecoder> Create( scoped_refptr<base::SequencedTaskRunner> client_task_runner, std::unique_ptr<DmabufVideoFramePool> frame_pool, std::unique_ptr<VideoFrameConverter> frame_converter, - gpu::GpuMemoryBufferFactory* const gpu_memory_buffer_factory); + std::unique_ptr<MediaLog> media_log); }; } // namespace media diff --git a/chromium/media/gpu/chromeos/fourcc.cc b/chromium/media/gpu/chromeos/fourcc.cc index e8d514df9fa..fb179e65bb1 100644 --- a/chromium/media/gpu/chromeos/fourcc.cc +++ b/chromium/media/gpu/chromeos/fourcc.cc @@ -5,6 +5,7 @@ #include "media/gpu/chromeos/fourcc.h" #include "base/logging.h" +#include "base/notreached.h" #include "base/strings/stringprintf.h" #include "media/gpu/macros.h" @@ -42,6 +43,7 @@ base::Optional<Fourcc> Fourcc::FromUint32(uint32_t fourcc) { case YM16: case MT21: case MM21: + case P010: return Fourcc(static_cast<Value>(fourcc)); } DVLOGF(3) << "Unmapped fourcc: " << FourccToString(fourcc); @@ -74,6 +76,8 @@ base::Optional<Fourcc> Fourcc::FromVideoPixelFormat( return Fourcc(NV12); case PIXEL_FORMAT_NV21: return Fourcc(NV21); + case PIXEL_FORMAT_P016LE: + return Fourcc(P010); case PIXEL_FORMAT_UYVY: NOTREACHED(); FALLTHROUGH; @@ -92,7 +96,6 @@ base::Optional<Fourcc> Fourcc::FromVideoPixelFormat( case PIXEL_FORMAT_YUV422P12: case PIXEL_FORMAT_YUV444P12: case PIXEL_FORMAT_Y16: - case PIXEL_FORMAT_P016LE: case PIXEL_FORMAT_XR30: case PIXEL_FORMAT_XB30: case PIXEL_FORMAT_UNKNOWN: @@ -186,6 +189,8 @@ VideoPixelFormat Fourcc::ToVideoPixelFormat() const { // be mapped to PIXEL_FORMAT_NV12. case MM21: return PIXEL_FORMAT_NV12; + case P010: + return PIXEL_FORMAT_P016LE; } NOTREACHED() << "Unmapped Fourcc: " << ToString(); return PIXEL_FORMAT_UNKNOWN; @@ -230,6 +235,8 @@ base::Optional<Fourcc> Fourcc::FromVAFourCC(uint32_t va_fourcc) { return Fourcc(XR24); case VA_FOURCC_ARGB: return Fourcc(RGB4); + case VA_FOURCC_P010: + return Fourcc(P010); } DVLOGF(3) << "Unmapped VAFourCC: " << FourccToString(va_fourcc); return base::nullopt; @@ -257,6 +264,8 @@ base::Optional<uint32_t> Fourcc::ToVAFourCC() const { return VA_FOURCC_BGRX; case RGB4: return VA_FOURCC_ARGB; + case P010: + return VA_FOURCC_P010; case YM12: case YM21: case NM12: @@ -287,6 +296,7 @@ base::Optional<Fourcc> Fourcc::ToSinglePlanar() const { case YUYV: case NV12: case NV21: + case P010: return Fourcc(value_); case YM12: return Fourcc(YU12); @@ -319,6 +329,7 @@ bool Fourcc::IsMultiPlanar() const { case YUYV: case NV12: case NV21: + case P010: return false; case YM12: case YM21: diff --git a/chromium/media/gpu/chromeos/fourcc.h b/chromium/media/gpu/chromeos/fourcc.h index 85172e16d52..652f203e02a 100644 --- a/chromium/media/gpu/chromeos/fourcc.h +++ b/chromium/media/gpu/chromeos/fourcc.h @@ -108,6 +108,10 @@ class MEDIA_GPU_EXPORT Fourcc { // Maps to V4L2_PIX_FMT_MM21. // It is used for MT8183 hardware video decoder. 
MM21 = ComposeFourcc('M', 'M', '2', '1'), + + // Two-plane 10-bit YUV 4:2:0. Each sample is a two-byte little-endian value + // with the bottom six bits ignored. + P010 = ComposeFourcc('P', '0', '1', '0'), }; explicit Fourcc(Fourcc::Value fourcc); diff --git a/chromium/media/gpu/chromeos/fourcc_unittests.cc b/chromium/media/gpu/chromeos/fourcc_unittests.cc index ade4a4b663c..d59b317ee0b 100644 --- a/chromium/media/gpu/chromeos/fourcc_unittests.cc +++ b/chromium/media/gpu/chromeos/fourcc_unittests.cc @@ -32,11 +32,11 @@ TEST(FourccTest, V4L2PixFmtToV4L2PixFmt) { CheckFromV4L2PixFmtAndBack(V4L2_PIX_FMT_ABGR32); #ifdef V4L2_PIX_FMT_RGBA32 - V4L2PixFmtIsEqual(V4L2_PIX_FMT_RGBA32); + CheckFromV4L2PixFmtAndBack(V4L2_PIX_FMT_RGBA32); #endif CheckFromV4L2PixFmtAndBack(V4L2_PIX_FMT_XBGR32); #ifdef V4L2_PIX_FMT_RGBX32 - V4L2PixFmtIsEqual(V4L2_PIX_FMT_RGBX32); + CheckFromV4L2PixFmtAndBack(V4L2_PIX_FMT_RGBX32); #endif CheckFromV4L2PixFmtAndBack(V4L2_PIX_FMT_RGB32); CheckFromV4L2PixFmtAndBack(V4L2_PIX_FMT_YUV420); @@ -133,6 +133,7 @@ TEST(FourccTest, FromVaFourCCAndBack) { CheckFromVAFourCCAndBack(VA_FOURCC_BGRA); CheckFromVAFourCCAndBack(VA_FOURCC_BGRX); CheckFromVAFourCCAndBack(VA_FOURCC_ARGB); + CheckFromVAFourCCAndBack(VA_FOURCC_P010); } TEST(FourccTest, VAFourCCToVideoPixelFormat) { @@ -154,6 +155,8 @@ TEST(FourccTest, VAFourCCToVideoPixelFormat) { Fourcc::FromVAFourCC(VA_FOURCC_BGRA)->ToVideoPixelFormat()); EXPECT_EQ(PIXEL_FORMAT_XRGB, Fourcc::FromVAFourCC(VA_FOURCC_BGRX)->ToVideoPixelFormat()); + EXPECT_EQ(PIXEL_FORMAT_P016LE, + Fourcc::FromVAFourCC(VA_FOURCC_P010)->ToVideoPixelFormat()); } TEST(FourccTest, VideoPixelFormatToVAFourCC) { @@ -175,6 +178,8 @@ TEST(FourccTest, VideoPixelFormatToVAFourCC) { *Fourcc::FromVideoPixelFormat(PIXEL_FORMAT_ARGB)->ToVAFourCC()); EXPECT_EQ(static_cast<uint32_t>(VA_FOURCC_BGRX), *Fourcc::FromVideoPixelFormat(PIXEL_FORMAT_XRGB)->ToVAFourCC()); + EXPECT_EQ(static_cast<uint32_t>(VA_FOURCC_P010), + *Fourcc::FromVideoPixelFormat(PIXEL_FORMAT_P016LE)->ToVAFourCC()); } #endif // BUILDFLAG(USE_VAAPI) @@ -189,6 +194,7 @@ TEST(FourccTest, FourccToSinglePlanar) { EXPECT_EQ(Fourcc(Fourcc::YUYV).ToSinglePlanar(), Fourcc(Fourcc::YUYV)); EXPECT_EQ(Fourcc(Fourcc::NV12).ToSinglePlanar(), Fourcc(Fourcc::NV12)); EXPECT_EQ(Fourcc(Fourcc::NV21).ToSinglePlanar(), Fourcc(Fourcc::NV21)); + EXPECT_EQ(Fourcc(Fourcc::P010).ToSinglePlanar(), Fourcc(Fourcc::P010)); EXPECT_EQ(Fourcc(Fourcc::YM12).ToSinglePlanar(), Fourcc(Fourcc::YU12).ToSinglePlanar()); EXPECT_EQ(Fourcc(Fourcc::YM21).ToSinglePlanar(), diff --git a/chromium/media/gpu/chromeos/image_processor.cc b/chromium/media/gpu/chromeos/image_processor.cc index cde32f09a80..c3227c88154 100644 --- a/chromium/media/gpu/chromeos/image_processor.cc +++ b/chromium/media/gpu/chromeos/image_processor.cc @@ -70,6 +70,7 @@ std::unique_ptr<ImageProcessor> ImageProcessor::Create( const PortConfig& input_config, const PortConfig& output_config, const std::vector<OutputMode>& preferred_output_modes, + VideoRotation relative_rotation, ErrorCB error_cb, scoped_refptr<base::SequencedTaskRunner> client_task_runner) { scoped_refptr<base::SequencedTaskRunner> backend_task_runner = @@ -77,9 +78,9 @@ std::unique_ptr<ImageProcessor> ImageProcessor::Create( auto wrapped_error_cb = base::BindRepeating( base::IgnoreResult(&base::SequencedTaskRunner::PostTask), client_task_runner, FROM_HERE, std::move(error_cb)); - std::unique_ptr<ImageProcessorBackend> backend = - create_backend_cb.Run(input_config, output_config, preferred_output_modes, - 
std::move(wrapped_error_cb), backend_task_runner); + std::unique_ptr<ImageProcessorBackend> backend = create_backend_cb.Run( + input_config, output_config, preferred_output_modes, relative_rotation, + std::move(wrapped_error_cb), backend_task_runner); if (!backend) return nullptr; diff --git a/chromium/media/gpu/chromeos/image_processor.h b/chromium/media/gpu/chromeos/image_processor.h index d0ce7acc8e3..ac62dbaf8cd 100644 --- a/chromium/media/gpu/chromeos/image_processor.h +++ b/chromium/media/gpu/chromeos/image_processor.h @@ -42,6 +42,7 @@ class MEDIA_GPU_EXPORT ImageProcessor { const PortConfig& input_config, const PortConfig& output_config, const std::vector<OutputMode>& preferred_output_modes, + VideoRotation relative_rotation, ErrorCB error_cb, scoped_refptr<base::SequencedTaskRunner> backend_task_runner)>; @@ -50,6 +51,7 @@ class MEDIA_GPU_EXPORT ImageProcessor { const PortConfig& input_config, const PortConfig& output_config, const std::vector<OutputMode>& preferred_output_modes, + VideoRotation relative_rotation, ErrorCB error_cb, scoped_refptr<base::SequencedTaskRunner> client_task_runner); diff --git a/chromium/media/gpu/chromeos/image_processor_backend.cc b/chromium/media/gpu/chromeos/image_processor_backend.cc index 27c5a056e81..0d7924766ba 100644 --- a/chromium/media/gpu/chromeos/image_processor_backend.cc +++ b/chromium/media/gpu/chromeos/image_processor_backend.cc @@ -63,11 +63,13 @@ ImageProcessorBackend::ImageProcessorBackend( const PortConfig& input_config, const PortConfig& output_config, OutputMode output_mode, + VideoRotation relative_rotation, ErrorCB error_cb, scoped_refptr<base::SequencedTaskRunner> backend_task_runner) : input_config_(input_config), output_config_(output_config), output_mode_(output_mode), + relative_rotation_(relative_rotation), error_cb_(error_cb), backend_task_runner_(std::move(backend_task_runner)) { DETACH_FROM_SEQUENCE(backend_sequence_checker_); diff --git a/chromium/media/gpu/chromeos/image_processor_backend.h b/chromium/media/gpu/chromeos/image_processor_backend.h index 85fcdf76f59..6b0c86f5bc8 100644 --- a/chromium/media/gpu/chromeos/image_processor_backend.h +++ b/chromium/media/gpu/chromeos/image_processor_backend.h @@ -113,6 +113,7 @@ class MEDIA_GPU_EXPORT ImageProcessorBackend { const PortConfig& input_config, const PortConfig& output_config, OutputMode output_mode, + VideoRotation relative_rotation, ErrorCB error_cb, scoped_refptr<base::SequencedTaskRunner> backend_task_runner); virtual ~ImageProcessorBackend(); @@ -125,6 +126,10 @@ class MEDIA_GPU_EXPORT ImageProcessorBackend { // works as IMPORT mode for output. const OutputMode output_mode_; + // ImageProcessor performs a rotation if the |relative_rotation_| is not equal + // to VIDEO_ROTATION_0. + const VideoRotation relative_rotation_; + // Call this callback when any error occurs. 
const ErrorCB error_cb_; diff --git a/chromium/media/gpu/chromeos/image_processor_factory.cc b/chromium/media/gpu/chromeos/image_processor_factory.cc index 0daaab910f5..ccdfcf4c1ca 100644 --- a/chromium/media/gpu/chromeos/image_processor_factory.cc +++ b/chromium/media/gpu/chromeos/image_processor_factory.cc @@ -81,7 +81,8 @@ std::unique_ptr<ImageProcessor> CreateV4L2ImageProcessorWithInputCandidates( return v4l2_vda_helpers::CreateImageProcessor( input_fourcc, *output_fourcc, input_size, output_size, visible_size, - num_buffers, V4L2Device::Create(), ImageProcessor::OutputMode::IMPORT, + VideoFrame::StorageType::STORAGE_GPU_MEMORY_BUFFER, num_buffers, + V4L2Device::Create(), ImageProcessor::OutputMode::IMPORT, std::move(client_task_runner), std::move(error_cb)); } return nullptr; @@ -96,6 +97,7 @@ std::unique_ptr<ImageProcessor> ImageProcessorFactory::Create( const ImageProcessor::PortConfig& output_config, const std::vector<ImageProcessor::OutputMode>& preferred_output_modes, size_t num_buffers, + VideoRotation relative_rotation, scoped_refptr<base::SequencedTaskRunner> client_task_runner, ImageProcessor::ErrorCB error_cb) { std::vector<ImageProcessor::CreateBackendCB> create_funcs; @@ -112,9 +114,10 @@ std::unique_ptr<ImageProcessor> ImageProcessorFactory::Create( std::unique_ptr<ImageProcessor> image_processor; for (auto& create_func : create_funcs) { - image_processor = ImageProcessor::Create( - std::move(create_func), input_config, output_config, - preferred_output_modes, error_cb, client_task_runner); + image_processor = + ImageProcessor::Create(std::move(create_func), input_config, + output_config, preferred_output_modes, + relative_rotation, error_cb, client_task_runner); if (image_processor) return image_processor; } diff --git a/chromium/media/gpu/chromeos/image_processor_factory.h b/chromium/media/gpu/chromeos/image_processor_factory.h index 7ab5b4cf56b..a81eddde318 100644 --- a/chromium/media/gpu/chromeos/image_processor_factory.h +++ b/chromium/media/gpu/chromeos/image_processor_factory.h @@ -54,6 +54,7 @@ class MEDIA_GPU_EXPORT ImageProcessorFactory { const ImageProcessor::PortConfig& output_config, const std::vector<ImageProcessor::OutputMode>& preferred_output_modes, size_t num_buffers, + VideoRotation relative_rotation, scoped_refptr<base::SequencedTaskRunner> client_task_runner, ImageProcessor::ErrorCB error_cb); diff --git a/chromium/media/gpu/chromeos/image_processor_test.cc b/chromium/media/gpu/chromeos/image_processor_test.cc index a4192c90397..84c3535203b 100644 --- a/chromium/media/gpu/chromeos/image_processor_test.cc +++ b/chromium/media/gpu/chromeos/image_processor_test.cc @@ -76,6 +76,14 @@ const base::FilePath::CharType* kNV12Image180P = const base::FilePath::CharType* kNV12Image360PIn480P = FILE_PATH_LITERAL("puppets-640x360_in_640x480.nv12.yuv"); +// Files for rotation test. 
+const base::FilePath::CharType* kNV12Image90 = + FILE_PATH_LITERAL("bear_192x320_90.nv12.yuv"); +const base::FilePath::CharType* kNV12Image180 = + FILE_PATH_LITERAL("bear_320x192_180.nv12.yuv"); +const base::FilePath::CharType* kNV12Image270 = + FILE_PATH_LITERAL("bear_192x320_270.nv12.yuv"); + class ImageProcessorParamTest : public ::testing::Test, public ::testing::WithParamInterface< @@ -115,6 +123,26 @@ class ImageProcessorParamTest ImageProcessor::PortConfig output_config( output_fourcc, output_image->Size(), output_layout->planes(), output_image->VisibleRect(), output_storage_types); + int rotation = + ((output_image->Rotation() - input_image.Rotation() + 4) % 4) * 90; + VideoRotation relative_rotation = VIDEO_ROTATION_0; + switch (rotation) { + case 0: + relative_rotation = VIDEO_ROTATION_0; + break; + case 90: + relative_rotation = VIDEO_ROTATION_90; + break; + case 180: + relative_rotation = VIDEO_ROTATION_180; + break; + case 270: + relative_rotation = VIDEO_ROTATION_270; + break; + default: + NOTREACHED() << "Invalid rotation: " << rotation; + return nullptr; + } // TODO(crbug.com/917951): Select more appropriate number of buffers. constexpr size_t kNumBuffers = 1; LOG_ASSERT(output_image->IsMetadataLoaded()); @@ -156,7 +184,8 @@ class ImageProcessorParamTest } auto ip_client = test::ImageProcessorClient::Create( - input_config, output_config, kNumBuffers, std::move(frame_processors)); + input_config, output_config, kNumBuffers, relative_rotation, + std::move(frame_processors)); return ip_client; } @@ -294,6 +323,17 @@ INSTANTIATE_TEST_SUITE_P(NV12CroppingAndScaling, ::testing::Values(std::make_tuple(kNV12Image360PIn480P, kNV12Image270P))); +// Rotate frame to specified rotation. +// Now only VaapiIP maybe support rotaion. +INSTANTIATE_TEST_SUITE_P( + NV12Rotation, + ImageProcessorParamTest, + ::testing::Values(std::make_tuple(kNV12Image, kNV12Image90), + std::make_tuple(kNV12Image, kNV12Image180), + std::make_tuple(kNV12Image, kNV12Image270), + std::make_tuple(kNV12Image180, kNV12Image90), + std::make_tuple(kNV12Image180, kNV12Image))); + #if defined(OS_CHROMEOS) // TODO(hiroh): Add more tests. // MEM->DMABUF (V4L2VideoEncodeAccelerator), diff --git a/chromium/media/gpu/chromeos/libyuv_image_processor_backend.cc b/chromium/media/gpu/chromeos/libyuv_image_processor_backend.cc index ab55071a330..a9de6bc113d 100644 --- a/chromium/media/gpu/chromeos/libyuv_image_processor_backend.cc +++ b/chromium/media/gpu/chromeos/libyuv_image_processor_backend.cc @@ -12,6 +12,7 @@ #include "third_party/libyuv/include/libyuv/convert.h" #include "third_party/libyuv/include/libyuv/convert_from.h" #include "third_party/libyuv/include/libyuv/convert_from_argb.h" +#include "third_party/libyuv/include/libyuv/rotate.h" #include "third_party/libyuv/include/libyuv/scale.h" namespace media { @@ -72,6 +73,57 @@ void NV12Scale(uint8_t* tmp_buffer, dst_stride_uv, dst_chroma_width, dst_chroma_height); } +// TODO(https://bugs.chromium.org/p/libyuv/issues/detail?id=840): Remove +// this once libyuv implements NV12Rotate() and use the libyuv::NV12Rotate(). 
+bool NV12Rotate(uint8_t* tmp_buffer, + const uint8_t* src_y, + int src_stride_y, + const uint8_t* src_uv, + int src_stride_uv, + int src_width, + int src_height, + uint8_t* dst_y, + int dst_stride_y, + uint8_t* dst_uv, + int dst_stride_uv, + int dst_width, + int dst_height, + VideoRotation relative_rotation) { + libyuv::RotationModeEnum rotation = libyuv::kRotate0; + switch (relative_rotation) { + case VIDEO_ROTATION_0: + NOTREACHED() << "Unexpected rotation: " << rotation; + return false; + case VIDEO_ROTATION_90: + rotation = libyuv::kRotate90; + break; + case VIDEO_ROTATION_180: + rotation = libyuv::kRotate180; + break; + case VIDEO_ROTATION_270: + rotation = libyuv::kRotate270; + break; + } + + // Rotating. + const int tmp_uv_width = (dst_width + 1) / 2; + const int tmp_uv_height = (dst_height + 1) / 2; + uint8_t* const tmp_u = tmp_buffer; + uint8_t* const tmp_v = tmp_u + tmp_uv_width * tmp_uv_height; + + // Rotate the NV12 planes to I420. + int ret = libyuv::NV12ToI420Rotate( + src_y, src_stride_y, src_uv, src_stride_uv, dst_y, dst_stride_y, tmp_u, + tmp_uv_width, tmp_v, tmp_uv_width, src_width, src_height, rotation); + if (ret != 0) + return false; + + // Merge the UV planes into the destination. + libyuv::MergeUVPlane(tmp_u, tmp_uv_width, tmp_v, tmp_uv_width, dst_uv, + dst_stride_uv, tmp_uv_width, tmp_uv_height); + return true; +} + enum class SupportResult { Supported, SupportedWithPivot, @@ -90,7 +142,7 @@ SupportResult IsFormatSupported(Fourcc input_fourcc, Fourcc output_fourcc) { {Fourcc::YV12, Fourcc::NV12, false}, {Fourcc::AB24, Fourcc::NV12, true}, {Fourcc::XB24, Fourcc::NV12, true}, - // Scaling. + // Scaling or Rotating. {Fourcc::NV12, Fourcc::NV12, true}, }; @@ -128,6 +180,7 @@ std::unique_ptr<ImageProcessorBackend> LibYUVImageProcessorBackend::Create( const PortConfig& input_config, const PortConfig& output_config, const std::vector<OutputMode>& preferred_output_modes, + VideoRotation relative_rotation, ErrorCB error_cb, scoped_refptr<base::SequencedTaskRunner> backend_task_runner) { VLOGF(2); @@ -206,7 +259,8 @@ std::unique_ptr<ImageProcessorBackend> LibYUVImageProcessorBackend::Create( // used as |tmp_buffer| in NV12Scale(). // TODO(hiroh): Remove this restriction once libyuv:NV12Scale() is arrived. 
if (!gfx::Rect(input_config.visible_rect.size()) - .Contains(gfx::Rect(output_config.visible_rect.size()))) { + .Contains(gfx::Rect(output_config.visible_rect.size())) && + relative_rotation == VIDEO_ROTATION_0) { VLOGF(2) << "Down-scaling support only, input_config.visible_rect=" << input_config.visible_rect.ToString() << ", output_config.visible_rect=" @@ -237,7 +291,7 @@ std::unique_ptr<ImageProcessorBackend> LibYUVImageProcessorBackend::Create( PortConfig(output_config.fourcc, output_config.size, output_config.planes, output_config.visible_rect, {output_storage_type}), - OutputMode::IMPORT, std::move(error_cb), + OutputMode::IMPORT, relative_rotation, std::move(error_cb), std::move(backend_task_runner))); VLOGF(2) << "LibYUVImageProcessorBackend created for converting from " << input_config.ToString() << " to " << output_config.ToString(); @@ -251,11 +305,13 @@ LibYUVImageProcessorBackend::LibYUVImageProcessorBackend( const PortConfig& input_config, const PortConfig& output_config, OutputMode output_mode, + VideoRotation relative_rotation, ErrorCB error_cb, scoped_refptr<base::SequencedTaskRunner> backend_task_runner) : ImageProcessorBackend(input_config, output_config, output_mode, + relative_rotation, std::move(error_cb), std::move(backend_task_runner)), input_frame_mapper_(std::move(input_frame_mapper)), @@ -353,6 +409,26 @@ int LibYUVImageProcessorBackend::DoConversion(const VideoFrame* const input, return LIBYUV_FUNC(I420ToNV12, Y_U_V_DATA(intermediate_frame_), Y_UV_DATA(output)); case PIXEL_FORMAT_NV12: + // Rotation mode. + if (relative_rotation_ != VIDEO_ROTATION_0) { + // The size of |tmp_buffer| of NV12Rotate() should be + // output_visible_rect().GetArea() / 2, which used to store temporary + // U and V planes for I420 data. Although + // |intermediate_frame_->data(0)| is much larger than the required + // size, we use the frame to simplify the code. + NV12Rotate(intermediate_frame_->data(0), + input->visible_data(VideoFrame::kYPlane), + input->stride(VideoFrame::kYPlane), + input->visible_data(VideoFrame::kUPlane), + input->stride(VideoFrame::kUPlane), + input->visible_rect().width(), + input->visible_rect().height(), Y_UV_DATA(output), + output->visible_rect().width(), + output->visible_rect().height(), relative_rotation_); + return 0; + } + + // Scaling mode. // The size of |tmp_buffer| of NV12Scale() should be // input_visible_rect().GetArea() / 2 + // output_visible_rect().GetArea() / 2. 
Although |intermediate_frame_| diff --git a/chromium/media/gpu/chromeos/libyuv_image_processor_backend.h b/chromium/media/gpu/chromeos/libyuv_image_processor_backend.h index f8836096bdd..cd6562bbf82 100644 --- a/chromium/media/gpu/chromeos/libyuv_image_processor_backend.h +++ b/chromium/media/gpu/chromeos/libyuv_image_processor_backend.h @@ -32,6 +32,7 @@ class MEDIA_GPU_EXPORT LibYUVImageProcessorBackend const PortConfig& input_config, const PortConfig& output_config, const std::vector<OutputMode>& preferred_output_modes, + VideoRotation relative_rotation, ErrorCB error_cb, scoped_refptr<base::SequencedTaskRunner> backend_task_runner); @@ -48,6 +49,7 @@ class MEDIA_GPU_EXPORT LibYUVImageProcessorBackend const PortConfig& input_config, const PortConfig& output_config, OutputMode output_mode, + VideoRotation relative_rotation, ErrorCB error_cb, scoped_refptr<base::SequencedTaskRunner> backend_task_runner); ~LibYUVImageProcessorBackend() override; diff --git a/chromium/media/gpu/chromeos/mailbox_video_frame_converter.cc b/chromium/media/gpu/chromeos/mailbox_video_frame_converter.cc index 8bb25386dce..19c3829afa8 100644 --- a/chromium/media/gpu/chromeos/mailbox_video_frame_converter.cc +++ b/chromium/media/gpu/chromeos/mailbox_video_frame_converter.cc @@ -14,6 +14,7 @@ #include "base/trace_event/trace_event.h" #include "gpu/command_buffer/common/shared_image_usage.h" #include "gpu/command_buffer/service/scheduler.h" +#include "gpu/ipc/common/gpu_client_ids.h" #include "gpu/ipc/service/gpu_channel.h" #include "gpu/ipc/service/shared_image_stub.h" #include "media/base/format_utils.h" @@ -154,7 +155,7 @@ void MailboxVideoFrameConverter::ConvertFrame(scoped_refptr<VideoFrame> frame) { DCHECK(parent_task_runner_->RunsTasksInCurrentSequence()); DVLOGF(4); - if (!frame || !frame->HasDmaBufs()) + if (!frame || frame->storage_type() != VideoFrame::STORAGE_GPU_MEMORY_BUFFER) return OnError(FROM_HERE, "Invalid frame."); VideoFrame* origin_frame = unwrap_frame_cb_.Run(*frame); @@ -225,9 +226,9 @@ void MailboxVideoFrameConverter::WrapMailboxAndVideoFrameAndOutput( frame->format(), mailbox_holders, std::move(release_mailbox_cb), frame->coded_size(), frame->visible_rect(), frame->natural_size(), frame->timestamp()); - mailbox_frame->metadata()->MergeMetadataFrom(frame->metadata()); - mailbox_frame->metadata()->SetBoolean( - VideoFrameMetadata::READ_LOCK_FENCES_ENABLED, true); + mailbox_frame->set_color_space(frame->ColorSpace()); + mailbox_frame->set_metadata(*(frame->metadata())); + mailbox_frame->metadata()->read_lock_fences_enabled = true; output_cb_.Run(mailbox_frame); } @@ -336,7 +337,7 @@ bool MailboxVideoFrameConverter::GenerateSharedImageOnGPUThread( const uint32_t shared_image_usage = gpu::SHARED_IMAGE_USAGE_DISPLAY | gpu::SHARED_IMAGE_USAGE_SCANOUT; const bool success = shared_image_stub->CreateSharedImage( - mailbox, shared_image_stub->channel()->client_id(), + mailbox, gpu::kPlatformVideoFramePoolClientId, std::move(gpu_memory_buffer_handle), *buffer_format, gpu::kNullSurfaceHandle, destination_visible_rect.size(), video_frame->ColorSpace(), shared_image_usage); diff --git a/chromium/media/gpu/chromeos/platform_video_frame_pool.cc b/chromium/media/gpu/chromeos/platform_video_frame_pool.cc index 90a7db5ae52..eebdcb4d5b8 100644 --- a/chromium/media/gpu/chromeos/platform_video_frame_pool.cc +++ b/chromium/media/gpu/chromeos/platform_video_frame_pool.cc @@ -25,9 +25,9 @@ scoped_refptr<VideoFrame> DefaultCreateFrame( const gfx::Rect& visible_rect, const gfx::Size& natural_size, base::TimeDelta 
timestamp) { - return CreatePlatformVideoFrame(gpu_memory_buffer_factory, format, coded_size, - visible_rect, natural_size, timestamp, - gfx::BufferUsage::SCANOUT_VDA_WRITE); + return CreateGpuMemoryBufferVideoFrame( + gpu_memory_buffer_factory, format, coded_size, visible_rect, natural_size, + timestamp, gfx::BufferUsage::SCANOUT_VDA_WRITE); } } // namespace @@ -51,6 +51,15 @@ PlatformVideoFramePool::~PlatformVideoFramePool() { weak_this_factory_.InvalidateWeakPtrs(); } +// static +gfx::GpuMemoryBufferId PlatformVideoFramePool::GetGpuMemoryBufferId( + const VideoFrame& frame) { + DCHECK_EQ(frame.storage_type(), + VideoFrame::StorageType::STORAGE_GPU_MEMORY_BUFFER); + DCHECK(frame.GetGpuMemoryBuffer()); + return frame.GetGpuMemoryBuffer()->GetId(); +} + scoped_refptr<VideoFrame> PlatformVideoFramePool::GetFrame() { DCHECK(parent_task_runner_->RunsTasksInCurrentSequence()); DVLOGF(4); @@ -61,7 +70,7 @@ scoped_refptr<VideoFrame> PlatformVideoFramePool::GetFrame() { return nullptr; } - VideoPixelFormat format = frame_layout_->fourcc().ToVideoPixelFormat(); + const VideoPixelFormat format = frame_layout_->fourcc().ToVideoPixelFormat(); const gfx::Size& coded_size = frame_layout_->size(); if (free_frames_.empty()) { if (GetTotalNumFrames_Locked() >= max_num_frames_) @@ -88,14 +97,15 @@ scoped_refptr<VideoFrame> PlatformVideoFramePool::GetFrame() { scoped_refptr<VideoFrame> wrapped_frame = VideoFrame::WrapVideoFrame( origin_frame, format, visible_rect_, natural_size_); DCHECK(wrapped_frame); - frames_in_use_.emplace(GetDmabufId(*wrapped_frame), origin_frame.get()); + frames_in_use_.emplace(GetGpuMemoryBufferId(*wrapped_frame), + origin_frame.get()); wrapped_frame->AddDestructionObserver( base::BindOnce(&PlatformVideoFramePool::OnFrameReleasedThunk, weak_this_, parent_task_runner_, std::move(origin_frame))); // Clear all metadata before returning to client, in case origin frame has any // unrelated metadata. - wrapped_frame->metadata()->Clear(); + wrapped_frame->clear_metadata(); return wrapped_frame; } @@ -134,7 +144,8 @@ base::Optional<GpuBufferLayout> PlatformVideoFramePool::Initialize( create_frame_cb_.Run(gpu_memory_buffer_factory_, format, coded_size, visible_rect_, natural_size_, base::TimeDelta()); if (!frame) { - VLOGF(1) << "Failed to create video frame"; + VLOGF(1) << "Failed to create video frame " << format << " (fourcc " + << fourcc.ToString() << ")"; return base::nullopt; } frame_layout_ = GpuBufferLayout::Create(fourcc, frame->coded_size(), @@ -168,7 +179,7 @@ VideoFrame* PlatformVideoFramePool::UnwrapFrame( DVLOGF(4); base::AutoLock auto_lock(lock_); - auto it = frames_in_use_.find(GetDmabufId(wrapped_frame)); + auto it = frames_in_use_.find(GetGpuMemoryBufferId(wrapped_frame)); return (it == frames_in_use_.end()) ? 
nullptr : it->second; } @@ -203,7 +214,7 @@ void PlatformVideoFramePool::OnFrameReleased( DVLOGF(4); base::AutoLock auto_lock(lock_); - DmabufId frame_id = GetDmabufId(*origin_frame); + gfx::GpuMemoryBufferId frame_id = GetGpuMemoryBufferId(*origin_frame); auto it = frames_in_use_.find(frame_id); DCHECK(it != frames_in_use_.end()); frames_in_use_.erase(it); diff --git a/chromium/media/gpu/chromeos/platform_video_frame_pool.h b/chromium/media/gpu/chromeos/platform_video_frame_pool.h index b983f7c3393..b594d107c51 100644 --- a/chromium/media/gpu/chromeos/platform_video_frame_pool.h +++ b/chromium/media/gpu/chromeos/platform_video_frame_pool.h @@ -21,6 +21,7 @@ #include "media/base/video_types.h" #include "media/gpu/chromeos/dmabuf_video_frame_pool.h" #include "media/gpu/media_gpu_export.h" +#include "ui/gfx/gpu_memory_buffer.h" namespace gpu { class GpuMemoryBufferFactory; @@ -43,6 +44,9 @@ class MEDIA_GPU_EXPORT PlatformVideoFramePool : public DmabufVideoFramePool { gpu::GpuMemoryBufferFactory* gpu_memory_buffer_factory); ~PlatformVideoFramePool() override; + // Returns the ID of the GpuMemoryBuffer wrapped by |frame|. + static gfx::GpuMemoryBufferId GetGpuMemoryBufferId(const VideoFrame& frame); + // DmabufVideoFramePool implementation. base::Optional<GpuBufferLayout> Initialize(const Fourcc& fourcc, const gfx::Size& coded_size, @@ -58,12 +62,12 @@ class MEDIA_GPU_EXPORT PlatformVideoFramePool : public DmabufVideoFramePool { // recycling, and bind destruction callback at original frames. VideoFrame* UnwrapFrame(const VideoFrame& wrapped_frame); - private: - friend class PlatformVideoFramePoolTest; - // Returns the number of frames in the pool for testing purposes. size_t GetPoolSizeForTesting(); + private: + friend class PlatformVideoFramePoolTest; + // Thunk to post OnFrameReleased() to |task_runner|. // Because this thunk may be called in any thread, We don't want to // dereference WeakPtr. Therefore we wrap the WeakPtr by base::Optional to @@ -116,8 +120,9 @@ class MEDIA_GPU_EXPORT PlatformVideoFramePool : public DmabufVideoFramePool { // should be the same as |format_| and |coded_size_|. base::circular_deque<scoped_refptr<VideoFrame>> free_frames_ GUARDED_BY(lock_); - // Mapping from the unique_id of the wrapped frame to the original frame. - std::map<DmabufId, VideoFrame*> frames_in_use_ GUARDED_BY(lock_); + // Mapping from the frame's GpuMemoryBuffer's ID to the original frame. + std::map<gfx::GpuMemoryBufferId, VideoFrame*> frames_in_use_ + GUARDED_BY(lock_); // The maximum number of frames created by the pool. 
size_t max_num_frames_ GUARDED_BY(lock_) = 0; diff --git a/chromium/media/gpu/chromeos/platform_video_frame_pool_unittest.cc b/chromium/media/gpu/chromeos/platform_video_frame_pool_unittest.cc index 19b03688c81..ac7bb4ae5b1 100644 --- a/chromium/media/gpu/chromeos/platform_video_frame_pool_unittest.cc +++ b/chromium/media/gpu/chromeos/platform_video_frame_pool_unittest.cc @@ -9,44 +9,34 @@ #include <memory> #include <vector> -#include "base/files/file.h" -#include "base/files/file_path.h" -#include "base/files/file_util.h" -#include "base/files/scoped_file.h" +#include "base/bind_helpers.h" #include "base/test/task_environment.h" #include "base/threading/thread_task_runner_handle.h" +#include "gpu/command_buffer/common/mailbox_holder.h" +#include "media/base/format_utils.h" #include "media/gpu/chromeos/fourcc.h" +#include "media/video/fake_gpu_memory_buffer.h" #include "testing/gtest/include/gtest/gtest.h" namespace media { namespace { -base::ScopedFD CreateTmpHandle() { - base::FilePath path; - DCHECK(CreateTemporaryFile(&path)); - base::File file(path, base::File::FLAG_OPEN | base::File::FLAG_READ); - DCHECK(file.IsValid()); - return base::ScopedFD(file.TakePlatformFile()); -} - -scoped_refptr<VideoFrame> CreateDmabufVideoFrame( +scoped_refptr<VideoFrame> CreateGpuMemoryBufferVideoFrame( gpu::GpuMemoryBufferFactory* factory, VideoPixelFormat format, const gfx::Size& coded_size, const gfx::Rect& visible_rect, const gfx::Size& natural_size, base::TimeDelta timestamp) { - base::Optional<VideoFrameLayout> layout = - VideoFrameLayout::Create(format, coded_size); - DCHECK(layout); - - std::vector<base::ScopedFD> dmabuf_fds; - for (size_t i = 0; i < VideoFrame::NumPlanes(format); ++i) - dmabuf_fds.push_back(CreateTmpHandle()); - - return VideoFrame::WrapExternalDmabufs(*layout, visible_rect, natural_size, - std::move(dmabuf_fds), timestamp); + base::Optional<gfx::BufferFormat> gfx_format = + VideoPixelFormatToGfxBufferFormat(format); + DCHECK(gfx_format); + const gpu::MailboxHolder mailbox_holders[VideoFrame::kMaxPlanes] = {}; + return VideoFrame::WrapExternalGpuMemoryBuffer( + visible_rect, natural_size, + std::make_unique<FakeGpuMemoryBuffer>(coded_size, *gfx_format), + mailbox_holders, base::NullCallback(), timestamp); } } // namespace @@ -54,16 +44,14 @@ scoped_refptr<VideoFrame> CreateDmabufVideoFrame( class PlatformVideoFramePoolTest : public ::testing::TestWithParam<VideoPixelFormat> { public: - using DmabufId = DmabufVideoFramePool::DmabufId; - PlatformVideoFramePoolTest() : task_environment_(base::test::TaskEnvironment::TimeSource::MOCK_TIME), pool_(new PlatformVideoFramePool(nullptr)) { - pool_->create_frame_cb_ = base::BindRepeating(&CreateDmabufVideoFrame); + SetCreateFrameCB(base::BindRepeating(&CreateGpuMemoryBufferVideoFrame)); pool_->set_parent_task_runner(base::ThreadTaskRunnerHandle::Get()); } - void Initialize(const Fourcc& fourcc) { + bool Initialize(const Fourcc& fourcc) { constexpr gfx::Size kCodedSize(320, 240); constexpr size_t kNumFrames = 10; @@ -72,7 +60,7 @@ class PlatformVideoFramePoolTest layout_ = pool_->Initialize(fourcc, kCodedSize, visible_rect_, natural_size_, kNumFrames); - EXPECT_TRUE(layout_); + return !!layout_; } scoped_refptr<VideoFrame> GetFrame(int timestamp_ms) { @@ -88,8 +76,8 @@ class PlatformVideoFramePoolTest return frame; } - void CheckPoolSize(size_t size) const { - EXPECT_EQ(size, pool_->GetPoolSizeForTesting()); + void SetCreateFrameCB(PlatformVideoFramePool::CreateFrameCB cb) { + pool_->create_frame_cb_ = cb; } protected: @@ -103,17 +91,18 @@ 
class PlatformVideoFramePoolTest INSTANTIATE_TEST_SUITE_P(All, PlatformVideoFramePoolTest, - testing::Values(PIXEL_FORMAT_I420, - PIXEL_FORMAT_YV12, + testing::Values(PIXEL_FORMAT_YV12, PIXEL_FORMAT_NV12, - PIXEL_FORMAT_ARGB)); + PIXEL_FORMAT_ARGB, + PIXEL_FORMAT_P016LE)); TEST_P(PlatformVideoFramePoolTest, SingleFrameReuse) { const auto fourcc = Fourcc::FromVideoPixelFormat(GetParam()); ASSERT_TRUE(fourcc.has_value()); - Initialize(fourcc.value()); + ASSERT_TRUE(Initialize(fourcc.value())); scoped_refptr<VideoFrame> frame = GetFrame(10); - DmabufId id = DmabufVideoFramePool::GetDmabufId(*frame); + gfx::GpuMemoryBufferId id = + PlatformVideoFramePool::GetGpuMemoryBufferId(*frame); // Clear frame reference to return the frame to the pool. frame = nullptr; @@ -121,38 +110,40 @@ TEST_P(PlatformVideoFramePoolTest, SingleFrameReuse) { // Verify that the next frame from the pool uses the same memory. scoped_refptr<VideoFrame> new_frame = GetFrame(20); - EXPECT_EQ(id, DmabufVideoFramePool::GetDmabufId(*new_frame)); + EXPECT_EQ(id, PlatformVideoFramePool::GetGpuMemoryBufferId(*new_frame)); } TEST_P(PlatformVideoFramePoolTest, MultipleFrameReuse) { const auto fourcc = Fourcc::FromVideoPixelFormat(GetParam()); ASSERT_TRUE(fourcc.has_value()); - Initialize(fourcc.value()); + ASSERT_TRUE(Initialize(fourcc.value())); scoped_refptr<VideoFrame> frame1 = GetFrame(10); scoped_refptr<VideoFrame> frame2 = GetFrame(20); - DmabufId id1 = DmabufVideoFramePool::GetDmabufId(*frame1); - DmabufId id2 = DmabufVideoFramePool::GetDmabufId(*frame2); + gfx::GpuMemoryBufferId id1 = + PlatformVideoFramePool::GetGpuMemoryBufferId(*frame1); + gfx::GpuMemoryBufferId id2 = + PlatformVideoFramePool::GetGpuMemoryBufferId(*frame2); frame1 = nullptr; task_environment_.RunUntilIdle(); frame1 = GetFrame(30); - EXPECT_EQ(id1, DmabufVideoFramePool::GetDmabufId(*frame1)); + EXPECT_EQ(id1, PlatformVideoFramePool::GetGpuMemoryBufferId(*frame1)); frame2 = nullptr; task_environment_.RunUntilIdle(); frame2 = GetFrame(40); - EXPECT_EQ(id2, DmabufVideoFramePool::GetDmabufId(*frame2)); + EXPECT_EQ(id2, PlatformVideoFramePool::GetGpuMemoryBufferId(*frame2)); frame1 = nullptr; frame2 = nullptr; task_environment_.RunUntilIdle(); - CheckPoolSize(2u); + EXPECT_EQ(2u, pool_->GetPoolSizeForTesting()); } TEST_P(PlatformVideoFramePoolTest, InitializeWithDifferentFourcc) { const auto fourcc = Fourcc::FromVideoPixelFormat(GetParam()); ASSERT_TRUE(fourcc.has_value()); - Initialize(fourcc.value()); + ASSERT_TRUE(Initialize(fourcc.value())); scoped_refptr<VideoFrame> frame_a = GetFrame(10); scoped_refptr<VideoFrame> frame_b = GetFrame(10); @@ -162,52 +153,68 @@ TEST_P(PlatformVideoFramePoolTest, InitializeWithDifferentFourcc) { task_environment_.RunUntilIdle(); // Verify that both frames are in the pool. - CheckPoolSize(2u); + EXPECT_EQ(2u, pool_->GetPoolSizeForTesting()); // Verify that requesting a frame with a different format causes the pool // to get drained. 
- const Fourcc different_fourcc(Fourcc::NV21); + const Fourcc different_fourcc(Fourcc::XR24); ASSERT_NE(fourcc, different_fourcc); - Initialize(different_fourcc); + ASSERT_TRUE(Initialize(different_fourcc)); scoped_refptr<VideoFrame> new_frame = GetFrame(10); - CheckPoolSize(0u); + EXPECT_EQ(0u, pool_->GetPoolSizeForTesting()); } TEST_P(PlatformVideoFramePoolTest, UnwrapVideoFrame) { const auto fourcc = Fourcc::FromVideoPixelFormat(GetParam()); ASSERT_TRUE(fourcc.has_value()); - Initialize(fourcc.value()); + ASSERT_TRUE(Initialize(fourcc.value())); scoped_refptr<VideoFrame> frame_1 = GetFrame(10); scoped_refptr<VideoFrame> frame_2 = VideoFrame::WrapVideoFrame( frame_1, frame_1->format(), frame_1->visible_rect(), frame_1->natural_size()); EXPECT_EQ(pool_->UnwrapFrame(*frame_1), pool_->UnwrapFrame(*frame_2)); - EXPECT_TRUE(frame_1->IsSameDmaBufsAs(*frame_2)); + EXPECT_EQ(frame_1->GetGpuMemoryBuffer(), frame_2->GetGpuMemoryBuffer()); scoped_refptr<VideoFrame> frame_3 = GetFrame(20); EXPECT_NE(pool_->UnwrapFrame(*frame_1), pool_->UnwrapFrame(*frame_3)); - EXPECT_FALSE(frame_1->IsSameDmaBufsAs(*frame_3)); + EXPECT_NE(frame_1->GetGpuMemoryBuffer(), frame_3->GetGpuMemoryBuffer()); } TEST_P(PlatformVideoFramePoolTest, InitializeWithSameFourcc) { const auto fourcc = Fourcc::FromVideoPixelFormat(GetParam()); ASSERT_TRUE(fourcc.has_value()); - Initialize(fourcc.value()); + ASSERT_TRUE(Initialize(fourcc.value())); scoped_refptr<VideoFrame> frame1 = GetFrame(10); - DmabufId id1 = DmabufVideoFramePool::GetDmabufId(*frame1); + gfx::GpuMemoryBufferId id1 = + PlatformVideoFramePool::GetGpuMemoryBufferId(*frame1); // Clear frame references to return the frames to the pool. frame1 = nullptr; task_environment_.RunUntilIdle(); // Request frame with the same format. The pool should not request new frames. - Initialize(fourcc.value()); + ASSERT_TRUE(Initialize(fourcc.value())); scoped_refptr<VideoFrame> frame2 = GetFrame(20); - DmabufId id2 = DmabufVideoFramePool::GetDmabufId(*frame2); + gfx::GpuMemoryBufferId id2 = + PlatformVideoFramePool::GetGpuMemoryBufferId(*frame2); EXPECT_EQ(id1, id2); } +TEST_P(PlatformVideoFramePoolTest, InitializeFail) { + const auto fourcc = Fourcc::FromVideoPixelFormat(GetParam()); + ASSERT_TRUE(fourcc.has_value()); + SetCreateFrameCB(base::BindRepeating( + [](gpu::GpuMemoryBufferFactory* factory, VideoPixelFormat format, + const gfx::Size& coded_size, const gfx::Rect& visible_rect, + const gfx::Size& natural_size, base::TimeDelta timestamp) { + auto frame = scoped_refptr<VideoFrame>(nullptr); + return frame; + })); + + EXPECT_FALSE(Initialize(fourcc.value())); +} + // TODO(akahuang): Add a testcase to verify calling Initialize() only with // different |max_num_frames|. 
diff --git a/chromium/media/gpu/chromeos/platform_video_frame_utils.cc b/chromium/media/gpu/chromeos/platform_video_frame_utils.cc index 9e7994040b8..ce559f9a0c0 100644 --- a/chromium/media/gpu/chromeos/platform_video_frame_utils.cc +++ b/chromium/media/gpu/chromeos/platform_video_frame_utils.cc @@ -142,8 +142,7 @@ scoped_refptr<VideoFrame> CreatePlatformVideoFrame( dmabuf_fds.emplace_back(plane.fd.release()); auto frame = VideoFrame::WrapExternalDmabufs( - *layout, visible_rect, visible_rect.size(), std::move(dmabuf_fds), - timestamp); + *layout, visible_rect, natural_size, std::move(dmabuf_fds), timestamp); if (!frame) return nullptr; @@ -174,6 +173,11 @@ gfx::GpuMemoryBufferHandle CreateGpuMemoryBufferHandle( switch (video_frame->storage_type()) { case VideoFrame::STORAGE_GPU_MEMORY_BUFFER: handle = video_frame->GetGpuMemoryBuffer()->CloneHandle(); + // TODO(crbug.com/1097956): handle a failure gracefully. + CHECK_EQ(handle.type, gfx::NATIVE_PIXMAP) + << "The cloned handle has an unexpected type: " << handle.type; + CHECK(!handle.native_pixmap_handle.planes.empty()) + << "The cloned handle has no planes"; break; case VideoFrame::STORAGE_DMABUFS: { const size_t num_planes = VideoFrame::NumPlanes(video_frame->format()); @@ -185,10 +189,8 @@ gfx::GpuMemoryBufferHandle CreateGpuMemoryBufferHandle( while (num_planes != duped_fds.size()) { int duped_fd = -1; duped_fd = HANDLE_EINTR(dup(duped_fds.back().get())); - if (duped_fd == -1) { - DLOG(ERROR) << "Failed duplicating dmabuf fd"; - return handle; - } + // TODO(crbug.com/1097956): handle a failure gracefully. + PCHECK(duped_fd >= 0) << "Failed duplicating a dma-buf fd"; duped_fds.emplace_back(duped_fd); } diff --git a/chromium/media/gpu/chromeos/vd_video_decode_accelerator.cc b/chromium/media/gpu/chromeos/vd_video_decode_accelerator.cc index 9373a3d26d4..5dd05705614 100644 --- a/chromium/media/gpu/chromeos/vd_video_decode_accelerator.cc +++ b/chromium/media/gpu/chromeos/vd_video_decode_accelerator.cc @@ -10,6 +10,7 @@ #include "base/bind.h" #include "base/location.h" #include "base/macros.h" +#include "media/base/media_util.h" #include "media/base/video_color_space.h" #include "media/base/video_decoder_config.h" #include "media/base/video_frame.h" @@ -137,7 +138,7 @@ bool VdVideoDecodeAccelerator::Initialize(const Config& config, std::make_unique<VdaVideoFramePool>(weak_this_, client_task_runner_); vd_ = create_vd_cb_.Run(client_task_runner_, std::move(frame_pool), std::make_unique<VideoFrameConverter>(), - nullptr /* gpu_memory_buffer_factory */); + std::make_unique<NullMediaLog>()); if (!vd_) return false; @@ -385,9 +386,7 @@ base::Optional<Picture> VdVideoDecodeAccelerator::GetPicture( } int32_t picture_buffer_id = it->second; int32_t bitstream_id = FakeTimestampToBitstreamId(frame.timestamp()); - bool allow_overlay = false; - ignore_result(frame.metadata()->GetBoolean(VideoFrameMetadata::ALLOW_OVERLAY, - &allow_overlay)); + bool allow_overlay = frame.metadata()->allow_overlay; return base::make_optional(Picture(picture_buffer_id, bitstream_id, frame.visible_rect(), frame.ColorSpace(), allow_overlay)); diff --git a/chromium/media/gpu/chromeos/vd_video_decode_accelerator.h b/chromium/media/gpu/chromeos/vd_video_decode_accelerator.h index 9b9481ca60a..ffdb43c8eb8 100644 --- a/chromium/media/gpu/chromeos/vd_video_decode_accelerator.h +++ b/chromium/media/gpu/chromeos/vd_video_decode_accelerator.h @@ -24,12 +24,9 @@ #include "media/gpu/media_gpu_export.h" #include "media/video/video_decode_accelerator.h" -namespace gpu { -class 
GpuMemoryBufferFactory; -} // namespace gpu - namespace media { +class MediaLog; class VideoFrame; // Implements the VideoDecodeAccelerator backed by a VideoDecoder. @@ -52,7 +49,7 @@ class MEDIA_GPU_EXPORT VdVideoDecodeAccelerator scoped_refptr<base::SequencedTaskRunner>, std::unique_ptr<DmabufVideoFramePool>, std::unique_ptr<VideoFrameConverter>, - gpu::GpuMemoryBufferFactory* const)>; + std::unique_ptr<MediaLog>)>; // Create VdVideoDecodeAccelerator instance, and call Initialize(). // Return nullptr if Initialize() failed. diff --git a/chromium/media/gpu/chromeos/video_decoder_pipeline.cc b/chromium/media/gpu/chromeos/video_decoder_pipeline.cc index 906861ba788..3759f9f994f 100644 --- a/chromium/media/gpu/chromeos/video_decoder_pipeline.cc +++ b/chromium/media/gpu/chromeos/video_decoder_pipeline.cc @@ -14,7 +14,9 @@ #include "base/task/task_traits.h" #include "base/task/thread_pool.h" #include "build/build_config.h" +#include "media/base/async_destroy_video_decoder.h" #include "media/base/limits.h" +#include "media/base/media_log.h" #include "media/gpu/chromeos/dmabuf_video_frame_pool.h" #include "media/gpu/chromeos/image_processor.h" #include "media/gpu/chromeos/image_processor_factory.h" @@ -54,6 +56,14 @@ base::Optional<Fourcc> PickRenderableFourcc( return base::nullopt; } +// Appends |new_status| to |parent_status| unless |parent_status| is kOk, in +// that case we cannot append, just forward |new_status| then. +Status AppendOrForwardStatus(Status parent_status, Status new_status) { + if (parent_status.is_ok()) + return new_status; + return std::move(parent_status).AddCause(std::move(new_status)); +} + } // namespace DecoderInterface::DecoderInterface( @@ -68,38 +78,36 @@ std::unique_ptr<VideoDecoder> VideoDecoderPipeline::Create( scoped_refptr<base::SequencedTaskRunner> client_task_runner, std::unique_ptr<DmabufVideoFramePool> frame_pool, std::unique_ptr<VideoFrameConverter> frame_converter, - gpu::GpuMemoryBufferFactory* const gpu_memory_buffer_factory, - GetCreateVDFunctionsCB get_create_vd_functions_cb) { + std::unique_ptr<MediaLog> /*media_log*/, + GetCreateDecoderFunctionsCB get_create_decoder_functions_cb) { if (!client_task_runner || !frame_pool || !frame_converter) { VLOGF(1) << "One of arguments is nullptr."; return nullptr; } - if (get_create_vd_functions_cb.Run(nullptr).empty()) { + if (get_create_decoder_functions_cb.Run().empty()) { VLOGF(1) << "No available function to create video decoder."; return nullptr; } - return base::WrapUnique<VideoDecoder>(new VideoDecoderPipeline( + auto* decoder = new VideoDecoderPipeline( std::move(client_task_runner), std::move(frame_pool), - std::move(frame_converter), gpu_memory_buffer_factory, - std::move(get_create_vd_functions_cb))); + std::move(frame_converter), std::move(get_create_decoder_functions_cb)); + return std::make_unique<AsyncDestroyVideoDecoder<VideoDecoderPipeline>>( + base::WrapUnique(decoder)); } VideoDecoderPipeline::VideoDecoderPipeline( scoped_refptr<base::SequencedTaskRunner> client_task_runner, std::unique_ptr<DmabufVideoFramePool> frame_pool, std::unique_ptr<VideoFrameConverter> frame_converter, - gpu::GpuMemoryBufferFactory* const gpu_memory_buffer_factory, - GetCreateVDFunctionsCB get_create_vd_functions_cb) + GetCreateDecoderFunctionsCB get_create_decoder_functions_cb) : client_task_runner_(std::move(client_task_runner)), decoder_task_runner_(base::ThreadPool::CreateSingleThreadTaskRunner( {base::WithBaseSyncPrimitives(), base::TaskPriority::USER_VISIBLE}, base::SingleThreadTaskRunnerThreadMode::DEDICATED)), 
main_frame_pool_(std::move(frame_pool)), - gpu_memory_buffer_factory_(gpu_memory_buffer_factory), - frame_converter_(std::move(frame_converter)), - get_create_vd_functions_cb_(std::move(get_create_vd_functions_cb)) { + frame_converter_(std::move(frame_converter)) { DCHECK_CALLED_ON_VALID_SEQUENCE(client_sequence_checker_); DETACH_FROM_SEQUENCE(decoder_sequence_checker_); DCHECK(main_frame_pool_); @@ -110,6 +118,8 @@ VideoDecoderPipeline::VideoDecoderPipeline( client_weak_this_ = client_weak_this_factory_.GetWeakPtr(); decoder_weak_this_ = decoder_weak_this_factory_.GetWeakPtr(); + remaining_create_decoder_functions_ = get_create_decoder_functions_cb.Run(); + main_frame_pool_->set_parent_task_runner(decoder_task_runner_); frame_converter_->Initialize( decoder_task_runner_, @@ -118,37 +128,30 @@ VideoDecoderPipeline::VideoDecoderPipeline( } VideoDecoderPipeline::~VideoDecoderPipeline() { - // We have to destroy |main_frame_pool_| on |decoder_task_runner_|, so the - // destructor is also called on |decoder_task_runner_|. - DCHECK_CALLED_ON_VALID_SEQUENCE(decoder_sequence_checker_); - DVLOGF(3); -} - -void VideoDecoderPipeline::Destroy() { - DCHECK_CALLED_ON_VALID_SEQUENCE(client_sequence_checker_); - DVLOGF(2); - - client_weak_this_factory_.InvalidateWeakPtrs(); - - decoder_task_runner_->PostTask( - FROM_HERE, - base::BindOnce(&VideoDecoderPipeline::DestroyTask, decoder_weak_this_)); -} - -void VideoDecoderPipeline::DestroyTask() { + // We have to destroy |main_frame_pool_| and |frame_converter_| on + // |decoder_task_runner_|, so the destructor must be called on + // |decoder_task_runner_|. DCHECK_CALLED_ON_VALID_SEQUENCE(decoder_sequence_checker_); DVLOGF(3); decoder_weak_this_factory_.InvalidateWeakPtrs(); - // The frame pool and converter should be destroyed on |decoder_task_runner_|. main_frame_pool_.reset(); frame_converter_.reset(); decoder_.reset(); - used_create_vd_func_ = nullptr; + remaining_create_decoder_functions_.clear(); +} + +void VideoDecoderPipeline::DestroyAsync( + std::unique_ptr<VideoDecoderPipeline> decoder) { + DVLOGF(2); + DCHECK(decoder); + DCHECK_CALLED_ON_VALID_SEQUENCE(decoder->client_sequence_checker_); - delete this; + decoder->client_weak_this_factory_.InvalidateWeakPtrs(); + auto* decoder_task_runner = decoder->decoder_task_runner_.get(); + decoder_task_runner->DeleteSoon(FROM_HERE, std::move(decoder)); } std::string VideoDecoderPipeline::GetDisplayName() const { @@ -182,11 +185,11 @@ bool VideoDecoderPipeline::CanReadWithoutStalling() const { } void VideoDecoderPipeline::Initialize(const VideoDecoderConfig& config, - bool low_delay, + bool /* low_delay */, CdmContext* cdm_context, InitCB init_cb, const OutputCB& output_cb, - const WaitingCB& waiting_cb) { + const WaitingCB& /* waiting_cb */) { DCHECK_CALLED_ON_VALID_SEQUENCE(client_sequence_checker_); VLOGF(2) << "config: " << config.AsHumanReadableString(); @@ -217,79 +220,76 @@ void VideoDecoderPipeline::Initialize(const VideoDecoderConfig& config, void VideoDecoderPipeline::InitializeTask(const VideoDecoderConfig& config, InitCB init_cb, const OutputCB& output_cb) { + DVLOGF(3); DCHECK_CALLED_ON_VALID_SEQUENCE(decoder_sequence_checker_); DCHECK(!init_cb_); client_output_cb_ = std::move(output_cb); init_cb_ = std::move(init_cb); - base::queue<VideoDecoderPipeline::CreateVDFunc> create_vd_funcs = - get_create_vd_functions_cb_.Run(used_create_vd_func_); + // Initialize() and correspondingly InitializeTask(), are called both on first + // initialization and on subsequent stream |config| changes, e.g. 
change of + // resolution. Subsequent initializations are marked by |decoder_| already + // existing. if (!decoder_) { - CreateAndInitializeVD(std::move(create_vd_funcs), config, - StatusCode::kChromeOSVideoDecoderNoDecoders); + CreateAndInitializeVD(config, Status()); } else { decoder_->Initialize( config, - // If it fails to re-initialize current |decoder_|, it will create - // another decoder instance by trying available VD creation functions - // again. See |OnInitializeDone| for detail. base::BindOnce(&VideoDecoderPipeline::OnInitializeDone, - decoder_weak_this_, std::move(create_vd_funcs), config, - StatusCode::kChromeOSVideoDecoderNoDecoders), + decoder_weak_this_, config, Status()), base::BindRepeating(&VideoDecoderPipeline::OnFrameDecoded, decoder_weak_this_)); } } -void VideoDecoderPipeline::CreateAndInitializeVD( - base::queue<VideoDecoderPipeline::CreateVDFunc> create_vd_funcs, - VideoDecoderConfig config, - ::media::Status parent_error) { +void VideoDecoderPipeline::CreateAndInitializeVD(VideoDecoderConfig config, + Status parent_error) { DCHECK_CALLED_ON_VALID_SEQUENCE(decoder_sequence_checker_); DCHECK(init_cb_); DCHECK(!decoder_); - DCHECK(!used_create_vd_func_); DVLOGF(3); - if (create_vd_funcs.empty()) { - DVLOGF(2) << "No available video decoder."; + if (remaining_create_decoder_functions_.empty()) { + DVLOGF(2) << "No remaining video decoder create functions to try"; client_task_runner_->PostTask( - FROM_HERE, base::BindOnce(std::move(init_cb_), parent_error)); + FROM_HERE, + base::BindOnce( + std::move(init_cb_), + AppendOrForwardStatus( + parent_error, StatusCode::kChromeOSVideoDecoderNoDecoders))); return; } - used_create_vd_func_ = create_vd_funcs.front(); - create_vd_funcs.pop(); - decoder_ = used_create_vd_func_(decoder_task_runner_, decoder_weak_this_); + decoder_ = remaining_create_decoder_functions_.front()(decoder_task_runner_, + decoder_weak_this_); + remaining_create_decoder_functions_.pop_front(); + if (!decoder_) { - DVLOGF(2) << "Failed to create VideoDecoder."; - used_create_vd_func_ = nullptr; + DVLOGF(2) << "|decoder_| creation failed, trying again with the next " + "available create function."; return CreateAndInitializeVD( - std::move(create_vd_funcs), config, - std::move(parent_error).AddCause(StatusCode::kDecoderFailedCreation)); + config, AppendOrForwardStatus(parent_error, + StatusCode::kDecoderFailedCreation)); } decoder_->Initialize( config, base::BindOnce(&VideoDecoderPipeline::OnInitializeDone, - decoder_weak_this_, std::move(create_vd_funcs), config, - std::move(parent_error)), + decoder_weak_this_, config, std::move(parent_error)), base::BindRepeating(&VideoDecoderPipeline::OnFrameDecoded, decoder_weak_this_)); } -void VideoDecoderPipeline::OnInitializeDone( - base::queue<VideoDecoderPipeline::CreateVDFunc> create_vd_funcs, - VideoDecoderConfig config, - ::media::Status parent_error, - ::media::Status status) { +void VideoDecoderPipeline::OnInitializeDone(VideoDecoderConfig config, + Status parent_error, + Status status) { DCHECK_CALLED_ON_VALID_SEQUENCE(decoder_sequence_checker_); DCHECK(init_cb_); DVLOGF(4) << "Initialization status = " << status.code(); if (status.is_ok()) { - DVLOGF(2) << "Initialize VD successfully."; + DVLOGF(2) << "|decoder_| successfully initialized."; // TODO(tmathmeyer) consider logging the causes of |parent_error| as they // might have infor about why other decoders failed. 
client_task_runner_->PostTask( @@ -297,11 +297,11 @@ void VideoDecoderPipeline::OnInitializeDone( return; } - DVLOGF(3) << "Reset VD, try the next create function."; + DVLOGF(3) << "|decoder_| initialization failed, trying again with the next " + "available create function."; decoder_ = nullptr; - used_create_vd_func_ = nullptr; - CreateAndInitializeVD(std::move(create_vd_funcs), config, - std::move(parent_error).AddCause(std::move(status))); + CreateAndInitializeVD(config, + AppendOrForwardStatus(parent_error, std::move(status))); } void VideoDecoderPipeline::Reset(base::OnceClosure closure) { @@ -417,9 +417,9 @@ void VideoDecoderPipeline::OnFrameConverted(scoped_refptr<VideoFrame> frame) { } // Flag that the video frame is capable of being put in an overlay. - frame->metadata()->SetBoolean(VideoFrameMetadata::ALLOW_OVERLAY, true); + frame->metadata()->allow_overlay = true; // Flag that the video frame was decoded in a power efficient way. - frame->metadata()->SetBoolean(VideoFrameMetadata::POWER_EFFICIENT, true); + frame->metadata()->power_efficient = true; // MojoVideoDecoderService expects the |output_cb_| to be called on the client // task runner, even though media::VideoDecoder states frames should be output diff --git a/chromium/media/gpu/chromeos/video_decoder_pipeline.h b/chromium/media/gpu/chromeos/video_decoder_pipeline.h index 030ed9058e1..c0c6ac10c5f 100644 --- a/chromium/media/gpu/chromeos/video_decoder_pipeline.h +++ b/chromium/media/gpu/chromeos/video_decoder_pipeline.h @@ -8,7 +8,6 @@ #include <memory> #include "base/callback_forward.h" -#include "base/containers/queue.h" #include "base/memory/weak_ptr.h" #include "base/optional.h" #include "base/sequence_checker.h" @@ -24,13 +23,10 @@ namespace base { class SequencedTaskRunner; } -namespace gpu { -class GpuMemoryBufferFactory; -} // namespace gpu - namespace media { class DmabufVideoFramePool; +class MediaLog; // An interface that defines methods to operate on video decoder components // inside the VideoDecoderPipeline. The interface is similar to @@ -42,7 +38,7 @@ class DmabufVideoFramePool; // Note: All methods and callbacks should be called on the same sequence. class MEDIA_GPU_EXPORT DecoderInterface { public: - using InitCB = base::OnceCallback<void(::media::Status status)>; + using InitCB = base::OnceCallback<void(Status status)>; // TODO(crbug.com/998413): Replace VideoFrame to GpuMemoryBuffer-based // instance. using OutputCB = base::RepeatingCallback<void(scoped_refptr<VideoFrame>)>; @@ -130,21 +126,22 @@ class MEDIA_GPU_EXPORT DecoderInterface { class MEDIA_GPU_EXPORT VideoDecoderPipeline : public VideoDecoder, public DecoderInterface::Client { public: - // Function signature for creating VideoDecoder. 
-  using CreateVDFunc = std::unique_ptr<DecoderInterface> (*)(
+  using CreateDecoderFunction = std::unique_ptr<DecoderInterface> (*)(
       scoped_refptr<base::SequencedTaskRunner>,
       base::WeakPtr<DecoderInterface::Client>);
-  using GetCreateVDFunctionsCB =
-      base::RepeatingCallback<base::queue<CreateVDFunc>(CreateVDFunc)>;
+  using CreateDecoderFunctions = std::list<CreateDecoderFunction>;
+  using GetCreateDecoderFunctionsCB =
+      base::RepeatingCallback<CreateDecoderFunctions()>;
 
   static std::unique_ptr<VideoDecoder> Create(
       scoped_refptr<base::SequencedTaskRunner> client_task_runner,
       std::unique_ptr<DmabufVideoFramePool> frame_pool,
       std::unique_ptr<VideoFrameConverter> frame_converter,
-      gpu::GpuMemoryBufferFactory* const gpu_memory_buffer_factory,
-      GetCreateVDFunctionsCB get_create_vd_functions_cb);
+      std::unique_ptr<MediaLog> media_log,
+      GetCreateDecoderFunctionsCB get_create_decoder_functions_cb);
 
   ~VideoDecoderPipeline() override;
+  static void DestroyAsync(std::unique_ptr<VideoDecoderPipeline>);
 
   // VideoDecoder implementation
   std::string GetDisplayName() const override;
@@ -152,7 +149,6 @@ class MEDIA_GPU_EXPORT VideoDecoderPipeline : public VideoDecoder,
   int GetMaxDecodeRequests() const override;
   bool NeedsBitstreamConversion() const override;
   bool CanReadWithoutStalling() const override;
-
   void Initialize(const VideoDecoderConfig& config,
                   bool low_delay,
                   CdmContext* cdm_context,
@@ -173,19 +169,13 @@ class MEDIA_GPU_EXPORT VideoDecoderPipeline : public VideoDecoder,
                            const gfx::Rect& visible_rect) override;
 
  private:
-  // Get a list of the available functions for creating VideoDeocoder except
-  // |current_func| one.
-  static base::queue<CreateVDFunc> GetCreateVDFunctions(
-      CreateVDFunc current_func);
+  friend class VideoDecoderPipelineTest;
 
   VideoDecoderPipeline(
       scoped_refptr<base::SequencedTaskRunner> client_task_runner,
       std::unique_ptr<DmabufVideoFramePool> frame_pool,
       std::unique_ptr<VideoFrameConverter> frame_converter,
-      gpu::GpuMemoryBufferFactory* const gpu_memory_buffer_factory,
-      GetCreateVDFunctionsCB get_create_vd_functions_cb);
-  void Destroy() override;
-  void DestroyTask();
+      GetCreateDecoderFunctionsCB get_create_decoder_functions_cb);
 
   void InitializeTask(const VideoDecoderConfig& config,
                       InitCB init_cb,
@@ -193,13 +183,10 @@ class MEDIA_GPU_EXPORT VideoDecoderPipeline : public VideoDecoder,
   void ResetTask(base::OnceClosure closure);
   void DecodeTask(scoped_refptr<DecoderBuffer> buffer, DecodeCB decode_cb);
 
-  void CreateAndInitializeVD(base::queue<CreateVDFunc> create_vd_funcs,
-                             VideoDecoderConfig config,
-                             ::media::Status parent_error);
-  void OnInitializeDone(base::queue<CreateVDFunc> create_vd_funcs,
-                        VideoDecoderConfig config,
-                        ::media::Status parent_error,
-                        ::media::Status success);
+  void CreateAndInitializeVD(VideoDecoderConfig config, Status parent_error);
+  void OnInitializeDone(VideoDecoderConfig config,
+                        Status parent_error,
+                        Status status);
   void OnDecodeDone(bool eos_buffer, DecodeCB decode_cb, DecodeStatus status);
   void OnResetDone();
 
@@ -241,10 +228,6 @@ class MEDIA_GPU_EXPORT VideoDecoderPipeline : public VideoDecoder,
   // the client should be created using this pool.
   // Used on |decoder_task_runner_|.
   std::unique_ptr<DmabufVideoFramePool> main_frame_pool_;
-  // Used to generate additional frame pools for intermediate results if
-  // required. The instance is indirectly owned by GpuChildThread, therefore
-  // alive as long as the GPU process is.
-  gpu::GpuMemoryBufferFactory* const gpu_memory_buffer_factory_;
 
   // The image processor is only created when the decoder cannot output frames
   // with renderable format.
@@ -254,14 +237,14 @@ class MEDIA_GPU_EXPORT VideoDecoderPipeline : public VideoDecoder,
   // |client_task_runner_|.
   std::unique_ptr<VideoFrameConverter> frame_converter_;
 
-  // The callback to get a list of function for creating DecoderInterface.
-  GetCreateVDFunctionsCB get_create_vd_functions_cb_;
-
   // The current video decoder implementation. Valid after initialization is
   // successfully done.
   std::unique_ptr<DecoderInterface> decoder_;
-  // The create function of |decoder_|. nullptr iff |decoder_| is nullptr.
-  CreateVDFunc used_create_vd_func_ = nullptr;
+
+  // |remaining_create_decoder_functions_| holds all the potential video decoder
+  // creation functions. We try them all in the given order until one succeeds.
+  // Only used after initialization on |decoder_sequence_checker_|.
+  CreateDecoderFunctions remaining_create_decoder_functions_;
 
   // Callbacks from the client. These callbacks are called on
   // |client_task_runner_|.
diff --git a/chromium/media/gpu/chromeos/video_decoder_pipeline_unittest.cc b/chromium/media/gpu/chromeos/video_decoder_pipeline_unittest.cc
new file mode 100644
index 00000000000..b95a52d7e64
--- /dev/null
+++ b/chromium/media/gpu/chromeos/video_decoder_pipeline_unittest.cc
@@ -0,0 +1,229 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/gpu/chromeos/video_decoder_pipeline.h"
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/test/gmock_callback_support.h"
+#include "base/test/task_environment.h"
+#include "base/threading/thread_task_runner_handle.h"
+#include "media/base/media_util.h"
+#include "media/base/status.h"
+#include "media/base/video_decoder_config.h"
+#include "media/gpu/chromeos/dmabuf_video_frame_pool.h"
+#include "media/gpu/chromeos/mailbox_video_frame_converter.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using base::test::RunClosure;
+using ::testing::_;
+using ::testing::TestWithParam;
+
+namespace media {
+
+MATCHER_P(MatchesStatusCode, status_code, "") {
+  // media::Status doesn't provide an operator==(...), so we add a simple one here.
+  return arg.code() == status_code;
+}
+
+class MockVideoFramePool : public DmabufVideoFramePool {
+ public:
+  MockVideoFramePool() = default;
+  ~MockVideoFramePool() override = default;
+
+  // DmabufVideoFramePool implementation.
+  MOCK_METHOD5(Initialize,
+               base::Optional<GpuBufferLayout>(const Fourcc&,
+                                               const gfx::Size&,
+                                               const gfx::Rect&,
+                                               const gfx::Size&,
+                                               size_t));
+  MOCK_METHOD0(GetFrame, scoped_refptr<VideoFrame>());
+  MOCK_METHOD0(IsExhausted, bool());
+  MOCK_METHOD1(NotifyWhenFrameAvailable, void(base::OnceClosure));
+};
+
+constexpr gfx::Size kCodedSize(48, 36);
+
+class MockDecoder : public DecoderInterface {
+ public:
+  MockDecoder()
+      : DecoderInterface(base::ThreadTaskRunnerHandle::Get(),
+                         base::WeakPtr<DecoderInterface::Client>(nullptr)) {}
+  ~MockDecoder() override = default;
+
+  MOCK_METHOD3(Initialize,
+               void(const VideoDecoderConfig&, InitCB, const OutputCB&));
+  MOCK_METHOD2(Decode, void(scoped_refptr<DecoderBuffer>, DecodeCB));
+  MOCK_METHOD1(Reset, void(base::OnceClosure));
+  MOCK_METHOD0(ApplyResolutionChange, void());
+};
+
+struct DecoderPipelineTestParams {
+  VideoDecoderPipeline::CreateDecoderFunctions create_decoder_functions;
+  StatusCode status_code;
+};
+
+class VideoDecoderPipelineTest
+    : public testing::TestWithParam<DecoderPipelineTestParams> {
+ public:
+  VideoDecoderPipelineTest()
+      : config_(kCodecVP8,
+                VP8PROFILE_ANY,
+                VideoDecoderConfig::AlphaMode::kIsOpaque,
+                VideoColorSpace(),
+                kNoTransformation,
+                kCodedSize,
+                gfx::Rect(kCodedSize),
+                kCodedSize,
+                EmptyExtraData(),
+                EncryptionScheme::kUnencrypted),
+        pool_(new MockVideoFramePool),
+        converter_(new VideoFrameConverter),
+        decoder_(new VideoDecoderPipeline(
+            base::ThreadTaskRunnerHandle::Get(),
+            std::move(pool_),
+            std::move(converter_),
+            base::BindRepeating([]() {
+              // This callback needs to be configured in the individual tests.
+              return VideoDecoderPipeline::CreateDecoderFunctions();
+            }))) {}
+  ~VideoDecoderPipelineTest() override = default;
+
+  void TearDown() override {
+    VideoDecoderPipeline::DestroyAsync(std::move(decoder_));
+    task_environment_.RunUntilIdle();
+  }
+  MOCK_METHOD1(OnInit, void(Status));
+  MOCK_METHOD1(OnOutput, void(scoped_refptr<VideoFrame>));
+
+  void SetCreateDecoderFunctions(
+      VideoDecoderPipeline::CreateDecoderFunctions functions) {
+    decoder_->remaining_create_decoder_functions_ = functions;
+  }
+
+  void InitializeDecoder() {
+    decoder_->Initialize(
+        config_, false /* low_delay */, nullptr /* cdm_context */,
+        base::BindOnce(&VideoDecoderPipelineTest::OnInit,
+                       base::Unretained(this)),
+        base::BindRepeating(&VideoDecoderPipelineTest::OnOutput,
+                            base::Unretained(this)),
+        base::DoNothing());
+  }
+
+  static std::unique_ptr<DecoderInterface> CreateNullMockDecoder(
+      scoped_refptr<base::SequencedTaskRunner> /* decoder_task_runner */,
+      base::WeakPtr<DecoderInterface::Client> /* client */) {
+    return nullptr;
+  }
+
+  // Creates a MockDecoder with an EXPECT_CALL on Initialize that returns ok.
+  static std::unique_ptr<DecoderInterface> CreateGoodMockDecoder(
+      scoped_refptr<base::SequencedTaskRunner> /* decoder_task_runner */,
+      base::WeakPtr<DecoderInterface::Client> /* client */) {
+    std::unique_ptr<MockDecoder> decoder(new MockDecoder());
+    EXPECT_CALL(*decoder, Initialize(_, _, _))
+        .WillOnce(::testing::WithArgs<1>([](VideoDecoder::InitCB init_cb) {
+          std::move(init_cb).Run(OkStatus());
+        }));
+    return std::move(decoder);
+  }
+
+  // Creates a MockDecoder with an EXPECT_CALL on Initialize that returns error.
+  static std::unique_ptr<DecoderInterface> CreateBadMockDecoder(
+      scoped_refptr<base::SequencedTaskRunner> /* decoder_task_runner */,
+      base::WeakPtr<DecoderInterface::Client> /* client */) {
+    std::unique_ptr<MockDecoder> decoder(new MockDecoder());
+    EXPECT_CALL(*decoder, Initialize(_, _, _))
+        .WillOnce(::testing::WithArgs<1>([](VideoDecoder::InitCB init_cb) {
+          std::move(init_cb).Run(StatusCode::kDecoderFailedInitialization);
+        }));
+    return std::move(decoder);
+  }
+
+  DecoderInterface* GetUnderlyingDecoder() { return decoder_->decoder_.get(); }
+
+  base::test::TaskEnvironment task_environment_;
+  const VideoDecoderConfig config_;
+  DecoderInterface* underlying_decoder_ptr_ = nullptr;
+
+  std::unique_ptr<MockVideoFramePool> pool_;
+  std::unique_ptr<VideoFrameConverter> converter_;
+  std::unique_ptr<VideoDecoderPipeline> decoder_;
+};
+
+// Verifies the status code for several typical CreateDecoderFunctions cases.
+TEST_P(VideoDecoderPipelineTest, Initialize) {
+  SetCreateDecoderFunctions(GetParam().create_decoder_functions);
+
+  base::RunLoop run_loop;
+  base::Closure quit_closure = run_loop.QuitClosure();
+  EXPECT_CALL(*this, OnInit(MatchesStatusCode(GetParam().status_code)))
+      .WillOnce(RunClosure(quit_closure));
+
+  InitializeDecoder();
+  run_loop.Run();
+
+  EXPECT_EQ(GetParam().status_code == StatusCode::kOk,
+            !!GetUnderlyingDecoder());
+}
+
+const struct DecoderPipelineTestParams kDecoderPipelineTestParams[] = {
+    // An empty set of CreateDecoderFunctions.
+    {{}, StatusCode::kChromeOSVideoDecoderNoDecoders},
+
+    // Just one CreateDecoderFunction that fails to Create() (i.e. returns a
+    // null Decoder).
+    {{&VideoDecoderPipelineTest::CreateNullMockDecoder},
+     StatusCode::kDecoderFailedCreation},
+
+    // Just one CreateDecoderFunction that works fine, i.e. Create()s and
+    // Initialize()s correctly.
+    {{&VideoDecoderPipelineTest::CreateGoodMockDecoder}, StatusCode::kOk},
+
+    // One CreateDecoderFunction that Create()s ok but fails to Initialize()
+    // correctly.
+    {{&VideoDecoderPipelineTest::CreateBadMockDecoder},
+     StatusCode::kDecoderFailedInitialization},
+
+    // Two CreateDecoderFunctions, one that fails to Create() (i.e. returns a
+    // null Decoder), and one that works. The first error StatusCode is lost
+    // because VideoDecoderPipeline::OnInitializeDone() throws it away.
+    {{&VideoDecoderPipelineTest::CreateNullMockDecoder,
+      &VideoDecoderPipelineTest::CreateGoodMockDecoder},
+     StatusCode::kOk},
+
+    // Two CreateDecoderFunctions, one that Create()s ok but fails to
+    // Initialize(), and one that works. The first error StatusCode is lost
+    // because VideoDecoderPipeline::OnInitializeDone() throws it away.
+    {{&VideoDecoderPipelineTest::CreateBadMockDecoder,
+      &VideoDecoderPipelineTest::CreateGoodMockDecoder},
+     StatusCode::kOk},
+
+    // Two CreateDecoderFunctions, one that fails to Create() (i.e. returns a
+    // null Decoder), and one that fails to Initialize(). The first error
+    // StatusCode is the only one we can check here: a Status object is created
+    // with a "primary" StatusCode, archiving subsequent ones in a private
+    // member.
+    {{&VideoDecoderPipelineTest::CreateNullMockDecoder,
+      &VideoDecoderPipelineTest::CreateBadMockDecoder},
+     StatusCode::kDecoderFailedCreation},
+    // The previous case in reverse order.
+    {{&VideoDecoderPipelineTest::CreateBadMockDecoder,
+      &VideoDecoderPipelineTest::CreateNullMockDecoder},
+     StatusCode::kDecoderFailedInitialization},
+
+    {{&VideoDecoderPipelineTest::CreateBadMockDecoder,
+      &VideoDecoderPipelineTest::CreateBadMockDecoder,
+      &VideoDecoderPipelineTest::CreateGoodMockDecoder},
+     StatusCode::kOk},
+};
+
+INSTANTIATE_TEST_SUITE_P(All,
+                         VideoDecoderPipelineTest,
+                         testing::ValuesIn(kDecoderPipelineTestParams));
+
+}  // namespace media
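
The central mechanism in the patch above is that VideoDecoderPipeline walks a list of decoder create functions, popping each candidate as it is tried and folding every failure into the Status it finally reports. The standalone sketch below illustrates that pattern in plain C++14. All names in it (the simplified Status struct, FakeDecoder, CreateAndInitialize) are invented for the illustration and are not the real media::Status, DecoderInterface, or VideoDecoderPipeline APIs; the real pipeline also runs these steps asynchronously through bound callbacks and AppendOrForwardStatus rather than in a synchronous loop.

// Minimal, self-contained sketch of the "try each create function until one
// works" pattern. All types here are invented stand-ins, not Chromium classes.
#include <functional>
#include <iostream>
#include <list>
#include <memory>
#include <string>
#include <vector>

// Stand-in for media::Status: a primary code plus the causes that led to it.
struct Status {
  std::string code;                 // e.g. "kOk", "kDecoderFailedCreation"
  std::vector<std::string> causes;  // earlier failures, oldest first
  bool ok() const { return code == "kOk"; }
};

// Stand-in for a concrete decoder created by one of the candidate functions.
class FakeDecoder {
 public:
  explicit FakeDecoder(bool init_succeeds) : init_succeeds_(init_succeeds) {}
  Status Initialize() {
    return init_succeeds_ ? Status{"kOk", {}}
                          : Status{"kDecoderFailedInitialization", {}};
  }

 private:
  bool init_succeeds_;
};

using CreateDecoderFunction = std::function<std::unique_ptr<FakeDecoder>()>;
using CreateDecoderFunctions = std::list<CreateDecoderFunction>;

// Walks |remaining| in order, popping each candidate as it is tried, and
// appends every failure to |error.causes| so nothing is silently lost.
std::unique_ptr<FakeDecoder> CreateAndInitialize(
    CreateDecoderFunctions& remaining, Status& error) {
  while (!remaining.empty()) {
    std::unique_ptr<FakeDecoder> decoder = remaining.front()();
    remaining.pop_front();
    if (!decoder) {
      error.causes.push_back("kDecoderFailedCreation");
      continue;  // Creation failed; fall through to the next candidate.
    }
    Status init_status = decoder->Initialize();
    if (init_status.ok()) {
      error.code = "kOk";
      return decoder;
    }
    error.causes.push_back(init_status.code);
  }
  // No candidate worked; report the dedicated "no decoders" code.
  error.code = "kChromeOSVideoDecoderNoDecoders";
  return nullptr;
}

int main() {
  // Two candidates: the first fails to create, the second initializes fine,
  // mirroring one of the parameterized cases in the unit test above.
  CreateDecoderFunctions functions = {
      [] { return std::unique_ptr<FakeDecoder>(); },
      [] { return std::make_unique<FakeDecoder>(/*init_succeeds=*/true); },
  };
  Status status{"kOk", {}};
  std::unique_ptr<FakeDecoder> decoder = CreateAndInitialize(functions, status);
  std::cout << (decoder ? "got a decoder, " : "no decoder, ")
            << "final code: " << status.code
            << ", accumulated causes: " << status.causes.size() << "\n";
}

Run as-is, the two-candidate list prints "got a decoder, final code: kOk, accumulated causes: 1", which mirrors the null-then-good sequence exercised by the parameterized unit test.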