Diffstat (limited to 'chromium/media/gpu/v4l2')
-rw-r--r--  chromium/media/gpu/v4l2/BUILD.gn                                 |   6
-rw-r--r--  chromium/media/gpu/v4l2/v4l2_decode_surface.cc                   |   5
-rw-r--r--  chromium/media/gpu/v4l2/v4l2_device.cc                           | 104
-rw-r--r--  chromium/media/gpu/v4l2/v4l2_device.h                            |  28
-rw-r--r--  chromium/media/gpu/v4l2/v4l2_image_processor_backend.cc          |  18
-rw-r--r--  chromium/media/gpu/v4l2/v4l2_image_processor_backend.h           |   3
-rw-r--r--  chromium/media/gpu/v4l2/v4l2_slice_video_decode_accelerator.cc   |   4
-rw-r--r--  chromium/media/gpu/v4l2/v4l2_vda_helpers.cc                      |   7
-rw-r--r--  chromium/media/gpu/v4l2/v4l2_vda_helpers.h                       |   3
-rw-r--r--  chromium/media/gpu/v4l2/v4l2_video_decode_accelerator.cc         |  21
-rw-r--r--  chromium/media/gpu/v4l2/v4l2_video_decode_accelerator.h          |  10
-rw-r--r--  chromium/media/gpu/v4l2/v4l2_video_decoder.cc (renamed from chromium/media/gpu/v4l2/v4l2_slice_video_decoder.cc) | 171
-rw-r--r--  chromium/media/gpu/v4l2/v4l2_video_decoder.h (renamed from chromium/media/gpu/v4l2/v4l2_slice_video_decoder.h)   |  39
-rw-r--r--  chromium/media/gpu/v4l2/v4l2_video_decoder_backend.h             |  16
-rw-r--r--  chromium/media/gpu/v4l2/v4l2_video_decoder_backend_stateful.cc   | 608
-rw-r--r--  chromium/media/gpu/v4l2/v4l2_video_decoder_backend_stateful.h    | 151
-rw-r--r--  chromium/media/gpu/v4l2/v4l2_video_decoder_backend_stateless.cc  |   6
-rw-r--r--  chromium/media/gpu/v4l2/v4l2_video_decoder_backend_stateless.h   |   3
-rw-r--r--  chromium/media/gpu/v4l2/v4l2_video_encode_accelerator.cc         |  15
19 files changed, 1083 insertions, 135 deletions
diff --git a/chromium/media/gpu/v4l2/BUILD.gn b/chromium/media/gpu/v4l2/BUILD.gn
index 88b72e36308..38d0bb24ef5 100644
--- a/chromium/media/gpu/v4l2/BUILD.gn
+++ b/chromium/media/gpu/v4l2/BUILD.gn
@@ -41,16 +41,18 @@ source_set("v4l2") {
"v4l2_image_processor_backend.h",
"v4l2_slice_video_decode_accelerator.cc",
"v4l2_slice_video_decode_accelerator.h",
- "v4l2_slice_video_decoder.cc",
- "v4l2_slice_video_decoder.h",
"v4l2_stateful_workaround.cc",
"v4l2_stateful_workaround.h",
"v4l2_vda_helpers.cc",
"v4l2_vda_helpers.h",
"v4l2_video_decode_accelerator.cc",
"v4l2_video_decode_accelerator.h",
+ "v4l2_video_decoder.cc",
+ "v4l2_video_decoder.h",
"v4l2_video_decoder_backend.cc",
"v4l2_video_decoder_backend.h",
+ "v4l2_video_decoder_backend_stateful.cc",
+ "v4l2_video_decoder_backend_stateful.h",
"v4l2_video_decoder_backend_stateless.cc",
"v4l2_video_decoder_backend_stateless.h",
"v4l2_video_encode_accelerator.cc",
diff --git a/chromium/media/gpu/v4l2/v4l2_decode_surface.cc b/chromium/media/gpu/v4l2/v4l2_decode_surface.cc
index d4593868a6c..77206ba81c1 100644
--- a/chromium/media/gpu/v4l2/v4l2_decode_surface.cc
+++ b/chromium/media/gpu/v4l2/v4l2_decode_surface.cc
@@ -127,7 +127,7 @@ bool V4L2ConfigStoreDecodeSurface::Submit() {
case V4L2_MEMORY_MMAP:
return std::move(output_buffer()).QueueMMap();
case V4L2_MEMORY_DMABUF:
- return std::move(output_buffer()).QueueDMABuf(video_frame()->DmabufFds());
+ return std::move(output_buffer()).QueueDMABuf(video_frame());
default:
NOTREACHED() << "We should only use MMAP or DMABUF.";
}
@@ -174,8 +174,7 @@ bool V4L2RequestDecodeSurface::Submit() {
result = std::move(output_buffer()).QueueMMap();
break;
case V4L2_MEMORY_DMABUF:
- result = std::move(output_buffer())
- .QueueDMABuf(video_frame()->DmabufFds());
+ result = std::move(output_buffer()).QueueDMABuf(video_frame());
break;
default:
NOTREACHED() << "We should only use MMAP or DMABUF.";
diff --git a/chromium/media/gpu/v4l2/v4l2_device.cc b/chromium/media/gpu/v4l2/v4l2_device.cc
index 9b81f8046f2..ba9b5184914 100644
--- a/chromium/media/gpu/v4l2/v4l2_device.cc
+++ b/chromium/media/gpu/v4l2/v4l2_device.cc
@@ -27,6 +27,7 @@
#include "media/base/color_plane_layout.h"
#include "media/base/video_types.h"
#include "media/gpu/chromeos/fourcc.h"
+#include "media/gpu/chromeos/platform_video_frame_utils.h"
#include "media/gpu/macros.h"
#include "media/gpu/v4l2/generic_v4l2_device.h"
#include "ui/gfx/native_pixmap_handle.h"
@@ -313,7 +314,7 @@ class V4L2BufferRefBase {
base::WeakPtr<V4L2Queue> queue);
~V4L2BufferRefBase();
- bool QueueBuffer();
+ bool QueueBuffer(scoped_refptr<VideoFrame> video_frame);
void* GetPlaneMapping(const size_t plane);
scoped_refptr<VideoFrame> GetVideoFrame();
@@ -368,13 +369,13 @@ V4L2BufferRefBase::~V4L2BufferRefBase() {
return_to_->ReturnBuffer(BufferId());
}
-bool V4L2BufferRefBase::QueueBuffer() {
+bool V4L2BufferRefBase::QueueBuffer(scoped_refptr<VideoFrame> video_frame) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
if (!queue_)
return false;
- queued = queue_->QueueBuffer(&v4l2_buffer_);
+ queued = queue_->QueueBuffer(&v4l2_buffer_, std::move(video_frame));
return queued;
}
@@ -484,14 +485,15 @@ enum v4l2_memory V4L2WritableBufferRef::Memory() const {
return static_cast<enum v4l2_memory>(buffer_data_->v4l2_buffer_.memory);
}
-bool V4L2WritableBufferRef::DoQueue(V4L2RequestRef* request_ref) && {
+bool V4L2WritableBufferRef::DoQueue(V4L2RequestRef* request_ref,
+ scoped_refptr<VideoFrame> video_frame) && {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
DCHECK(buffer_data_);
if (request_ref && buffer_data_->queue_->SupportsRequests())
request_ref->ApplyQueueBuffer(&(buffer_data_->v4l2_buffer_));
- bool queued = buffer_data_->QueueBuffer();
+ bool queued = buffer_data_->QueueBuffer(std::move(video_frame));
// Clear our own reference.
buffer_data_.reset();
@@ -512,7 +514,7 @@ bool V4L2WritableBufferRef::QueueMMap(
return false;
}
- return std::move(self).DoQueue(request_ref);
+ return std::move(self).DoQueue(request_ref, nullptr);
}
bool V4L2WritableBufferRef::QueueUserPtr(
@@ -539,7 +541,7 @@ bool V4L2WritableBufferRef::QueueUserPtr(
self.buffer_data_->v4l2_buffer_.m.planes[i].m.userptr =
reinterpret_cast<unsigned long>(ptrs[i]);
- return std::move(self).DoQueue(request_ref);
+ return std::move(self).DoQueue(request_ref, nullptr);
}
bool V4L2WritableBufferRef::QueueDMABuf(
@@ -563,7 +565,52 @@ bool V4L2WritableBufferRef::QueueDMABuf(
for (size_t i = 0; i < num_planes; i++)
self.buffer_data_->v4l2_buffer_.m.planes[i].m.fd = fds[i].get();
- return std::move(self).DoQueue(request_ref);
+ return std::move(self).DoQueue(request_ref, nullptr);
+}
+
+bool V4L2WritableBufferRef::QueueDMABuf(scoped_refptr<VideoFrame> video_frame,
+ V4L2RequestRef* request_ref) && {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DCHECK(buffer_data_);
+
+ // Move ourselves so our data gets freed no matter when we return
+ V4L2WritableBufferRef self(std::move(*this));
+
+ if (self.Memory() != V4L2_MEMORY_DMABUF) {
+ VLOGF(1) << "Called on invalid buffer type!";
+ return false;
+ }
+
+ // TODO(andrescj): consider replacing this by a DCHECK.
+ if (video_frame->storage_type() != VideoFrame::STORAGE_GPU_MEMORY_BUFFER &&
+ video_frame->storage_type() != VideoFrame::STORAGE_DMABUFS) {
+ VLOGF(1) << "Only GpuMemoryBuffer and dma-buf VideoFrames are supported";
+ return false;
+ }
+
+ // The FDs duped by CreateGpuMemoryBufferHandle() will be closed after the
+ // call to DoQueue() which uses the VIDIOC_QBUF ioctl and so ends up
+ // increasing the reference count of the dma-buf. Thus, closing the FDs is
+ // safe.
+ // TODO(andrescj): for dma-buf VideoFrames, duping the FDs is unnecessary.
+ // Consider handling that path separately.
+ gfx::GpuMemoryBufferHandle gmb_handle =
+ CreateGpuMemoryBufferHandle(video_frame.get());
+ if (gmb_handle.type != gfx::GpuMemoryBufferType::NATIVE_PIXMAP) {
+ VLOGF(1) << "Failed to create GpuMemoryBufferHandle for frame!";
+ return false;
+ }
+ const std::vector<gfx::NativePixmapPlane>& planes =
+ gmb_handle.native_pixmap_handle.planes;
+
+ if (!self.buffer_data_->CheckNumFDsForFormat(planes.size()))
+ return false;
+
+ size_t num_planes = self.PlanesCount();
+ for (size_t i = 0; i < num_planes; i++)
+ self.buffer_data_->v4l2_buffer_.m.planes[i].m.fd = planes[i].fd.get();
+
+ return std::move(self).DoQueue(request_ref, std::move(video_frame));
}
bool V4L2WritableBufferRef::QueueDMABuf(
@@ -587,7 +634,7 @@ bool V4L2WritableBufferRef::QueueDMABuf(
for (size_t i = 0; i < num_planes; i++)
self.buffer_data_->v4l2_buffer_.m.planes[i].m.fd = planes[i].fd.get();
- return std::move(self).DoQueue(request_ref);
+ return std::move(self).DoQueue(request_ref, nullptr);
}
size_t V4L2WritableBufferRef::PlanesCount() const {
@@ -709,14 +756,20 @@ void V4L2WritableBufferRef::SetConfigStore(uint32_t config_store) {
}
V4L2ReadableBuffer::V4L2ReadableBuffer(const struct v4l2_buffer& v4l2_buffer,
- base::WeakPtr<V4L2Queue> queue)
+ base::WeakPtr<V4L2Queue> queue,
+ scoped_refptr<VideoFrame> video_frame)
: buffer_data_(
- std::make_unique<V4L2BufferRefBase>(v4l2_buffer, std::move(queue))) {
+ std::make_unique<V4L2BufferRefBase>(v4l2_buffer, std::move(queue))),
+ video_frame_(std::move(video_frame)) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
}
scoped_refptr<VideoFrame> V4L2ReadableBuffer::GetVideoFrame() {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DCHECK(buffer_data_);
+
+ if (buffer_data_->v4l2_buffer_.memory == V4L2_MEMORY_DMABUF && video_frame_)
+ return video_frame_;
return buffer_data_->GetVideoFrame();
}
@@ -806,8 +859,10 @@ class V4L2BufferRefFactory {
static V4L2ReadableBufferRef CreateReadableRef(
const struct v4l2_buffer& v4l2_buffer,
- base::WeakPtr<V4L2Queue> queue) {
- return new V4L2ReadableBuffer(v4l2_buffer, std::move(queue));
+ base::WeakPtr<V4L2Queue> queue,
+ scoped_refptr<VideoFrame> video_frame) {
+ return new V4L2ReadableBuffer(v4l2_buffer, std::move(queue),
+ std::move(video_frame));
}
};
@@ -1070,7 +1125,8 @@ base::Optional<V4L2WritableBufferRef> V4L2Queue::GetFreeBuffer() {
weak_this_factory_.GetWeakPtr());
}
-bool V4L2Queue::QueueBuffer(struct v4l2_buffer* v4l2_buffer) {
+bool V4L2Queue::QueueBuffer(struct v4l2_buffer* v4l2_buffer,
+ scoped_refptr<VideoFrame> video_frame) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
int ret = device_->Ioctl(VIDIOC_QBUF, v4l2_buffer);
@@ -1079,7 +1135,8 @@ bool V4L2Queue::QueueBuffer(struct v4l2_buffer* v4l2_buffer) {
return false;
}
- auto inserted = queued_buffers_.emplace(v4l2_buffer->index);
+ auto inserted =
+ queued_buffers_.emplace(v4l2_buffer->index, std::move(video_frame));
DCHECK_EQ(inserted.second, true);
device_->SchedulePoll();
@@ -1127,15 +1184,16 @@ std::pair<bool, V4L2ReadableBufferRef> V4L2Queue::DequeueBuffer() {
auto it = queued_buffers_.find(v4l2_buffer.index);
DCHECK(it != queued_buffers_.end());
- queued_buffers_.erase(*it);
+ scoped_refptr<VideoFrame> queued_frame = std::move(it->second);
+ queued_buffers_.erase(it);
if (QueuedBuffersCount() > 0)
device_->SchedulePoll();
DCHECK(free_buffers_);
- return std::make_pair(true,
- V4L2BufferRefFactory::CreateReadableRef(
- v4l2_buffer, weak_this_factory_.GetWeakPtr()));
+ return std::make_pair(true, V4L2BufferRefFactory::CreateReadableRef(
+ v4l2_buffer, weak_this_factory_.GetWeakPtr(),
+ std::move(queued_frame)));
}
bool V4L2Queue::IsStreaming() const {
@@ -1176,9 +1234,9 @@ bool V4L2Queue::Streamoff() {
return false;
}
- for (const auto& buffer_id : queued_buffers_) {
+ for (const auto& it : queued_buffers_) {
DCHECK(free_buffers_);
- free_buffers_->ReturnBuffer(buffer_id);
+ free_buffers_->ReturnBuffer(it.first);
}
queued_buffers_.clear();
@@ -1332,6 +1390,10 @@ VideoCodecProfile V4L2Device::V4L2ProfileToVideoCodecProfile(VideoCodec codec,
return H264PROFILE_EXTENDED;
case V4L2_MPEG_VIDEO_H264_PROFILE_HIGH:
return H264PROFILE_HIGH;
+ case V4L2_MPEG_VIDEO_H264_PROFILE_STEREO_HIGH:
+ return H264PROFILE_STEREOHIGH;
+ case V4L2_MPEG_VIDEO_H264_PROFILE_MULTIVIEW_HIGH:
+ return H264PROFILE_MULTIVIEWHIGH;
}
break;
case kCodecVP8:
diff --git a/chromium/media/gpu/v4l2/v4l2_device.h b/chromium/media/gpu/v4l2/v4l2_device.h
index 310d4a4a1a5..bdd8585aacd 100644
--- a/chromium/media/gpu/v4l2/v4l2_device.h
+++ b/chromium/media/gpu/v4l2/v4l2_device.h
@@ -129,6 +129,15 @@ class MEDIA_GPU_EXPORT V4L2WritableBufferRef {
// list.
bool QueueDMABuf(const std::vector<gfx::NativePixmapPlane>& planes,
V4L2RequestRef* request_ref = nullptr) &&;
+ // Queue a |video_frame| using its file descriptors as DMABUFs. The VideoFrame
+ // must have been constructed from its file descriptors.
+ // The particularity of this method is that a reference to |video_frame| is
+ // kept and made available again when the buffer is dequeued through
+ // |V4L2ReadableBufferRef::GetVideoFrame()|. |video_frame| is thus guaranteed
+ // to be alive until either all the |V4L2ReadableBufferRef| from the dequeued
+ // buffer get out of scope, or |V4L2Queue::Streamoff()| is called.
+ bool QueueDMABuf(scoped_refptr<VideoFrame> video_frame,
+ V4L2RequestRef* request_ref = nullptr) &&;
// Returns the number of planes in this buffer.
size_t PlanesCount() const;
@@ -180,7 +189,8 @@ class MEDIA_GPU_EXPORT V4L2WritableBufferRef {
// filled.
// When requests are supported, a |request_ref| can be passed along this
// the buffer to be submitted.
- bool DoQueue(V4L2RequestRef* request_ref) &&;
+ bool DoQueue(V4L2RequestRef* request_ref,
+ scoped_refptr<VideoFrame> video_frame) &&;
V4L2WritableBufferRef(const struct v4l2_buffer& v4l2_buffer,
base::WeakPtr<V4L2Queue> queue);
@@ -245,9 +255,14 @@ class MEDIA_GPU_EXPORT V4L2ReadableBuffer
~V4L2ReadableBuffer();
V4L2ReadableBuffer(const struct v4l2_buffer& v4l2_buffer,
- base::WeakPtr<V4L2Queue> queue);
+ base::WeakPtr<V4L2Queue> queue,
+ scoped_refptr<VideoFrame> video_frame);
std::unique_ptr<V4L2BufferRefBase> buffer_data_;
+ // If this buffer was a DMABUF buffer queued with
+ // QueueDMABuf(scoped_refptr<VideoFrame>), then this will hold the VideoFrame
+ // that has been passed at the time of queueing.
+ scoped_refptr<VideoFrame> video_frame_;
SEQUENCE_CHECKER(sequence_checker_);
DISALLOW_COPY_AND_ASSIGN(V4L2ReadableBuffer);
@@ -386,7 +401,8 @@ class MEDIA_GPU_EXPORT V4L2Queue
~V4L2Queue();
// Called when clients request a buffer to be queued.
- bool QueueBuffer(struct v4l2_buffer* v4l2_buffer);
+ bool QueueBuffer(struct v4l2_buffer* v4l2_buffer,
+ scoped_refptr<VideoFrame> video_frame);
const enum v4l2_buf_type type_;
enum v4l2_memory memory_ = V4L2_MEMORY_MMAP;
@@ -402,8 +418,10 @@ class MEDIA_GPU_EXPORT V4L2Queue
// Buffers that are available for client to get and submit.
// Buffers in this list are not referenced by anyone else than ourselves.
scoped_refptr<V4L2BuffersList> free_buffers_;
- // Buffers that have been queued by the client, and not dequeued yet.
- std::set<size_t> queued_buffers_;
+ // Buffers that have been queued by the client, and not dequeued yet. The
+ // value will be set to the VideoFrame that has been passed when we queued
+ // the buffer, if any.
+ std::map<size_t, scoped_refptr<VideoFrame>> queued_buffers_;
scoped_refptr<V4L2Device> device_;
// Callback to call in this queue's destructor.
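The queue/dequeue contract for the new QueueDMABuf(VideoFrame) overload, sketched from the comments above. This is illustrative only: |queue| stands for a V4L2Queue already set up for V4L2_MEMORY_DMABUF and |frame| for a dma-buf or GpuMemoryBuffer-backed VideoFrame; both names are placeholders.
  // Sketch only; error handling trimmed, |queue| and |frame| are assumptions.
  base::Optional<V4L2WritableBufferRef> buf = queue->GetFreeBuffer();
  if (buf) {
    // The queue keeps a reference to |frame| until the buffer is dequeued or
    // Streamoff() is called.
    std::move(*buf).QueueDMABuf(frame);
  }
  bool success;
  V4L2ReadableBufferRef dequeued;
  std::tie(success, dequeued) = queue->DequeueBuffer();
  if (success && dequeued) {
    // For a DMABUF buffer queued with a VideoFrame, GetVideoFrame() returns
    // that same frame rather than a wrapper built from the V4L2 planes.
    scoped_refptr<VideoFrame> same_frame = dequeued->GetVideoFrame();
  }
Streamoff() clears the queued-buffer map, so frames that were queued but never dequeued are released back to their pool at that point.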
diff --git a/chromium/media/gpu/v4l2/v4l2_image_processor_backend.cc b/chromium/media/gpu/v4l2/v4l2_image_processor_backend.cc
index 6498537e426..2a062f8b1d4 100644
--- a/chromium/media/gpu/v4l2/v4l2_image_processor_backend.cc
+++ b/chromium/media/gpu/v4l2/v4l2_image_processor_backend.cc
@@ -126,11 +126,13 @@ V4L2ImageProcessorBackend::V4L2ImageProcessorBackend(
v4l2_memory input_memory_type,
v4l2_memory output_memory_type,
OutputMode output_mode,
+ VideoRotation relative_rotation,
size_t num_buffers,
ErrorCB error_cb)
: ImageProcessorBackend(input_config,
output_config,
output_mode,
+ relative_rotation,
std::move(error_cb),
std::move(backend_task_runner)),
input_memory_type_(input_memory_type),
@@ -228,12 +230,13 @@ std::unique_ptr<ImageProcessorBackend> V4L2ImageProcessorBackend::Create(
const PortConfig& input_config,
const PortConfig& output_config,
const std::vector<OutputMode>& preferred_output_modes,
+ VideoRotation relative_rotation,
ErrorCB error_cb,
scoped_refptr<base::SequencedTaskRunner> backend_task_runner) {
for (const auto& output_mode : preferred_output_modes) {
auto image_processor = V4L2ImageProcessorBackend::CreateWithOutputMode(
- device, num_buffers, input_config, output_config, output_mode, error_cb,
- backend_task_runner);
+ device, num_buffers, input_config, output_config, output_mode,
+ relative_rotation, error_cb, backend_task_runner);
if (image_processor)
return image_processor;
}
@@ -249,6 +252,7 @@ V4L2ImageProcessorBackend::CreateWithOutputMode(
const PortConfig& input_config,
const PortConfig& output_config,
const OutputMode& output_mode,
+ VideoRotation relative_rotation,
ErrorCB error_cb,
scoped_refptr<base::SequencedTaskRunner> backend_task_runner) {
VLOGF(2);
@@ -308,6 +312,12 @@ V4L2ImageProcessorBackend::CreateWithOutputMode(
return nullptr;
}
+ // V4L2IP now doesn't support rotation case, so return nullptr.
+ if (relative_rotation != VIDEO_ROTATION_0) {
+ VLOGF(1) << "Currently V4L2IP doesn't support rotation";
+ return nullptr;
+ }
+
if (!device->Open(V4L2Device::Type::kImageProcessor,
input_config.fourcc.ToV4L2PixFmt())) {
VLOGF(1) << "Failed to open device with input fourcc: "
@@ -390,8 +400,8 @@ V4L2ImageProcessorBackend::CreateWithOutputMode(
PortConfig(output_config.fourcc, negotiated_output_size,
output_planes, output_config.visible_rect,
{output_storage_type}),
- input_memory_type, output_memory_type, output_mode, num_buffers,
- std::move(error_cb)));
+ input_memory_type, output_memory_type, output_mode, relative_rotation,
+ num_buffers, std::move(error_cb)));
// Initialize at |backend_task_runner_|.
bool success = false;
diff --git a/chromium/media/gpu/v4l2/v4l2_image_processor_backend.h b/chromium/media/gpu/v4l2/v4l2_image_processor_backend.h
index bd1c78ac4e9..4652bda62b7 100644
--- a/chromium/media/gpu/v4l2/v4l2_image_processor_backend.h
+++ b/chromium/media/gpu/v4l2/v4l2_image_processor_backend.h
@@ -49,6 +49,7 @@ class MEDIA_GPU_EXPORT V4L2ImageProcessorBackend
const PortConfig& input_config,
const PortConfig& output_config,
const std::vector<OutputMode>& preferred_output_modes,
+ VideoRotation relative_rotation,
ErrorCB error_cb,
scoped_refptr<base::SequencedTaskRunner> backend_task_runner);
@@ -104,6 +105,7 @@ class MEDIA_GPU_EXPORT V4L2ImageProcessorBackend
const PortConfig& input_config,
const PortConfig& output_config,
const OutputMode& preferred_output_modes,
+ VideoRotation relative_rotation,
ErrorCB error_cb,
scoped_refptr<base::SequencedTaskRunner> backend_task_runner);
@@ -115,6 +117,7 @@ class MEDIA_GPU_EXPORT V4L2ImageProcessorBackend
v4l2_memory input_memory_type,
v4l2_memory output_memory_type,
OutputMode output_mode,
+ VideoRotation relative_rotation,
size_t num_buffers,
ErrorCB error_cb);
~V4L2ImageProcessorBackend() override;
diff --git a/chromium/media/gpu/v4l2/v4l2_slice_video_decode_accelerator.cc b/chromium/media/gpu/v4l2/v4l2_slice_video_decode_accelerator.cc
index dd2c2e853eb..594081c44f8 100644
--- a/chromium/media/gpu/v4l2/v4l2_slice_video_decode_accelerator.cc
+++ b/chromium/media/gpu/v4l2/v4l2_slice_video_decode_accelerator.cc
@@ -592,8 +592,8 @@ bool V4L2SliceVideoDecodeAccelerator::CreateImageProcessor() {
image_processor_ = v4l2_vda_helpers::CreateImageProcessor(
*output_format_fourcc_, *gl_image_format_fourcc_, coded_size_,
gl_image_size_, GetRectSizeFromOrigin(decoder_->GetVisibleRect()),
- output_buffer_map_.size(), image_processor_device_,
- image_processor_output_mode,
+ VideoFrame::StorageType::STORAGE_DMABUFS, output_buffer_map_.size(),
+ image_processor_device_, image_processor_output_mode,
// Unretained(this) is safe for ErrorCB because |decoder_thread_| is owned
// by this V4L2VideoDecodeAccelerator and |this| must be valid when
// ErrorCB is executed.
diff --git a/chromium/media/gpu/v4l2/v4l2_vda_helpers.cc b/chromium/media/gpu/v4l2/v4l2_vda_helpers.cc
index f520d93be0f..558b694af86 100644
--- a/chromium/media/gpu/v4l2/v4l2_vda_helpers.cc
+++ b/chromium/media/gpu/v4l2/v4l2_vda_helpers.cc
@@ -71,6 +71,7 @@ std::unique_ptr<ImageProcessor> CreateImageProcessor(
const gfx::Size& vda_output_coded_size,
const gfx::Size& ip_output_coded_size,
const gfx::Size& visible_size,
+ VideoFrame::StorageType output_storage_type,
size_t nb_buffers,
scoped_refptr<V4L2Device> image_processor_device,
ImageProcessor::OutputMode image_processor_output_mode,
@@ -86,8 +87,8 @@ std::unique_ptr<ImageProcessor> CreateImageProcessor(
{VideoFrame::STORAGE_DMABUFS}),
ImageProcessor::PortConfig(ip_output_format, ip_output_coded_size, {},
gfx::Rect(visible_size),
- {VideoFrame::STORAGE_DMABUFS}),
- {image_processor_output_mode}, std::move(error_cb),
+ {output_storage_type}),
+ {image_processor_output_mode}, VIDEO_ROTATION_0, std::move(error_cb),
std::move(client_task_runner));
if (!image_processor)
return nullptr;
@@ -174,6 +175,8 @@ bool InputBufferFragmentSplitter::IsPartialFramePending() const {
H264InputBufferFragmentSplitter::H264InputBufferFragmentSplitter()
: h264_parser_(new H264Parser()) {}
+H264InputBufferFragmentSplitter::~H264InputBufferFragmentSplitter() = default;
+
bool H264InputBufferFragmentSplitter::AdvanceFrameFragment(const uint8_t* data,
size_t size,
size_t* endpos) {
diff --git a/chromium/media/gpu/v4l2/v4l2_vda_helpers.h b/chromium/media/gpu/v4l2/v4l2_vda_helpers.h
index b0c780cd734..05b74a3205d 100644
--- a/chromium/media/gpu/v4l2/v4l2_vda_helpers.h
+++ b/chromium/media/gpu/v4l2/v4l2_vda_helpers.h
@@ -41,6 +41,7 @@ base::Optional<Fourcc> FindImageProcessorOutputFormat(V4L2Device* ip_device);
// |ip_output_coded_size| is the coded size of the output buffers that the IP
// must produce.
// |visible_size| is the visible size of both the input and output buffers.
+// |output_storage_type| indicates what type of VideoFrame is used for output.
// |nb_buffers| is the exact number of output buffers that the IP must create.
// |image_processor_output_mode| specifies whether the IP must allocate its
// own buffers or rely on imported ones.
@@ -53,6 +54,7 @@ std::unique_ptr<ImageProcessor> CreateImageProcessor(
const gfx::Size& vda_output_coded_size,
const gfx::Size& ip_output_coded_size,
const gfx::Size& visible_size,
+ VideoFrame::StorageType output_storage_type,
size_t nb_buffers,
scoped_refptr<V4L2Device> image_processor_device,
ImageProcessor::OutputMode image_processor_output_mode,
@@ -97,6 +99,7 @@ class InputBufferFragmentSplitter {
class H264InputBufferFragmentSplitter : public InputBufferFragmentSplitter {
public:
explicit H264InputBufferFragmentSplitter();
+ ~H264InputBufferFragmentSplitter() override;
bool AdvanceFrameFragment(const uint8_t* data,
size_t size,
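For context on how the fragment splitter is driven (the new stateful backend below uses this pattern in DoDecodeWork()), a hedged sketch follows; |splitter|, |data| and |size| are placeholders for a splitter instance and the not-yet-consumed part of a DecoderBuffer.
  // Sketch of the AdvanceFrameFragment()/IsPartialFramePending() loop.
  size_t bytes_to_copy = 0;
  if (!splitter->AdvanceFrameFragment(data, size, &bytes_to_copy)) {
    // Invalid stream; the caller reports a decode error.
  }
  // Append |bytes_to_copy| bytes to the current V4L2 input buffer, then:
  if (!splitter->IsPartialFramePending()) {
    // The input buffer now ends on a frame boundary and can be submitted;
    // otherwise keep accumulating before queueing it.
  }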
diff --git a/chromium/media/gpu/v4l2/v4l2_video_decode_accelerator.cc b/chromium/media/gpu/v4l2/v4l2_video_decode_accelerator.cc
index 4a581cab841..e844687937b 100644
--- a/chromium/media/gpu/v4l2/v4l2_video_decode_accelerator.cc
+++ b/chromium/media/gpu/v4l2/v4l2_video_decode_accelerator.cc
@@ -1523,8 +1523,7 @@ bool V4L2VideoDecodeAccelerator::EnqueueOutputRecord(
ret = std::move(buffer).QueueMMap();
break;
case V4L2_MEMORY_DMABUF:
- ret = std::move(buffer).QueueDMABuf(
- output_record.output_frame->DmabufFds());
+ ret = std::move(buffer).QueueDMABuf(output_record.output_frame);
break;
default:
NOTREACHED();
@@ -1880,6 +1879,10 @@ bool V4L2VideoDecodeAccelerator::StartDevicePoll() {
NOTIFY_ERROR(PLATFORM_FAILURE);
return false;
}
+ cancelable_service_device_task_.Reset(base::BindRepeating(
+ &V4L2VideoDecodeAccelerator::ServiceDeviceTask, base::Unretained(this)));
+ cancelable_service_device_task_callback_ =
+ cancelable_service_device_task_.callback();
device_poll_thread_.task_runner()->PostTask(
FROM_HERE, base::BindOnce(&V4L2VideoDecodeAccelerator::DevicePollTask,
base::Unretained(this), 0));
@@ -1901,6 +1904,10 @@ bool V4L2VideoDecodeAccelerator::StopDevicePoll() {
return false;
}
device_poll_thread_.Stop();
+ // Must be done after the Stop() above to ensure
+ // |cancelable_service_device_task_callback_| is not copied.
+ cancelable_service_device_task_.Cancel();
+ cancelable_service_device_task_callback_ = {};
// Clear the interrupt now, to be sure.
if (!device_->ClearDevicePollInterrupt()) {
PLOG(ERROR) << "ClearDevicePollInterrupt: failed";
@@ -2027,8 +2034,8 @@ void V4L2VideoDecodeAccelerator::DevicePollTask(bool poll_device) {
// All processing should happen on ServiceDeviceTask(), since we shouldn't
// touch decoder state from this thread.
decoder_thread_.task_runner()->PostTask(
- FROM_HERE, base::BindOnce(&V4L2VideoDecodeAccelerator::ServiceDeviceTask,
- base::Unretained(this), event_pending));
+ FROM_HERE,
+ base::BindOnce(cancelable_service_device_task_callback_, event_pending));
}
bool V4L2VideoDecodeAccelerator::IsDestroyPending() {
@@ -2314,9 +2321,9 @@ bool V4L2VideoDecodeAccelerator::CreateImageProcessor() {
image_processor_ = v4l2_vda_helpers::CreateImageProcessor(
*output_format_fourcc_, *egl_image_format_fourcc_, coded_size_,
- egl_image_size_, visible_size_, output_buffer_map_.size(),
- image_processor_device_, image_processor_output_mode,
- decoder_thread_.task_runner(),
+ egl_image_size_, visible_size_, VideoFrame::StorageType::STORAGE_DMABUFS,
+ output_buffer_map_.size(), image_processor_device_,
+ image_processor_output_mode, decoder_thread_.task_runner(),
// Unretained(this) is safe for ErrorCB because |decoder_thread_| is owned
// by this V4L2VideoDecodeAccelerator and |this| must be valid when
// ErrorCB is executed.
diff --git a/chromium/media/gpu/v4l2/v4l2_video_decode_accelerator.h b/chromium/media/gpu/v4l2/v4l2_video_decode_accelerator.h
index e4d27c1284b..96a23510f18 100644
--- a/chromium/media/gpu/v4l2/v4l2_video_decode_accelerator.h
+++ b/chromium/media/gpu/v4l2/v4l2_video_decode_accelerator.h
@@ -20,6 +20,7 @@
#include <vector>
#include "base/callback_forward.h"
+#include "base/cancelable_callback.h"
#include "base/containers/queue.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
@@ -465,6 +466,15 @@ class MEDIA_GPU_EXPORT V4L2VideoDecodeAccelerator
// Decoder state machine state.
State decoder_state_;
+ // Cancelable callback for running ServiceDeviceTask(). Must only be accessed
+ // on |decoder_thread_|.
+ base::CancelableRepeatingCallback<void(bool)> cancelable_service_device_task_;
+ // Concrete callback from |cancelable_service_device_task_| that can be copied
+ // on |device_poll_thread_|. This exists because
+ // CancelableRepeatingCallback::callback() creates a WeakPtr internally, which
+ // must be created/destroyed from the same thread.
+ base::RepeatingCallback<void(bool)> cancelable_service_device_task_callback_;
+
// Waitable event signaled when the decoder is destroying.
base::WaitableEvent destroy_pending_;
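The comment above explains why two members are needed; the sequence below is an illustrative, standalone restatement of the base::CancelableRepeatingCallback pattern, not the accelerator's actual code. HandleEvent is a hypothetical void(bool) handler and |poll_task_runner| a placeholder task runner.
  base::CancelableRepeatingCallback<void(bool)> cancelable_task;
  base::RepeatingCallback<void(bool)> copyable_cb;
  // On the owning thread (here: the decoder thread), before polling starts:
  cancelable_task.Reset(base::BindRepeating(&HandleEvent));
  copyable_cb = cancelable_task.callback();  // Creates the WeakPtr here, once.
  // On the poll thread: |copyable_cb| may be copied and posted freely.
  poll_task_runner->PostTask(FROM_HERE,
                             base::BindOnce(copyable_cb, /*event=*/true));
  // Back on the owning thread, after the poll thread has been stopped:
  cancelable_task.Cancel();  // Pending copies become no-ops.
  copyable_cb = {};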
diff --git a/chromium/media/gpu/v4l2/v4l2_slice_video_decoder.cc b/chromium/media/gpu/v4l2/v4l2_video_decoder.cc
index 28e1b3b7e4a..4c747eb86f3 100644
--- a/chromium/media/gpu/v4l2/v4l2_slice_video_decoder.cc
+++ b/chromium/media/gpu/v4l2/v4l2_video_decoder.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/gpu/v4l2/v4l2_slice_video_decoder.h"
+#include "media/gpu/v4l2/v4l2_video_decoder.h"
#include <algorithm>
@@ -17,6 +17,7 @@
#include "media/gpu/chromeos/fourcc.h"
#include "media/gpu/gpu_video_decode_accelerator_helpers.h"
#include "media/gpu/macros.h"
+#include "media/gpu/v4l2/v4l2_video_decoder_backend_stateful.h"
#include "media/gpu/v4l2/v4l2_video_decoder_backend_stateless.h"
namespace media {
@@ -33,15 +34,14 @@ constexpr size_t kNumInputBuffers = 16;
// Input format V4L2 fourccs this class supports.
constexpr uint32_t kSupportedInputFourccs[] = {
- V4L2_PIX_FMT_H264_SLICE,
- V4L2_PIX_FMT_VP8_FRAME,
- V4L2_PIX_FMT_VP9_FRAME,
+ V4L2_PIX_FMT_H264_SLICE, V4L2_PIX_FMT_VP8_FRAME, V4L2_PIX_FMT_VP9_FRAME,
+ V4L2_PIX_FMT_H264, V4L2_PIX_FMT_VP8, V4L2_PIX_FMT_VP9,
};
} // namespace
// static
-std::unique_ptr<DecoderInterface> V4L2SliceVideoDecoder::Create(
+std::unique_ptr<DecoderInterface> V4L2VideoDecoder::Create(
scoped_refptr<base::SequencedTaskRunner> decoder_task_runner,
base::WeakPtr<DecoderInterface::Client> client) {
DCHECK(decoder_task_runner->RunsTasksInCurrentSequence());
@@ -53,12 +53,12 @@ std::unique_ptr<DecoderInterface> V4L2SliceVideoDecoder::Create(
return nullptr;
}
- return base::WrapUnique<DecoderInterface>(new V4L2SliceVideoDecoder(
+ return base::WrapUnique<DecoderInterface>(new V4L2VideoDecoder(
std::move(decoder_task_runner), std::move(client), std::move(device)));
}
// static
-SupportedVideoDecoderConfigs V4L2SliceVideoDecoder::GetSupportedConfigs() {
+SupportedVideoDecoderConfigs V4L2VideoDecoder::GetSupportedConfigs() {
scoped_refptr<V4L2Device> device = V4L2Device::Create();
if (!device)
return SupportedVideoDecoderConfigs();
@@ -69,7 +69,7 @@ SupportedVideoDecoderConfigs V4L2SliceVideoDecoder::GetSupportedConfigs() {
false);
}
-V4L2SliceVideoDecoder::V4L2SliceVideoDecoder(
+V4L2VideoDecoder::V4L2VideoDecoder(
scoped_refptr<base::SequencedTaskRunner> decoder_task_runner,
base::WeakPtr<DecoderInterface::Client> client,
scoped_refptr<V4L2Device> device)
@@ -82,7 +82,7 @@ V4L2SliceVideoDecoder::V4L2SliceVideoDecoder(
weak_this_ = weak_this_factory_.GetWeakPtr();
}
-V4L2SliceVideoDecoder::~V4L2SliceVideoDecoder() {
+V4L2VideoDecoder::~V4L2VideoDecoder() {
DCHECK_CALLED_ON_VALID_SEQUENCE(decoder_sequence_checker_);
DVLOGF(2);
@@ -93,7 +93,7 @@ V4L2SliceVideoDecoder::~V4L2SliceVideoDecoder() {
}
// Stop and Destroy device.
- StopStreamV4L2Queue();
+ StopStreamV4L2Queue(true);
if (input_queue_) {
input_queue_->DeallocateBuffers();
input_queue_ = nullptr;
@@ -106,9 +106,9 @@ V4L2SliceVideoDecoder::~V4L2SliceVideoDecoder() {
weak_this_factory_.InvalidateWeakPtrs();
}
-void V4L2SliceVideoDecoder::Initialize(const VideoDecoderConfig& config,
- InitCB init_cb,
- const OutputCB& output_cb) {
+void V4L2VideoDecoder::Initialize(const VideoDecoderConfig& config,
+ InitCB init_cb,
+ const OutputCB& output_cb) {
DCHECK_CALLED_ON_VALID_SEQUENCE(decoder_sequence_checker_);
DCHECK(config.IsValidConfig());
DCHECK(state_ == State::kUninitialized || state_ == State::kDecoding);
@@ -116,7 +116,7 @@ void V4L2SliceVideoDecoder::Initialize(const VideoDecoderConfig& config,
// Reset V4L2 device and queue if reinitializing decoder.
if (state_ != State::kUninitialized) {
- if (!StopStreamV4L2Queue()) {
+ if (!StopStreamV4L2Queue(true)) {
std::move(init_cb).Run(StatusCode::kV4l2FailedToStopStreamQueue);
return;
}
@@ -141,12 +141,33 @@ void V4L2SliceVideoDecoder::Initialize(const VideoDecoderConfig& config,
// Open V4L2 device.
VideoCodecProfile profile = config.profile();
- uint32_t input_format_fourcc =
+ uint32_t input_format_fourcc_stateless =
V4L2Device::VideoCodecProfileToV4L2PixFmt(profile, true);
- if (!input_format_fourcc ||
- !device_->Open(V4L2Device::Type::kDecoder, input_format_fourcc)) {
+ if (!input_format_fourcc_stateless ||
+ !device_->Open(V4L2Device::Type::kDecoder,
+ input_format_fourcc_stateless)) {
VLOGF(1) << "Failed to open device for profile: " << profile
- << " fourcc: " << FourccToString(input_format_fourcc);
+ << " fourcc: " << FourccToString(input_format_fourcc_stateless);
+ input_format_fourcc_stateless = 0;
+ } else {
+ VLOGF(1) << "Found V4L2 device capable of stateless decoding for "
+ << FourccToString(input_format_fourcc_stateless);
+ }
+
+ uint32_t input_format_fourcc_stateful =
+ V4L2Device::VideoCodecProfileToV4L2PixFmt(profile, false);
+ if (!input_format_fourcc_stateful ||
+ !device_->Open(V4L2Device::Type::kDecoder,
+ input_format_fourcc_stateful)) {
+ VLOGF(1) << "Failed to open device for profile: " << profile
+ << " fourcc: " << FourccToString(input_format_fourcc_stateful);
+ input_format_fourcc_stateful = 0;
+ } else {
+ VLOGF(1) << "Found V4L2 device capable of stateful decoding for "
+ << FourccToString(input_format_fourcc_stateful);
+ }
+
+ if (!input_format_fourcc_stateless && !input_format_fourcc_stateful) {
std::move(init_cb).Run(StatusCode::kV4l2NoDecoder);
return;
}
@@ -172,10 +193,23 @@ void V4L2SliceVideoDecoder::Initialize(const VideoDecoderConfig& config,
return;
}
- // Create the backend (only stateless API supported as of now).
- backend_ = std::make_unique<V4L2StatelessVideoDecoderBackend>(
- this, device_, profile, decoder_task_runner_);
+ uint32_t input_format_fourcc;
+ if (input_format_fourcc_stateful) {
+ backend_ = std::make_unique<V4L2StatefulVideoDecoderBackend>(
+ this, device_, profile, decoder_task_runner_);
+ input_format_fourcc = input_format_fourcc_stateful;
+ } else if (input_format_fourcc_stateless) {
+ backend_ = std::make_unique<V4L2StatelessVideoDecoderBackend>(
+ this, device_, profile, decoder_task_runner_);
+ input_format_fourcc = input_format_fourcc_stateless;
+ } else {
+ VLOGF(1) << "No backend capable of taking this profile.";
+ std::move(init_cb).Run(StatusCode::kV4l2FailedResourceAllocation);
+ return;
+ }
+
if (!backend_->Initialize()) {
+ VLOGF(1) << "Failed to initialize backend.";
std::move(init_cb).Run(StatusCode::kV4l2FailedResourceAllocation);
return;
}
@@ -193,13 +227,21 @@ void V4L2SliceVideoDecoder::Initialize(const VideoDecoderConfig& config,
return;
}
+ // Start streaming input queue and polling. This is required for the stateful
+ // decoder, and doesn't hurt for the stateless one.
+ if (!StartStreamV4L2Queue(false)) {
+ VLOGF(1) << "Failed to start streaming.";
+ std::move(init_cb).Run(StatusCode::kV4L2FailedToStartStreamQueue);
+ return;
+ }
+
// Call init_cb
output_cb_ = output_cb;
SetState(State::kDecoding);
std::move(init_cb).Run(::media::OkStatus());
}
-bool V4L2SliceVideoDecoder::SetupInputFormat(uint32_t input_format_fourcc) {
+bool V4L2VideoDecoder::SetupInputFormat(uint32_t input_format_fourcc) {
DCHECK_CALLED_ON_VALID_SEQUENCE(decoder_sequence_checker_);
DCHECK_EQ(state_, State::kUninitialized);
@@ -232,8 +274,8 @@ bool V4L2SliceVideoDecoder::SetupInputFormat(uint32_t input_format_fourcc) {
return true;
}
-bool V4L2SliceVideoDecoder::SetupOutputFormat(const gfx::Size& size,
- const gfx::Rect& visible_rect) {
+bool V4L2VideoDecoder::SetupOutputFormat(const gfx::Size& size,
+ const gfx::Rect& visible_rect) {
DCHECK_CALLED_ON_VALID_SEQUENCE(decoder_sequence_checker_);
DVLOGF(3) << "size: " << size.ToString()
<< ", visible_rect: " << visible_rect.ToString();
@@ -307,7 +349,7 @@ bool V4L2SliceVideoDecoder::SetupOutputFormat(const gfx::Size& size,
return true;
}
-void V4L2SliceVideoDecoder::Reset(base::OnceClosure closure) {
+void V4L2VideoDecoder::Reset(base::OnceClosure closure) {
DCHECK_CALLED_ON_VALID_SEQUENCE(decoder_sequence_checker_);
DVLOGF(3);
@@ -321,12 +363,13 @@ void V4L2SliceVideoDecoder::Reset(base::OnceClosure closure) {
// Streamoff V4L2 queues to drop input and output buffers.
// If the queues are streaming before reset, then we need to start streaming
// them after stopping.
- bool is_streaming = input_queue_->IsStreaming();
- if (!StopStreamV4L2Queue())
+ const bool is_input_streaming = input_queue_->IsStreaming();
+ const bool is_output_streaming = output_queue_->IsStreaming();
+ if (!StopStreamV4L2Queue(true))
return;
- if (is_streaming) {
- if (!StartStreamV4L2Queue())
+ if (is_input_streaming) {
+ if (!StartStreamV4L2Queue(is_output_streaming))
return;
}
@@ -337,8 +380,8 @@ void V4L2SliceVideoDecoder::Reset(base::OnceClosure closure) {
std::move(closure).Run();
}
-void V4L2SliceVideoDecoder::Decode(scoped_refptr<DecoderBuffer> buffer,
- DecodeCB decode_cb) {
+void V4L2VideoDecoder::Decode(scoped_refptr<DecoderBuffer> buffer,
+ DecodeCB decode_cb) {
DCHECK_CALLED_ON_VALID_SEQUENCE(decoder_sequence_checker_);
DCHECK_NE(state_, State::kUninitialized);
@@ -352,20 +395,20 @@ void V4L2SliceVideoDecoder::Decode(scoped_refptr<DecoderBuffer> buffer,
bitstream_id);
}
-bool V4L2SliceVideoDecoder::StartStreamV4L2Queue() {
+bool V4L2VideoDecoder::StartStreamV4L2Queue(bool start_output_queue) {
DCHECK_CALLED_ON_VALID_SEQUENCE(decoder_sequence_checker_);
DVLOGF(3);
- if (!input_queue_->Streamon() || !output_queue_->Streamon()) {
+ if (!input_queue_->Streamon() ||
+ (start_output_queue && !output_queue_->Streamon())) {
VLOGF(1) << "Failed to streamon V4L2 queue.";
SetState(State::kError);
return false;
}
if (!device_->StartPolling(
- base::BindRepeating(&V4L2SliceVideoDecoder::ServiceDeviceTask,
- weak_this_),
- base::BindRepeating(&V4L2SliceVideoDecoder::SetState, weak_this_,
+ base::BindRepeating(&V4L2VideoDecoder::ServiceDeviceTask, weak_this_),
+ base::BindRepeating(&V4L2VideoDecoder::SetState, weak_this_,
State::kError))) {
SetState(State::kError);
return false;
@@ -374,7 +417,7 @@ bool V4L2SliceVideoDecoder::StartStreamV4L2Queue() {
return true;
}
-bool V4L2SliceVideoDecoder::StopStreamV4L2Queue() {
+bool V4L2VideoDecoder::StopStreamV4L2Queue(bool stop_input_queue) {
DCHECK_CALLED_ON_VALID_SEQUENCE(decoder_sequence_checker_);
DVLOGF(3);
@@ -384,48 +427,48 @@ bool V4L2SliceVideoDecoder::StopStreamV4L2Queue() {
}
// Streamoff input and output queue.
- if (input_queue_)
+ if (input_queue_ && stop_input_queue)
input_queue_->Streamoff();
if (output_queue_)
output_queue_->Streamoff();
if (backend_)
- backend_->OnStreamStopped();
+ backend_->OnStreamStopped(stop_input_queue);
return true;
}
-void V4L2SliceVideoDecoder::InitiateFlush() {
+void V4L2VideoDecoder::InitiateFlush() {
DCHECK_CALLED_ON_VALID_SEQUENCE(decoder_sequence_checker_);
DVLOGF(3);
SetState(State::kFlushing);
}
-void V4L2SliceVideoDecoder::CompleteFlush() {
+void V4L2VideoDecoder::CompleteFlush() {
DCHECK_CALLED_ON_VALID_SEQUENCE(decoder_sequence_checker_);
DVLOGF(3);
SetState(State::kDecoding);
}
-void V4L2SliceVideoDecoder::ChangeResolution(gfx::Size pic_size,
- gfx::Rect visible_rect,
- size_t num_output_frames) {
+void V4L2VideoDecoder::ChangeResolution(gfx::Size pic_size,
+ gfx::Rect visible_rect,
+ size_t num_output_frames) {
DCHECK_CALLED_ON_VALID_SEQUENCE(decoder_sequence_checker_);
DVLOGF(3);
DCHECK(!continue_change_resolution_cb_);
// After the pipeline flushes all frames, we can start changing resolution.
continue_change_resolution_cb_ =
- base::BindOnce(&V4L2SliceVideoDecoder::ContinueChangeResolution,
- weak_this_, pic_size, visible_rect, num_output_frames);
+ base::BindOnce(&V4L2VideoDecoder::ContinueChangeResolution, weak_this_,
+ pic_size, visible_rect, num_output_frames);
DCHECK(client_);
client_->PrepareChangeResolution();
}
-void V4L2SliceVideoDecoder::ApplyResolutionChange() {
+void V4L2VideoDecoder::ApplyResolutionChange() {
DCHECK_CALLED_ON_VALID_SEQUENCE(decoder_sequence_checker_);
DVLOGF(3);
DCHECK(continue_change_resolution_cb_);
@@ -433,13 +476,12 @@ void V4L2SliceVideoDecoder::ApplyResolutionChange() {
std::move(continue_change_resolution_cb_).Run();
}
-void V4L2SliceVideoDecoder::ContinueChangeResolution(
+void V4L2VideoDecoder::ContinueChangeResolution(
const gfx::Size& pic_size,
const gfx::Rect& visible_rect,
const size_t num_output_frames) {
DCHECK_CALLED_ON_VALID_SEQUENCE(decoder_sequence_checker_);
DVLOGF(3);
- DCHECK_EQ(input_queue_->QueuedBuffersCount(), 0u);
DCHECK_EQ(output_queue_->QueuedBuffersCount(), 0u);
// If we already reset, then skip it.
@@ -455,7 +497,9 @@ void V4L2SliceVideoDecoder::ContinueChangeResolution(
num_output_frames_ = num_output_frames;
- if (!StopStreamV4L2Queue())
+ // Stateful decoders require the input queue to keep running during resolution
+ // changes, but stateless ones require it to be stopped.
+ if (!StopStreamV4L2Queue(backend_->StopInputQueueOnResChange()))
return;
if (!output_queue_->DeallocateBuffers()) {
@@ -488,7 +532,7 @@ void V4L2SliceVideoDecoder::ContinueChangeResolution(
return;
}
- if (!StartStreamV4L2Queue()) {
+ if (!StartStreamV4L2Queue(true)) {
SetState(State::kError);
return;
}
@@ -500,7 +544,7 @@ void V4L2SliceVideoDecoder::ContinueChangeResolution(
base::Unretained(backend_.get()), true));
}
-void V4L2SliceVideoDecoder::ServiceDeviceTask(bool /* event */) {
+void V4L2VideoDecoder::ServiceDeviceTask(bool event) {
DCHECK_CALLED_ON_VALID_SEQUENCE(decoder_sequence_checker_);
DVLOGF(3) << "Number of queued input buffers: "
<< input_queue_->QueuedBuffersCount()
@@ -509,8 +553,9 @@ void V4L2SliceVideoDecoder::ServiceDeviceTask(bool /* event */) {
// Dequeue V4L2 output buffer first to reduce output latency.
bool success;
- V4L2ReadableBufferRef dequeued_buffer;
while (output_queue_->QueuedBuffersCount() > 0) {
+ V4L2ReadableBufferRef dequeued_buffer;
+
std::tie(success, dequeued_buffer) = output_queue_->DequeueBuffer();
if (!success) {
SetState(State::kError);
@@ -524,6 +569,8 @@ void V4L2SliceVideoDecoder::ServiceDeviceTask(bool /* event */) {
// Dequeue V4L2 input buffer.
while (input_queue_->QueuedBuffersCount() > 0) {
+ V4L2ReadableBufferRef dequeued_buffer;
+
std::tie(success, dequeued_buffer) = input_queue_->DequeueBuffer();
if (!success) {
SetState(State::kError);
@@ -532,13 +579,15 @@ void V4L2SliceVideoDecoder::ServiceDeviceTask(bool /* event */) {
if (!dequeued_buffer)
break;
}
+
+ backend_->OnServiceDeviceTask(event);
}
-void V4L2SliceVideoDecoder::OutputFrame(scoped_refptr<VideoFrame> frame,
- const gfx::Rect& visible_rect,
- base::TimeDelta timestamp) {
+void V4L2VideoDecoder::OutputFrame(scoped_refptr<VideoFrame> frame,
+ const gfx::Rect& visible_rect,
+ base::TimeDelta timestamp) {
DCHECK_CALLED_ON_VALID_SEQUENCE(decoder_sequence_checker_);
- DVLOGF(4) << "timestamp: " << timestamp;
+ DVLOGF(4) << "timestamp: " << timestamp.InMilliseconds() << " msec";
// Set the timestamp at which the decode operation started on the
// |frame|. If the frame has been outputted before (e.g. because of VP9
@@ -561,14 +610,14 @@ void V4L2SliceVideoDecoder::OutputFrame(scoped_refptr<VideoFrame> frame,
output_cb_.Run(std::move(frame));
}
-DmabufVideoFramePool* V4L2SliceVideoDecoder::GetVideoFramePool() const {
+DmabufVideoFramePool* V4L2VideoDecoder::GetVideoFramePool() const {
DCHECK_CALLED_ON_VALID_SEQUENCE(decoder_sequence_checker_);
DVLOGF(4);
return client_->GetVideoFramePool();
}
-void V4L2SliceVideoDecoder::SetState(State new_state) {
+void V4L2VideoDecoder::SetState(State new_state) {
DCHECK_CALLED_ON_VALID_SEQUENCE(decoder_sequence_checker_);
DVLOGF(3) << "Change state from " << static_cast<int>(state_) << " to "
<< static_cast<int>(new_state);
@@ -613,14 +662,14 @@ void V4L2SliceVideoDecoder::SetState(State new_state) {
return;
}
-void V4L2SliceVideoDecoder::OnBackendError() {
+void V4L2VideoDecoder::OnBackendError() {
DCHECK_CALLED_ON_VALID_SEQUENCE(decoder_sequence_checker_);
DVLOGF(2);
SetState(State::kError);
}
-bool V4L2SliceVideoDecoder::IsDecoding() const {
+bool V4L2VideoDecoder::IsDecoding() const {
DCHECK_CALLED_ON_VALID_SEQUENCE(decoder_sequence_checker_);
DVLOGF(3);
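A condensed restatement of the probing order in Initialize() above; the name and exact meaning of the boolean passed to VideoCodecProfileToV4L2PixFmt() are an assumption here, the hunk only shows true/false being passed.
  // true selects the stateless (slice-based) fourcc, false the stateful one,
  // per the Initialize() hunk above.
  uint32_t stateless_fmt = V4L2Device::VideoCodecProfileToV4L2PixFmt(profile, true);
  uint32_t stateful_fmt  = V4L2Device::VideoCodecProfileToV4L2PixFmt(profile, false);
  // Each fourcc is kept only if device_->Open(Type::kDecoder, fourcc) succeeds.
  // The stateful backend is preferred when both are usable; if neither opens,
  // Initialize() fails with StatusCode::kV4l2NoDecoder.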
diff --git a/chromium/media/gpu/v4l2/v4l2_slice_video_decoder.h b/chromium/media/gpu/v4l2/v4l2_video_decoder.h
index d5b82bbf824..b046b17dbd7 100644
--- a/chromium/media/gpu/v4l2/v4l2_slice_video_decoder.h
+++ b/chromium/media/gpu/v4l2/v4l2_video_decoder.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_GPU_V4L2_V4L2_SLICE_VIDEO_DECODER_H_
-#define MEDIA_GPU_V4L2_V4L2_SLICE_VIDEO_DECODER_H_
+#ifndef MEDIA_GPU_V4L2_V4L2_VIDEO_DECODER_H_
+#define MEDIA_GPU_V4L2_V4L2_VIDEO_DECODER_H_
#include <linux/videodev2.h>
@@ -36,12 +36,12 @@ namespace media {
class DmabufVideoFramePool;
-class MEDIA_GPU_EXPORT V4L2SliceVideoDecoder
+class MEDIA_GPU_EXPORT V4L2VideoDecoder
: public DecoderInterface,
public V4L2VideoDecoderBackend::Client {
public:
- // Create V4L2SliceVideoDecoder instance. The success of the creation doesn't
- // ensure V4L2SliceVideoDecoder is available on the device. It will be
+ // Create V4L2VideoDecoder instance. The success of the creation doesn't
+ // ensure V4L2VideoDecoder is available on the device. It will be
// determined in Initialize().
static std::unique_ptr<DecoderInterface> Create(
scoped_refptr<base::SequencedTaskRunner> decoder_task_runner,
@@ -71,13 +71,12 @@ class MEDIA_GPU_EXPORT V4L2SliceVideoDecoder
DmabufVideoFramePool* GetVideoFramePool() const override;
private:
- friend class V4L2SliceVideoDecoderTest;
+ friend class V4L2VideoDecoderTest;
- V4L2SliceVideoDecoder(
- scoped_refptr<base::SequencedTaskRunner> decoder_task_runner,
- base::WeakPtr<DecoderInterface::Client> client,
- scoped_refptr<V4L2Device> device);
- ~V4L2SliceVideoDecoder() override;
+ V4L2VideoDecoder(scoped_refptr<base::SequencedTaskRunner> decoder_task_runner,
+ base::WeakPtr<DecoderInterface::Client> client,
+ scoped_refptr<V4L2Device> device);
+ ~V4L2VideoDecoder() override;
enum class State {
// Initial state. Transitions to |kDecoding| if Initialize() is successful,
@@ -116,12 +115,12 @@ class MEDIA_GPU_EXPORT V4L2SliceVideoDecoder
// in VideoFramePool. Return true if the setup is successful.
bool SetupOutputFormat(const gfx::Size& size, const gfx::Rect& visible_rect);
- // Start streaming V4L2 input and output queues. Attempt to start
- // |device_poll_thread_| before starting streaming.
- bool StartStreamV4L2Queue();
- // Stop streaming V4L2 input and output queues. Stop |device_poll_thread_|
- // before stopping streaming.
- bool StopStreamV4L2Queue();
+ // Start streaming V4L2 input and (if |start_output_queue| is true) output
+ // queues. Attempt to start |device_poll_thread_| after streaming starts.
+ bool StartStreamV4L2Queue(bool start_output_queue);
+ // Stop streaming V4L2 output and (if |stop_input_queue| is true) input
+ // queues. Stop |device_poll_thread_| before stopping streaming.
+ bool StopStreamV4L2Queue(bool stop_input_queue);
// Try to dequeue input and output buffers from device.
void ServiceDeviceTask(bool event);
@@ -167,10 +166,10 @@ class MEDIA_GPU_EXPORT V4L2SliceVideoDecoder
// |weak_this_| must be dereferenced and invalidated on
// |decoder_task_runner_|.
- base::WeakPtr<V4L2SliceVideoDecoder> weak_this_;
- base::WeakPtrFactory<V4L2SliceVideoDecoder> weak_this_factory_;
+ base::WeakPtr<V4L2VideoDecoder> weak_this_;
+ base::WeakPtrFactory<V4L2VideoDecoder> weak_this_factory_;
};
} // namespace media
-#endif // MEDIA_GPU_V4L2_V4L2_SLICE_VIDEO_DECODER_H_
+#endif // MEDIA_GPU_V4L2_V4L2_VIDEO_DECODER_H_
diff --git a/chromium/media/gpu/v4l2/v4l2_video_decoder_backend.h b/chromium/media/gpu/v4l2/v4l2_video_decoder_backend.h
index 093df178bb5..3c49de8f8dd 100644
--- a/chromium/media/gpu/v4l2/v4l2_video_decoder_backend.h
+++ b/chromium/media/gpu/v4l2/v4l2_video_decoder_backend.h
@@ -72,9 +72,13 @@ class V4L2VideoDecoderBackend {
int32_t bitstream_id) = 0;
// Called by the decoder when it has dequeued a buffer from the CAPTURE queue.
virtual void OnOutputBufferDequeued(V4L2ReadableBufferRef buf) = 0;
- // Called whenever the V4L2 stream is stopped (|Streamoff| called on both
- // |V4L2Queue|s).
- virtual void OnStreamStopped() = 0;
+ // Backend can overload this method if it needs to do specific work when
+ // the device task is called.
+ virtual void OnServiceDeviceTask(bool event) {}
+ // Called whenever the V4L2 stream is stopped (|Streamoff| called on either
+ // the CAPTURE queue alone or on both queues). |input_queue_stopped| is
+ // true if the input queue has been requested to stop.
+ virtual void OnStreamStopped(bool input_queue_stopped) = 0;
// Called when the resolution has been decided, in case the backend needs
// to do something specific beyond applying these parameters to the CAPTURE
// queue.
@@ -88,6 +92,12 @@ class V4L2VideoDecoderBackend {
// with |status| as argument.
virtual void ClearPendingRequests(DecodeStatus status) = 0;
+ // Whether we should stop the input queue when changing resolution. Stateless
+ // decoders require this, but stateful ones need the input queue to keep
+ // running. Although not super elegant, this is required to express that
+ // difference.
+ virtual bool StopInputQueueOnResChange() const = 0;
+
protected:
V4L2VideoDecoderBackend(Client* const client,
scoped_refptr<V4L2Device> device);
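To make the extended backend interface concrete, a hedged sketch of the overrides a subclass provides after this change; the class name is hypothetical and the remaining required overrides of V4L2VideoDecoderBackend (EnqueueDecodeTask(), OnOutputBufferDequeued(), ClearPendingRequests(), ...) are omitted for brevity.
  class MyV4L2DecoderBackend : public V4L2VideoDecoderBackend {
   public:
    // Optional hook: the stateful backend uses it to drain V4L2 events
    // (e.g. source-change events) when the device signals one.
    void OnServiceDeviceTask(bool event) override {}
    // |input_queue_stopped| is true when the input (OUTPUT) queue was stopped
    // in addition to the CAPTURE queue.
    void OnStreamStopped(bool input_queue_stopped) override {}
    // Stateless backends return true; stateful backends return false so the
    // input queue keeps running across a resolution change.
    bool StopInputQueueOnResChange() const override { return false; }
  };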
diff --git a/chromium/media/gpu/v4l2/v4l2_video_decoder_backend_stateful.cc b/chromium/media/gpu/v4l2/v4l2_video_decoder_backend_stateful.cc
new file mode 100644
index 00000000000..417598f893c
--- /dev/null
+++ b/chromium/media/gpu/v4l2/v4l2_video_decoder_backend_stateful.cc
@@ -0,0 +1,608 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/gpu/v4l2/v4l2_video_decoder_backend_stateful.h"
+#include <cstddef>
+
+#include <memory>
+#include <tuple>
+#include <utility>
+
+#include "base/bind.h"
+#include "base/callback_forward.h"
+#include "base/logging.h"
+#include "base/optional.h"
+#include "base/sequence_checker.h"
+#include "base/sequenced_task_runner.h"
+#include "media/base/video_codecs.h"
+#include "media/gpu/chromeos/dmabuf_video_frame_pool.h"
+#include "media/gpu/macros.h"
+#include "media/gpu/v4l2/v4l2_device.h"
+#include "media/gpu/v4l2/v4l2_vda_helpers.h"
+#include "media/gpu/v4l2/v4l2_video_decoder_backend.h"
+
+namespace media {
+
+V4L2StatefulVideoDecoderBackend::DecodeRequest::DecodeRequest(
+ scoped_refptr<DecoderBuffer> buf,
+ VideoDecoder::DecodeCB cb,
+ int32_t id)
+ : buffer(std::move(buf)), decode_cb(std::move(cb)), bitstream_id(id) {}
+
+V4L2StatefulVideoDecoderBackend::DecodeRequest::DecodeRequest(DecodeRequest&&) =
+ default;
+V4L2StatefulVideoDecoderBackend::DecodeRequest&
+V4L2StatefulVideoDecoderBackend::DecodeRequest::operator=(DecodeRequest&&) =
+ default;
+
+V4L2StatefulVideoDecoderBackend::DecodeRequest::~DecodeRequest() = default;
+
+bool V4L2StatefulVideoDecoderBackend::DecodeRequest::IsCompleted() const {
+ return bytes_used == buffer->data_size();
+}
+
+V4L2StatefulVideoDecoderBackend::V4L2StatefulVideoDecoderBackend(
+ Client* const client,
+ scoped_refptr<V4L2Device> device,
+ VideoCodecProfile profile,
+ scoped_refptr<base::SequencedTaskRunner> task_runner)
+ : V4L2VideoDecoderBackend(client, std::move(device)),
+ profile_(profile),
+ task_runner_(task_runner) {
+ DVLOGF(3);
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ weak_this_ = weak_this_factory_.GetWeakPtr();
+}
+
+V4L2StatefulVideoDecoderBackend::~V4L2StatefulVideoDecoderBackend() {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DVLOGF(3);
+
+ if (flush_cb_ || current_decode_request_ || !decode_request_queue_.empty()) {
+ VLOGF(1) << "Should not destroy backend during pending decode!";
+ }
+
+ struct v4l2_event_subscription sub;
+ memset(&sub, 0, sizeof(sub));
+ sub.type = V4L2_EVENT_SOURCE_CHANGE;
+ if (device_->Ioctl(VIDIOC_UNSUBSCRIBE_EVENT, &sub) != 0) {
+ VLOGF(1) << "Cannot unsubscribe to event";
+ }
+}
+
+bool V4L2StatefulVideoDecoderBackend::Initialize() {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DVLOGF(3);
+
+ if (!IsSupportedProfile(profile_)) {
+ VLOGF(1) << "Unsupported profile " << GetProfileName(profile_);
+ return false;
+ }
+
+ frame_splitter_ =
+ v4l2_vda_helpers::InputBufferFragmentSplitter::CreateFromProfile(
+ profile_);
+ if (!frame_splitter_) {
+ VLOGF(1) << "Failed to create frame splitter";
+ return false;
+ }
+
+ struct v4l2_event_subscription sub;
+ memset(&sub, 0, sizeof(sub));
+ sub.type = V4L2_EVENT_SOURCE_CHANGE;
+ if (device_->Ioctl(VIDIOC_SUBSCRIBE_EVENT, &sub) != 0) {
+ VLOGF(1) << "Cannot subscribe to event";
+ return false;
+ }
+
+ return true;
+}
+
+void V4L2StatefulVideoDecoderBackend::EnqueueDecodeTask(
+ scoped_refptr<DecoderBuffer> buffer,
+ VideoDecoder::DecodeCB decode_cb,
+ int32_t bitstream_id) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DVLOGF(3);
+
+ decode_request_queue_.push(
+ DecodeRequest(std::move(buffer), std::move(decode_cb), bitstream_id));
+
+ DoDecodeWork();
+}
+
+void V4L2StatefulVideoDecoderBackend::DoDecodeWork() {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DVLOGF(3);
+
+ // Do not decode if a flush is in progress.
+ // This may actually be ok to do if we are changing resolution?
+ if (flush_cb_)
+ return;
+
+ // Get a new decode request if none is in progress.
+ if (!current_decode_request_) {
+ // No more decode request, nothing to do for now.
+ if (decode_request_queue_.empty())
+ return;
+
+ auto decode_request = std::move(decode_request_queue_.front());
+ decode_request_queue_.pop();
+
+ // Need to flush?
+ if (decode_request.buffer->end_of_stream()) {
+ InitiateFlush(std::move(decode_request.decode_cb));
+ return;
+ }
+
+ // This is our new decode request.
+ current_decode_request_ = std::move(decode_request);
+ DCHECK_EQ(current_decode_request_->bytes_used, 0u);
+ }
+
+ // Get a V4L2 buffer to copy the encoded data into.
+ if (!current_input_buffer_) {
+ current_input_buffer_ = input_queue_->GetFreeBuffer();
+ // We will be called again once an input buffer becomes available.
+ if (!current_input_buffer_)
+ return;
+
+ // Record timestamp of the input buffer so it propagates to the decoded
+ // frames.
+ const struct timespec timespec =
+ current_decode_request_->buffer->timestamp().ToTimeSpec();
+ struct timeval timestamp = {
+ .tv_sec = timespec.tv_sec,
+ .tv_usec = timespec.tv_nsec / 1000,
+ };
+ current_input_buffer_->SetTimeStamp(timestamp);
+ }
+
+ // From here on we have both a decode request and input buffer, so we can
+ // progress with decoding.
+ DCHECK(current_decode_request_.has_value());
+ DCHECK(current_input_buffer_.has_value());
+
+ const DecoderBuffer* current_buffer = current_decode_request_->buffer.get();
+ DCHECK_LT(current_decode_request_->bytes_used, current_buffer->data_size());
+ const uint8_t* const data =
+ current_buffer->data() + current_decode_request_->bytes_used;
+ const size_t data_size =
+ current_buffer->data_size() - current_decode_request_->bytes_used;
+ size_t bytes_to_copy = 0;
+
+ if (!frame_splitter_->AdvanceFrameFragment(data, data_size, &bytes_to_copy)) {
+ VLOGF(1) << "Invalid H.264 stream detected.";
+ std::move(current_decode_request_->decode_cb)
+ .Run(DecodeStatus::DECODE_ERROR);
+ current_decode_request_.reset();
+ current_input_buffer_.reset();
+ client_->OnBackendError();
+ return;
+ }
+
+ const size_t bytes_used = current_input_buffer_->GetPlaneBytesUsed(0);
+ if (bytes_used + bytes_to_copy > current_input_buffer_->GetPlaneSize(0)) {
+ VLOGF(1) << "V4L2 buffer size is too small to contain a whole frame.";
+ std::move(current_decode_request_->decode_cb)
+ .Run(DecodeStatus::DECODE_ERROR);
+ current_decode_request_.reset();
+ current_input_buffer_.reset();
+ client_->OnBackendError();
+ return;
+ }
+
+ uint8_t* dst =
+ static_cast<uint8_t*>(current_input_buffer_->GetPlaneMapping(0)) +
+ bytes_used;
+ memcpy(dst, data, bytes_to_copy);
+ current_input_buffer_->SetPlaneBytesUsed(0, bytes_used + bytes_to_copy);
+ current_decode_request_->bytes_used += bytes_to_copy;
+
+ // Release current_input_request_ if we reached its end.
+ if (current_decode_request_->IsCompleted()) {
+ std::move(current_decode_request_->decode_cb).Run(DecodeStatus::OK);
+ current_decode_request_.reset();
+ }
+
+ // If we have a partial frame, wait before submitting it.
+ if (frame_splitter_->IsPartialFramePending()) {
+ VLOGF(4) << "Partial frame pending, not queueing any buffer now.";
+ return;
+ }
+
+ // The V4L2 input buffer contains a decodable entity, queue it.
+ std::move(*current_input_buffer_).QueueMMap();
+ current_input_buffer_.reset();
+
+ // If we can still progress on a decode request, do it.
+ if (current_decode_request_ || !decode_request_queue_.empty())
+ ScheduleDecodeWork();
+}
+
+void V4L2StatefulVideoDecoderBackend::ScheduleDecodeWork() {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DVLOGF(3);
+
+ task_runner_->PostTask(
+ FROM_HERE, base::BindOnce(&V4L2StatefulVideoDecoderBackend::DoDecodeWork,
+ weak_this_));
+}
+
+void V4L2StatefulVideoDecoderBackend::OnServiceDeviceTask(bool event) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DVLOGF(3);
+
+ if (event) {
+ while (base::Optional<struct v4l2_event> ev = device_->DequeueEvent()) {
+ if (ev->type == V4L2_EVENT_SOURCE_CHANGE &&
+ (ev->u.src_change.changes & V4L2_EVENT_SRC_CH_RESOLUTION)) {
+ ChangeResolution();
+ }
+ }
+ }
+
+ // We can enqueue dequeued output buffers immediately.
+ EnqueueOutputBuffers();
+
+ // Try to progress on our work since we may have dequeued input buffers.
+ DoDecodeWork();
+}
+
+void V4L2StatefulVideoDecoderBackend::EnqueueOutputBuffers() {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DVLOGF(3);
+ const v4l2_memory mem_type = output_queue_->GetMemoryType();
+
+ while (base::Optional<V4L2WritableBufferRef> buffer =
+ output_queue_->GetFreeBuffer()) {
+ bool ret = false;
+
+ switch (mem_type) {
+ case V4L2_MEMORY_MMAP:
+ ret = std::move(*buffer).QueueMMap();
+ break;
+ case V4L2_MEMORY_DMABUF: {
+ scoped_refptr<VideoFrame> video_frame = GetPoolVideoFrame();
+ // Running out of frame is not an error, we will be called again
+ // once frames are available.
+ if (!video_frame)
+ return;
+ ret = std::move(*buffer).QueueDMABuf(std::move(video_frame));
+ break;
+ }
+ default:
+ NOTREACHED();
+ }
+
+ if (!ret)
+ client_->OnBackendError();
+ }
+
+ DVLOGF(3) << output_queue_->QueuedBuffersCount() << "/"
+ << output_queue_->AllocatedBuffersCount()
+ << " output buffers queued";
+}
+
+scoped_refptr<VideoFrame> V4L2StatefulVideoDecoderBackend::GetPoolVideoFrame() {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DVLOGF(3);
+ DmabufVideoFramePool* pool = client_->GetVideoFramePool();
+ DCHECK_EQ(output_queue_->GetMemoryType(), V4L2_MEMORY_DMABUF);
+ DCHECK_NE(pool, nullptr);
+
+ scoped_refptr<VideoFrame> frame = pool->GetFrame();
+ if (!frame) {
+ DVLOGF(3) << "No available videoframe for now";
+ // We will try again once a frame becomes available.
+ pool->NotifyWhenFrameAvailable(base::BindOnce(
+ base::IgnoreResult(&base::SequencedTaskRunner::PostTask), task_runner_,
+ FROM_HERE,
+ base::BindOnce(
+ base::IgnoreResult(
+ &V4L2StatefulVideoDecoderBackend::EnqueueOutputBuffers),
+ weak_this_)));
+ }
+
+ return frame;
+}
+
+// static
+void V4L2StatefulVideoDecoderBackend::ReuseOutputBufferThunk(
+ scoped_refptr<base::SequencedTaskRunner> task_runner,
+ base::Optional<base::WeakPtr<V4L2StatefulVideoDecoderBackend>> weak_this,
+ V4L2ReadableBufferRef buffer) {
+ DVLOGF(3);
+ DCHECK(weak_this);
+
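+  // Make sure ReuseOutputBuffer() runs on the backend's sequence, posting a
+  // task if we are called from another one.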
+ if (task_runner->RunsTasksInCurrentSequence()) {
+ if (*weak_this)
+ (*weak_this)->ReuseOutputBuffer(std::move(buffer));
+ } else {
+ task_runner->PostTask(
+ FROM_HERE,
+ base::BindOnce(&V4L2StatefulVideoDecoderBackend::ReuseOutputBuffer,
+ *weak_this, std::move(buffer)));
+ }
+}
+
+void V4L2StatefulVideoDecoderBackend::ReuseOutputBuffer(
+ V4L2ReadableBufferRef buffer) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DVLOGF(3) << "Reuse output buffer #" << buffer->BufferId();
+
+ // Lose reference to the buffer so it goes back to the free list.
+ buffer.reset();
+
+ // Enqueue the newly available buffer.
+ EnqueueOutputBuffers();
+}
+
+void V4L2StatefulVideoDecoderBackend::OnOutputBufferDequeued(
+ V4L2ReadableBufferRef buffer) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DVLOGF(3);
+
+  // Zero-byte buffers are returned as part of a flush and can be dismissed.
+ if (buffer->GetPlaneBytesUsed(0) > 0) {
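+    // The driver carries the timestamp of the input (OUTPUT) buffer over to
+    // the CAPTURE buffer it decoded into; convert it back into the output
+    // frame's timestamp.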
+ const struct timeval timeval = buffer->GetTimeStamp();
+ const struct timespec timespec = {
+ .tv_sec = timeval.tv_sec,
+ .tv_nsec = timeval.tv_usec * 1000,
+ };
+ const base::TimeDelta timestamp = base::TimeDelta::FromTimeSpec(timespec);
+
+ scoped_refptr<VideoFrame> frame;
+
+ switch (output_queue_->GetMemoryType()) {
+ case V4L2_MEMORY_MMAP: {
+        // Wrap the video frame into another one so we get signaled when the
+        // consumer is done with it and can reuse the V4L2 buffer.
+ scoped_refptr<VideoFrame> origin_frame = buffer->GetVideoFrame();
+ frame = VideoFrame::WrapVideoFrame(origin_frame, origin_frame->format(),
+ origin_frame->visible_rect(),
+ origin_frame->natural_size());
+ frame->AddDestructionObserver(base::BindOnce(
+ &V4L2StatefulVideoDecoderBackend::ReuseOutputBufferThunk,
+ task_runner_, weak_this_, buffer));
+ break;
+ }
+ case V4L2_MEMORY_DMABUF:
+        // The pool VideoFrame we passed to QueueDMABuf() has been decoded
+        // into; pass it on as-is.
+ frame = buffer->GetVideoFrame();
+ break;
+ default:
+ NOTREACHED();
+ }
+
+ client_->OutputFrame(std::move(frame), *visible_rect_, timestamp);
+ }
+
+  // We were waiting for the last buffer before a resolution change.
+  // The order here is important! A flush event may come after a resolution
+  // change event (but not the other way around), so we must make sure both
+  // events are processed in the correct order.
+ if (buffer->IsLast() && resolution_change_cb_) {
+ std::move(resolution_change_cb_).Run();
+ } else if (buffer->IsLast() && flush_cb_) {
+ // We were waiting for a flush to complete, and received the last buffer.
+ CompleteFlush();
+ }
+
+ EnqueueOutputBuffers();
+}
+
+bool V4L2StatefulVideoDecoderBackend::InitiateFlush(
+ VideoDecoder::DecodeCB flush_cb) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DVLOGF(3);
+ DCHECK(!flush_cb_);
+
+ // Submit any pending input buffer at the time of flush.
+ if (current_input_buffer_) {
+ std::move(*current_input_buffer_).QueueMMap();
+ current_input_buffer_.reset();
+ }
+
+ client_->InitiateFlush();
+ flush_cb_ = std::move(flush_cb);
+
+ // Special case: if our CAPTURE queue is not streaming, we cannot receive
+ // the CAPTURE buffer with the LAST flag set that signals the end of flush.
+ // In this case, we should complete the flush immediately.
+ if (!output_queue_->IsStreaming())
+ return CompleteFlush();
+
+ // Send the STOP command to the V4L2 device. The device will let us know
+ // that the flush is completed by sending us a CAPTURE buffer with the LAST
+ // flag set.
+ struct v4l2_decoder_cmd cmd;
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.cmd = V4L2_DEC_CMD_STOP;
+ if (device_->Ioctl(VIDIOC_DECODER_CMD, &cmd) != 0) {
+ LOG(ERROR) << "Failed to issue STOP command";
+ client_->OnBackendError();
+ return false;
+ }
+
+ return true;
+}
+
+bool V4L2StatefulVideoDecoderBackend::CompleteFlush() {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DVLOGF(3);
+ DCHECK(flush_cb_);
+
+ // Signal that flush has properly been completed.
+ std::move(flush_cb_).Run(DecodeStatus::OK);
+
+ // If CAPTURE queue is streaming, send the START command to the V4L2 device
+ // to signal that we are resuming decoding with the same state.
+ if (output_queue_->IsStreaming()) {
+ struct v4l2_decoder_cmd cmd;
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.cmd = V4L2_DEC_CMD_START;
+ if (device_->Ioctl(VIDIOC_DECODER_CMD, &cmd) != 0) {
+ LOG(ERROR) << "Failed to issue START command";
+ std::move(flush_cb_).Run(DecodeStatus::DECODE_ERROR);
+ client_->OnBackendError();
+ return false;
+ }
+ }
+
+ client_->CompleteFlush();
+
+ // Resume decoding if data is available.
+ ScheduleDecodeWork();
+
+ return true;
+}
+
+void V4L2StatefulVideoDecoderBackend::OnStreamStopped(bool stop_input_queue) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DVLOGF(3);
+
+ // If we are resetting, also reset the splitter.
+ if (stop_input_queue)
+ frame_splitter_->Reset();
+}
+
+void V4L2StatefulVideoDecoderBackend::ChangeResolution() {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DVLOGF(3);
+
+ // Here we just query the new resolution, visible rect, and number of output
+ // buffers before asking the client to update the resolution.
+
+ auto format = output_queue_->GetFormat().first;
+ if (!format) {
+ client_->OnBackendError();
+ return;
+ }
+ const gfx::Size pic_size(format->fmt.pix_mp.width, format->fmt.pix_mp.height);
+
+ auto visible_rect = output_queue_->GetVisibleRect();
+ if (!visible_rect) {
+ client_->OnBackendError();
+ return;
+ }
+
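+  // Ask the driver for the minimum number of CAPTURE buffers it requires,
+  // falling back to a default if the control is not supported.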
+ auto ctrl = device_->GetCtrl(V4L2_CID_MIN_BUFFERS_FOR_CAPTURE);
+ constexpr size_t DEFAULT_NUM_OUTPUT_BUFFERS = 12;
+ const size_t num_output_buffers =
+ ctrl ? ctrl->value : DEFAULT_NUM_OUTPUT_BUFFERS;
+ if (!ctrl)
+ VLOGF(1) << "Using default minimum number of CAPTURE buffers";
+
+ // Signal that we are flushing and initiate the resolution change.
+ // Our flush will be done when we receive a buffer with the LAST flag on the
+ // CAPTURE queue.
+ client_->InitiateFlush();
+ DCHECK(!resolution_change_cb_);
+ resolution_change_cb_ =
+ base::BindOnce(&V4L2StatefulVideoDecoderBackend::ContinueChangeResolution,
+ weak_this_, pic_size, *visible_rect, num_output_buffers);
+
+ // ...that is, unless we are not streaming yet, in which case the resolution
+ // change can take place immediately.
+ if (!output_queue_->IsStreaming())
+ std::move(resolution_change_cb_).Run();
+}
+
+void V4L2StatefulVideoDecoderBackend::ContinueChangeResolution(
+ const gfx::Size& pic_size,
+ const gfx::Rect& visible_rect,
+ const size_t num_output_buffers) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DVLOGF(3);
+
+ // Flush is done, but stay in flushing state and ask our client to set the new
+ // resolution.
+ client_->ChangeResolution(pic_size, visible_rect, num_output_buffers);
+}
+
+bool V4L2StatefulVideoDecoderBackend::ApplyResolution(
+ const gfx::Size& pic_size,
+ const gfx::Rect& visible_rect,
+ const size_t num_output_frames) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DVLOGF(3);
+
+ // Use the visible rect for all new frames.
+ visible_rect_ = visible_rect;
+
+ return true;
+}
+
+void V4L2StatefulVideoDecoderBackend::OnChangeResolutionDone(bool success) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DVLOGF(3);
+
+ if (!success) {
+ client_->OnBackendError();
+ return;
+ }
+
+ // Flush can be considered completed on the client side.
+ client_->CompleteFlush();
+
+ // Enqueue all available output buffers now that they are allocated.
+ EnqueueOutputBuffers();
+
+ // Also try to progress on our work.
+ DoDecodeWork();
+}
+
+void V4L2StatefulVideoDecoderBackend::ClearPendingRequests(
+ DecodeStatus status) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DVLOGF(3);
+
+ resolution_change_cb_.Reset();
+
+ if (flush_cb_) {
+ std::move(flush_cb_).Run(status);
+ }
+
+ current_input_buffer_.reset();
+
+ if (current_decode_request_) {
+ std::move(current_decode_request_->decode_cb).Run(status);
+ current_decode_request_.reset();
+ }
+
+ while (!decode_request_queue_.empty()) {
+ std::move(decode_request_queue_.front().decode_cb).Run(status);
+ decode_request_queue_.pop();
+ }
+}
+
+// TODO(b:149663704) move into helper function shared between both backends?
+bool V4L2StatefulVideoDecoderBackend::IsSupportedProfile(
+ VideoCodecProfile profile) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DCHECK(device_);
+ if (supported_profiles_.empty()) {
+ constexpr uint32_t kSupportedInputFourccs[] = {
+ V4L2_PIX_FMT_H264,
+ V4L2_PIX_FMT_VP8,
+ V4L2_PIX_FMT_VP9,
+ };
+ scoped_refptr<V4L2Device> device = V4L2Device::Create();
+ VideoDecodeAccelerator::SupportedProfiles profiles =
+ device->GetSupportedDecodeProfiles(base::size(kSupportedInputFourccs),
+ kSupportedInputFourccs);
+ for (const auto& profile : profiles)
+ supported_profiles_.push_back(profile.profile);
+ }
+ return std::find(supported_profiles_.begin(), supported_profiles_.end(),
+ profile) != supported_profiles_.end();
+}
+
+bool V4L2StatefulVideoDecoderBackend::StopInputQueueOnResChange() const {
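+  // With the stateful API, only the CAPTURE queue needs to be reconfigured on
+  // a resolution change; the OUTPUT (bitstream) queue can keep streaming.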
+ return false;
+}
+
+} // namespace media
diff --git a/chromium/media/gpu/v4l2/v4l2_video_decoder_backend_stateful.h b/chromium/media/gpu/v4l2/v4l2_video_decoder_backend_stateful.h
new file mode 100644
index 00000000000..62d6d715f4b
--- /dev/null
+++ b/chromium/media/gpu/v4l2/v4l2_video_decoder_backend_stateful.h
@@ -0,0 +1,151 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_GPU_V4L2_V4L2_VIDEO_DECODER_BACKEND_STATEFUL_H_
+#define MEDIA_GPU_V4L2_V4L2_VIDEO_DECODER_BACKEND_STATEFUL_H_
+
+#include <memory>
+#include <vector>
+
+#include "base/containers/queue.h"
+#include "base/macros.h"
+#include "base/optional.h"
+#include "base/sequenced_task_runner.h"
+#include "media/base/video_codecs.h"
+#include "media/gpu/v4l2/v4l2_device.h"
+#include "media/gpu/v4l2/v4l2_video_decoder_backend.h"
+
+namespace media {
+
+namespace v4l2_vda_helpers {
+class InputBufferFragmentSplitter;
+}
+
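+// Decoder backend for V4L2 stateful decoders: the driver parses the bitstream
+// itself, so this backend only needs to split the input into decodable chunks
+// and manage the OUTPUT (bitstream) and CAPTURE (frame) queues.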
+class V4L2StatefulVideoDecoderBackend : public V4L2VideoDecoderBackend {
+ public:
+ V4L2StatefulVideoDecoderBackend(
+ Client* const client,
+ scoped_refptr<V4L2Device> device,
+ VideoCodecProfile profile,
+ scoped_refptr<base::SequencedTaskRunner> task_runner);
+ ~V4L2StatefulVideoDecoderBackend() override;
+
+ // We don't ever want to copy or move this.
+ V4L2StatefulVideoDecoderBackend(const V4L2StatefulVideoDecoderBackend&) =
+ delete;
+ V4L2StatefulVideoDecoderBackend& operator=(
+ const V4L2StatefulVideoDecoderBackend&) = delete;
+
+ // V4L2VideoDecoderBackend implementation
+ bool Initialize() override;
+ void EnqueueDecodeTask(scoped_refptr<DecoderBuffer> buffer,
+ VideoDecoder::DecodeCB decode_cb,
+ int32_t bitstream_id) override;
+ void OnOutputBufferDequeued(V4L2ReadableBufferRef buffer) override;
+ void OnServiceDeviceTask(bool event) override;
+ void OnStreamStopped(bool stop_input_queue) override;
+ bool ApplyResolution(const gfx::Size& pic_size,
+ const gfx::Rect& visible_rect,
+ const size_t num_output_frames) override;
+ void OnChangeResolutionDone(bool success) override;
+ void ClearPendingRequests(DecodeStatus status) override;
+ bool StopInputQueueOnResChange() const override;
+
+ private:
+ // TODO(b:149663704): merge with stateless?
+  // A request to decode a buffer. Every EnqueueDecodeTask() call generates
+  // one DecodeRequest.
+ struct DecodeRequest {
+ // The decode buffer passed to EnqueueDecodeTask().
+ scoped_refptr<DecoderBuffer> buffer;
+ // Number of bytes used so far from |buffer|.
+ size_t bytes_used = 0;
+ // The callback function passed to EnqueueDecodeTask().
+ VideoDecoder::DecodeCB decode_cb;
+ // Identifier for the decoder buffer.
+ int32_t bitstream_id;
+
+ DecodeRequest(scoped_refptr<DecoderBuffer> buf,
+ VideoDecoder::DecodeCB cb,
+ int32_t id);
+
+    // Allow move, but not copy.
+ DecodeRequest(DecodeRequest&&);
+ DecodeRequest& operator=(DecodeRequest&&);
+
+ ~DecodeRequest();
+
+ bool IsCompleted() const;
+
+ DISALLOW_COPY_AND_ASSIGN(DecodeRequest);
+ };
+
+ bool IsSupportedProfile(VideoCodecProfile profile);
+
+ void DoDecodeWork();
+
+ static void ReuseOutputBufferThunk(
+ scoped_refptr<base::SequencedTaskRunner> task_runner,
+ base::Optional<base::WeakPtr<V4L2StatefulVideoDecoderBackend>> weak_this,
+ V4L2ReadableBufferRef buffer);
+ void ReuseOutputBuffer(V4L2ReadableBufferRef buffer);
+
+ // Called when the format has changed, in order to reallocate the output
+ // buffers according to the new format.
+ void ChangeResolution();
+ // Called when the flush triggered by a resolution change has completed,
+ // to actually apply the resolution.
+ void ContinueChangeResolution(const gfx::Size& pic_size,
+ const gfx::Rect& visible_rect,
+ const size_t num_output_buffers);
+
+ // Enqueue all output buffers that are available.
+ void EnqueueOutputBuffers();
+ // When a video frame pool is in use, obtain a frame from the pool or, if
+ // none is available, schedule |EnqueueOutputBuffers()| to be called when one
+ // becomes available.
+ scoped_refptr<VideoFrame> GetPoolVideoFrame();
+
+ bool InitiateFlush(VideoDecoder::DecodeCB flush_cb);
+ bool CompleteFlush();
+
+ void ScheduleDecodeWork();
+
+ // Video profile we are decoding.
+ VideoCodecProfile profile_;
+
+ // The task runner we are running on, for convenience.
+ const scoped_refptr<base::SequencedTaskRunner> task_runner_;
+
+  // VideoCodecProfiles supported by a V4L2 stateful decoder driver.
+ std::vector<VideoCodecProfile> supported_profiles_;
+
+  // Queue of pending decode requests.
+ base::queue<DecodeRequest> decode_request_queue_;
+
+  // The decode request currently being processed.
+ base::Optional<DecodeRequest> current_decode_request_;
+ // V4L2 input buffer currently being prepared.
+ base::Optional<V4L2WritableBufferRef> current_input_buffer_;
+
+ std::unique_ptr<v4l2_vda_helpers::InputBufferFragmentSplitter>
+ frame_splitter_;
+
+ base::Optional<gfx::Rect> visible_rect_;
+
+ // Callback of the buffer that triggered a flush, to be called when the
+ // flush completes.
+ VideoDecoder::DecodeCB flush_cb_;
+ // Closure that will be called once the flush triggered by a resolution change
+ // event completes.
+ base::OnceClosure resolution_change_cb_;
+
+ base::WeakPtr<V4L2StatefulVideoDecoderBackend> weak_this_;
+ base::WeakPtrFactory<V4L2StatefulVideoDecoderBackend> weak_this_factory_{
+ this};
+};
+
+} // namespace media
+
+#endif // MEDIA_GPU_V4L2_V4L2_VIDEO_DECODER_BACKEND_STATEFUL_H_
diff --git a/chromium/media/gpu/v4l2/v4l2_video_decoder_backend_stateless.cc b/chromium/media/gpu/v4l2/v4l2_video_decoder_backend_stateless.cc
index b8c3400a990..b03846c0784 100644
--- a/chromium/media/gpu/v4l2/v4l2_video_decoder_backend_stateless.cc
+++ b/chromium/media/gpu/v4l2/v4l2_video_decoder_backend_stateless.cc
@@ -559,7 +559,7 @@ void V4L2StatelessVideoDecoderBackend::OnChangeResolutionDone(bool success) {
weak_this_));
}
-void V4L2StatelessVideoDecoderBackend::OnStreamStopped() {
+void V4L2StatelessVideoDecoderBackend::OnStreamStopped(bool stop_input_queue) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
DVLOGF(3);
@@ -603,6 +603,10 @@ void V4L2StatelessVideoDecoderBackend::ClearPendingRequests(
}
}
+bool V4L2StatelessVideoDecoderBackend::StopInputQueueOnResChange() const {
+  return true;
+}
+
bool V4L2StatelessVideoDecoderBackend::IsSupportedProfile(
VideoCodecProfile profile) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
diff --git a/chromium/media/gpu/v4l2/v4l2_video_decoder_backend_stateless.h b/chromium/media/gpu/v4l2/v4l2_video_decoder_backend_stateless.h
index 0dfa817309d..704d6171f7f 100644
--- a/chromium/media/gpu/v4l2/v4l2_video_decoder_backend_stateless.h
+++ b/chromium/media/gpu/v4l2/v4l2_video_decoder_backend_stateless.h
@@ -43,12 +43,13 @@ class V4L2StatelessVideoDecoderBackend : public V4L2VideoDecoderBackend,
VideoDecoder::DecodeCB decode_cb,
int32_t bitstream_id) override;
void OnOutputBufferDequeued(V4L2ReadableBufferRef buffer) override;
- void OnStreamStopped() override;
+ void OnStreamStopped(bool stop_input_queue) override;
bool ApplyResolution(const gfx::Size& pic_size,
const gfx::Rect& visible_rect,
const size_t num_output_frames) override;
void OnChangeResolutionDone(bool success) override;
void ClearPendingRequests(DecodeStatus status) override;
+ bool StopInputQueueOnResChange() const override;
// V4L2DecodeSurfaceHandler implementation.
scoped_refptr<V4L2DecodeSurface> CreateSurface() override;
diff --git a/chromium/media/gpu/v4l2/v4l2_video_encode_accelerator.cc b/chromium/media/gpu/v4l2/v4l2_video_encode_accelerator.cc
index 8c7ea443927..97ef7e2a648 100644
--- a/chromium/media/gpu/v4l2/v4l2_video_encode_accelerator.cc
+++ b/chromium/media/gpu/v4l2/v4l2_video_encode_accelerator.cc
@@ -381,7 +381,6 @@ bool V4L2VideoEncodeAccelerator::CreateImageProcessor(
const gfx::Rect& output_visible_rect) {
VLOGF(2);
DCHECK_CALLED_ON_VALID_SEQUENCE(encoder_sequence_checker_);
- DCHECK_NE(input_layout.format(), output_format);
auto ip_input_layout = AsMultiPlanarLayout(input_layout);
if (!ip_input_layout) {
@@ -432,7 +431,7 @@ bool V4L2VideoEncodeAccelerator::CreateImageProcessor(
image_processor_ = ImageProcessorFactory::Create(
*input_config, *output_config, {ImageProcessor::OutputMode::IMPORT},
- kImageProcBufferCount, encoder_task_runner_,
+ kImageProcBufferCount, VIDEO_ROTATION_0, encoder_task_runner_,
base::BindRepeating(&V4L2VideoEncodeAccelerator::ImageProcessorError,
weak_this_));
if (!image_processor_) {
@@ -750,6 +749,16 @@ void V4L2VideoEncodeAccelerator::EncodeTask(scoped_refptr<VideoFrame> frame,
return;
if (image_processor_) {
+    if (!frame) {
+      DCHECK(!flush_callback_.is_null());
+      NOTREACHED()
+          << "Flushing is not supported when using an image processor and "
+             "this situation should not happen for well-behaved clients.";
+      NOTIFY_ERROR(kIllegalStateError);
+      child_task_runner_->PostTask(
+          FROM_HERE, base::BindOnce(std::move(flush_callback_), false));
+      return;
+    }
image_processor_input_queue_.emplace(std::move(frame), force_keyframe);
InputImageProcessorTask();
} else {
@@ -779,7 +788,7 @@ bool V4L2VideoEncodeAccelerator::ReconfigureFormatIfNeeded(
VLOGF(1) << "Encoder resolution is changed during encoding"
<< ", frame.natural_size()=" << frame.natural_size().ToString()
<< ", encoder_input_visible_rect_="
- << input_frame_size_.ToString();
+ << encoder_input_visible_rect_.ToString();
return false;
}
if (frame.coded_size() == input_frame_size_) {