author     Allan Sandfeld Jensen <allan.jensen@qt.io>  2020-10-12 14:27:29 +0200
committer  Allan Sandfeld Jensen <allan.jensen@qt.io>  2020-10-13 09:35:20 +0000
commit    c30a6232df03e1efbd9f3b226777b07e087a1122 (patch)
tree      e992f45784689f373bcc38d1b79a239ebe17ee23 /chromium/components/viz/service/display_embedder
parent    7b5b123ac58f58ffde0f4f6e488bcd09aa4decd3 (diff)
download  qtwebengine-chromium-85-based.tar.gz
BASELINE: Update Chromium to 85.0.4183.140 (85-based)
Change-Id: Iaa42f4680837c57725b1344f108c0196741f6057
Reviewed-by: Allan Sandfeld Jensen <allan.jensen@qt.io>
Diffstat (limited to 'chromium/components/viz/service/display_embedder')
-rw-r--r--  chromium/components/viz/service/display_embedder/buffer_queue.cc | 15
-rw-r--r--  chromium/components/viz/service/display_embedder/buffer_queue_unittest.cc | 5
-rw-r--r--  chromium/components/viz/service/display_embedder/gl_output_surface_buffer_queue.cc | 10
-rw-r--r--  chromium/components/viz/service/display_embedder/output_presenter.cc | 115
-rw-r--r--  chromium/components/viz/service/display_embedder/output_presenter.h | 117
-rw-r--r--  chromium/components/viz/service/display_embedder/output_presenter_fuchsia.cc | 490
-rw-r--r--  chromium/components/viz/service/display_embedder/output_presenter_fuchsia.h | 115
-rw-r--r--  chromium/components/viz/service/display_embedder/output_presenter_gl.cc | 408
-rw-r--r--  chromium/components/viz/service/display_embedder/output_presenter_gl.h | 78
-rw-r--r--  chromium/components/viz/service/display_embedder/output_surface_provider_impl.cc | 46
-rw-r--r--  chromium/components/viz/service/display_embedder/skia_output_device.cc | 80
-rw-r--r--  chromium/components/viz/service/display_embedder/skia_output_device.h | 31
-rw-r--r--  chromium/components/viz/service/display_embedder/skia_output_device_buffer_queue.cc | 519
-rw-r--r--  chromium/components/viz/service/display_embedder/skia_output_device_buffer_queue.h | 73
-rw-r--r--  chromium/components/viz/service/display_embedder/skia_output_device_buffer_queue_unittest.cc | 22
-rw-r--r--  chromium/components/viz/service/display_embedder/skia_output_device_dawn.cc | 28
-rw-r--r--  chromium/components/viz/service/display_embedder/skia_output_device_dawn.h | 12
-rw-r--r--  chromium/components/viz/service/display_embedder/skia_output_device_gl.cc | 33
-rw-r--r--  chromium/components/viz/service/display_embedder/skia_output_device_gl.h | 9
-rw-r--r--  chromium/components/viz/service/display_embedder/skia_output_device_offscreen.cc | 2
-rw-r--r--  chromium/components/viz/service/display_embedder/skia_output_device_vulkan.cc | 77
-rw-r--r--  chromium/components/viz/service/display_embedder/skia_output_device_vulkan.h | 12
-rw-r--r--  chromium/components/viz/service/display_embedder/skia_output_device_webview.cc | 5
-rw-r--r--  chromium/components/viz/service/display_embedder/skia_output_device_x11.cc | 29
-rw-r--r--  chromium/components/viz/service/display_embedder/skia_output_surface_dependency.h | 4
-rw-r--r--  chromium/components/viz/service/display_embedder/skia_output_surface_impl.cc | 108
-rw-r--r--  chromium/components/viz/service/display_embedder/skia_output_surface_impl.h | 3
-rw-r--r--  chromium/components/viz/service/display_embedder/skia_output_surface_impl_on_gpu.cc | 364
-rw-r--r--  chromium/components/viz/service/display_embedder/skia_output_surface_impl_on_gpu.h | 31
-rw-r--r--  chromium/components/viz/service/display_embedder/skia_output_surface_impl_unittest.cc | 1
30 files changed, 2027 insertions, 815 deletions
diff --git a/chromium/components/viz/service/display_embedder/buffer_queue.cc b/chromium/components/viz/service/display_embedder/buffer_queue.cc
index 85241086b23..ddec6ab1dae 100644
--- a/chromium/components/viz/service/display_embedder/buffer_queue.cc
+++ b/chromium/components/viz/service/display_embedder/buffer_queue.cc
@@ -7,6 +7,7 @@
#include <utility>
#include "base/containers/adapters.h"
+#include "base/logging.h"
#include "build/build_config.h"
#include "components/viz/common/resources/resource_format_utils.h"
#include "gpu/command_buffer/client/gpu_memory_buffer_manager.h"
@@ -52,8 +53,18 @@ void BufferQueue::UpdateBufferDamage(const gfx::Rect& damage) {
}
gfx::Rect BufferQueue::CurrentBufferDamage() const {
- DCHECK(current_surface_);
- return current_surface_->damage;
+ if (current_surface_)
+ return current_surface_->damage;
+
+ // If there is no current_surface_, we get the damage from the surface that
+ // will be set as current_surface_ by the next call to GetNextSurface.
+ if (!available_surfaces_.empty()) {
+ return available_surfaces_.back()->damage;
+ }
+
+ // If we can't determine which surface will be the next current_surface_, we
+ // conservatively invalidate the whole buffer.
+ return gfx::Rect(size_);
}
void BufferQueue::SwapBuffers(const gfx::Rect& damage) {
diff --git a/chromium/components/viz/service/display_embedder/buffer_queue_unittest.cc b/chromium/components/viz/service/display_embedder/buffer_queue_unittest.cc
index 9c921dab300..4cb71e78984 100644
--- a/chromium/components/viz/service/display_embedder/buffer_queue_unittest.cc
+++ b/chromium/components/viz/service/display_embedder/buffer_queue_unittest.cc
@@ -92,7 +92,7 @@ class StubGpuMemoryBufferManager : public TestGpuMemoryBufferManager {
gfx::BufferFormat format,
gfx::BufferUsage usage,
gpu::SurfaceHandle surface_handle) override {
- if (!surface_handle) {
+ if (surface_handle == gpu::kNullSurfaceHandle) {
return TestGpuMemoryBufferManager::CreateGpuMemoryBuffer(
size, format, usage, surface_handle);
}
@@ -110,6 +110,9 @@ class StubGpuMemoryBufferManager : public TestGpuMemoryBufferManager {
#if defined(OS_WIN)
const gpu::SurfaceHandle kFakeSurfaceHandle =
reinterpret_cast<gpu::SurfaceHandle>(1);
+#elif defined(USE_X11)
+const gpu::SurfaceHandle kFakeSurfaceHandle =
+ static_cast<gpu::SurfaceHandle>(1);
#else
const gpu::SurfaceHandle kFakeSurfaceHandle = 1;
#endif
diff --git a/chromium/components/viz/service/display_embedder/gl_output_surface_buffer_queue.cc b/chromium/components/viz/service/display_embedder/gl_output_surface_buffer_queue.cc
index cde84eac27f..dfbcf050e87 100644
--- a/chromium/components/viz/service/display_embedder/gl_output_surface_buffer_queue.cc
+++ b/chromium/components/viz/service/display_embedder/gl_output_surface_buffer_queue.cc
@@ -8,6 +8,8 @@
#include "base/bind.h"
#include "base/command_line.h"
+#include "base/logging.h"
+#include "build/build_config.h"
#include "components/viz/common/frame_sinks/begin_frame_source.h"
#include "components/viz/common/gpu/context_provider.h"
#include "components/viz/common/switches.h"
@@ -40,7 +42,13 @@ GLOutputSurfaceBufferQueue::GLOutputSurfaceBufferQueue(
// shifts the start of the new frame forward relative to the old
// implementation.
capabilities_.max_frames_pending = 2;
-
+ // GetCurrentFramebufferDamage will return an upper bound of the part of the
+ // buffer that needs to be recomposited.
+#if defined(OS_MACOSX)
+ capabilities_.supports_target_damage = false;
+#else
+ capabilities_.supports_target_damage = true;
+#endif
// Force the number of max pending frames to one when the switch
// "double-buffer-compositing" is passed.
// This will keep compositing in double buffered mode assuming |buffer_queue_|
diff --git a/chromium/components/viz/service/display_embedder/output_presenter.cc b/chromium/components/viz/service/display_embedder/output_presenter.cc
new file mode 100644
index 00000000000..240c7d442a8
--- /dev/null
+++ b/chromium/components/viz/service/display_embedder/output_presenter.cc
@@ -0,0 +1,115 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/viz/service/display_embedder/output_presenter.h"
+
+#include "components/viz/service/display_embedder/skia_output_surface_dependency.h"
+#include "gpu/command_buffer/service/shared_context_state.h"
+#include "third_party/skia/include/gpu/GrBackendSemaphore.h"
+#include "third_party/skia/include/gpu/GrContext.h"
+
+namespace viz {
+
+OutputPresenter::Image::Image() = default;
+
+OutputPresenter::Image::~Image() {
+ // TODO(vasilyt): As we are going to delete the image anyway, we should be
+ // able to abort the write to avoid an unnecessary flush to submit semaphores.
+ if (scoped_skia_write_access_) {
+ EndWriteSkia();
+ }
+ DCHECK(!scoped_skia_write_access_);
+}
+
+bool OutputPresenter::Image::Initialize(
+ gpu::SharedImageFactory* factory,
+ gpu::SharedImageRepresentationFactory* representation_factory,
+ const gpu::Mailbox& mailbox,
+ SkiaOutputSurfaceDependency* deps) {
+ skia_representation_ = representation_factory->ProduceSkia(
+ mailbox, deps->GetSharedContextState());
+ if (!skia_representation_) {
+ DLOG(ERROR) << "ProduceSkia() failed.";
+ return false;
+ }
+
+ // Initialize |shared_image_deleter_| to make sure the shared image backing
+ // will be released with the Image.
+ shared_image_deleter_.ReplaceClosure(base::BindOnce(
+ base::IgnoreResult(&gpu::SharedImageFactory::DestroySharedImage),
+ base::Unretained(factory), mailbox));
+
+ return true;
+}
+
+void OutputPresenter::Image::BeginWriteSkia() {
+ DCHECK(!scoped_skia_write_access_);
+ DCHECK(!present_count());
+ DCHECK(end_semaphores_.empty());
+
+ std::vector<GrBackendSemaphore> begin_semaphores;
+ // LegacyFontHost will get LCD text and skia figures out what type to use.
+ SkSurfaceProps surface_props(0 /* flags */,
+ SkSurfaceProps::kLegacyFontHost_InitType);
+
+ // Buffer queue is internal to GPU proc and handles texture initialization,
+ // so allow uncleared access.
+ // TODO(vasilyt): Props and MSAA
+ scoped_skia_write_access_ = skia_representation_->BeginScopedWriteAccess(
+ 0 /* final_msaa_count */, surface_props, &begin_semaphores,
+ &end_semaphores_,
+ gpu::SharedImageRepresentation::AllowUnclearedAccess::kYes);
+ DCHECK(scoped_skia_write_access_);
+ if (!begin_semaphores.empty()) {
+ scoped_skia_write_access_->surface()->wait(begin_semaphores.size(),
+ begin_semaphores.data());
+ }
+}
+
+SkSurface* OutputPresenter::Image::sk_surface() {
+ return scoped_skia_write_access_ ? scoped_skia_write_access_->surface()
+ : nullptr;
+}
+
+std::vector<GrBackendSemaphore>
+OutputPresenter::Image::TakeEndWriteSkiaSemaphores() {
+ std::vector<GrBackendSemaphore> result;
+ result.swap(end_semaphores_);
+ return result;
+}
+
+void OutputPresenter::Image::EndWriteSkia() {
+ // The flush now takes place in finishPaintCurrentBuffer on the CPU side.
+ // If end_semaphores_ is not empty, flush here to submit the semaphores.
+ DCHECK(scoped_skia_write_access_);
+ if (!end_semaphores_.empty()) {
+ GrFlushInfo flush_info = {
+ .fFlags = kNone_GrFlushFlags,
+ .fNumSemaphores = end_semaphores_.size(),
+ .fSignalSemaphores = end_semaphores_.data(),
+ };
+ scoped_skia_write_access_->surface()->flush(
+ SkSurface::BackendSurfaceAccess::kNoAccess, flush_info);
+ DCHECK(scoped_skia_write_access_->surface()->getContext());
+ scoped_skia_write_access_->surface()->getContext()->submit();
+ }
+ scoped_skia_write_access_.reset();
+ end_semaphores_.clear();
+
+ // SkiaRenderer always draws the full frame.
+ skia_representation_->SetCleared();
+}
+
+OutputPresenter::OverlayData::OverlayData(
+ std::unique_ptr<gpu::SharedImageRepresentationOverlay> representation,
+ std::unique_ptr<gpu::SharedImageRepresentationOverlay::ScopedReadAccess>
+ scoped_read_access)
+ : representation_(std::move(representation)),
+ scoped_read_access_(std::move(scoped_read_access)) {}
+OutputPresenter::OverlayData::OverlayData(OverlayData&&) = default;
+OutputPresenter::OverlayData::~OverlayData() = default;
+OutputPresenter::OverlayData& OutputPresenter::OverlayData::operator=(
+ OverlayData&&) = default;
+
+} // namespace viz
diff --git a/chromium/components/viz/service/display_embedder/output_presenter.h b/chromium/components/viz/service/display_embedder/output_presenter.h
new file mode 100644
index 00000000000..70a30fb3fbe
--- /dev/null
+++ b/chromium/components/viz/service/display_embedder/output_presenter.h
@@ -0,0 +1,117 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_VIZ_SERVICE_DISPLAY_EMBEDDER_OUTPUT_PRESENTER_H_
+#define COMPONENTS_VIZ_SERVICE_DISPLAY_EMBEDDER_OUTPUT_PRESENTER_H_
+
+#include <memory>
+#include <vector>
+
+#include "components/viz/service/display/output_surface.h"
+#include "components/viz/service/display/overlay_processor_interface.h"
+#include "components/viz/service/display/skia_output_surface.h"
+#include "components/viz/service/viz_service_export.h"
+#include "gpu/command_buffer/service/shared_image_factory.h"
+#include "ui/gfx/presentation_feedback.h"
+#include "ui/gfx/swap_result.h"
+
+namespace viz {
+
+class SkiaOutputSurfaceDependency;
+
+class VIZ_SERVICE_EXPORT OutputPresenter {
+ public:
+ class Image {
+ public:
+ Image();
+ virtual ~Image();
+
+ Image(const Image&) = delete;
+ Image& operator=(const Image&) = delete;
+
+ bool Initialize(
+ gpu::SharedImageFactory* factory,
+ gpu::SharedImageRepresentationFactory* representation_factory,
+ const gpu::Mailbox& mailbox,
+ SkiaOutputSurfaceDependency* deps);
+
+ gpu::SharedImageRepresentationSkia* skia_representation() {
+ return skia_representation_.get();
+ }
+
+ void BeginWriteSkia();
+ SkSurface* sk_surface();
+ std::vector<GrBackendSemaphore> TakeEndWriteSkiaSemaphores();
+ void EndWriteSkia();
+
+ virtual void BeginPresent() = 0;
+ virtual void EndPresent() = 0;
+ virtual int present_count() const = 0;
+
+ base::WeakPtr<Image> GetWeakPtr() { return weak_ptr_factory_.GetWeakPtr(); }
+
+ private:
+ base::ScopedClosureRunner shared_image_deleter_;
+ std::unique_ptr<gpu::SharedImageRepresentationSkia> skia_representation_;
+ std::unique_ptr<gpu::SharedImageRepresentationSkia::ScopedWriteAccess>
+ scoped_skia_write_access_;
+
+ std::vector<GrBackendSemaphore> end_semaphores_;
+ base::WeakPtrFactory<Image> weak_ptr_factory_{this};
+ };
+
+ class OverlayData {
+ public:
+ OverlayData(
+ std::unique_ptr<gpu::SharedImageRepresentationOverlay> representation,
+ std::unique_ptr<gpu::SharedImageRepresentationOverlay::ScopedReadAccess>
+ scoped_read_access);
+ OverlayData(OverlayData&&);
+ ~OverlayData();
+ OverlayData& operator=(OverlayData&&);
+
+ private:
+ std::unique_ptr<gpu::SharedImageRepresentationOverlay> representation_;
+ std::unique_ptr<gpu::SharedImageRepresentationOverlay::ScopedReadAccess>
+ scoped_read_access_;
+ };
+
+ OutputPresenter() = default;
+ virtual ~OutputPresenter() = default;
+
+ using BufferPresentedCallback =
+ base::OnceCallback<void(const gfx::PresentationFeedback& feedback)>;
+ using SwapCompletionCallback =
+ base::OnceCallback<void(gfx::SwapCompletionResult)>;
+
+ virtual void InitializeCapabilities(
+ OutputSurface::Capabilities* capabilities) = 0;
+ virtual bool Reshape(const gfx::Size& size,
+ float device_scale_factor,
+ const gfx::ColorSpace& color_space,
+ gfx::BufferFormat format,
+ gfx::OverlayTransform transform) = 0;
+ virtual std::vector<std::unique_ptr<Image>> AllocateImages(
+ gfx::ColorSpace color_space,
+ gfx::Size image_size,
+ size_t num_images) = 0;
+ virtual void SwapBuffers(SwapCompletionCallback completion_callback,
+ BufferPresentedCallback presentation_callback) = 0;
+ virtual void PostSubBuffer(const gfx::Rect& rect,
+ SwapCompletionCallback completion_callback,
+ BufferPresentedCallback presentation_callback) = 0;
+ virtual void CommitOverlayPlanes(
+ SwapCompletionCallback completion_callback,
+ BufferPresentedCallback presentation_callback) = 0;
+ virtual void SchedulePrimaryPlane(
+ const OverlayProcessorInterface::OutputSurfaceOverlayPlane& plane,
+ Image* image,
+ bool is_submitted) = 0;
+ virtual std::vector<OverlayData> ScheduleOverlays(
+ SkiaOutputSurface::OverlayList overlays) = 0;
+};
+
+} // namespace viz
+
+#endif // COMPONENTS_VIZ_SERVICE_DISPLAY_EMBEDDER_OUTPUT_PRESENTER_H_
diff --git a/chromium/components/viz/service/display_embedder/output_presenter_fuchsia.cc b/chromium/components/viz/service/display_embedder/output_presenter_fuchsia.cc
new file mode 100644
index 00000000000..50a65e98c66
--- /dev/null
+++ b/chromium/components/viz/service/display_embedder/output_presenter_fuchsia.cc
@@ -0,0 +1,490 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/viz/service/display_embedder/output_presenter_fuchsia.h"
+
+#include <fuchsia/sysmem/cpp/fidl.h>
+#include <lib/sys/cpp/component_context.h>
+#include <lib/sys/inspect/cpp/component.h>
+
+#include <memory>
+#include <utility>
+#include <vector>
+
+#include "base/feature_list.h"
+#include "base/fuchsia/fuchsia_logging.h"
+#include "base/fuchsia/process_context.h"
+#include "base/trace_event/trace_event.h"
+#include "components/viz/common/features.h"
+#include "components/viz/common/gpu/vulkan_context_provider.h"
+#include "components/viz/service/display_embedder/skia_output_surface_dependency.h"
+#include "gpu/command_buffer/service/shared_context_state.h"
+#include "gpu/ipc/common/gpu_client_ids.h"
+#include "gpu/vulkan/vulkan_device_queue.h"
+#include "gpu/vulkan/vulkan_function_pointers.h"
+#include "gpu/vulkan/vulkan_implementation.h"
+#include "third_party/skia/include/gpu/GrBackendSemaphore.h"
+#include "ui/ozone/public/platform_window_surface.h"
+
+namespace viz {
+
+namespace {
+
+void GrSemaphoresToZxEvents(gpu::VulkanImplementation* vulkan_implementation,
+ VkDevice vk_device,
+ const std::vector<GrBackendSemaphore>& semaphores,
+ std::vector<zx::event>* events) {
+ for (auto& semaphore : semaphores) {
+ gpu::SemaphoreHandle handle = vulkan_implementation->GetSemaphoreHandle(
+ vk_device, semaphore.vkSemaphore());
+ DCHECK(handle.is_valid());
+ events->push_back(handle.TakeHandle());
+ }
+}
+
+class PresenterImageFuchsia : public OutputPresenter::Image {
+ public:
+ explicit PresenterImageFuchsia(uint32_t image_id);
+ ~PresenterImageFuchsia() override;
+
+ void BeginPresent() final;
+ void EndPresent() final;
+ int present_count() const final;
+
+ uint32_t image_id() const { return image_id_; }
+
+ void TakeSemaphores(std::vector<GrBackendSemaphore>* read_begin_semaphores,
+ std::vector<GrBackendSemaphore>* read_end_semaphores);
+
+ private:
+ const uint32_t image_id_;
+
+ int present_count_ = 0;
+
+ std::unique_ptr<gpu::SharedImageRepresentationSkia::ScopedReadAccess>
+ read_access_;
+
+ std::vector<GrBackendSemaphore> read_begin_semaphores_;
+ std::vector<GrBackendSemaphore> read_end_semaphores_;
+};
+
+PresenterImageFuchsia::PresenterImageFuchsia(uint32_t image_id)
+ : image_id_(image_id) {}
+
+PresenterImageFuchsia::~PresenterImageFuchsia() {
+ DCHECK(read_begin_semaphores_.empty());
+ DCHECK(read_end_semaphores_.empty());
+}
+
+void PresenterImageFuchsia::BeginPresent() {
+ ++present_count_;
+
+ if (present_count_ == 1) {
+ DCHECK(!read_access_);
+ DCHECK(read_begin_semaphores_.empty());
+ DCHECK(read_end_semaphores_.empty());
+ read_access_ = skia_representation()->BeginScopedReadAccess(
+ &read_begin_semaphores_, &read_end_semaphores_);
+ }
+}
+
+void PresenterImageFuchsia::EndPresent() {
+ DCHECK(present_count_);
+ --present_count_;
+ if (!present_count_)
+ read_access_.reset();
+}
+
+int PresenterImageFuchsia::present_count() const {
+ return present_count_;
+}
+
+void PresenterImageFuchsia::TakeSemaphores(
+ std::vector<GrBackendSemaphore>* read_begin_semaphores,
+ std::vector<GrBackendSemaphore>* read_end_semaphores) {
+ DCHECK(read_begin_semaphores->empty());
+ std::swap(*read_begin_semaphores, read_begin_semaphores_);
+
+ DCHECK(read_end_semaphores->empty());
+ std::swap(*read_end_semaphores, read_end_semaphores_);
+}
+
+} // namespace
+
+OutputPresenterFuchsia::PendingFrame::PendingFrame() = default;
+OutputPresenterFuchsia::PendingFrame::~PendingFrame() = default;
+
+OutputPresenterFuchsia::PendingFrame::PendingFrame(PendingFrame&&) = default;
+OutputPresenterFuchsia::PendingFrame&
+OutputPresenterFuchsia::PendingFrame::operator=(PendingFrame&&) = default;
+
+// static
+std::unique_ptr<OutputPresenterFuchsia> OutputPresenterFuchsia::Create(
+ ui::PlatformWindowSurface* window_surface,
+ SkiaOutputSurfaceDependency* deps,
+ gpu::MemoryTracker* memory_tracker) {
+ auto* inspector = base::ComponentInspectorForProcess();
+
+ if (!base::FeatureList::IsEnabled(
+ features::kUseSkiaOutputDeviceBufferQueue)) {
+ inspector->root().CreateString("output_presenter", "swapchain", inspector);
+ return {};
+ }
+
+ inspector->root().CreateString("output_presenter",
+ "SkiaOutputDeviceBufferQueue", inspector);
+
+ // SetTextureToNewImagePipe() will call ScenicSession::Present() to send the
+ // CreateImagePipe2Cmd creation command, but it will be processed only after
+ // vsync, which will delay allocation of the buffers in AllocateImages().
+ // That shouldn't cause any issues.
+ fuchsia::images::ImagePipe2Ptr image_pipe;
+ if (!window_surface->SetTextureToNewImagePipe(image_pipe.NewRequest()))
+ return {};
+
+ return std::make_unique<OutputPresenterFuchsia>(std::move(image_pipe), deps,
+ memory_tracker);
+}
+
+OutputPresenterFuchsia::OutputPresenterFuchsia(
+ fuchsia::images::ImagePipe2Ptr image_pipe,
+ SkiaOutputSurfaceDependency* deps,
+ gpu::MemoryTracker* memory_tracker)
+ : image_pipe_(std::move(image_pipe)),
+ dependency_(deps),
+ shared_image_factory_(deps->GetGpuPreferences(),
+ deps->GetGpuDriverBugWorkarounds(),
+ deps->GetGpuFeatureInfo(),
+ deps->GetSharedContextState().get(),
+ deps->GetMailboxManager(),
+ deps->GetSharedImageManager(),
+ deps->GetGpuImageFactory(),
+ memory_tracker,
+ true),
+ shared_image_representation_factory_(deps->GetSharedImageManager(),
+ memory_tracker) {
+ sysmem_allocator_ = base::ComponentContextForProcess()
+ ->svc()
+ ->Connect<fuchsia::sysmem::Allocator>();
+
+ image_pipe_.set_error_handler([this](zx_status_t status) {
+ ZX_LOG(ERROR, status) << "ImagePipe disconnected";
+
+ for (auto& frame : pending_frames_) {
+ std::move(frame.completion_callback)
+ .Run(gfx::SwapCompletionResult(gfx::SwapResult::SWAP_FAILED));
+ }
+ pending_frames_.clear();
+ });
+}
+
+OutputPresenterFuchsia::~OutputPresenterFuchsia() {}
+
+void OutputPresenterFuchsia::InitializeCapabilities(
+ OutputSurface::Capabilities* capabilities) {
+ // We expect the origin of buffers to be at the top left.
+ capabilities->output_surface_origin = gfx::SurfaceOrigin::kTopLeft;
+ capabilities->supports_post_sub_buffer = false;
+ capabilities->supports_commit_overlay_planes = false;
+
+ capabilities->sk_color_type = kRGBA_8888_SkColorType;
+ capabilities->gr_backend_format =
+ dependency_->GetSharedContextState()->gr_context()->defaultBackendFormat(
+ capabilities->sk_color_type, GrRenderable::kYes);
+}
+
+bool OutputPresenterFuchsia::Reshape(const gfx::Size& size,
+ float device_scale_factor,
+ const gfx::ColorSpace& color_space,
+ gfx::BufferFormat format,
+ gfx::OverlayTransform transform) {
+ if (!image_pipe_)
+ return false;
+
+ frame_size_ = size;
+
+ return true;
+}
+
+std::vector<std::unique_ptr<OutputPresenter::Image>>
+OutputPresenterFuchsia::AllocateImages(gfx::ColorSpace color_space,
+ gfx::Size image_size,
+ size_t num_images) {
+ if (!image_pipe_)
+ return {};
+
+ // If we already allocated a buffer collection then it needs to be released.
+ if (last_buffer_collection_id_) {
+ // If there is a pending frame for the old buffer collection then remove the
+ // collection only after that frame is presented. Otherwise remove it now.
+ if (!pending_frames_.empty() &&
+ pending_frames_.back().buffer_collection_id ==
+ last_buffer_collection_id_) {
+ DCHECK(!pending_frames_.back().remove_buffer_collection);
+ pending_frames_.back().remove_buffer_collection = true;
+ } else {
+ image_pipe_->RemoveBufferCollection(last_buffer_collection_id_);
+ }
+ }
+
+ buffer_collection_.reset();
+
+ // Create buffer collection with 2 extra tokens: one for Vulkan and one for
+ // the ImagePipe.
+ fuchsia::sysmem::BufferCollectionTokenPtr collection_token;
+ sysmem_allocator_->AllocateSharedCollection(collection_token.NewRequest());
+
+ fidl::InterfaceHandle<fuchsia::sysmem::BufferCollectionToken>
+ token_for_scenic;
+ collection_token->Duplicate(ZX_RIGHT_SAME_RIGHTS,
+ token_for_scenic.NewRequest());
+
+ fidl::InterfaceHandle<fuchsia::sysmem::BufferCollectionToken>
+ token_for_vulkan;
+ collection_token->Duplicate(ZX_RIGHT_SAME_RIGHTS,
+ token_for_vulkan.NewRequest());
+
+ fuchsia::sysmem::BufferCollectionSyncPtr collection;
+ sysmem_allocator_->BindSharedCollection(std::move(collection_token),
+ collection.NewRequest());
+
+ zx_status_t status = collection->Sync();
+ if (status != ZX_OK) {
+ ZX_DLOG(ERROR, status) << "fuchsia.sysmem.BufferCollection.Sync()";
+ return {};
+ }
+
+ auto* vulkan =
+ dependency_->GetVulkanContextProvider()->GetVulkanImplementation();
+
+ // Set constraints for the new collection.
+ fuchsia::sysmem::BufferCollectionConstraints constraints;
+ constraints.min_buffer_count = num_images;
+ constraints.usage.none = fuchsia::sysmem::noneUsage;
+ constraints.image_format_constraints_count = 1;
+ constraints.image_format_constraints[0].pixel_format.type =
+ fuchsia::sysmem::PixelFormatType::R8G8B8A8;
+ constraints.image_format_constraints[0].min_coded_width = frame_size_.width();
+ constraints.image_format_constraints[0].min_coded_height =
+ frame_size_.height();
+ constraints.image_format_constraints[0].color_spaces_count = 1;
+ constraints.image_format_constraints[0].color_space[0].type =
+ fuchsia::sysmem::ColorSpaceType::SRGB;
+ collection->SetConstraints(true, constraints);
+
+ // Register the new buffer collection with the ImagePipe.
+ last_buffer_collection_id_++;
+ image_pipe_->AddBufferCollection(last_buffer_collection_id_,
+ std::move(token_for_scenic));
+
+ // Register the new buffer collection with Vulkan.
+ gfx::SysmemBufferCollectionId buffer_collection_id =
+ gfx::SysmemBufferCollectionId::Create();
+
+ VkDevice vk_device = dependency_->GetVulkanContextProvider()
+ ->GetDeviceQueue()
+ ->GetVulkanDevice();
+ buffer_collection_ = vulkan->RegisterSysmemBufferCollection(
+ vk_device, buffer_collection_id, token_for_vulkan.TakeChannel(),
+ buffer_format_, gfx::BufferUsage::SCANOUT);
+
+ // Wait for the images to be allocated.
+ zx_status_t wait_status;
+ fuchsia::sysmem::BufferCollectionInfo_2 buffers_info;
+ status = collection->WaitForBuffersAllocated(&wait_status, &buffers_info);
+ if (status != ZX_OK) {
+ ZX_DLOG(ERROR, status) << "fuchsia.sysmem.BufferCollection failed";
+ return {};
+ }
+
+ if (wait_status != ZX_OK) {
+ ZX_DLOG(ERROR, wait_status)
+ << "Sysmem buffer collection allocation failed.";
+ return {};
+ }
+
+ DCHECK_GE(buffers_info.buffer_count, num_images);
+
+ // We no longer need the BufferCollection connection. Close it explicitly so
+ // that the ImagePipe can still use the collection after the BufferCollection
+ // connection is dropped below.
+ collection->Close();
+
+ // Create PresenterImageFuchsia for each buffer in the collection.
+ uint32_t image_usage = gpu::SHARED_IMAGE_USAGE_RASTER;
+ if (vulkan->enforce_protected_memory())
+ image_usage |= gpu::SHARED_IMAGE_USAGE_PROTECTED;
+
+ std::vector<std::unique_ptr<OutputPresenter::Image>> images;
+ images.reserve(num_images);
+
+ fuchsia::sysmem::ImageFormat_2 image_format;
+ image_format.coded_width = frame_size_.width();
+ image_format.coded_height = frame_size_.height();
+
+ // Create an image for each buffer in the collection.
+ for (size_t i = 0; i < num_images; ++i) {
+ last_image_id_++;
+ image_pipe_->AddImage(last_image_id_, last_buffer_collection_id_, i,
+ image_format);
+
+ gfx::GpuMemoryBufferHandle gmb_handle;
+ gmb_handle.type = gfx::GpuMemoryBufferType::NATIVE_PIXMAP;
+ gmb_handle.native_pixmap_handle.buffer_collection_id = buffer_collection_id;
+ gmb_handle.native_pixmap_handle.buffer_index = i;
+
+ auto mailbox = gpu::Mailbox::GenerateForSharedImage();
+ if (!shared_image_factory_.CreateSharedImage(
+ mailbox, gpu::kInProcessCommandBufferClientId,
+ std::move(gmb_handle), buffer_format_, gpu::kNullSurfaceHandle,
+ frame_size_, color_space, image_usage)) {
+ return {};
+ }
+
+ auto image = std::make_unique<PresenterImageFuchsia>(last_image_id_);
+ if (!image->Initialize(&shared_image_factory_,
+ &shared_image_representation_factory_, mailbox,
+ dependency_)) {
+ return {};
+ }
+ images.push_back(std::move(image));
+ }
+
+ return images;
+}
+
+void OutputPresenterFuchsia::SwapBuffers(
+ SwapCompletionCallback completion_callback,
+ BufferPresentedCallback presentation_callback) {
+ if (!image_pipe_) {
+ std::move(completion_callback)
+ .Run(gfx::SwapCompletionResult(gfx::SwapResult::SWAP_FAILED));
+ return;
+ }
+
+ // SwapBuffers() should be called only after SchedulePrimaryPlane().
+ DCHECK(next_frame_);
+
+ TRACE_EVENT_NESTABLE_ASYNC_BEGIN1(
+ "viz", "OutputPresenterFuchsia::PresentQueue", TRACE_ID_LOCAL(this),
+ "image_id", next_frame_->image_id);
+
+ next_frame_->completion_callback = std::move(completion_callback);
+ next_frame_->presentation_callback = std::move(presentation_callback);
+
+ pending_frames_.push_back(std::move(next_frame_.value()));
+ next_frame_.reset();
+
+ if (!present_is_pending_)
+ PresentNextFrame();
+}
+
+void OutputPresenterFuchsia::PostSubBuffer(
+ const gfx::Rect& rect,
+ SwapCompletionCallback completion_callback,
+ BufferPresentedCallback presentation_callback) {
+ // Sub buffer presentation is not supported.
+ NOTREACHED();
+}
+
+void OutputPresenterFuchsia::CommitOverlayPlanes(
+ SwapCompletionCallback completion_callback,
+ BufferPresentedCallback presentation_callback) {
+ // Overlays are not supported yet.
+ NOTREACHED();
+}
+
+void OutputPresenterFuchsia::SchedulePrimaryPlane(
+ const OverlayProcessorInterface::OutputSurfaceOverlayPlane& plane,
+ Image* image,
+ bool is_submitted) {
+ auto* image_fuchsia = static_cast<PresenterImageFuchsia*>(image);
+
+ DCHECK(!next_frame_);
+ next_frame_ = PendingFrame();
+ next_frame_->image_id = image_fuchsia->image_id();
+ next_frame_->buffer_collection_id = last_buffer_collection_id_;
+
+ // Take semaphores for the image and convert them to zx::events that are later
+ // passed to ImagePipe::PresentImage().
+ std::vector<GrBackendSemaphore> read_begin_semaphores;
+ std::vector<GrBackendSemaphore> read_end_semaphores;
+ image_fuchsia->TakeSemaphores(&read_begin_semaphores, &read_end_semaphores);
+
+ auto* vulkan_context_provider = dependency_->GetVulkanContextProvider();
+ auto* vulkan_implementation =
+ vulkan_context_provider->GetVulkanImplementation();
+ VkDevice vk_device =
+ vulkan_context_provider->GetDeviceQueue()->GetVulkanDevice();
+
+ GrSemaphoresToZxEvents(vulkan_implementation, vk_device,
+ read_begin_semaphores, &(next_frame_->acquire_fences));
+ GrSemaphoresToZxEvents(vulkan_implementation, vk_device, read_end_semaphores,
+ &(next_frame_->release_fences));
+
+ // Destroy |read_begin_semaphores|, but not |read_end_semaphores|, since
+ // SharedImageRepresentationSkia::BeginScopedReadAccess() keeps ownership of
+ // the end_semaphores.
+ for (auto& semaphore : read_begin_semaphores) {
+ vkDestroySemaphore(vk_device, semaphore.vkSemaphore(), nullptr);
+ }
+}
+
+std::vector<OutputPresenter::OverlayData>
+OutputPresenterFuchsia::ScheduleOverlays(
+ SkiaOutputSurface::OverlayList overlays) {
+ // Overlays are not supported yet.
+ NOTREACHED();
+ return {};
+}
+
+void OutputPresenterFuchsia::PresentNextFrame() {
+ DCHECK(!present_is_pending_);
+ DCHECK(!pending_frames_.empty());
+
+ TRACE_EVENT_NESTABLE_ASYNC_END1("viz", "OutputPresenterFuchsia::PresentQueue",
+ TRACE_ID_LOCAL(this), "image_id",
+ pending_frames_.front().image_id);
+ TRACE_EVENT_NESTABLE_ASYNC_BEGIN1(
+ "viz", "OutputPresenterFuchsia::PresentFrame", TRACE_ID_LOCAL(this),
+ "image_id", pending_frames_.front().image_id);
+
+ present_is_pending_ = true;
+ uint64_t target_presentation_time = zx_clock_get_monotonic();
+ image_pipe_->PresentImage(
+ pending_frames_.front().image_id, target_presentation_time,
+ std::move(pending_frames_.front().acquire_fences),
+ std::move(pending_frames_.front().release_fences),
+ fit::bind_member(this, &OutputPresenterFuchsia::OnPresentComplete));
+}
+
+void OutputPresenterFuchsia::OnPresentComplete(
+ fuchsia::images::PresentationInfo presentation_info) {
+ DCHECK(present_is_pending_);
+ present_is_pending_ = false;
+
+ TRACE_EVENT_NESTABLE_ASYNC_END1("viz", "OutputPresenterFuchsia::PresentFrame",
+ TRACE_ID_LOCAL(this), "image_id",
+ pending_frames_.front().image_id);
+
+ std::move(pending_frames_.front().completion_callback)
+ .Run(gfx::SwapCompletionResult(gfx::SwapResult::SWAP_ACK));
+ std::move(pending_frames_.front().presentation_callback)
+ .Run(gfx::PresentationFeedback(
+ base::TimeTicks::FromZxTime(presentation_info.presentation_time),
+ base::TimeDelta::FromZxDuration(
+ presentation_info.presentation_interval),
+ gfx::PresentationFeedback::kVSync));
+
+ if (pending_frames_.front().remove_buffer_collection) {
+ image_pipe_->RemoveBufferCollection(
+ pending_frames_.front().buffer_collection_id);
+ }
+
+ pending_frames_.pop_front();
+ if (!pending_frames_.empty())
+ PresentNextFrame();
+}
+
+} // namespace viz
diff --git a/chromium/components/viz/service/display_embedder/output_presenter_fuchsia.h b/chromium/components/viz/service/display_embedder/output_presenter_fuchsia.h
new file mode 100644
index 00000000000..bfee93dae49
--- /dev/null
+++ b/chromium/components/viz/service/display_embedder/output_presenter_fuchsia.h
@@ -0,0 +1,115 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_VIZ_SERVICE_DISPLAY_EMBEDDER_OUTPUT_PRESENTER_FUCHSIA_H_
+#define COMPONENTS_VIZ_SERVICE_DISPLAY_EMBEDDER_OUTPUT_PRESENTER_FUCHSIA_H_
+
+#include <fuchsia/images/cpp/fidl.h>
+
+#include <memory>
+#include <vector>
+
+#include "base/containers/circular_deque.h"
+#include "components/viz/service/display_embedder/output_presenter.h"
+#include "components/viz/service/viz_service_export.h"
+#include "gpu/command_buffer/common/shared_image_usage.h"
+#include "gpu/command_buffer/service/shared_image_factory.h"
+
+namespace ui {
+class PlatformWindowSurface;
+} // namespace ui
+
+namespace viz {
+
+class VIZ_SERVICE_EXPORT OutputPresenterFuchsia : public OutputPresenter {
+ public:
+ static std::unique_ptr<OutputPresenterFuchsia> Create(
+ ui::PlatformWindowSurface* window_surface,
+ SkiaOutputSurfaceDependency* deps,
+ gpu::MemoryTracker* memory_tracker);
+
+ OutputPresenterFuchsia(fuchsia::images::ImagePipe2Ptr image_pipe,
+ SkiaOutputSurfaceDependency* deps,
+ gpu::MemoryTracker* memory_tracker);
+ ~OutputPresenterFuchsia() override;
+
+ // OutputPresenter implementation:
+ void InitializeCapabilities(OutputSurface::Capabilities* capabilities) final;
+ bool Reshape(const gfx::Size& size,
+ float device_scale_factor,
+ const gfx::ColorSpace& color_space,
+ gfx::BufferFormat format,
+ gfx::OverlayTransform transform) final;
+ std::vector<std::unique_ptr<Image>> AllocateImages(
+ gfx::ColorSpace color_space,
+ gfx::Size image_size,
+ size_t num_images) final;
+ void SwapBuffers(SwapCompletionCallback completion_callback,
+ BufferPresentedCallback presentation_callback) final;
+ void PostSubBuffer(const gfx::Rect& rect,
+ SwapCompletionCallback completion_callback,
+ BufferPresentedCallback presentation_callback) final;
+ void CommitOverlayPlanes(SwapCompletionCallback completion_callback,
+ BufferPresentedCallback presentation_callback) final;
+ void SchedulePrimaryPlane(
+ const OverlayProcessorInterface::OutputSurfaceOverlayPlane& plane,
+ Image* image,
+ bool is_submitted) final;
+ std::vector<OverlayData> ScheduleOverlays(
+ SkiaOutputSurface::OverlayList overlays) final;
+
+ private:
+ struct PendingFrame {
+ PendingFrame();
+ ~PendingFrame();
+
+ PendingFrame(PendingFrame&&);
+ PendingFrame& operator=(PendingFrame&&);
+
+ uint32_t buffer_collection_id;
+ uint32_t image_id;
+
+ std::vector<zx::event> acquire_fences;
+ std::vector<zx::event> release_fences;
+
+ SwapCompletionCallback completion_callback;
+ BufferPresentedCallback presentation_callback;
+
+ // Indicates that this is the last frame for this buffer collection and that
+ // the collection can be removed after the frame is presented.
+ bool remove_buffer_collection = false;
+ };
+
+ void PresentNextFrame();
+ void OnPresentComplete(fuchsia::images::PresentationInfo presentation_info);
+
+ fuchsia::sysmem::AllocatorPtr sysmem_allocator_;
+ fuchsia::images::ImagePipe2Ptr image_pipe_;
+ SkiaOutputSurfaceDependency* const dependency_;
+ gpu::SharedImageFactory shared_image_factory_;
+ gpu::SharedImageRepresentationFactory shared_image_representation_factory_;
+
+ gfx::Size frame_size_;
+ gfx::BufferFormat buffer_format_ = gfx::BufferFormat::RGBA_8888;
+
+ // Last buffer collection ID for the ImagePipe. Incremented every time buffers
+ // are reallocated.
+ uint32_t last_buffer_collection_id_ = 0;
+
+ // Counter to generate image IDs for the ImagePipe.
+ uint32_t last_image_id_ = 0;
+
+ std::unique_ptr<gpu::SysmemBufferCollection> buffer_collection_;
+
+ // The next frame to be submitted by SwapBuffers().
+ base::Optional<PendingFrame> next_frame_;
+
+ base::circular_deque<PendingFrame> pending_frames_;
+
+ bool present_is_pending_ = false;
+};
+
+} // namespace viz
+
+#endif // COMPONENTS_VIZ_SERVICE_DISPLAY_EMBEDDER_OUTPUT_PRESENTER_FUCHSIA_H_
diff --git a/chromium/components/viz/service/display_embedder/output_presenter_gl.cc b/chromium/components/viz/service/display_embedder/output_presenter_gl.cc
new file mode 100644
index 00000000000..289ab589939
--- /dev/null
+++ b/chromium/components/viz/service/display_embedder/output_presenter_gl.cc
@@ -0,0 +1,408 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/viz/service/display_embedder/output_presenter_gl.h"
+
+#include <memory>
+#include <utility>
+#include <vector>
+
+#include "base/threading/thread_task_runner_handle.h"
+#include "build/build_config.h"
+#include "components/viz/common/resources/resource_format_utils.h"
+#include "components/viz/service/display_embedder/skia_output_surface_dependency.h"
+#include "gpu/command_buffer/service/shared_context_state.h"
+#include "gpu/ipc/common/gpu_surface_lookup.h"
+#include "ui/display/types/display_snapshot.h"
+#include "ui/gfx/buffer_format_util.h"
+#include "ui/gfx/geometry/rect_conversions.h"
+#include "ui/gl/gl_fence.h"
+#include "ui/gl/gl_surface.h"
+
+#if defined(OS_ANDROID)
+#include "ui/gl/gl_surface_egl_surface_control.h"
+#endif
+
+#if defined(USE_OZONE)
+#include "ui/base/ui_base_features.h"
+#endif
+
+namespace viz {
+
+namespace {
+
+class PresenterImageGL : public OutputPresenter::Image {
+ public:
+ PresenterImageGL() = default;
+ ~PresenterImageGL() override = default;
+
+ bool Initialize(gpu::SharedImageFactory* factory,
+ gpu::SharedImageRepresentationFactory* representation_factory,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ ResourceFormat format,
+ SkiaOutputSurfaceDependency* deps,
+ uint32_t shared_image_usage);
+
+ void BeginPresent() final;
+ void EndPresent() final;
+ int present_count() const final;
+
+ gl::GLImage* GetGLImage(std::unique_ptr<gfx::GpuFence>* fence);
+
+ private:
+ std::unique_ptr<gpu::SharedImageRepresentationOverlay>
+ overlay_representation_;
+ std::unique_ptr<gpu::SharedImageRepresentationGLTexture> gl_representation_;
+ std::unique_ptr<gpu::SharedImageRepresentationOverlay::ScopedReadAccess>
+ scoped_overlay_read_access_;
+ std::unique_ptr<gpu::SharedImageRepresentationGLTexture::ScopedAccess>
+ scoped_gl_read_access_;
+
+ int present_count_ = 0;
+};
+
+bool PresenterImageGL::Initialize(
+ gpu::SharedImageFactory* factory,
+ gpu::SharedImageRepresentationFactory* representation_factory,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ ResourceFormat format,
+ SkiaOutputSurfaceDependency* deps,
+ uint32_t shared_image_usage) {
+ auto mailbox = gpu::Mailbox::GenerateForSharedImage();
+
+ if (!factory->CreateSharedImage(mailbox, format, size, color_space,
+ deps->GetSurfaceHandle(),
+ shared_image_usage)) {
+ DLOG(ERROR) << "CreateSharedImage failed.";
+ return false;
+ }
+
+ if (!Image::Initialize(factory, representation_factory, mailbox, deps))
+ return false;
+
+ overlay_representation_ = representation_factory->ProduceOverlay(mailbox);
+
+ // If the backing doesn't support overlay, then fall back to GL.
+ if (!overlay_representation_)
+ gl_representation_ = representation_factory->ProduceGLTexture(mailbox);
+
+ if (!overlay_representation_ && !gl_representation_) {
+ DLOG(ERROR) << "ProduceOverlay() and ProduceGLTexture() failed.";
+ return false;
+ }
+
+ return true;
+}
+
+void PresenterImageGL::BeginPresent() {
+ if (++present_count_ != 1) {
+ DCHECK(scoped_overlay_read_access_ || scoped_gl_read_access_);
+ return;
+ }
+
+ DCHECK(!sk_surface());
+ DCHECK(!scoped_overlay_read_access_);
+
+ if (overlay_representation_) {
+ scoped_overlay_read_access_ =
+ overlay_representation_->BeginScopedReadAccess(
+ true /* need_gl_image */);
+ DCHECK(scoped_overlay_read_access_);
+ return;
+ }
+
+ scoped_gl_read_access_ = gl_representation_->BeginScopedAccess(
+ GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM,
+ gpu::SharedImageRepresentation::AllowUnclearedAccess::kNo);
+ DCHECK(scoped_gl_read_access_);
+}
+
+void PresenterImageGL::EndPresent() {
+ DCHECK(present_count_);
+ if (--present_count_)
+ return;
+ scoped_overlay_read_access_.reset();
+ scoped_gl_read_access_.reset();
+}
+
+int PresenterImageGL::present_count() const {
+ return present_count_;
+}
+
+gl::GLImage* PresenterImageGL::GetGLImage(
+ std::unique_ptr<gfx::GpuFence>* fence) {
+ if (scoped_overlay_read_access_)
+ return scoped_overlay_read_access_->gl_image();
+
+ DCHECK(scoped_gl_read_access_);
+
+ if (gl::GLFence::IsGpuFenceSupported() && fence) {
+ if (auto gl_fence = gl::GLFence::CreateForGpuFence())
+ *fence = gl_fence->GetGpuFence();
+ }
+ auto* texture = gl_representation_->GetTexture();
+ return texture->GetLevelImage(texture->target(), 0);
+}
+
+} // namespace
+
+// static
+const uint32_t OutputPresenterGL::kDefaultSharedImageUsage =
+ gpu::SHARED_IMAGE_USAGE_SCANOUT | gpu::SHARED_IMAGE_USAGE_DISPLAY |
+ gpu::SHARED_IMAGE_USAGE_GLES2_FRAMEBUFFER_HINT;
+
+// static
+std::unique_ptr<OutputPresenterGL> OutputPresenterGL::Create(
+ SkiaOutputSurfaceDependency* deps,
+ gpu::MemoryTracker* memory_tracker) {
+#if defined(OS_ANDROID)
+ if (deps->GetGpuFeatureInfo()
+ .status_values[gpu::GPU_FEATURE_TYPE_ANDROID_SURFACE_CONTROL] !=
+ gpu::kGpuFeatureStatusEnabled) {
+ return nullptr;
+ }
+
+ bool can_be_used_with_surface_control = false;
+ ANativeWindow* window =
+ gpu::GpuSurfaceLookup::GetInstance()->AcquireNativeWidget(
+ deps->GetSurfaceHandle(), &can_be_used_with_surface_control);
+ if (!window || !can_be_used_with_surface_control)
+ return nullptr;
+ // TODO(https://crbug.com/1012401): don't depend on GL.
+ auto gl_surface = base::MakeRefCounted<gl::GLSurfaceEGLSurfaceControl>(
+ window, base::ThreadTaskRunnerHandle::Get());
+ if (!gl_surface->Initialize(gl::GLSurfaceFormat())) {
+ LOG(ERROR) << "Failed to initialize GLSurfaceEGLSurfaceControl.";
+ return nullptr;
+ }
+
+ if (!deps->GetSharedContextState()->MakeCurrent(gl_surface.get(),
+ true /* needs_gl*/)) {
+ LOG(ERROR) << "MakeCurrent failed.";
+ return nullptr;
+ }
+
+ return std::make_unique<OutputPresenterGL>(
+ std::move(gl_surface), deps, memory_tracker, kDefaultSharedImageUsage);
+#else
+ return nullptr;
+#endif
+}
+
+OutputPresenterGL::OutputPresenterGL(scoped_refptr<gl::GLSurface> gl_surface,
+ SkiaOutputSurfaceDependency* deps,
+ gpu::MemoryTracker* memory_tracker,
+ uint32_t shared_image_usage)
+ : gl_surface_(gl_surface),
+ dependency_(deps),
+ supports_async_swap_(gl_surface_->SupportsAsyncSwap()),
+ shared_image_factory_(deps->GetGpuPreferences(),
+ deps->GetGpuDriverBugWorkarounds(),
+ deps->GetGpuFeatureInfo(),
+ deps->GetSharedContextState().get(),
+ deps->GetMailboxManager(),
+ deps->GetSharedImageManager(),
+ deps->GetGpuImageFactory(),
+ memory_tracker,
+ true),
+ shared_image_representation_factory_(deps->GetSharedImageManager(),
+ memory_tracker),
+ shared_image_usage_(shared_image_usage) {
+ // GL's origin is normally at the bottom left; all Surfaceless
+ // implementations are flipped.
+ DCHECK_EQ(gl_surface_->GetOrigin(), gfx::SurfaceOrigin::kTopLeft);
+
+ // TODO(https://crbug.com/958166): The initial |image_format_| should not be
+ // used, and the gfx::BufferFormat specified in Reshape should be used
+ // instead, because it may be updated to reflect changes in the content being
+ // displayed (e.g., HDR content appearing on-screen).
+#if defined(OS_MACOSX)
+ image_format_ = BGRA_8888;
+#else
+#if defined(USE_OZONE)
+ if (features::IsUsingOzonePlatform()) {
+ image_format_ =
+ GetResourceFormat(display::DisplaySnapshot::PrimaryFormat());
+ return;
+ }
+#endif
+ image_format_ = RGBA_8888;
+#endif
+}
+
+OutputPresenterGL::~OutputPresenterGL() = default;
+
+void OutputPresenterGL::InitializeCapabilities(
+ OutputSurface::Capabilities* capabilities) {
+ capabilities->android_surface_control_feature_enabled = true;
+ capabilities->supports_post_sub_buffer = gl_surface_->SupportsPostSubBuffer();
+ capabilities->supports_commit_overlay_planes =
+ gl_surface_->SupportsCommitOverlayPlanes();
+
+ // Set supports_surfaceless to enable overlays.
+ capabilities->supports_surfaceless = true;
+ // We expect the origin of buffers to be at the top left.
+ capabilities->output_surface_origin = gfx::SurfaceOrigin::kTopLeft;
+
+ // TODO(penghuang): Use defaultBackendFormat() in shared image implementation
+ // to make sure backend format is consistent.
+ capabilities->sk_color_type = ResourceFormatToClosestSkColorType(
+ true /* gpu_compositing */, image_format_);
+ capabilities->gr_backend_format =
+ dependency_->GetSharedContextState()->gr_context()->defaultBackendFormat(
+ capabilities->sk_color_type, GrRenderable::kYes);
+}
+
+bool OutputPresenterGL::Reshape(const gfx::Size& size,
+ float device_scale_factor,
+ const gfx::ColorSpace& color_space,
+ gfx::BufferFormat format,
+ gfx::OverlayTransform transform) {
+ return gl_surface_->Resize(size, device_scale_factor, color_space,
+ gfx::AlphaBitsForBufferFormat(format));
+}
+
+std::vector<std::unique_ptr<OutputPresenter::Image>>
+OutputPresenterGL::AllocateImages(gfx::ColorSpace color_space,
+ gfx::Size image_size,
+ size_t num_images) {
+ std::vector<std::unique_ptr<Image>> images;
+ for (size_t i = 0; i < num_images; ++i) {
+ auto image = std::make_unique<PresenterImageGL>();
+ if (!image->Initialize(&shared_image_factory_,
+ &shared_image_representation_factory_, image_size,
+ color_space, image_format_, dependency_,
+ shared_image_usage_)) {
+ DLOG(ERROR) << "Failed to initialize image.";
+ return {};
+ }
+ images.push_back(std::move(image));
+ }
+
+ return images;
+}
+
+void OutputPresenterGL::SwapBuffers(
+ SwapCompletionCallback completion_callback,
+ BufferPresentedCallback presentation_callback) {
+ if (supports_async_swap_) {
+ gl_surface_->SwapBuffersAsync(std::move(completion_callback),
+ std::move(presentation_callback));
+ } else {
+ auto result = gl_surface_->SwapBuffers(std::move(presentation_callback));
+ std::move(completion_callback).Run(gfx::SwapCompletionResult(result));
+ }
+}
+
+void OutputPresenterGL::PostSubBuffer(
+ const gfx::Rect& rect,
+ SwapCompletionCallback completion_callback,
+ BufferPresentedCallback presentation_callback) {
+ if (supports_async_swap_) {
+ gl_surface_->PostSubBufferAsync(
+ rect.x(), rect.y(), rect.width(), rect.height(),
+ std::move(completion_callback), std::move(presentation_callback));
+ } else {
+ auto result = gl_surface_->PostSubBuffer(rect.x(), rect.y(), rect.width(),
+ rect.height(),
+ std::move(presentation_callback));
+ std::move(completion_callback).Run(gfx::SwapCompletionResult(result));
+ }
+}
+
+void OutputPresenterGL::SchedulePrimaryPlane(
+ const OverlayProcessorInterface::OutputSurfaceOverlayPlane& plane,
+ Image* image,
+ bool is_submitted) {
+ std::unique_ptr<gfx::GpuFence> fence;
+ // If the submitted_image() is being scheduled, we don't need a new fence.
+ auto* gl_image = reinterpret_cast<PresenterImageGL*>(image)->GetGLImage(
+ is_submitted ? nullptr : &fence);
+
+ // Output surface is also z-order 0.
+ constexpr int kPlaneZOrder = 0;
+ // Output surface always uses the full texture.
+ constexpr gfx::RectF kUVRect(0.f, 0.f, 1.0f, 1.0f);
+ gl_surface_->ScheduleOverlayPlane(kPlaneZOrder, plane.transform, gl_image,
+ ToNearestRect(plane.display_rect), kUVRect,
+ plane.enable_blending, std::move(fence));
+}
+
+void OutputPresenterGL::CommitOverlayPlanes(
+ SwapCompletionCallback completion_callback,
+ BufferPresentedCallback presentation_callback) {
+ if (supports_async_swap_) {
+ gl_surface_->CommitOverlayPlanesAsync(std::move(completion_callback),
+ std::move(presentation_callback));
+ } else {
+ auto result =
+ gl_surface_->CommitOverlayPlanes(std::move(presentation_callback));
+ std::move(completion_callback).Run(gfx::SwapCompletionResult(result));
+ }
+}
+
+std::vector<OutputPresenter::OverlayData> OutputPresenterGL::ScheduleOverlays(
+ SkiaOutputSurface::OverlayList overlays) {
+ std::vector<OverlayData> pending_overlays;
+#if defined(OS_ANDROID) || defined(OS_MACOSX)
+ // Note while reading through this for-loop that |overlay| has different
+ // types on different platforms. On Android and Ozone it is an
+ // OverlayCandidate, on Windows it is a DCLayerOverlay, and on macOS it is
+ // a CALayerOverlay.
+ for (auto& overlay : overlays) {
+ // Extract the shared image and GLImage for the overlay. Note that for
+ // solid color overlays, this will remain nullptr.
+ gl::GLImage* gl_image = nullptr;
+ if (overlay.mailbox.IsSharedImage()) {
+ auto shared_image =
+ shared_image_representation_factory_.ProduceOverlay(overlay.mailbox);
+ // When the display is re-opened, the first few frames might not have the
+ // video resource ready. Possible investigation: crbug.com/1023971.
+ if (!shared_image) {
+ LOG(ERROR) << "Invalid mailbox.";
+ continue;
+ }
+
+ auto shared_image_access =
+ shared_image->BeginScopedReadAccess(true /* needs_gl_image */);
+ if (!shared_image_access) {
+ LOG(ERROR) << "Could not access SharedImage for read.";
+ continue;
+ }
+
+ gl_image = shared_image_access->gl_image();
+ DLOG_IF(ERROR, !gl_image) << "Cannot get GLImage.";
+
+ pending_overlays.emplace_back(std::move(shared_image),
+ std::move(shared_image_access));
+ }
+
+#if defined(OS_ANDROID)
+ if (gl_image) {
+ DCHECK(!overlay.gpu_fence_id);
+ gl_surface_->ScheduleOverlayPlane(
+ overlay.plane_z_order, overlay.transform, gl_image,
+ ToNearestRect(overlay.display_rect), overlay.uv_rect,
+ !overlay.is_opaque, nullptr /* gpu_fence */);
+ }
+#elif defined(OS_MACOSX)
+ gl_surface_->ScheduleCALayer(ui::CARendererLayerParams(
+ overlay.shared_state->is_clipped,
+ gfx::ToEnclosingRect(overlay.shared_state->clip_rect),
+ overlay.shared_state->rounded_corner_bounds,
+ overlay.shared_state->sorting_context_id,
+ gfx::Transform(overlay.shared_state->transform), gl_image,
+ overlay.contents_rect, gfx::ToEnclosingRect(overlay.bounds_rect),
+ overlay.background_color, overlay.edge_aa_mask,
+ overlay.shared_state->opacity, overlay.filter));
+#endif
+ }
+#endif // defined(OS_ANDROID) || defined(OS_MACOSX)
+
+ return pending_overlays;
+}
+
+} // namespace viz
diff --git a/chromium/components/viz/service/display_embedder/output_presenter_gl.h b/chromium/components/viz/service/display_embedder/output_presenter_gl.h
new file mode 100644
index 00000000000..bfbf51996d1
--- /dev/null
+++ b/chromium/components/viz/service/display_embedder/output_presenter_gl.h
@@ -0,0 +1,78 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_VIZ_SERVICE_DISPLAY_EMBEDDER_OUTPUT_PRESENTER_GL_H_
+#define COMPONENTS_VIZ_SERVICE_DISPLAY_EMBEDDER_OUTPUT_PRESENTER_GL_H_
+
+#include <memory>
+#include <vector>
+
+#include "components/viz/service/display_embedder/output_presenter.h"
+#include "components/viz/service/viz_service_export.h"
+#include "gpu/command_buffer/common/shared_image_usage.h"
+#include "gpu/command_buffer/service/shared_image_factory.h"
+
+namespace gl {
+class GLSurface;
+} // namespace gl
+
+namespace viz {
+
+class VIZ_SERVICE_EXPORT OutputPresenterGL : public OutputPresenter {
+ public:
+ static const uint32_t kDefaultSharedImageUsage;
+
+ static std::unique_ptr<OutputPresenterGL> Create(
+ SkiaOutputSurfaceDependency* deps,
+ gpu::MemoryTracker* memory_tracker);
+
+ OutputPresenterGL(scoped_refptr<gl::GLSurface> gl_surface,
+ SkiaOutputSurfaceDependency* deps,
+ gpu::MemoryTracker* memory_tracker,
+ uint32_t shared_image_usage = kDefaultSharedImageUsage);
+ ~OutputPresenterGL() override;
+
+ gl::GLSurface* gl_surface() { return gl_surface_.get(); }
+
+ // OutputPresenter implementation:
+ void InitializeCapabilities(OutputSurface::Capabilities* capabilities) final;
+ bool Reshape(const gfx::Size& size,
+ float device_scale_factor,
+ const gfx::ColorSpace& color_space,
+ gfx::BufferFormat format,
+ gfx::OverlayTransform transform) final;
+ std::vector<std::unique_ptr<Image>> AllocateImages(
+ gfx::ColorSpace color_space,
+ gfx::Size image_size,
+ size_t num_images) final;
+ void SwapBuffers(SwapCompletionCallback completion_callback,
+ BufferPresentedCallback presentation_callback) final;
+ void PostSubBuffer(const gfx::Rect& rect,
+ SwapCompletionCallback completion_callback,
+ BufferPresentedCallback presentation_callback) final;
+ void CommitOverlayPlanes(SwapCompletionCallback completion_callback,
+ BufferPresentedCallback presentation_callback) final;
+ void SchedulePrimaryPlane(
+ const OverlayProcessorInterface::OutputSurfaceOverlayPlane& plane,
+ Image* image,
+ bool is_submitted) final;
+ std::vector<OverlayData> ScheduleOverlays(
+ SkiaOutputSurface::OverlayList overlays) final;
+
+ private:
+ scoped_refptr<gl::GLSurface> gl_surface_;
+ SkiaOutputSurfaceDependency* dependency_;
+ const bool supports_async_swap_;
+
+ ResourceFormat image_format_;
+
+ // Shared Image factories
+ gpu::SharedImageFactory shared_image_factory_;
+ gpu::SharedImageRepresentationFactory shared_image_representation_factory_;
+ uint32_t shared_image_usage_;
+};
+
+} // namespace viz
+
+#endif // COMPONENTS_VIZ_SERVICE_DISPLAY_EMBEDDER_OUTPUT_PRESENTER_GL_H_
diff --git a/chromium/components/viz/service/display_embedder/output_surface_provider_impl.cc b/chromium/components/viz/service/display_embedder/output_surface_provider_impl.cc
index b4d4b1c1c59..da8936a87cf 100644
--- a/chromium/components/viz/service/display_embedder/output_surface_provider_impl.cc
+++ b/chromium/components/viz/service/display_embedder/output_surface_provider_impl.cc
@@ -57,6 +57,7 @@
#if defined(USE_OZONE)
#include "components/viz/service/display_embedder/software_output_device_ozone.h"
+#include "ui/base/ui_base_features.h"
#include "ui/display/types/display_snapshot.h"
#include "ui/ozone/public/ozone_platform.h"
#include "ui/ozone/public/platform_window_surface.h"
@@ -115,11 +116,6 @@ std::unique_ptr<OutputSurface> OutputSurfaceProviderImpl::CreateOutputSurface(
output_surface = std::make_unique<SoftwareOutputSurface>(
CreateSoftwareOutputDeviceForPlatform(surface_handle, display_client));
} else if (renderer_settings.use_skia_renderer) {
-#if defined(OS_MACOSX)
- // TODO(penghuang): Support SkiaRenderer for all platforms.
- NOTIMPLEMENTED();
- return nullptr;
-#else
{
gpu::ScopedAllowScheduleGpuTask allow_schedule_gpu_task;
output_surface = SkiaOutputSurfaceImpl::Create(
@@ -138,7 +134,6 @@ std::unique_ptr<OutputSurface> OutputSurfaceProviderImpl::CreateOutputSurface(
#endif
return nullptr;
}
-#endif
} else {
DCHECK(task_executor_);
@@ -183,6 +178,10 @@ std::unique_ptr<OutputSurface> OutputSurfaceProviderImpl::CreateOutputSurface(
std::move(context_provider));
} else if (context_provider->ContextCapabilities().surfaceless) {
#if defined(USE_OZONE) || defined(OS_MACOSX) || defined(OS_ANDROID)
+#if defined(USE_OZONE)
+ if (!features::IsUsingOzonePlatform())
+ NOTREACHED();
+#endif
output_surface = std::make_unique<GLOutputSurfaceBufferQueue>(
std::move(context_provider), surface_handle,
std::make_unique<BufferQueue>(
@@ -234,24 +233,31 @@ OutputSurfaceProviderImpl::CreateSoftwareOutputDeviceForPlatform(
NOTREACHED();
return nullptr;
#elif defined(USE_OZONE)
- ui::SurfaceFactoryOzone* factory =
- ui::OzonePlatform::GetInstance()->GetSurfaceFactoryOzone();
- std::unique_ptr<ui::PlatformWindowSurface> platform_window_surface =
- factory->CreatePlatformWindowSurface(surface_handle);
- bool in_host_process =
- !gpu_service_impl_ || gpu_service_impl_->in_host_process();
- std::unique_ptr<ui::SurfaceOzoneCanvas> surface_ozone =
- factory->CreateCanvasForWidget(
- surface_handle,
- in_host_process ? nullptr : gpu_service_impl_->main_runner());
- CHECK(surface_ozone);
- return std::make_unique<SoftwareOutputDeviceOzone>(
- std::move(platform_window_surface), std::move(surface_ozone));
-#elif defined(USE_X11)
+ if (features::IsUsingOzonePlatform()) {
+ ui::SurfaceFactoryOzone* factory =
+ ui::OzonePlatform::GetInstance()->GetSurfaceFactoryOzone();
+ std::unique_ptr<ui::PlatformWindowSurface> platform_window_surface =
+ factory->CreatePlatformWindowSurface(surface_handle);
+ bool in_host_process =
+ !gpu_service_impl_ || gpu_service_impl_->in_host_process();
+ std::unique_ptr<ui::SurfaceOzoneCanvas> surface_ozone =
+ factory->CreateCanvasForWidget(
+ surface_handle,
+ in_host_process ? nullptr : gpu_service_impl_->main_runner());
+ CHECK(surface_ozone);
+ return std::make_unique<SoftwareOutputDeviceOzone>(
+ std::move(platform_window_surface), std::move(surface_ozone));
+ }
+#endif
+
+#if defined(USE_X11)
return std::make_unique<SoftwareOutputDeviceX11>(
surface_handle, gpu_service_impl_->in_host_process()
? nullptr
: gpu_service_impl_->main_runner());
+#else
+ NOTREACHED();
+ return nullptr;
#endif
}
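[Editor's sketch] The selection above reduces to compile-time guards plus a runtime feature check. A condensed sketch of that pattern, assuming the same build flags; DoCreateOzoneDevice() and DoCreateX11Device() are hypothetical stand-ins for the real construction code, not names from the patch.

  std::unique_ptr<SoftwareOutputDevice> CreateDeviceSketch(
      gpu::SurfaceHandle surface_handle) {
  #if defined(USE_OZONE)
    // Ozone can be compiled in but disabled at runtime, hence the extra check.
    if (features::IsUsingOzonePlatform())
      return DoCreateOzoneDevice(surface_handle);
  #endif
  #if defined(USE_X11)
    return DoCreateX11Device(surface_handle);
  #else
    NOTREACHED();
    return nullptr;
  #endif
  }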
diff --git a/chromium/components/viz/service/display_embedder/skia_output_device.cc b/chromium/components/viz/service/display_embedder/skia_output_device.cc
index 3bec0806cec..d767dd45e7a 100644
--- a/chromium/components/viz/service/display_embedder/skia_output_device.cc
+++ b/chromium/components/viz/service/display_embedder/skia_output_device.cc
@@ -6,15 +6,43 @@
#include <utility>
+#include "base/bind.h"
#include "base/check_op.h"
#include "base/notreached.h"
+#include "base/task/task_traits.h"
+#include "base/task/thread_pool.h"
+#include "base/task/thread_pool/thread_pool_instance.h"
#include "components/viz/service/display/dc_layer_overlay.h"
#include "gpu/command_buffer/service/memory_tracking.h"
#include "third_party/skia/include/core/SkSurface.h"
#include "ui/gfx/gpu_fence.h"
#include "ui/gfx/presentation_feedback.h"
+#include "ui/latency/latency_tracker.h"
namespace viz {
+namespace {
+
+scoped_refptr<base::SequencedTaskRunner> CreateLatencyTracerRunner() {
+ if (!base::ThreadPoolInstance::Get())
+ return nullptr;
+ return base::ThreadPool::CreateSequencedTaskRunner(
+ {base::TaskPriority::BEST_EFFORT,
+ base::TaskShutdownBehavior::SKIP_ON_SHUTDOWN});
+}
+
+void ReportLatency(const gfx::SwapTimings& timings,
+ ui::LatencyTracker* tracker,
+ std::vector<ui::LatencyInfo> latency_info) {
+ for (auto& latency : latency_info) {
+ latency.AddLatencyNumberWithTimestamp(
+ ui::INPUT_EVENT_GPU_SWAP_BUFFER_COMPONENT, timings.swap_start);
+ latency.AddLatencyNumberWithTimestamp(
+ ui::INPUT_EVENT_LATENCY_FRAME_SWAP_COMPONENT, timings.swap_end);
+ }
+ tracker->OnGpuSwapBuffersCompleted(std::move(latency_info));
+}
+
+} // namespace
SkiaOutputDevice::ScopedPaint::ScopedPaint(SkiaOutputDevice* device)
: device_(device), sk_surface_(device->BeginPaint(&end_semaphores_)) {
@@ -31,9 +59,14 @@ SkiaOutputDevice::SkiaOutputDevice(
: did_swap_buffer_complete_callback_(
std::move(did_swap_buffer_complete_callback)),
memory_type_tracker_(
- std::make_unique<gpu::MemoryTypeTracker>(memory_tracker)) {}
+ std::make_unique<gpu::MemoryTypeTracker>(memory_tracker)),
+ latency_tracker_(std::make_unique<ui::LatencyTracker>()),
+ latency_tracker_runner_(CreateLatencyTracerRunner()) {}
-SkiaOutputDevice::~SkiaOutputDevice() = default;
+SkiaOutputDevice::~SkiaOutputDevice() {
+ if (latency_tracker_runner_)
+ latency_tracker_runner_->DeleteSoon(FROM_HERE, std::move(latency_tracker_));
+}
void SkiaOutputDevice::CommitOverlayPlanes(
BufferPresentedCallback feedback,
@@ -48,7 +81,9 @@ void SkiaOutputDevice::PostSubBuffer(
NOTREACHED();
}
-void SkiaOutputDevice::SetDrawRectangle(const gfx::Rect& draw_rectangle) {}
+bool SkiaOutputDevice::SetDrawRectangle(const gfx::Rect& draw_rectangle) {
+ return false;
+}
void SkiaOutputDevice::SetGpuVSyncEnabled(bool enabled) {
NOTIMPLEMENTED();
@@ -74,6 +109,9 @@ void SkiaOutputDevice::SetEnableDCLayers(bool enable) {
}
#endif
+void SkiaOutputDevice::EnsureBackbuffer() {}
+void SkiaOutputDevice::DiscardBackbuffer() {}
+
void SkiaOutputDevice::StartSwapBuffers(BufferPresentedCallback feedback) {
DCHECK_LT(static_cast<int>(pending_swaps_.size()),
capabilities_.max_frames_pending);
@@ -82,34 +120,33 @@ void SkiaOutputDevice::StartSwapBuffers(BufferPresentedCallback feedback) {
}
void SkiaOutputDevice::FinishSwapBuffers(
- gfx::SwapResult result,
+ gfx::SwapCompletionResult result,
const gfx::Size& size,
- std::vector<ui::LatencyInfo> latency_info) {
+ std::vector<ui::LatencyInfo> latency_info,
+ const base::Optional<gfx::Rect>& damage_area) {
DCHECK(!pending_swaps_.empty());
const gpu::SwapBuffersCompleteParams& params =
- pending_swaps_.front().Complete(result);
+ pending_swaps_.front().Complete(std::move(result), damage_area);
did_swap_buffer_complete_callback_.Run(params, size);
pending_swaps_.front().CallFeedback();
- for (auto& latency : latency_info) {
- latency.AddLatencyNumberWithTimestamp(
- ui::INPUT_EVENT_GPU_SWAP_BUFFER_COMPONENT,
- params.swap_response.timings.swap_start);
- latency.AddLatencyNumberWithTimestamp(
- ui::INPUT_EVENT_LATENCY_FRAME_SWAP_COMPONENT,
- params.swap_response.timings.swap_end);
+ if (latency_tracker_runner_) {
+ // Report latency off GPU main thread.
+ latency_tracker_runner_->PostTask(
+ FROM_HERE,
+ base::BindOnce(&ReportLatency, params.swap_response.timings,
+ latency_tracker_.get(), std::move(latency_info)));
+ } else {
+ ReportLatency(params.swap_response.timings, latency_tracker_.get(),
+ std::move(latency_info));
}
- latency_tracker_.OnGpuSwapBuffersCompleted(latency_info);
pending_swaps_.pop();
}
-void SkiaOutputDevice::EnsureBackbuffer() {}
-void SkiaOutputDevice::DiscardBackbuffer() {}
-
SkiaOutputDevice::SwapInfo::SwapInfo(
uint64_t swap_id,
SkiaOutputDevice::BufferPresentedCallback feedback)
@@ -123,10 +160,13 @@ SkiaOutputDevice::SwapInfo::SwapInfo(SwapInfo&& other) = default;
SkiaOutputDevice::SwapInfo::~SwapInfo() = default;
const gpu::SwapBuffersCompleteParams& SkiaOutputDevice::SwapInfo::Complete(
- gfx::SwapResult result) {
- params_.swap_response.result = result;
+ gfx::SwapCompletionResult result,
+ const base::Optional<gfx::Rect>& damage_rect) {
+ params_.swap_response.result = result.swap_result;
params_.swap_response.timings.swap_end = base::TimeTicks::Now();
-
+ params_.frame_buffer_damage_area = damage_rect;
+ if (result.ca_layer_params)
+ params_.ca_layer_params = *result.ca_layer_params;
return params_;
}
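[Editor's sketch] The pattern introduced above — create a BEST_EFFORT sequenced task runner once, post reporting work to it, and delete the tracker on that same sequence — can be summarized as follows. This is a sketch using the same base:: APIs as the patch; MyReporter, MyTracker, Record() and Sample are illustrative names, and the thread pool is assumed to exist.

  class MyReporter {
   public:
    MyReporter()
        : tracker_(std::make_unique<MyTracker>()),
          runner_(base::ThreadPool::CreateSequencedTaskRunner(
              {base::TaskPriority::BEST_EFFORT,
               base::TaskShutdownBehavior::SKIP_ON_SHUTDOWN})) {}

    ~MyReporter() {
      // Destroy the tracker on its reporting sequence, after queued reports.
      runner_->DeleteSoon(FROM_HERE, std::move(tracker_));
    }

    void Report(Sample sample) {
      // Unretained is safe: DeleteSoon in the destructor runs on the same
      // sequence, after any report posted here.
      runner_->PostTask(
          FROM_HERE, base::BindOnce(&MyTracker::Record,
                                    base::Unretained(tracker_.get()), sample));
    }

   private:
    std::unique_ptr<MyTracker> tracker_;
    scoped_refptr<base::SequencedTaskRunner> runner_;
  };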
diff --git a/chromium/components/viz/service/display_embedder/skia_output_device.h b/chromium/components/viz/service/display_embedder/skia_output_device.h
index 80dab35e5c3..0aa9857cd2c 100644
--- a/chromium/components/viz/service/display_embedder/skia_output_device.h
+++ b/chromium/components/viz/service/display_embedder/skia_output_device.h
@@ -11,6 +11,7 @@
#include "base/callback.h"
#include "base/containers/queue.h"
#include "base/macros.h"
+#include "base/memory/scoped_refptr.h"
#include "base/optional.h"
#include "build/build_config.h"
#include "components/viz/service/display/output_surface.h"
@@ -20,10 +21,13 @@
#include "third_party/skia/include/core/SkRefCnt.h"
#include "third_party/skia/src/gpu/GrSemaphore.h"
#include "ui/gfx/swap_result.h"
-#include "ui/latency/latency_tracker.h"
class SkSurface;
+namespace base {
+class SequencedTaskRunner;
+}
+
namespace gfx {
class ColorSpace;
class Rect;
@@ -36,6 +40,10 @@ class MemoryTracker;
class MemoryTypeTracker;
} // namespace gpu
+namespace ui {
+class LatencyTracker;
+}
+
namespace viz {
class SkiaOutputDevice {
@@ -89,7 +97,7 @@ class SkiaOutputDevice {
std::vector<ui::LatencyInfo> latency_info);
// Set the rectangle that will be drawn into on the surface.
- virtual void SetDrawRectangle(const gfx::Rect& draw_rectangle);
+ virtual bool SetDrawRectangle(const gfx::Rect& draw_rectangle);
virtual void SetGpuVSyncEnabled(bool enabled);
@@ -129,7 +137,9 @@ class SkiaOutputDevice {
SwapInfo(uint64_t swap_id, BufferPresentedCallback feedback);
SwapInfo(SwapInfo&& other);
~SwapInfo();
- const gpu::SwapBuffersCompleteParams& Complete(gfx::SwapResult result);
+ const gpu::SwapBuffersCompleteParams& Complete(
+ gfx::SwapCompletionResult result,
+ const base::Optional<gfx::Rect>& damage_area);
void CallFeedback();
private:
@@ -150,9 +160,11 @@ class SkiaOutputDevice {
// Helper method for SwapBuffers() and PostSubBuffer(). It should be called
// at the end of SwapBuffers() and PostSubBuffer() implementations
- void FinishSwapBuffers(gfx::SwapResult result,
- const gfx::Size& size,
- std::vector<ui::LatencyInfo> latency_info);
+ void FinishSwapBuffers(
+ gfx::SwapCompletionResult result,
+ const gfx::Size& size,
+ std::vector<ui::LatencyInfo> latency_info,
+ const base::Optional<gfx::Rect>& damage_area = base::nullopt);
OutputSurface::Capabilities capabilities_;
@@ -161,13 +173,16 @@ class SkiaOutputDevice {
base::queue<SwapInfo> pending_swaps_;
- ui::LatencyTracker latency_tracker_;
-
// RGBX format is emulated with RGBA.
bool is_emulated_rgbx_ = false;
std::unique_ptr<gpu::MemoryTypeTracker> memory_type_tracker_;
+ private:
+ std::unique_ptr<ui::LatencyTracker> latency_tracker_;
+  // Task runner used by |latency_tracker_| to report latency off the GPU
+  // main thread.
+ scoped_refptr<base::SequencedTaskRunner> latency_tracker_runner_;
+
DISALLOW_COPY_AND_ASSIGN(SkiaOutputDevice);
};
diff --git a/chromium/components/viz/service/display_embedder/skia_output_device_buffer_queue.cc b/chromium/components/viz/service/display_embedder/skia_output_device_buffer_queue.cc
index 1ba33bf3fe3..1969bdfedcc 100644
--- a/chromium/components/viz/service/display_embedder/skia_output_device_buffer_queue.cc
+++ b/chromium/components/viz/service/display_embedder/skia_output_device_buffer_queue.cc
@@ -4,284 +4,36 @@
#include "components/viz/service/display_embedder/skia_output_device_buffer_queue.h"
+#include <memory>
+#include <utility>
+#include <vector>
+
#include "base/command_line.h"
-#include "base/threading/thread_task_runner_handle.h"
-#include "build/build_config.h"
-#include "components/viz/common/resources/resource_format_utils.h"
#include "components/viz/common/switches.h"
#include "components/viz/service/display_embedder/skia_output_surface_dependency.h"
#include "gpu/command_buffer/common/capabilities.h"
-#include "gpu/command_buffer/common/shared_image_usage.h"
#include "gpu/command_buffer/service/feature_info.h"
#include "gpu/command_buffer/service/shared_context_state.h"
#include "gpu/command_buffer/service/shared_image_representation.h"
#include "gpu/config/gpu_finch_features.h"
-#include "gpu/ipc/common/gpu_surface_lookup.h"
#include "third_party/skia/include/core/SkSurface.h"
#include "third_party/skia/include/core/SkSurfaceProps.h"
-#include "ui/display/types/display_snapshot.h"
-#include "ui/gfx/buffer_format_util.h"
-#include "ui/gfx/geometry/rect_conversions.h"
-#include "ui/gl/gl_fence.h"
#include "ui/gl/gl_surface.h"
-#if defined(OS_ANDROID)
-#include "ui/gl/gl_surface_egl_surface_control.h"
-#endif
-
namespace viz {
-namespace {
-
-constexpr uint32_t kSharedImageUsage =
- gpu::SHARED_IMAGE_USAGE_SCANOUT | gpu::SHARED_IMAGE_USAGE_DISPLAY |
- gpu::SHARED_IMAGE_USAGE_GLES2_FRAMEBUFFER_HINT;
-
-} // namespace
-
-class SkiaOutputDeviceBufferQueue::Image {
- public:
- Image(gpu::SharedImageFactory* factory,
- gpu::SharedImageRepresentationFactory* representation_factory)
- : factory_(factory), representation_factory_(representation_factory) {}
- ~Image() {
- // TODO(vasilyt): As we are going to delete image anyway we should be able
- // to abort write to avoid unnecessary flush to submit semaphores.
- if (scoped_skia_write_access_) {
- EndWriteSkia();
- }
- DCHECK(!scoped_skia_write_access_);
- }
-
- bool Initialize(const gfx::Size& size,
- const gfx::ColorSpace& color_space,
- ResourceFormat format,
- SkiaOutputSurfaceDependency* deps,
- uint32_t shared_image_usage) {
- auto mailbox = gpu::Mailbox::GenerateForSharedImage();
- // TODO(penghuang): This should pass the surface handle for ChromeOS
- if (!factory_->CreateSharedImage(mailbox, format, size, color_space,
- gpu::kNullSurfaceHandle,
- shared_image_usage)) {
- DLOG(ERROR) << "CreateSharedImage failed.";
- return false;
- }
-
- // Initialize |shared_image_deletor_| to make sure the shared image backing
- // will be released with the Image.
- shared_image_deletor_.ReplaceClosure(base::BindOnce(
- base::IgnoreResult(&gpu::SharedImageFactory::DestroySharedImage),
- base::Unretained(factory_), mailbox));
-
- skia_representation_ = representation_factory_->ProduceSkia(
- mailbox, deps->GetSharedContextState());
- if (!skia_representation_) {
- DLOG(ERROR) << "ProduceSkia() failed.";
- return false;
- }
-
- overlay_representation_ = representation_factory_->ProduceOverlay(mailbox);
-
- // If the backing doesn't support overlay, then fallback to GL.
- if (!overlay_representation_)
- gl_representation_ = representation_factory_->ProduceGLTexture(mailbox);
-
- if (!overlay_representation_ && !gl_representation_) {
- DLOG(ERROR) << "ProduceOverlay() and ProduceGLTexture() failed.";
- return false;
- }
-
- return true;
- }
-
- void BeginWriteSkia() {
- DCHECK(!scoped_skia_write_access_);
- DCHECK(!scoped_overlay_read_access_);
- DCHECK(end_semaphores_.empty());
-
- std::vector<GrBackendSemaphore> begin_semaphores;
- // LegacyFontHost will get LCD text and skia figures out what type to use.
- SkSurfaceProps surface_props(0 /* flags */,
- SkSurfaceProps::kLegacyFontHost_InitType);
-
- // Buffer queue is internal to GPU proc and handles texture initialization,
- // so allow uncleared access.
- // TODO(vasilyt): Props and MSAA
- scoped_skia_write_access_ = skia_representation_->BeginScopedWriteAccess(
- 0 /* final_msaa_count */, surface_props, &begin_semaphores,
- &end_semaphores_,
- gpu::SharedImageRepresentation::AllowUnclearedAccess::kYes);
- DCHECK(scoped_skia_write_access_);
- if (!begin_semaphores.empty()) {
- scoped_skia_write_access_->surface()->wait(begin_semaphores.size(),
- begin_semaphores.data());
- }
- }
-
- SkSurface* sk_surface() {
- return scoped_skia_write_access_ ? scoped_skia_write_access_->surface()
- : nullptr;
- }
-
- std::vector<GrBackendSemaphore> TakeEndWriteSkiaSemaphores() {
- std::vector<GrBackendSemaphore> temp_vector;
- temp_vector.swap(end_semaphores_);
- return temp_vector;
- }
-
- void EndWriteSkia() {
- // The Flush now takes place in finishPaintCurrentBuffer on the CPU side.
- // check if end_semaphores is not empty then flash here
- DCHECK(scoped_skia_write_access_);
- if (!end_semaphores_.empty()) {
- GrFlushInfo flush_info = {
- .fFlags = kNone_GrFlushFlags,
- .fNumSemaphores = end_semaphores_.size(),
- .fSignalSemaphores = end_semaphores_.data(),
- };
- scoped_skia_write_access_->surface()->flush(
- SkSurface::BackendSurfaceAccess::kNoAccess, flush_info);
- }
- scoped_skia_write_access_.reset();
- end_semaphores_.clear();
-
- // SkiaRenderer always draws the full frame.
- skia_representation_->SetCleared();
- }
-
- void BeginPresent() {
- if (++present_count_ != 1) {
- DCHECK(scoped_overlay_read_access_ || scoped_gl_read_access_);
- return;
- }
-
- DCHECK(!scoped_skia_write_access_);
- DCHECK(!scoped_overlay_read_access_);
-
- if (overlay_representation_) {
- scoped_overlay_read_access_ =
- overlay_representation_->BeginScopedReadAccess(
- true /* need_gl_image */);
- DCHECK(scoped_overlay_read_access_);
- return;
- }
-
- scoped_gl_read_access_ = gl_representation_->BeginScopedAccess(
- GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM,
- gpu::SharedImageRepresentation::AllowUnclearedAccess::kNo);
- DCHECK(scoped_gl_read_access_);
- }
-
- void EndPresent() {
- DCHECK(present_count_);
- if (--present_count_)
- return;
- scoped_overlay_read_access_.reset();
- scoped_gl_read_access_.reset();
- }
-
- gl::GLImage* GetGLImage(std::unique_ptr<gfx::GpuFence>* fence) {
- if (scoped_overlay_read_access_)
- return scoped_overlay_read_access_->gl_image();
-
- DCHECK(scoped_gl_read_access_);
-
- if (gl::GLFence::IsGpuFenceSupported() && fence) {
- if (auto gl_fence = gl::GLFence::CreateForGpuFence())
- *fence = gl_fence->GetGpuFence();
- }
- auto* texture = gl_representation_->GetTexture();
- return texture->GetLevelImage(texture->target(), 0);
- }
-
- base::WeakPtr<Image> GetWeakPtr() { return weak_ptr_factory_.GetWeakPtr(); }
-
- int present_count() const { return present_count_; }
- gpu::SharedImageRepresentationSkia* skia_representation() {
- return skia_representation_.get();
- }
- const gfx::Size& size() const { return skia_representation_->size(); }
-
- private:
- gpu::SharedImageFactory* const factory_;
- gpu::SharedImageRepresentationFactory* const representation_factory_;
-
- base::ScopedClosureRunner shared_image_deletor_;
- std::unique_ptr<gpu::SharedImageRepresentationSkia> skia_representation_;
- std::unique_ptr<gpu::SharedImageRepresentationOverlay>
- overlay_representation_;
- std::unique_ptr<gpu::SharedImageRepresentationGLTexture> gl_representation_;
- std::unique_ptr<gpu::SharedImageRepresentationSkia::ScopedWriteAccess>
- scoped_skia_write_access_;
- std::unique_ptr<gpu::SharedImageRepresentationOverlay::ScopedReadAccess>
- scoped_overlay_read_access_;
- std::unique_ptr<gpu::SharedImageRepresentationGLTexture::ScopedAccess>
- scoped_gl_read_access_;
- std::vector<GrBackendSemaphore> end_semaphores_;
- int present_count_ = 0;
- base::WeakPtrFactory<Image> weak_ptr_factory_{this};
-
- DISALLOW_COPY_AND_ASSIGN(Image);
-};
-
-class SkiaOutputDeviceBufferQueue::OverlayData {
- public:
- OverlayData(
- std::unique_ptr<gpu::SharedImageRepresentationOverlay> representation,
- std::unique_ptr<gpu::SharedImageRepresentationOverlay::ScopedReadAccess>
- scoped_read_access)
- : representation_(std::move(representation)),
- scoped_read_access_(std::move(scoped_read_access)) {}
- OverlayData(OverlayData&&) = default;
- ~OverlayData() = default;
- OverlayData& operator=(OverlayData&&) = default;
-
- gl::GLImage* gl_image() { return scoped_read_access_->gl_image(); }
-
- private:
- std::unique_ptr<gpu::SharedImageRepresentationOverlay> representation_;
- std::unique_ptr<gpu::SharedImageRepresentationOverlay::ScopedReadAccess>
- scoped_read_access_;
-};
SkiaOutputDeviceBufferQueue::SkiaOutputDeviceBufferQueue(
- scoped_refptr<gl::GLSurface> gl_surface,
+ std::unique_ptr<OutputPresenter> presenter,
SkiaOutputSurfaceDependency* deps,
gpu::MemoryTracker* memory_tracker,
- const DidSwapBufferCompleteCallback& did_swap_buffer_complete_callback,
- uint32_t shared_image_usage)
+ const DidSwapBufferCompleteCallback& did_swap_buffer_complete_callback)
: SkiaOutputDevice(memory_tracker, did_swap_buffer_complete_callback),
- dependency_(deps),
- gl_surface_(std::move(gl_surface)),
- supports_async_swap_(gl_surface_->SupportsAsyncSwap()),
- shared_image_factory_(deps->GetGpuPreferences(),
- deps->GetGpuDriverBugWorkarounds(),
- deps->GetGpuFeatureInfo(),
- deps->GetSharedContextState().get(),
- deps->GetMailboxManager(),
- deps->GetSharedImageManager(),
- deps->GetGpuImageFactory(),
- memory_tracker,
- true),
- shared_image_usage_(shared_image_usage) {
- shared_image_representation_factory_ =
- std::make_unique<gpu::SharedImageRepresentationFactory>(
- dependency_->GetSharedImageManager(), memory_tracker);
-
-#if defined(USE_OZONE)
- image_format_ = GetResourceFormat(display::DisplaySnapshot::PrimaryFormat());
-#else
- image_format_ = RGBA_8888;
-#endif
- // GL is origin is at bottom left normally, all Surfaceless implementations
- // are flipped.
- DCHECK_EQ(gl_surface_->GetOrigin(), gfx::SurfaceOrigin::kTopLeft);
-
+ presenter_(std::move(presenter)),
+ dependency_(deps) {
capabilities_.uses_default_gl_framebuffer = false;
- capabilities_.android_surface_control_feature_enabled = true;
- capabilities_.supports_post_sub_buffer = gl_surface_->SupportsPostSubBuffer();
- capabilities_.supports_commit_overlay_planes =
- gl_surface_->SupportsCommitOverlayPlanes();
- capabilities_.max_frames_pending = 2;
+ capabilities_.preserve_buffer_content = true;
+ capabilities_.only_invalidates_damage_rect = false;
+ capabilities_.number_of_buffers = 3;
// Force the number of max pending frames to one when the switch
// "double-buffer-compositing" is passed.
@@ -289,35 +41,12 @@ SkiaOutputDeviceBufferQueue::SkiaOutputDeviceBufferQueue(
// allocates at most one additional buffer.
base::CommandLine* command_line = base::CommandLine::ForCurrentProcess();
if (command_line->HasSwitch(switches::kDoubleBufferCompositing))
- capabilities_.max_frames_pending = 1;
+ capabilities_.number_of_buffers = 2;
+ capabilities_.max_frames_pending = capabilities_.number_of_buffers - 1;
- capabilities_.only_invalidates_damage_rect = false;
- // Set supports_surfaceless to enable overlays.
- capabilities_.supports_surfaceless = true;
- capabilities_.preserve_buffer_content = true;
- // We expect origin of buffers is at top left.
- capabilities_.output_surface_origin = gfx::SurfaceOrigin::kTopLeft;
-
- // TODO(penghuang): Use defaultBackendFormat() in shared image implementation
- // to make sure backend formant is consistent.
- capabilities_.sk_color_type = ResourceFormatToClosestSkColorType(
- true /* gpu_compositing */, image_format_);
- capabilities_.gr_backend_format =
- dependency_->GetSharedContextState()->gr_context()->defaultBackendFormat(
- capabilities_.sk_color_type, GrRenderable::kYes);
+ presenter_->InitializeCapabilities(&capabilities_);
}
-SkiaOutputDeviceBufferQueue::SkiaOutputDeviceBufferQueue(
- scoped_refptr<gl::GLSurface> gl_surface,
- SkiaOutputSurfaceDependency* deps,
- gpu::MemoryTracker* memory_tracker,
- const DidSwapBufferCompleteCallback& did_swap_buffer_complete_callback)
- : SkiaOutputDeviceBufferQueue(std::move(gl_surface),
- deps,
- memory_tracker,
- did_swap_buffer_complete_callback,
- kSharedImageUsage) {}
-
SkiaOutputDeviceBufferQueue::~SkiaOutputDeviceBufferQueue() {
FreeAllSurfaces();
// Clear and cancel swap_completion_callbacks_ to free all resource bind to
@@ -325,58 +54,17 @@ SkiaOutputDeviceBufferQueue::~SkiaOutputDeviceBufferQueue() {
swap_completion_callbacks_.clear();
}
-// static
-std::unique_ptr<SkiaOutputDeviceBufferQueue>
-SkiaOutputDeviceBufferQueue::Create(
- SkiaOutputSurfaceDependency* deps,
- gpu::MemoryTracker* memory_tracker,
- const DidSwapBufferCompleteCallback& did_swap_buffer_complete_callback) {
-#if defined(OS_ANDROID)
- if (deps->GetGpuFeatureInfo()
- .status_values[gpu::GPU_FEATURE_TYPE_ANDROID_SURFACE_CONTROL] !=
- gpu::kGpuFeatureStatusEnabled) {
- return nullptr;
- }
-
- bool can_be_used_with_surface_control = false;
- ANativeWindow* window =
- gpu::GpuSurfaceLookup::GetInstance()->AcquireNativeWidget(
- deps->GetSurfaceHandle(), &can_be_used_with_surface_control);
- if (!window || !can_be_used_with_surface_control)
- return nullptr;
- // TODO(https://crbug.com/1012401): don't depend on GL.
- auto gl_surface = base::MakeRefCounted<gl::GLSurfaceEGLSurfaceControl>(
- window, base::ThreadTaskRunnerHandle::Get());
- if (!gl_surface->Initialize(gl::GLSurfaceFormat())) {
- LOG(ERROR) << "Failed to initialize GLSurfaceEGLSurfaceControl.";
- return nullptr;
- }
-
- if (!deps->GetSharedContextState()->MakeCurrent(gl_surface.get(),
- true /* needs_gl*/)) {
- LOG(ERROR) << "MakeCurrent failed.";
- return nullptr;
- }
-
- return std::make_unique<SkiaOutputDeviceBufferQueue>(
- std::move(gl_surface), deps, memory_tracker,
- did_swap_buffer_complete_callback);
-#else
- return nullptr;
-#endif
-}
-
-SkiaOutputDeviceBufferQueue::Image*
-SkiaOutputDeviceBufferQueue::GetNextImage() {
+OutputPresenter::Image* SkiaOutputDeviceBufferQueue::GetNextImage() {
DCHECK(!available_images_.empty());
auto* image = available_images_.front();
available_images_.pop_front();
return image;
}
-void SkiaOutputDeviceBufferQueue::PageFlipComplete(Image* image) {
+void SkiaOutputDeviceBufferQueue::PageFlipComplete(
+ OutputPresenter::Image* image) {
if (displayed_image_) {
- DCHECK_EQ(displayed_image_->size(), image_size_);
+ DCHECK_EQ(displayed_image_->skia_representation()->size(), image_size_);
DCHECK_EQ(displayed_image_->present_count() > 1, displayed_image_ == image);
displayed_image_->EndPresent();
if (!displayed_image_->present_count()) {
@@ -417,57 +105,13 @@ void SkiaOutputDeviceBufferQueue::SchedulePrimaryPlane(
DCHECK(image);
image->BeginPresent();
-
- std::unique_ptr<gfx::GpuFence> fence;
- // If the submitted_image_ is being scheduled, we don't new a new fence.
- auto* gl_image =
- image->GetGLImage(image == submitted_image_ ? nullptr : &fence);
-
- // Output surface is also z-order 0.
- constexpr int kPlaneZOrder = 0;
- // Output surface always uses the full texture.
- constexpr gfx::RectF kUVRect(0.f, 0.f, 1.0f, 1.0f);
- gl_surface_->ScheduleOverlayPlane(kPlaneZOrder, plane.transform, gl_image,
- ToNearestRect(plane.display_rect), kUVRect,
- plane.enable_blending, std::move(fence));
+ presenter_->SchedulePrimaryPlane(plane, image, image == submitted_image_);
}
void SkiaOutputDeviceBufferQueue::ScheduleOverlays(
SkiaOutputSurface::OverlayList overlays) {
-#if defined(OS_ANDROID)
DCHECK(pending_overlays_.empty());
- for (auto& overlay : overlays) {
- auto shared_image =
- shared_image_representation_factory_->ProduceOverlay(overlay.mailbox);
- // When display is re-opened, the first few frames might not have video
- // resource ready. Possible investigation crbug.com/1023971.
- if (!shared_image) {
- LOG(ERROR) << "Invalid mailbox.";
- continue;
- }
-
- std::unique_ptr<gpu::SharedImageRepresentationOverlay::ScopedReadAccess>
- shared_image_access =
- shared_image->BeginScopedReadAccess(true /* needs_gl_image */);
- if (!shared_image_access) {
- LOG(ERROR) << "Could not access SharedImage for read.";
- continue;
- }
-
- pending_overlays_.emplace_back(std::move(shared_image),
- std::move(shared_image_access));
- auto* gl_image = pending_overlays_.back().gl_image();
- DLOG_IF(ERROR, !gl_image) << "Cannot get GLImage.";
-
- if (gl_image) {
- DCHECK(!overlay.gpu_fence_id);
- gl_surface_->ScheduleOverlayPlane(
- overlay.plane_z_order, overlay.transform, gl_image,
- ToNearestRect(overlay.display_rect), overlay.uv_rect,
- !overlay.is_opaque, nullptr /* gpu_fence */);
- }
- }
-#endif // defined(OS_ANDROID)
+ pending_overlays_ = presenter_->ScheduleOverlays(std::move(overlays));
}
void SkiaOutputDeviceBufferQueue::SwapBuffers(
@@ -479,24 +123,17 @@ void SkiaOutputDeviceBufferQueue::SwapBuffers(
submitted_image_ = current_image_;
current_image_ = nullptr;
- if (supports_async_swap_) {
- // Cancelable callback uses weak ptr to drop this task upon destruction.
- // Thus it is safe to use |base::Unretained(this)|.
- // Bind submitted_image_->GetWeakPtr(), since the |submitted_image_| could
- // be released due to reshape() or destruction.
- swap_completion_callbacks_.emplace_back(
- std::make_unique<CancelableSwapCompletionCallback>(base::BindOnce(
- &SkiaOutputDeviceBufferQueue::DoFinishSwapBuffers,
- base::Unretained(this), image_size_, std::move(latency_info),
- submitted_image_->GetWeakPtr(), std::move(committed_overlays_))));
- gl_surface_->SwapBuffersAsync(swap_completion_callbacks_.back()->callback(),
- std::move(feedback));
- } else {
- DoFinishSwapBuffers(image_size_, std::move(latency_info),
- submitted_image_->GetWeakPtr(),
- std::move(committed_overlays_),
- gl_surface_->SwapBuffers(std::move(feedback)), nullptr);
- }
+ // Cancelable callback uses weak ptr to drop this task upon destruction.
+ // Thus it is safe to use |base::Unretained(this)|.
+ // Bind submitted_image_->GetWeakPtr(), since the |submitted_image_| could
+ // be released due to reshape() or destruction.
+ swap_completion_callbacks_.emplace_back(
+ std::make_unique<CancelableSwapCompletionCallback>(base::BindOnce(
+ &SkiaOutputDeviceBufferQueue::DoFinishSwapBuffers,
+ base::Unretained(this), image_size_, std::move(latency_info),
+ submitted_image_->GetWeakPtr(), std::move(committed_overlays_))));
+ presenter_->SwapBuffers(swap_completion_callbacks_.back()->callback(),
+ std::move(feedback));
committed_overlays_.clear();
std::swap(committed_overlays_, pending_overlays_);
}
@@ -513,27 +150,18 @@ void SkiaOutputDeviceBufferQueue::PostSubBuffer(
}
DCHECK(submitted_image_);
- if (supports_async_swap_) {
- // Cancelable callback uses weak ptr to drop this task upon destruction.
- // Thus it is safe to use |base::Unretained(this)|.
- // Bind submitted_image_->GetWeakPtr(), since the |submitted_image_| could
- // be released due to reshape() or destruction.
- swap_completion_callbacks_.emplace_back(
- std::make_unique<CancelableSwapCompletionCallback>(base::BindOnce(
- &SkiaOutputDeviceBufferQueue::DoFinishSwapBuffers,
- base::Unretained(this), image_size_, std::move(latency_info),
- submitted_image_->GetWeakPtr(), std::move(committed_overlays_))));
- gl_surface_->PostSubBufferAsync(
- rect.x(), rect.y(), rect.width(), rect.height(),
- swap_completion_callbacks_.back()->callback(), std::move(feedback));
- } else {
- DoFinishSwapBuffers(
- image_size_, std::move(latency_info), submitted_image_->GetWeakPtr(),
- std::move(committed_overlays_),
- gl_surface_->PostSubBuffer(rect.x(), rect.y(), rect.width(),
- rect.height(), std::move(feedback)),
- nullptr);
- }
+ // Cancelable callback uses weak ptr to drop this task upon destruction.
+ // Thus it is safe to use |base::Unretained(this)|.
+ // Bind submitted_image_->GetWeakPtr(), since the |submitted_image_| could
+ // be released due to reshape() or destruction.
+ swap_completion_callbacks_.emplace_back(
+ std::make_unique<CancelableSwapCompletionCallback>(base::BindOnce(
+ &SkiaOutputDeviceBufferQueue::DoFinishSwapBuffers,
+ base::Unretained(this), image_size_, std::move(latency_info),
+ submitted_image_->GetWeakPtr(), std::move(committed_overlays_))));
+ presenter_->PostSubBuffer(rect, swap_completion_callbacks_.back()->callback(),
+ std::move(feedback));
+
committed_overlays_.clear();
std::swap(committed_overlays_, pending_overlays_);
}
@@ -548,24 +176,18 @@ void SkiaOutputDeviceBufferQueue::CommitOverlayPlanes(
// A main buffer has to be submitted for previous frames.
DCHECK(submitted_image_);
- if (supports_async_swap_) {
- // Cancelable callback uses weak ptr to drop this task upon destruction.
- // Thus it is safe to use |base::Unretained(this)|.
- // Bind submitted_image_->GetWeakPtr(), since the |submitted_image_| could
- // be released due to reshape() or destruction.
- swap_completion_callbacks_.emplace_back(
- std::make_unique<CancelableSwapCompletionCallback>(base::BindOnce(
- &SkiaOutputDeviceBufferQueue::DoFinishSwapBuffers,
- base::Unretained(this), image_size_, std::move(latency_info),
- submitted_image_->GetWeakPtr(), std::move(committed_overlays_))));
- gl_surface_->CommitOverlayPlanesAsync(
- swap_completion_callbacks_.back()->callback(), std::move(feedback));
- } else {
- DoFinishSwapBuffers(
- image_size_, std::move(latency_info), submitted_image_->GetWeakPtr(),
- std::move(committed_overlays_),
- gl_surface_->CommitOverlayPlanes(std::move(feedback)), nullptr);
- }
+ // Cancelable callback uses weak ptr to drop this task upon destruction.
+ // Thus it is safe to use |base::Unretained(this)|.
+ // Bind submitted_image_->GetWeakPtr(), since the |submitted_image_| could
+ // be released due to reshape() or destruction.
+ swap_completion_callbacks_.emplace_back(
+ std::make_unique<CancelableSwapCompletionCallback>(base::BindOnce(
+ &SkiaOutputDeviceBufferQueue::DoFinishSwapBuffers,
+ base::Unretained(this), image_size_, std::move(latency_info),
+ submitted_image_->GetWeakPtr(), std::move(committed_overlays_))));
+ presenter_->CommitOverlayPlanes(swap_completion_callbacks_.back()->callback(),
+ std::move(feedback));
+
committed_overlays_.clear();
std::swap(committed_overlays_, pending_overlays_);
}
@@ -573,13 +195,11 @@ void SkiaOutputDeviceBufferQueue::CommitOverlayPlanes(
void SkiaOutputDeviceBufferQueue::DoFinishSwapBuffers(
const gfx::Size& size,
std::vector<ui::LatencyInfo> latency_info,
- const base::WeakPtr<Image>& image,
- std::vector<OverlayData> overlays,
- gfx::SwapResult result,
- std::unique_ptr<gfx::GpuFence> gpu_fence) {
- DCHECK(!gpu_fence);
-
- FinishSwapBuffers(result, size, latency_info);
+ const base::WeakPtr<OutputPresenter::Image>& image,
+ std::vector<OutputPresenter::OverlayData> overlays,
+ gfx::SwapCompletionResult result) {
+ DCHECK(!result.gpu_fence);
+ FinishSwapBuffers(std::move(result), size, latency_info);
PageFlipComplete(image.get());
}
@@ -588,8 +208,8 @@ bool SkiaOutputDeviceBufferQueue::Reshape(const gfx::Size& size,
const gfx::ColorSpace& color_space,
gfx::BufferFormat format,
gfx::OverlayTransform transform) {
- if (!gl_surface_->Resize(size, device_scale_factor, color_space,
- gfx::AlphaBitsForBufferFormat(format))) {
+ if (!presenter_->Reshape(size, device_scale_factor, color_space, format,
+ transform)) {
DLOG(ERROR) << "Failed to resize.";
return false;
}
@@ -598,16 +218,13 @@ bool SkiaOutputDeviceBufferQueue::Reshape(const gfx::Size& size,
image_size_ = size;
FreeAllSurfaces();
- for (int i = 0; i < capabilities_.max_frames_pending + 1; ++i) {
- auto image = std::make_unique<Image>(
- &shared_image_factory_, shared_image_representation_factory_.get());
- if (!image->Initialize(image_size_, color_space_, image_format_,
- dependency_, shared_image_usage_)) {
- DLOG(ERROR) << "Failed to initialize image.";
- return false;
- }
+ images_ = presenter_->AllocateImages(color_space_, image_size_,
+ capabilities_.number_of_buffers);
+ if (images_.empty())
+ return false;
+
+ for (auto& image : images_) {
available_images_.push_back(image.get());
- images_.push_back(std::move(image));
}
return true;
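[Editor's sketch] The swap paths above all share one shape: bind the completion work into a CancelableOnceCallback, keep the cancelable wrapper in a deque owned by the device, and hand only callback() to the presenter. A generic sketch of why that is safe; |Owner|, |OnSwapDone| and |callbacks_| are illustrative names, not from the patch.

  using SwapDone =
      base::CancelableOnceCallback<void(gfx::SwapCompletionResult)>;

  auto pending = std::make_unique<SwapDone>(
      base::BindOnce(&Owner::OnSwapDone, base::Unretained(owner)));
  // Hand the weakly bound callback to the presenter...
  auto for_presenter = pending->callback();
  // ...and keep ownership of the cancelable wrapper with |owner|.
  owner->callbacks_.push_back(std::move(pending));

  // If |owner| clears |callbacks_| (e.g. in its destructor), running the
  // presenter-side callback later is a no-op rather than a use-after-free.
  std::move(for_presenter)
      .Run(gfx::SwapCompletionResult(gfx::SwapResult::SWAP_ACK));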
diff --git a/chromium/components/viz/service/display_embedder/skia_output_device_buffer_queue.h b/chromium/components/viz/service/display_embedder/skia_output_device_buffer_queue.h
index 77ad81308cf..efb7d9c629a 100644
--- a/chromium/components/viz/service/display_embedder/skia_output_device_buffer_queue.h
+++ b/chromium/components/viz/service/display_embedder/skia_output_device_buffer_queue.h
@@ -7,39 +7,29 @@
#include "base/cancelable_callback.h"
#include "base/macros.h"
+#include "components/viz/service/display_embedder/output_presenter.h"
#include "components/viz/service/display_embedder/skia_output_device.h"
#include "components/viz/service/viz_service_export.h"
-#include "gpu/command_buffer/service/shared_image_factory.h"
-
-namespace gl {
-class GLSurface;
-} // namespace gl
namespace viz {
class SkiaOutputSurfaceDependency;
-class VIZ_SERVICE_EXPORT SkiaOutputDeviceBufferQueue final
- : public SkiaOutputDevice {
+class VIZ_SERVICE_EXPORT SkiaOutputDeviceBufferQueue : public SkiaOutputDevice {
public:
SkiaOutputDeviceBufferQueue(
- scoped_refptr<gl::GLSurface> gl_surface,
+ std::unique_ptr<OutputPresenter> presenter,
SkiaOutputSurfaceDependency* deps,
gpu::MemoryTracker* memory_tracker,
const DidSwapBufferCompleteCallback& did_swap_buffer_complete_callback);
- SkiaOutputDeviceBufferQueue(
- scoped_refptr<gl::GLSurface> gl_surface,
- SkiaOutputSurfaceDependency* deps,
- gpu::MemoryTracker* memory_tracker,
- const DidSwapBufferCompleteCallback& did_swap_buffer_complete_callback,
- uint32_t shared_image_usage);
+
~SkiaOutputDeviceBufferQueue() override;
- static std::unique_ptr<SkiaOutputDeviceBufferQueue> Create(
- SkiaOutputSurfaceDependency* deps,
- gpu::MemoryTracker* memory_tracker,
- const DidSwapBufferCompleteCallback& did_swap_buffer_complete_callback);
+ SkiaOutputDeviceBufferQueue(const SkiaOutputDeviceBufferQueue&) = delete;
+ SkiaOutputDeviceBufferQueue& operator=(const SkiaOutputDeviceBufferQueue&) =
+ delete;
+ // SkiaOutputDevice overrides.
void SwapBuffers(BufferPresentedCallback feedback,
std::vector<ui::LatencyInfo> latency_info) override;
void PostSubBuffer(const gfx::Rect& rect,
@@ -63,48 +53,41 @@ class VIZ_SERVICE_EXPORT SkiaOutputDeviceBufferQueue final
override;
void ScheduleOverlays(SkiaOutputSurface::OverlayList overlays) override;
- gl::GLSurface* gl_surface() { return gl_surface_.get(); }
-
private:
friend class SkiaOutputDeviceBufferQueueTest;
- class Image;
- class OverlayData;
using CancelableSwapCompletionCallback =
- base::CancelableOnceCallback<void(gfx::SwapResult,
- std::unique_ptr<gfx::GpuFence>)>;
+ base::CancelableOnceCallback<void(gfx::SwapCompletionResult)>;
- Image* GetNextImage();
- void PageFlipComplete(Image* image);
+ OutputPresenter::Image* GetNextImage();
+ void PageFlipComplete(OutputPresenter::Image* image);
void FreeAllSurfaces();
- // Used as callback for SwapBuff ersAsync and PostSubBufferAsync to finish
+ // Used as callback for SwapBuffersAsync and PostSubBufferAsync to finish
// operation
void DoFinishSwapBuffers(const gfx::Size& size,
std::vector<ui::LatencyInfo> latency_info,
- const base::WeakPtr<Image>& image,
- std::vector<OverlayData> overlays,
- gfx::SwapResult result,
- std::unique_ptr<gfx::GpuFence> gpu_fence);
+ const base::WeakPtr<OutputPresenter::Image>& image,
+ std::vector<OutputPresenter::OverlayData> overlays,
+ gfx::SwapCompletionResult result);
+
+ std::unique_ptr<OutputPresenter> presenter_;
SkiaOutputSurfaceDependency* const dependency_;
- scoped_refptr<gl::GLSurface> gl_surface_;
- const bool supports_async_swap_;
// Format of images
gfx::ColorSpace color_space_;
gfx::Size image_size_;
- ResourceFormat image_format_;
// All allocated images.
- std::vector<std::unique_ptr<Image>> images_;
+ std::vector<std::unique_ptr<OutputPresenter::Image>> images_;
// This image is currently used by Skia as RenderTarget. This may be nullptr
// if there is no drawing for the current frame or if allocation failed.
- Image* current_image_ = nullptr;
+ OutputPresenter::Image* current_image_ = nullptr;
// The last image submitted for presenting.
- Image* submitted_image_ = nullptr;
+ OutputPresenter::Image* submitted_image_ = nullptr;
// The image currently on the screen, if any.
- Image* displayed_image_ = nullptr;
+ OutputPresenter::Image* displayed_image_ = nullptr;
// These are free for use, and are not nullptr.
- base::circular_deque<Image*> available_images_;
+ base::circular_deque<OutputPresenter::Image*> available_images_;
// These cancelable callbacks bind images that have been scheduled to display
// but are not displayed yet. This deque will be cleared when represented
// frames are destroyed. Use CancelableOnceCallback to prevent resources
@@ -112,17 +95,9 @@ class VIZ_SERVICE_EXPORT SkiaOutputDeviceBufferQueue final
base::circular_deque<std::unique_ptr<CancelableSwapCompletionCallback>>
swap_completion_callbacks_;
// Scheduled overlays for the next SwapBuffers call.
- std::vector<OverlayData> pending_overlays_;
+ std::vector<OutputPresenter::OverlayData> pending_overlays_;
// Committed overlays for the last SwapBuffers call.
- std::vector<OverlayData> committed_overlays_;
-
- // Shared Image factories
- gpu::SharedImageFactory shared_image_factory_;
- std::unique_ptr<gpu::SharedImageRepresentationFactory>
- shared_image_representation_factory_;
- uint32_t shared_image_usage_;
-
- DISALLOW_COPY_AND_ASSIGN(SkiaOutputDeviceBufferQueue);
+ std::vector<OutputPresenter::OverlayData> committed_overlays_;
};
} // namespace viz
diff --git a/chromium/components/viz/service/display_embedder/skia_output_device_buffer_queue_unittest.cc b/chromium/components/viz/service/display_embedder/skia_output_device_buffer_queue_unittest.cc
index 8cc92113d1d..92a51592bf6 100644
--- a/chromium/components/viz/service/display_embedder/skia_output_device_buffer_queue_unittest.cc
+++ b/chromium/components/viz/service/display_embedder/skia_output_device_buffer_queue_unittest.cc
@@ -16,6 +16,7 @@
#include "build/build_config.h"
#include "gpu/command_buffer/service/scheduler.h"
+#include "components/viz/service/display_embedder/output_presenter_gl.h"
#include "components/viz/service/display_embedder/skia_output_surface_dependency_impl.h"
#include "components/viz/service/gl/gpu_service_impl.h"
#include "components/viz/test/test_gpu_service_holder.h"
@@ -157,7 +158,8 @@ class MockGLSurfaceAsync : public gl::GLSurfaceStub {
void SwapComplete() {
DCHECK(!callbacks_.empty());
- std::move(callbacks_.front()).Run(gfx::SwapResult::SWAP_ACK, nullptr);
+ std::move(callbacks_.front())
+ .Run(gfx::SwapCompletionResult(gfx::SwapResult::SWAP_ACK));
callbacks_.pop_front();
}
@@ -222,15 +224,17 @@ class SkiaOutputDeviceBufferQueueTest : public TestOnGpu {
std::unique_ptr<SkiaOutputDeviceBufferQueue> onscreen_device =
std::make_unique<SkiaOutputDeviceBufferQueue>(
- gl_surface_, dependency_.get(), memory_tracker_.get(),
- present_callback, shared_image_usage);
+ std::make_unique<OutputPresenterGL>(gl_surface_, dependency_.get(),
+ memory_tracker_.get(),
+ shared_image_usage),
+ dependency_.get(), memory_tracker_.get(), present_callback);
output_device_ = std::move(onscreen_device);
}
void TearDownOnGpu() override { output_device_.reset(); }
- using Image = SkiaOutputDeviceBufferQueue::Image;
+ using Image = OutputPresenter::Image;
const std::vector<std::unique_ptr<Image>>& images() {
return output_device_->images_;
@@ -279,11 +283,15 @@ class SkiaOutputDeviceBufferQueueTest : public TestOnGpu {
(size_t)CountBuffers());
}
- Image* PaintAndSchedulePrimaryPlane() {
- // Call Begin/EndPaint to ensusre the image is initialized before use.
+ Image* PaintPrimaryPlane() {
std::vector<GrBackendSemaphore> end_semaphores;
output_device_->BeginPaint(&end_semaphores);
output_device_->EndPaint();
+ return current_image();
+ }
+
+ Image* PaintAndSchedulePrimaryPlane() {
+ PaintPrimaryPlane();
SchedulePrimaryPlane();
return current_image();
}
@@ -330,7 +338,7 @@ TEST_F_GPU(SkiaOutputDeviceBufferQueueTest, MultipleGetCurrentBufferCalls) {
output_device_->Reshape(screen_size, 1.0f, gfx::ColorSpace(), kDefaultFormat,
gfx::OVERLAY_TRANSFORM_NONE);
EXPECT_NE(0U, memory_tracker().GetSize());
- EXPECT_NE(PaintAndSchedulePrimaryPlane(), nullptr);
+ EXPECT_NE(PaintPrimaryPlane(), nullptr);
EXPECT_NE(0U, memory_tracker().GetSize());
EXPECT_EQ(3, CountBuffers());
auto* fb = current_image();
diff --git a/chromium/components/viz/service/display_embedder/skia_output_device_dawn.cc b/chromium/components/viz/service/display_embedder/skia_output_device_dawn.cc
index a2bb693c020..9790a1b1a03 100644
--- a/chromium/components/viz/service/display_embedder/skia_output_device_dawn.cc
+++ b/chromium/components/viz/service/display_embedder/skia_output_device_dawn.cc
@@ -6,17 +6,11 @@
#include "base/check_op.h"
#include "base/notreached.h"
-#include "build/build_config.h"
#include "components/viz/common/gpu/dawn_context_provider.h"
+#include "third_party/dawn/src/include/dawn_native/D3D12Backend.h"
#include "ui/gfx/presentation_feedback.h"
#include "ui/gfx/vsync_provider.h"
-
-#if defined(OS_WIN)
-#include "third_party/dawn/src/include/dawn_native/D3D12Backend.h"
#include "ui/gl/vsync_provider_win.h"
-#elif defined(OS_LINUX)
-#include "third_party/dawn/src/include/dawn_native/VulkanBackend.h"
-#endif
namespace viz {
@@ -41,7 +35,7 @@ SkiaOutputDeviceDawn::SkiaOutputDeviceDawn(
DidSwapBufferCompleteCallback did_swap_buffer_complete_callback)
: SkiaOutputDevice(memory_tracker, did_swap_buffer_complete_callback),
context_provider_(context_provider),
- widget_(widget) {
+ child_window_(widget) {
capabilities_.output_surface_origin = origin;
capabilities_.uses_default_gl_framebuffer = false;
capabilities_.supports_post_sub_buffer = false;
@@ -51,13 +45,16 @@ SkiaOutputDeviceDawn::SkiaOutputDeviceDawn(
context_provider_->GetGrContext()->defaultBackendFormat(
kSurfaceColorType, GrRenderable::kYes);
-#if defined(OS_WIN)
- vsync_provider_ = std::make_unique<gl::VSyncProviderWin>(widget_);
-#endif
+ vsync_provider_ = std::make_unique<gl::VSyncProviderWin>(widget);
+ child_window_.Initialize();
}
SkiaOutputDeviceDawn::~SkiaOutputDeviceDawn() = default;
+gpu::SurfaceHandle SkiaOutputDeviceDawn::GetChildSurfaceHandle() const {
+ return child_window_.window();
+}
+
bool SkiaOutputDeviceDawn::Reshape(const gfx::Size& size,
float device_scale_factor,
const gfx::ColorSpace& color_space,
@@ -86,7 +83,7 @@ void SkiaOutputDeviceDawn::SwapBuffers(
std::vector<ui::LatencyInfo> latency_info) {
StartSwapBuffers({});
swap_chain_.Present();
- FinishSwapBuffers(gfx::SwapResult::SWAP_ACK,
+ FinishSwapBuffers(gfx::SwapCompletionResult(gfx::SwapResult::SWAP_ACK),
gfx::Size(size_.width(), size_.height()),
std::move(latency_info));
@@ -136,13 +133,8 @@ void SkiaOutputDeviceDawn::EndPaint() {
}
void SkiaOutputDeviceDawn::CreateSwapChainImplementation() {
-#if defined(OS_WIN)
swap_chain_implementation_ = dawn_native::d3d12::CreateNativeSwapChainImpl(
- context_provider_->GetDevice().Get(), widget_);
-#else
- NOTREACHED();
- ALLOW_UNUSED_LOCAL(widget_);
-#endif
+ context_provider_->GetDevice().Get(), child_window_.window());
}
} // namespace viz
diff --git a/chromium/components/viz/service/display_embedder/skia_output_device_dawn.h b/chromium/components/viz/service/display_embedder/skia_output_device_dawn.h
index 56ef86fe380..3d6bcf821f0 100644
--- a/chromium/components/viz/service/display_embedder/skia_output_device_dawn.h
+++ b/chromium/components/viz/service/display_embedder/skia_output_device_dawn.h
@@ -5,6 +5,7 @@
#ifndef COMPONENTS_VIZ_SERVICE_DISPLAY_EMBEDDER_SKIA_OUTPUT_DEVICE_DAWN_H_
#define COMPONENTS_VIZ_SERVICE_DISPLAY_EMBEDDER_SKIA_OUTPUT_DEVICE_DAWN_H_
+#include "build/build_config.h"
#include "components/viz/service/display_embedder/skia_output_device.h"
#include "third_party/dawn/src/include/dawn/dawn_wsi.h"
#include "third_party/dawn/src/include/dawn/webgpu.h"
@@ -13,6 +14,7 @@
#include "third_party/skia/include/core/SkImageInfo.h"
#include "third_party/skia/include/gpu/GrBackendSurface.h"
#include "ui/gfx/native_widget_types.h"
+#include "ui/gl/child_window_win.h"
namespace viz {
@@ -28,6 +30,8 @@ class SkiaOutputDeviceDawn : public SkiaOutputDevice {
DidSwapBufferCompleteCallback did_swap_buffer_complete_callback);
~SkiaOutputDeviceDawn() override;
+ gpu::SurfaceHandle GetChildSurfaceHandle() const;
+
// SkiaOutputDevice implementation:
bool Reshape(const gfx::Size& size,
float device_scale_factor,
@@ -45,7 +49,6 @@ class SkiaOutputDeviceDawn : public SkiaOutputDevice {
void CreateSwapChainImplementation();
DawnContextProvider* const context_provider_;
- gfx::AcceleratedWidget widget_;
DawnSwapChainImplementation swap_chain_implementation_;
wgpu::SwapChain swap_chain_;
wgpu::Texture texture_;
@@ -56,6 +59,13 @@ class SkiaOutputDeviceDawn : public SkiaOutputDevice {
sk_sp<SkColorSpace> sk_color_space_;
GrBackendTexture backend_texture_;
+  // D3D12 requires that we use flip model swap chains. Flip swap chains
+  // require that the swap chain be connected to DWM. DWM requires that the
+  // rendering window be owned by the process that's currently doing the
+  // rendering. gl::ChildWindowWin creates and owns a window which is
+  // reparented by the browser to be a child of its window.
+ gl::ChildWindowWin child_window_;
+
DISALLOW_COPY_AND_ASSIGN(SkiaOutputDeviceDawn);
};
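[Editor's sketch] A condensed view of how this member is used elsewhere in this patch (not a verbatim copy): the HWND owned by the child window backs the flip-model swap chain, and GetChildSurfaceHandle() exposes it so the browser can reparent it.

  child_window_.Initialize();  // In the constructor: create the child HWND.
  gpu::SurfaceHandle handle = child_window_.window();  // GetChildSurfaceHandle().
  swap_chain_implementation_ = dawn_native::d3d12::CreateNativeSwapChainImpl(
      context_provider_->GetDevice().Get(), child_window_.window());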
diff --git a/chromium/components/viz/service/display_embedder/skia_output_device_gl.cc b/chromium/components/viz/service/display_embedder/skia_output_device_gl.cc
index 44ce3b99f68..2f80461da3b 100644
--- a/chromium/components/viz/service/display_embedder/skia_output_device_gl.cc
+++ b/chromium/components/viz/service/display_embedder/skia_output_device_gl.cc
@@ -7,6 +7,7 @@
#include <utility>
#include "base/bind_helpers.h"
+#include "components/viz/common/gpu/context_lost_reason.h"
#include "components/viz/service/display/dc_layer_overlay.h"
#include "gpu/command_buffer/common/swap_buffers_complete_params.h"
#include "gpu/command_buffer/service/feature_info.h"
@@ -25,6 +26,7 @@
#include "ui/gl/gl_bindings.h"
#include "ui/gl/gl_context.h"
#include "ui/gl/gl_surface.h"
+#include "ui/gl/gl_utils.h"
#include "ui/gl/gl_version_info.h"
namespace viz {
@@ -192,8 +194,9 @@ void SkiaOutputDeviceGL::SwapBuffers(
std::move(latency_info));
gl_surface_->SwapBuffersAsync(std::move(callback), std::move(feedback));
} else {
- FinishSwapBuffers(gl_surface_->SwapBuffers(std::move(feedback)),
- surface_size, std::move(latency_info));
+ gfx::SwapResult result = gl_surface_->SwapBuffers(std::move(feedback));
+ FinishSwapBuffers(gfx::SwapCompletionResult(result), surface_size,
+ std::move(latency_info));
}
}
@@ -213,12 +216,11 @@ void SkiaOutputDeviceGL::PostSubBuffer(
gl_surface_->PostSubBufferAsync(rect.x(), rect.y(), rect.width(),
rect.height(), std::move(callback),
std::move(feedback));
-
} else {
- FinishSwapBuffers(
- gl_surface_->PostSubBuffer(rect.x(), rect.y(), rect.width(),
- rect.height(), std::move(feedback)),
- surface_size, std::move(latency_info));
+ gfx::SwapResult result = gl_surface_->PostSubBuffer(
+ rect.x(), rect.y(), rect.width(), rect.height(), std::move(feedback));
+ FinishSwapBuffers(gfx::SwapCompletionResult(result), surface_size,
+ std::move(latency_info));
}
}
@@ -237,22 +239,23 @@ void SkiaOutputDeviceGL::CommitOverlayPlanes(
gl_surface_->CommitOverlayPlanesAsync(std::move(callback),
std::move(feedback));
} else {
- FinishSwapBuffers(gl_surface_->CommitOverlayPlanes(std::move(feedback)),
- surface_size, std::move(latency_info));
+ FinishSwapBuffers(
+ gfx::SwapCompletionResult(
+ gl_surface_->CommitOverlayPlanes(std::move(feedback))),
+ surface_size, std::move(latency_info));
}
}
void SkiaOutputDeviceGL::DoFinishSwapBuffers(
const gfx::Size& size,
std::vector<ui::LatencyInfo> latency_info,
- gfx::SwapResult result,
- std::unique_ptr<gfx::GpuFence> gpu_fence) {
- DCHECK(!gpu_fence);
- FinishSwapBuffers(result, size, latency_info);
+ gfx::SwapCompletionResult result) {
+ DCHECK(!result.gpu_fence);
+ FinishSwapBuffers(std::move(result), size, latency_info);
}
-void SkiaOutputDeviceGL::SetDrawRectangle(const gfx::Rect& draw_rectangle) {
- gl_surface_->SetDrawRectangle(draw_rectangle);
+bool SkiaOutputDeviceGL::SetDrawRectangle(const gfx::Rect& draw_rectangle) {
+ return gl_surface_->SetDrawRectangle(draw_rectangle);
}
void SkiaOutputDeviceGL::SetGpuVSyncEnabled(bool enabled) {
diff --git a/chromium/components/viz/service/display_embedder/skia_output_device_gl.h b/chromium/components/viz/service/display_embedder/skia_output_device_gl.h
index d8491578a4b..a1318dbbaec 100644
--- a/chromium/components/viz/service/display_embedder/skia_output_device_gl.h
+++ b/chromium/components/viz/service/display_embedder/skia_output_device_gl.h
@@ -20,10 +20,6 @@ class GLImage;
class GLSurface;
} // namespace gl
-namespace gfx {
-class GpuFence;
-} // namespace gfx
-
namespace gpu {
class MailboxManager;
class SharedContextState;
@@ -63,7 +59,7 @@ class SkiaOutputDeviceGL final : public SkiaOutputDevice {
std::vector<ui::LatencyInfo> latency_info) override;
void CommitOverlayPlanes(BufferPresentedCallback feedback,
std::vector<ui::LatencyInfo> latency_info) override;
- void SetDrawRectangle(const gfx::Rect& draw_rectangle) override;
+ bool SetDrawRectangle(const gfx::Rect& draw_rectangle) override;
void SetGpuVSyncEnabled(bool enabled) override;
#if defined(OS_WIN)
void SetEnableDCLayers(bool enable) override;
@@ -80,8 +76,7 @@ class SkiaOutputDeviceGL final : public SkiaOutputDevice {
// operation
void DoFinishSwapBuffers(const gfx::Size& size,
std::vector<ui::LatencyInfo> latency_info,
- gfx::SwapResult result,
- std::unique_ptr<gfx::GpuFence>);
+ gfx::SwapCompletionResult result);
scoped_refptr<gl::GLImage> GetGLImageForMailbox(const gpu::Mailbox& mailbox);
diff --git a/chromium/components/viz/service/display_embedder/skia_output_device_offscreen.cc b/chromium/components/viz/service/display_embedder/skia_output_device_offscreen.cc
index 7b4708d94c9..b846ee6db2a 100644
--- a/chromium/components/viz/service/display_embedder/skia_output_device_offscreen.cc
+++ b/chromium/components/viz/service/display_embedder/skia_output_device_offscreen.cc
@@ -63,7 +63,7 @@ void SkiaOutputDeviceOffscreen::SwapBuffers(
DCHECK(backend_texture_.isValid());
StartSwapBuffers(std::move(feedback));
- FinishSwapBuffers(gfx::SwapResult::SWAP_ACK,
+ FinishSwapBuffers(gfx::SwapCompletionResult(gfx::SwapResult::SWAP_ACK),
gfx::Size(size_.width(), size_.height()),
std::move(latency_info));
}
diff --git a/chromium/components/viz/service/display_embedder/skia_output_device_vulkan.cc b/chromium/components/viz/service/display_embedder/skia_output_device_vulkan.cc
index 930ec126a14..6b89f737bdc 100644
--- a/chromium/components/viz/service/display_embedder/skia_output_device_vulkan.cc
+++ b/chromium/components/viz/service/display_embedder/skia_output_device_vulkan.cc
@@ -6,6 +6,7 @@
#include <utility>
+#include "base/logging.h"
#include "build/build_config.h"
#include "components/viz/common/gpu/vulkan_context_provider.h"
#include "gpu/command_buffer/service/memory_tracking.h"
@@ -101,22 +102,37 @@ void SkiaOutputDeviceVulkan::PostSubBuffer(
#endif
StartSwapBuffers(std::move(feedback));
- auto image_size = vulkan_surface_->image_size();
- gfx::SwapResult result = gfx::SwapResult::SWAP_ACK;
- // If the swapchain is new created, but rect doesn't cover the whole buffer,
- // we will still present it even it causes a artifact in this frame and
- // recovered when the next frame is presented. We do that because the old
- // swapchain's present thread is blocked on waiting a reply from xserver, and
- // presenting a new image with the new create swapchain will somehow makes
- // xserver send a reply to us, and then unblock the old swapchain's present
- // thread. So the old swapchain can be destroyed properly.
- if (!rect.IsEmpty())
- result = vulkan_surface_->PostSubBuffer(rect);
- if (is_new_swapchain_) {
- is_new_swapchain_ = false;
- result = gfx::SwapResult::SWAP_NAK_RECREATE_BUFFERS;
+
+ if (is_new_swap_chain_ && rect == gfx::Rect(vulkan_surface_->image_size())) {
+ is_new_swap_chain_ = false;
+ }
+
+ if (!is_new_swap_chain_) {
+ auto image_index = vulkan_surface_->swap_chain()->current_image_index();
+ for (size_t i = 0; i < damage_of_images_.size(); ++i) {
+ if (i == image_index) {
+ damage_of_images_[i] = gfx::Rect();
+ } else {
+ damage_of_images_[i].Union(rect);
+ }
+ }
+ }
+
+  if (!rect.IsEmpty()) {
+    // If the swapchain is newly created but |rect| doesn't cover the whole
+    // buffer, we still present it, even though it causes an artifact in this
+    // frame that is recovered when the next frame is presented. We do that
+    // because the old swapchain's present thread is blocked waiting for a
+    // reply from the X server, and presenting a new image with the newly
+    // created swapchain somehow makes the X server send a reply to us, which
+    // unblocks the old swapchain's present thread so the old swapchain can
+    // be destroyed properly.
+ vulkan_surface_->PostSubBufferAsync(
+ rect, base::BindOnce(&SkiaOutputDeviceVulkan::OnPostSubBufferFinished,
+ weak_ptr_factory_.GetWeakPtr(),
+ std::move(latency_info)));
+ } else {
+ OnPostSubBufferFinished(std::move(latency_info), gfx::SwapResult::SWAP_ACK);
}
- FinishSwapBuffers(result, image_size, std::move(latency_info));
}
SkSurface* SkiaOutputDeviceVulkan::BeginPaint(
@@ -241,7 +257,7 @@ bool SkiaOutputDeviceVulkan::Initialize() {
vulkan_surface_ = std::move(vulkan_surface);
capabilities_.uses_default_gl_framebuffer = false;
- capabilities_.max_frames_pending = vulkan_surface_->image_count() - 1;
+ capabilities_.max_frames_pending = 1;
// Vulkan FIFO swap chain should return vk images in presenting order, so set
// preserve_buffer_content & supports_post_sub_buffer to true to let
// SkiaOutputBufferImpl to manager damages.
@@ -249,6 +265,12 @@ bool SkiaOutputDeviceVulkan::Initialize() {
capabilities_.output_surface_origin = gfx::SurfaceOrigin::kTopLeft;
capabilities_.supports_post_sub_buffer = true;
capabilities_.supports_pre_transform = true;
+  // We don't know the number of buffers until the VulkanSwapChain is
+  // initialized, so set it to 0. Since |damage_area_from_skia_output_device|
+  // is set to true, |number_of_buffers| will not be used for tracking
+  // framebuffer damage.
+ capabilities_.number_of_buffers = 0;
+ capabilities_.damage_area_from_skia_output_device = true;
const auto surface_format = vulkan_surface_->surface_format().format;
DCHECK(surface_format == VK_FORMAT_B8G8R8A8_UNORM ||
@@ -277,15 +299,34 @@ bool SkiaOutputDeviceVulkan::RecreateSwapChain(
for (const auto& sk_surface_size_pair : sk_surface_size_pairs_) {
memory_type_tracker_->TrackMemFree(sk_surface_size_pair.bytes_allocated);
}
+ auto num_images = vulkan_surface_->swap_chain()->num_images();
sk_surface_size_pairs_.clear();
- sk_surface_size_pairs_.resize(vulkan_surface_->swap_chain()->num_images());
+ sk_surface_size_pairs_.resize(num_images);
color_space_ = std::move(color_space);
- is_new_swapchain_ = true;
+ damage_of_images_.resize(num_images);
+ for (auto& damage : damage_of_images_)
+ damage = gfx::Rect(vulkan_surface_->image_size());
+ is_new_swap_chain_ = true;
}
return true;
}
+void SkiaOutputDeviceVulkan::OnPostSubBufferFinished(
+ std::vector<ui::LatencyInfo> latency_info,
+ gfx::SwapResult result) {
+ if (result == gfx::SwapResult::SWAP_ACK) {
+ auto image_index = vulkan_surface_->swap_chain()->current_image_index();
+ FinishSwapBuffers(gfx::SwapCompletionResult(result),
+ vulkan_surface_->image_size(), std::move(latency_info),
+ damage_of_images_[image_index]);
+ } else {
+ FinishSwapBuffers(gfx::SwapCompletionResult(result),
+ vulkan_surface_->image_size(), std::move(latency_info),
+ gfx::Rect(vulkan_surface_->image_size()));
+ }
+}
+
SkiaOutputDeviceVulkan::SkSurfaceSizePair::SkSurfaceSizePair() = default;
SkiaOutputDeviceVulkan::SkSurfaceSizePair::SkSurfaceSizePair(
const SkSurfaceSizePair& other) = default;
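To make the per-image damage bookkeeping above easier to follow, here is a minimal, self-contained sketch in plain C++. The names (Rect, DamageTracker, OnPresent) are illustrative stand-ins, not the real gfx/viz types: each present clears the damage of the image being presented and unions the presented rect into every other image's accumulated damage, mirroring the loop over |damage_of_images_| in the hunk above.

#include <algorithm>
#include <cstddef>
#include <vector>

// Stand-in for gfx::Rect, tracked as a simple bounding box.
struct Rect {
  int x = 0, y = 0, w = 0, h = 0;
  bool IsEmpty() const { return w <= 0 || h <= 0; }
  void Union(const Rect& r) {
    if (r.IsEmpty())
      return;
    if (IsEmpty()) {
      *this = r;
      return;
    }
    const int right = std::max(x + w, r.x + r.w);
    const int bottom = std::max(y + h, r.y + r.h);
    x = std::min(x, r.x);
    y = std::min(y, r.y);
    w = right - x;
    h = bottom - y;
  }
};

class DamageTracker {
 public:
  // A freshly created swapchain starts with every image fully damaged.
  DamageTracker(size_t num_images, const Rect& full_image)
      : damage_of_images_(num_images, full_image) {}

  // Called when |rect| is presented into the image at |current_image_index|.
  void OnPresent(size_t current_image_index, const Rect& rect) {
    for (size_t i = 0; i < damage_of_images_.size(); ++i) {
      if (i == current_image_index)
        damage_of_images_[i] = Rect();     // This image is now up to date.
      else
        damage_of_images_[i].Union(rect);  // This image missed the update.
    }
  }

  // Damage that must be redrawn when image |i| becomes the back buffer again.
  const Rect& DamageOf(size_t i) const { return damage_of_images_[i]; }

 private:
  std::vector<Rect> damage_of_images_;
};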
diff --git a/chromium/components/viz/service/display_embedder/skia_output_device_vulkan.h b/chromium/components/viz/service/display_embedder/skia_output_device_vulkan.h
index af859a52785..4ae2ad690d4 100644
--- a/chromium/components/viz/service/display_embedder/skia_output_device_vulkan.h
+++ b/chromium/components/viz/service/display_embedder/skia_output_device_vulkan.h
@@ -9,6 +9,7 @@
#include <vector>
#include "base/macros.h"
+#include "base/memory/weak_ptr.h"
#include "base/optional.h"
#include "base/util/type_safety/pass_key.h"
#include "build/build_config.h"
@@ -72,6 +73,8 @@ class SkiaOutputDeviceVulkan final : public SkiaOutputDevice {
bool RecreateSwapChain(const gfx::Size& size,
sk_sp<SkColorSpace> color_space,
gfx::OverlayTransform transform);
+ void OnPostSubBufferFinished(std::vector<ui::LatencyInfo> latency_info,
+ gfx::SwapResult result);
VulkanContextProvider* const context_provider_;
@@ -88,7 +91,14 @@ class SkiaOutputDeviceVulkan final : public SkiaOutputDevice {
std::vector<SkSurfaceSizePair> sk_surface_size_pairs_;
sk_sp<SkColorSpace> color_space_;
- bool is_new_swapchain_ = true;
+
+  // The swapchain was newly created and no frame covering its whole area has
+  // been presented yet.
+ bool is_new_swap_chain_ = true;
+
+ std::vector<gfx::Rect> damage_of_images_;
+
+ base::WeakPtrFactory<SkiaOutputDeviceVulkan> weak_ptr_factory_{this};
DISALLOW_COPY_AND_ASSIGN(SkiaOutputDeviceVulkan);
};
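The header above adds a base::WeakPtrFactory so the asynchronous PostSubBuffer completion can be bound to a weak pointer and silently dropped if the device is destroyed first. The following sketch shows the same pattern with std::weak_ptr; it is a generic analogue, not the Chromium base API, and all names here are illustrative.

#include <functional>
#include <iostream>
#include <memory>

enum class SwapResult { kAck, kFailed };

class Device : public std::enable_shared_from_this<Device> {
 public:
  // |present| receives the completion callback, mimicking an async
  // PostSubBuffer API whose reply arrives later on some other code path.
  void PostSubBufferAsync(
      const std::function<void(std::function<void(SwapResult)>)>& present) {
    std::weak_ptr<Device> weak = weak_from_this();
    present([weak](SwapResult result) {
      // Only call back into the device if it is still alive.
      if (auto self = weak.lock())
        self->OnPostSubBufferFinished(result);
    });
  }

 private:
  void OnPostSubBufferFinished(SwapResult result) {
    std::cout << "present finished, ack=" << (result == SwapResult::kAck)
              << "\n";
  }
};

int main() {
  auto device = std::make_shared<Device>();
  std::function<void(SwapResult)> completion;
  device->PostSubBufferAsync([&](auto cb) { completion = cb; });
  device.reset();                // The device is destroyed before the reply.
  completion(SwapResult::kAck);  // Safe: the weak pointer no longer locks.
}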
diff --git a/chromium/components/viz/service/display_embedder/skia_output_device_webview.cc b/chromium/components/viz/service/display_embedder/skia_output_device_webview.cc
index a0526224e33..e8aa380c9f9 100644
--- a/chromium/components/viz/service/display_embedder/skia_output_device_webview.cc
+++ b/chromium/components/viz/service/display_embedder/skia_output_device_webview.cc
@@ -80,8 +80,9 @@ void SkiaOutputDeviceWebView::SwapBuffers(
gfx::Size surface_size =
gfx::Size(sk_surface_->width(), sk_surface_->height());
- FinishSwapBuffers(gl_surface_->SwapBuffers(std::move(feedback)), surface_size,
- std::move(latency_info));
+ FinishSwapBuffers(
+ gfx::SwapCompletionResult(gl_surface_->SwapBuffers(std::move(feedback))),
+ surface_size, std::move(latency_info));
}
SkSurface* SkiaOutputDeviceWebView::BeginPaint(
diff --git a/chromium/components/viz/service/display_embedder/skia_output_device_x11.cc b/chromium/components/viz/service/display_embedder/skia_output_device_x11.cc
index 2fc506b3fff..c30693c52c4 100644
--- a/chromium/components/viz/service/display_embedder/skia_output_device_x11.cc
+++ b/chromium/components/viz/service/display_embedder/skia_output_device_x11.cc
@@ -6,6 +6,7 @@
#include <utility>
+#include "base/logging.h"
#include "third_party/skia/include/core/SkSurface.h"
#include "third_party/skia/include/gpu/GrBackendSurface.h"
#include "third_party/skia/include/gpu/vk/GrVkTypes.h"
@@ -27,10 +28,11 @@ SkiaOutputDeviceX11::SkiaOutputDeviceX11(
did_swap_buffer_complete_callback),
display_(gfx::GetXDisplay()),
widget_(widget),
- gc_(XCreateGC(display_, widget_, 0, nullptr)) {
- int result = XGetWindowAttributes(display_, widget_, &attributes_);
+ gc_(XCreateGC(display_, static_cast<uint32_t>(widget_), 0, nullptr)) {
+ int result = XGetWindowAttributes(display_, static_cast<uint32_t>(widget_),
+ &attributes_);
LOG_IF(FATAL, !result) << "XGetWindowAttributes failed for window "
- << widget_;
+ << static_cast<uint32_t>(widget_);
bpp_ = gfx::BitsPerPixelForPixmapDepth(display_, attributes_.depth);
support_rendr_ = ui::QueryRenderSupport(display_);
@@ -82,14 +84,15 @@ void SkiaOutputDeviceX11::PostSubBuffer(
if (bpp_ == 32 || bpp_ == 16) {
// gfx::PutARGBImage() only supports 16 and 32 bpp.
// TODO(penghuang): Switch to XShmPutImage.
- gfx::PutARGBImage(display_, attributes_.visual, attributes_.depth, widget_,
- gc_, static_cast<const uint8_t*>(sk_pixmap.addr()),
+ gfx::PutARGBImage(display_, attributes_.visual, attributes_.depth,
+ static_cast<uint32_t>(widget_), gc_,
+ static_cast<const uint8_t*>(sk_pixmap.addr()),
rect.width(), rect.height(), 0 /* src_x */, 0 /* src_y */,
rect.x() /* dst_x */, rect.y() /* dst_y */, rect.width(),
rect.height());
} else if (support_rendr_) {
- Pixmap pixmap =
- XCreatePixmap(display_, widget_, rect.width(), rect.height(), 32);
+ Pixmap pixmap = XCreatePixmap(display_, static_cast<uint32_t>(widget_),
+ rect.width(), rect.height(), 32);
GC gc = XCreateGC(display_, pixmap, 0, nullptr);
XImage image = {};
@@ -97,10 +100,10 @@ void SkiaOutputDeviceX11::PostSubBuffer(
image.height = rect.height();
image.depth = 32;
image.bits_per_pixel = 32;
- image.format = ZPixmap;
- image.byte_order = LSBFirst;
+ image.format = static_cast<int>(x11::ImageFormat::ZPixmap);
+ image.byte_order = static_cast<int>(x11::ImageOrder::LSBFirst);
image.bitmap_unit = 8;
- image.bitmap_bit_order = LSBFirst;
+ image.bitmap_bit_order = static_cast<int>(x11::ImageOrder::LSBFirst);
image.bytes_per_line = sk_pixmap.rowBytes();
image.red_mask = 0xff << SK_R32_SHIFT;
@@ -115,8 +118,8 @@ void SkiaOutputDeviceX11::PostSubBuffer(
display_, pixmap, ui::GetRenderARGB32Format(display_), 0, nullptr);
XRenderPictFormat* pictformat =
XRenderFindVisualFormat(display_, attributes_.visual);
- Picture dest_picture =
- XRenderCreatePicture(display_, widget_, pictformat, 0, nullptr);
+ Picture dest_picture = XRenderCreatePicture(
+ display_, static_cast<uint32_t>(widget_), pictformat, 0, nullptr);
XRenderComposite(display_,
PictOpSrc, // op
picture, // src
@@ -137,7 +140,7 @@ void SkiaOutputDeviceX11::PostSubBuffer(
NOTIMPLEMENTED();
}
XFlush(display_);
- FinishSwapBuffers(gfx::SwapResult::SWAP_ACK,
+ FinishSwapBuffers(gfx::SwapCompletionResult(gfx::SwapResult::SWAP_ACK),
gfx::Size(sk_surface_->width(), sk_surface_->height()),
std::move(latency_info));
}
diff --git a/chromium/components/viz/service/display_embedder/skia_output_surface_dependency.h b/chromium/components/viz/service/display_embedder/skia_output_surface_dependency.h
index d3d814edf9b..a85a0cf5c1c 100644
--- a/chromium/components/viz/service/display_embedder/skia_output_surface_dependency.h
+++ b/chromium/components/viz/service/display_embedder/skia_output_surface_dependency.h
@@ -120,6 +120,10 @@ class VIZ_SERVICE_EXPORT SkiaOutputSurfaceDependency {
bool IsUsingDawn() const {
return gr_context_type() == gpu::GrContextType::kDawn;
}
+
+ bool IsUsingMetal() const {
+ return gr_context_type() == gpu::GrContextType::kMetal;
+ }
};
} // namespace viz
diff --git a/chromium/components/viz/service/display_embedder/skia_output_surface_impl.cc b/chromium/components/viz/service/display_embedder/skia_output_surface_impl.cc
index 36404ea695d..f453eae9c38 100644
--- a/chromium/components/viz/service/display_embedder/skia_output_surface_impl.cc
+++ b/chromium/components/viz/service/display_embedder/skia_output_surface_impl.cc
@@ -204,10 +204,14 @@ void SkiaOutputSurfaceImpl::Reshape(const gfx::Size& size,
// SetDrawRectangle() will need to be called at the new size.
has_set_draw_rectangle_for_frame_ = false;
- // Reshape will damage all buffers.
- current_buffer_ = 0u;
- for (auto& damage : damage_of_buffers_)
- damage = gfx::Rect(size);
+ if (use_damage_area_from_skia_output_device_) {
+ damage_of_current_buffer_ = gfx::Rect(size);
+ } else {
+ // Reshape will damage all buffers.
+ current_buffer_ = 0u;
+ for (auto& damage : damage_of_buffers_)
+ damage = gfx::Rect(size);
+ }
// impl_on_gpu_ is released on the GPU thread by a posted task from
// SkiaOutputSurfaceImpl::dtor. So it is safe to use base::Unretained.
@@ -429,21 +433,15 @@ void SkiaOutputSurfaceImpl::SwapBuffers(OutputSurfaceFrame frame) {
}
void SkiaOutputSurfaceImpl::SwapBuffersSkipped() {
- if (deferred_framebuffer_draw_closure_) {
- // Run the task to draw the root RenderPass on the GPU thread. If we aren't
- // going to swap buffers and there are no CopyOutputRequests on the root
- // RenderPass we don't strictly need to draw. However, we still need to
- // PostTask to the GPU thread to deal with freeing resources and running
- // callbacks. This is infrequent and all the work is already done in
- // FinishPaintCurrentFrame() so use the same path.
- auto task = base::BindOnce(&SkiaOutputSurfaceImplOnGpu::SwapBuffersSkipped,
- base::Unretained(impl_on_gpu_.get()),
- std::move(deferred_framebuffer_draw_closure_));
- ScheduleGpuTask(std::move(task), std::move(resource_sync_tokens_));
-
- // TODO(vasilyt): reuse root recorder
- RecreateRootRecorder();
- }
+ // PostTask to the GPU thread to deal with freeing resources and running
+ // callbacks.
+ auto task = base::BindOnce(&SkiaOutputSurfaceImplOnGpu::SwapBuffersSkipped,
+ base::Unretained(impl_on_gpu_.get()),
+ std::move(deferred_framebuffer_draw_closure_));
+ ScheduleGpuTask(std::move(task), std::move(resource_sync_tokens_));
+
+ // TODO(vasilyt): reuse root recorder
+ RecreateRootRecorder();
}
void SkiaOutputSurfaceImpl::ScheduleOutputSurfaceAsOverlay(
@@ -491,7 +489,6 @@ gpu::SyncToken SkiaOutputSurfaceImpl::SubmitPaint(
sync_token.SetVerifyFlush();
auto ddl = current_paint_->recorder()->detach();
- DCHECK(ddl);
// impl_on_gpu_ is released on the GPU thread by a posted task from
// SkiaOutputSurfaceImpl::dtor. So it is safe to use base::Unretained.
@@ -562,7 +559,9 @@ sk_sp<SkImage> SkiaOutputSurfaceImpl::MakePromiseSkImageFromRenderPass(
image_context->color_space(), Fulfill, DoNothing, DoNothing,
image_context.get()),
backend_format);
- DCHECK(image_context->has_image());
+ if (!image_context->has_image()) {
+ return nullptr;
+ }
}
images_in_current_paint_.push_back(image_context.get());
return image_context->image();
@@ -601,14 +600,22 @@ void SkiaOutputSurfaceImpl::CopyOutput(
const gfx::ColorSpace& color_space,
std::unique_ptr<CopyOutputRequest> request) {
DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
- if (!request->has_result_task_runner())
- request->set_result_task_runner(base::ThreadTaskRunnerHandle::Get());
- auto callback = base::BindOnce(&SkiaOutputSurfaceImplOnGpu::CopyOutput,
- base::Unretained(impl_on_gpu_.get()), id,
- geometry, color_space, std::move(request),
- std::move(deferred_framebuffer_draw_closure_));
- ScheduleGpuTask(std::move(callback), std::move(resource_sync_tokens_));
+  // Defer CopyOutput of the root render pass, which draws to the framebuffer,
+  // until SwapBuffers() or SwapBuffersSkipped().
+ if (!id) {
+ deferred_framebuffer_draw_closure_ = base::BindOnce(
+ &SkiaOutputSurfaceImplOnGpu::CopyOutput,
+ base::Unretained(impl_on_gpu_.get()), id, geometry, color_space,
+ std::move(request), std::move(deferred_framebuffer_draw_closure_));
+ } else {
+ DCHECK(!deferred_framebuffer_draw_closure_);
+ auto callback = base::BindOnce(
+ base::IgnoreResult(&SkiaOutputSurfaceImplOnGpu::CopyOutput),
+ base::Unretained(impl_on_gpu_.get()), id, geometry, color_space,
+ std::move(request), base::OnceCallback<bool()>());
+ ScheduleGpuTask(std::move(callback), std::move(resource_sync_tokens_));
+ }
}
void SkiaOutputSurfaceImpl::ScheduleOverlays(
@@ -692,7 +699,17 @@ bool SkiaOutputSurfaceImpl::Initialize() {
if (capabilities_.preserve_buffer_content &&
capabilities_.supports_post_sub_buffer) {
capabilities_.only_invalidates_damage_rect = false;
- damage_of_buffers_.resize(capabilities_.max_frames_pending + 1);
+ capabilities_.supports_target_damage = true;
+    // If there is only one pending frame, we can use the damage area hint
+    // from SkiaOutputDevice; otherwise we have to track the damage area in
+    // SkiaOutputSurfaceImpl.
+ if (capabilities_.max_frames_pending == 1 &&
+ capabilities_.damage_area_from_skia_output_device) {
+ use_damage_area_from_skia_output_device_ = true;
+ damage_of_current_buffer_ = gfx::Rect();
+ } else {
+ damage_of_buffers_.resize(capabilities_.number_of_buffers);
+ }
}
return result;
@@ -769,7 +786,24 @@ SkiaOutputSurfaceImpl::CreateSkSurfaceCharacterization(
impl_on_gpu_->GetGpuPreferences().enforce_vulkan_protected_memory
? GrProtected::kYes
: GrProtected::kNo);
- DCHECK(characterization.isValid());
+ VkFormat vk_format = VK_FORMAT_UNDEFINED;
+ LOG_IF(DFATAL, !characterization.isValid())
+ << "\n surface_size=" << surface_size.ToString()
+ << "\n format=" << static_cast<int>(format)
+ << "\n color_type=" << static_cast<int>(color_type)
+ << "\n backend_format.isValid()=" << backend_format.isValid()
+ << "\n backend_format.backend()="
+ << static_cast<int>(backend_format.backend())
+ << "\n backend_format.asGLFormat()="
+ << static_cast<int>(backend_format.asGLFormat())
+ << "\n backend_format.asVkFormat()="
+ << static_cast<int>(backend_format.asVkFormat(&vk_format))
+ << "\n backend_format.asVkFormat() vk_format="
+ << static_cast<int>(vk_format)
+ << "\n surface_origin=" << static_cast<int>(surface_origin)
+ << "\n willGlFBO0=" << capabilities_.uses_default_gl_framebuffer
+ << "\n isProtected="
+ << impl_on_gpu_->GetGpuPreferences().enforce_vulkan_protected_memory;
return characterization;
}
@@ -806,6 +840,11 @@ void SkiaOutputSurfaceImpl::DidSwapBuffersComplete(
damage = gfx::Rect(size_);
}
+ if (use_damage_area_from_skia_output_device_) {
+ damage_of_current_buffer_ = params.frame_buffer_damage_area;
+ DCHECK(damage_of_current_buffer_);
+ }
+
if (!params.texture_in_use_responses.empty())
client_->DidReceiveTextureInUseResponses(params.texture_in_use_responses);
if (!params.ca_layer_params.is_empty)
@@ -881,6 +920,10 @@ GrBackendFormat SkiaOutputSurfaceImpl::GetGrBackendFormatForTexture(
wgpu::TextureFormat format = ToDawnFormat(resource_format);
return GrBackendFormat::MakeDawn(format);
#endif
+ } else if (dependency_->IsUsingMetal()) {
+#if defined(OS_MACOSX)
+ return GrBackendFormat::MakeMtl(ToMTLPixelFormat(resource_format));
+#endif
} else {
DCHECK(!ycbcr_info);
// Convert internal format from GLES2 to platform GL.
@@ -988,6 +1031,11 @@ SkiaOutputSurfaceImpl::GetGpuTaskSchedulerHelper() {
}
gfx::Rect SkiaOutputSurfaceImpl::GetCurrentFramebufferDamage() const {
+ if (use_damage_area_from_skia_output_device_) {
+ DCHECK(damage_of_current_buffer_);
+ return *damage_of_current_buffer_;
+ }
+
if (damage_of_buffers_.empty())
return gfx::Rect();
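The two hunks above (Initialize() and GetCurrentFramebufferDamage()) together decide where the damage comes from: with max_frames_pending == 1 and a device that reports frame-buffer damage, the surface simply returns the hint delivered with the last swap completion; otherwise it keeps one damage rect per buffer. A minimal sketch of that selection, with stand-in types rather than the actual viz classes:

#include <cstddef>
#include <optional>
#include <vector>

struct Rect { int x = 0, y = 0, w = 0, h = 0; };

class DamageSource {
 public:
  DamageSource(bool damage_from_device, size_t num_buffers)
      : use_device_damage_(damage_from_device) {
    if (!use_device_damage_)
      damage_of_buffers_.resize(num_buffers);
  }

  // Called from the swap-complete notification when the device reports the
  // damage of the buffer that just became current.
  void OnSwapComplete(const Rect& device_damage) {
    if (use_device_damage_)
      damage_of_current_buffer_ = device_damage;
  }

  Rect GetCurrentFramebufferDamage(size_t current_buffer) const {
    if (use_device_damage_)
      return damage_of_current_buffer_.value_or(Rect{});
    if (damage_of_buffers_.empty())
      return Rect{};
    return damage_of_buffers_[current_buffer % damage_of_buffers_.size()];
  }

 private:
  const bool use_device_damage_;
  std::optional<Rect> damage_of_current_buffer_;
  std::vector<Rect> damage_of_buffers_;
};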
diff --git a/chromium/components/viz/service/display_embedder/skia_output_surface_impl.h b/chromium/components/viz/service/display_embedder/skia_output_surface_impl.h
index e7738f180a5..d3369b11a64 100644
--- a/chromium/components/viz/service/display_embedder/skia_output_surface_impl.h
+++ b/chromium/components/viz/service/display_embedder/skia_output_surface_impl.h
@@ -269,6 +269,9 @@ class VIZ_SERVICE_EXPORT SkiaOutputSurfaceImpl : public SkiaOutputSurface {
// to avoid the expense of posting a task and calling MakeCurrent.
base::OnceCallback<bool()> deferred_framebuffer_draw_closure_;
+ bool use_damage_area_from_skia_output_device_ = false;
+  // Damage area of the current buffer, i.e. the difference from the last
+  // submitted buffer.
+ base::Optional<gfx::Rect> damage_of_current_buffer_;
// Current buffer index.
size_t current_buffer_ = 0;
// Damage area of the buffer. Differ to the last submit buffer.
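The |deferred_framebuffer_draw_closure_| member above underpins the CopyOutput change earlier in this file: a CopyOutput on the root render pass is folded into a deferred closure, and either SwapBuffers() or SwapBuffersSkipped() must run afterwards so the closure actually executes (which is also why the unit test at the end of this patch adds a SwapBuffersSkipped() call). A simplified sketch of that flow, using std::function and hypothetical names instead of base::OnceCallback and the real classes:

#include <functional>
#include <iostream>

class Surface {
 public:
  void CopyOutput(int render_pass_id) {
    if (render_pass_id == 0) {
      // Root pass: defer until the frame is swapped (or explicitly skipped).
      deferred_draw_ = [] { std::cout << "root copy + draw on GPU thread\n"; };
    } else {
      // Non-root passes can be scheduled immediately.
      std::cout << "schedule copy for pass " << render_pass_id << "\n";
    }
  }

  void SwapBuffersSkipped() {
    if (deferred_draw_) {
      deferred_draw_();      // Runs the copy and cleanup even without a swap.
      deferred_draw_ = nullptr;
    }
  }

 private:
  std::function<void()> deferred_draw_;
};

int main() {
  Surface surface;
  surface.CopyOutput(/*render_pass_id=*/0);
  surface.SwapBuffersSkipped();  // Without this, the deferred copy never runs.
}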
diff --git a/chromium/components/viz/service/display_embedder/skia_output_surface_impl_on_gpu.cc b/chromium/components/viz/service/display_embedder/skia_output_surface_impl_on_gpu.cc
index 1c27142a2d1..7d6ebac68f7 100644
--- a/chromium/components/viz/service/display_embedder/skia_output_surface_impl_on_gpu.cc
+++ b/chromium/components/viz/service/display_embedder/skia_output_surface_impl_on_gpu.cc
@@ -26,6 +26,7 @@
#include "components/viz/service/display/texture_deleter.h"
#include "components/viz/service/display_embedder/direct_context_provider.h"
#include "components/viz/service/display_embedder/image_context_impl.h"
+#include "components/viz/service/display_embedder/output_presenter_gl.h"
#include "components/viz/service/display_embedder/skia_output_device.h"
#include "components/viz/service/display_embedder/skia_output_device_buffer_queue.h"
#include "components/viz/service/display_embedder/skia_output_device_gl.h"
@@ -83,8 +84,18 @@
#if BUILDFLAG(SKIA_USE_DAWN)
#include "components/viz/common/gpu/dawn_context_provider.h"
+#if defined(OS_WIN)
#include "components/viz/service/display_embedder/skia_output_device_dawn.h"
#endif
+#endif
+
+#if defined(USE_OZONE) || defined(USE_X11)
+#include "ui/base/ui_base_features.h"
+#endif
+
+#if defined(OS_FUCHSIA)
+#include "components/viz/service/display_embedder/output_presenter_fuchsia.h"
+#endif
namespace viz {
@@ -278,36 +289,34 @@ void OnRGBAReadbackDone(
} // namespace
-class SkiaOutputSurfaceImplOnGpu::ScopedPromiseImageAccess {
- public:
- ScopedPromiseImageAccess(SkiaOutputSurfaceImplOnGpu* impl_on_gpu,
- std::vector<ImageContextImpl*> image_contexts)
- : impl_on_gpu_(impl_on_gpu), image_contexts_(std::move(image_contexts)) {
- begin_semaphores_.reserve(image_contexts_.size());
- // We may need one more space for the swap buffer semaphore.
- end_semaphores_.reserve(image_contexts_.size() + 1);
- impl_on_gpu_->BeginAccessImages(image_contexts_, &begin_semaphores_,
- &end_semaphores_);
- }
-
- ~ScopedPromiseImageAccess() {
- impl_on_gpu_->EndAccessImages(image_contexts_);
- }
-
- std::vector<GrBackendSemaphore>& begin_semaphores() {
- return begin_semaphores_;
- }
+SkiaOutputSurfaceImplOnGpu::PromiseImageAccessHelper::PromiseImageAccessHelper(
+ SkiaOutputSurfaceImplOnGpu* impl_on_gpu)
+ : impl_on_gpu_(impl_on_gpu) {}
- std::vector<GrBackendSemaphore>& end_semaphores() { return end_semaphores_; }
+SkiaOutputSurfaceImplOnGpu::PromiseImageAccessHelper::
+ ~PromiseImageAccessHelper() {
+ CHECK(image_contexts_.empty());
+}
- private:
- SkiaOutputSurfaceImplOnGpu* const impl_on_gpu_;
- std::vector<ImageContextImpl*> image_contexts_;
- std::vector<GrBackendSemaphore> begin_semaphores_;
- std::vector<GrBackendSemaphore> end_semaphores_;
+void SkiaOutputSurfaceImplOnGpu::PromiseImageAccessHelper::BeginAccess(
+ std::vector<ImageContextImpl*> image_contexts,
+ std::vector<GrBackendSemaphore>* begin_semaphores,
+ std::vector<GrBackendSemaphore>* end_semaphores) {
+ DCHECK(begin_semaphores);
+ DCHECK(end_semaphores);
+ begin_semaphores->reserve(image_contexts.size());
+ // We may need one more space for the swap buffer semaphore.
+ end_semaphores->reserve(image_contexts.size() + 1);
+ image_contexts_.reserve(image_contexts.size() + image_contexts_.size());
+ image_contexts_.insert(image_contexts.begin(), image_contexts.end());
+ impl_on_gpu_->BeginAccessImages(std::move(image_contexts), begin_semaphores,
+ end_semaphores);
+}
- DISALLOW_COPY_AND_ASSIGN(ScopedPromiseImageAccess);
-};
+void SkiaOutputSurfaceImplOnGpu::PromiseImageAccessHelper::EndAccess() {
+ impl_on_gpu_->EndAccessImages(image_contexts_);
+ image_contexts_.clear();
+}
// Skia gr_context() and |context_provider_| share an underlying GLContext.
// Each of them caches some GL state. Interleaving usage could make cached
@@ -599,7 +608,9 @@ class DirectContextProviderDelegateImpl : public DirectContextProviderDelegate,
#if defined(OS_FUCHSIA)
void RegisterSysmemBufferCollection(gfx::SysmemBufferCollectionId id,
- zx::channel token) override {
+ zx::channel token,
+ gfx::BufferFormat format,
+ gfx::BufferUsage usage) override {
NOTREACHED();
}
@@ -621,6 +632,10 @@ class DirectContextProviderDelegateImpl : public DirectContextProviderDelegate,
return sync_token;
}
+ void WaitSyncToken(const gpu::SyncToken& sync_token) override {
+ NOTREACHED();
+ }
+
void Flush() override {
// No need to flush in this implementation.
}
@@ -810,6 +825,8 @@ SkiaOutputSurfaceImplOnGpu::SkiaOutputSurfaceImplOnGpu(
dawn_context_provider_(dependency_->GetDawnContextProvider()),
renderer_settings_(renderer_settings),
sequence_id_(sequence_id),
+ did_swap_buffer_complete_callback_(
+ std::move(did_swap_buffer_complete_callback)),
context_lost_callback_(std::move(context_lost_callback)),
gpu_vsync_callback_(std::move(gpu_vsync_callback)),
gpu_preferences_(dependency_->GetGpuPreferences()),
@@ -818,8 +835,6 @@ SkiaOutputSurfaceImplOnGpu::SkiaOutputSurfaceImplOnGpu(
DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
weak_ptr_ = weak_ptr_factory_.GetWeakPtr();
- did_swap_buffer_complete_callback_ = CreateSafeRepeatingCallback(
- weak_ptr_, std::move(did_swap_buffer_complete_callback));
buffer_presented_callback_ = CreateSafeRepeatingCallback(
weak_ptr_, std::move(buffer_presented_callback));
}
@@ -833,7 +848,7 @@ SkiaOutputSurfaceImplOnGpu::~SkiaOutputSurfaceImplOnGpu() {
gl::ScopedProgressReporter scoped_progress_reporter(
context_state_->progress_reporter());
// This ensures any outstanding callbacks for promise images are performed.
- gr_context()->flush();
+ gr_context()->flushAndSubmit();
release_current_last_.emplace(gl_surface_, context_state_);
}
@@ -884,14 +899,23 @@ bool SkiaOutputSurfaceImplOnGpu::FinishPaintCurrentFrame(
base::Optional<gfx::Rect> draw_rectangle) {
TRACE_EVENT0("viz", "SkiaOutputSurfaceImplOnGpu::FinishPaintCurrentFrame");
DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
- DCHECK(ddl);
DCHECK(!scoped_output_device_paint_);
- if (!MakeCurrent(true /* need_fbo0 */))
+ bool need_fbo0 = gl_surface_ && !gl_surface_->IsSurfaceless();
+ if (!MakeCurrent(need_fbo0))
+ return false;
+
+ if (!ddl) {
+ MarkContextLost(CONTEXT_LOST_UNKNOWN);
return false;
+ }
- if (draw_rectangle)
- output_device_->SetDrawRectangle(*draw_rectangle);
+ if (draw_rectangle) {
+ if (!output_device_->SetDrawRectangle(*draw_rectangle)) {
+ MarkContextLost(
+ ContextLostReason::CONTEXT_LOST_SET_DRAW_RECTANGLE_FAILED);
+ }
+ }
// We do not reset scoped_output_device_paint_ after drawing the ddl until
// SwapBuffers() is called, because we may need access to output_sk_surface()
@@ -910,12 +934,13 @@ bool SkiaOutputSurfaceImplOnGpu::FinishPaintCurrentFrame(
gpu::kInProcessCommandBufferClientId);
}
- ScopedPromiseImageAccess scoped_promise_image_access(
- this, std::move(image_contexts));
- if (!scoped_promise_image_access.begin_semaphores().empty()) {
- auto result = output_sk_surface()->wait(
- scoped_promise_image_access.begin_semaphores().size(),
- scoped_promise_image_access.begin_semaphores().data());
+ std::vector<GrBackendSemaphore> begin_semaphores;
+ std::vector<GrBackendSemaphore> end_semaphores;
+ promise_image_access_helper_.BeginAccess(
+ std::move(image_contexts), &begin_semaphores, &end_semaphores);
+ if (!begin_semaphores.empty()) {
+ auto result = output_sk_surface()->wait(begin_semaphores.size(),
+ begin_semaphores.data());
DCHECK(result);
}
@@ -941,22 +966,16 @@ bool SkiaOutputSurfaceImplOnGpu::FinishPaintCurrentFrame(
&paint);
}
- GrFlushInfo flush_info;
- flush_info.fFlags = kNone_GrFlushFlags;
-
auto end_paint_semaphores =
scoped_output_device_paint_->TakeEndPaintSemaphores();
+ end_semaphores.insert(end_semaphores.end(), end_paint_semaphores.begin(),
+ end_paint_semaphores.end());
- end_paint_semaphores.insert(
- end_paint_semaphores.end(),
- std::make_move_iterator(
- scoped_promise_image_access.end_semaphores().begin()),
- std::make_move_iterator(
- scoped_promise_image_access.end_semaphores().end()));
-
- // update the size and data pointer
- flush_info.fNumSemaphores = end_paint_semaphores.size();
- flush_info.fSignalSemaphores = end_paint_semaphores.data();
+ GrFlushInfo flush_info = {
+ .fFlags = kNone_GrFlushFlags,
+ .fNumSemaphores = end_semaphores.size(),
+ .fSignalSemaphores = end_semaphores.data(),
+ };
gpu::AddVulkanCleanupTaskForSkiaFlush(vulkan_context_provider_,
&flush_info);
@@ -972,8 +991,7 @@ bool SkiaOutputSurfaceImplOnGpu::FinishPaintCurrentFrame(
}
if (result != GrSemaphoresSubmitted::kYes &&
- !(scoped_promise_image_access.begin_semaphores().empty() &&
- end_paint_semaphores.empty())) {
+ !(begin_semaphores.empty() && end_semaphores.empty())) {
// TODO(penghuang): handle vulkan device lost.
DLOG(ERROR) << "output_sk_surface()->flush() failed.";
return false;
@@ -1007,6 +1025,8 @@ void SkiaOutputSurfaceImplOnGpu::SwapBuffers(
}
DCHECK(output_device_);
+ gr_context()->submit();
+ promise_image_access_helper_.EndAccess();
scoped_output_device_paint_.reset();
if (output_surface_plane_) {
@@ -1049,12 +1069,15 @@ void SkiaOutputSurfaceImplOnGpu::SwapBuffers(
void SkiaOutputSurfaceImplOnGpu::SwapBuffersSkipped(
base::OnceCallback<bool()> deferred_framebuffer_draw_closure) {
- std::move(deferred_framebuffer_draw_closure).Run();
-
+ if (deferred_framebuffer_draw_closure)
+ std::move(deferred_framebuffer_draw_closure).Run();
+ gr_context()->submit();
+ promise_image_access_helper_.EndAccess();
// Perform cleanup that would have otherwise happened in SwapBuffers().
scoped_output_device_paint_.reset();
context_state_->UpdateSkiaOwnedMemorySize();
destroy_after_swap_.clear();
+
#if BUILDFLAG(ENABLE_VULKAN)
if (is_using_vulkan())
gpu::ReportQueueSubmitPerSwapBuffers();
@@ -1071,8 +1094,13 @@ void SkiaOutputSurfaceImplOnGpu::FinishPaintRenderPass(
DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
DCHECK(ddl);
- if (!MakeCurrent(true /* need_fbo0 */))
+ if (!MakeCurrent(false /* need_fbo0 */))
+ return;
+
+ if (!ddl) {
+ MarkContextLost(CONTEXT_LOST_UNKNOWN);
return;
+ }
PullTextureUpdates(std::move(sync_tokens));
@@ -1089,31 +1117,29 @@ void SkiaOutputSurfaceImplOnGpu::FinishPaintRenderPass(
cache_use.emplace(dependency_->GetGrShaderCache(),
gpu::kInProcessCommandBufferClientId);
}
- ScopedPromiseImageAccess scoped_promise_image_access(
- this, std::move(image_contexts));
- if (!scoped_promise_image_access.begin_semaphores().empty()) {
- auto result = offscreen.surface()->wait(
- scoped_promise_image_access.begin_semaphores().size(),
- scoped_promise_image_access.begin_semaphores().data());
+ std::vector<GrBackendSemaphore> begin_semaphores;
+ std::vector<GrBackendSemaphore> end_semaphores;
+ promise_image_access_helper_.BeginAccess(
+ std::move(image_contexts), &begin_semaphores, &end_semaphores);
+ if (!begin_semaphores.empty()) {
+ auto result = offscreen.surface()->wait(begin_semaphores.size(),
+ begin_semaphores.data());
DCHECK(result);
}
offscreen.surface()->draw(ddl.get());
destroy_after_swap_.emplace_back(std::move(ddl));
- GrFlushInfo flush_info;
- flush_info.fFlags = kNone_GrFlushFlags;
- flush_info.fNumSemaphores =
- scoped_promise_image_access.end_semaphores().size();
- flush_info.fSignalSemaphores =
- scoped_promise_image_access.end_semaphores().data();
-
+ GrFlushInfo flush_info = {
+ .fFlags = kNone_GrFlushFlags,
+ .fNumSemaphores = end_semaphores.size(),
+ .fSignalSemaphores = end_semaphores.data(),
+ };
gpu::AddVulkanCleanupTaskForSkiaFlush(vulkan_context_provider_,
&flush_info);
auto result = offscreen.surface()->flush(
SkSurface::BackendSurfaceAccess::kNoAccess, flush_info);
if (result != GrSemaphoresSubmitted::kYes &&
- !(scoped_promise_image_access.begin_semaphores().empty() &&
- scoped_promise_image_access.end_semaphores().empty())) {
+ !(begin_semaphores.empty() && end_semaphores.empty())) {
// TODO(penghuang): handle vulkan device lost.
DLOG(ERROR) << "offscreen.surface()->flush() failed.";
return;
@@ -1142,7 +1168,7 @@ void SkiaOutputSurfaceImplOnGpu::RemoveRenderPassResource(
// |image_contexts| will go out of scope and be destroyed now.
}
-void SkiaOutputSurfaceImplOnGpu::CopyOutput(
+bool SkiaOutputSurfaceImplOnGpu::CopyOutput(
RenderPassId id,
copy_output::RenderPassGeometry geometry,
const gfx::ColorSpace& color_space,
@@ -1152,10 +1178,15 @@ void SkiaOutputSurfaceImplOnGpu::CopyOutput(
// TODO(crbug.com/898595): Do this on the GPU instead of CPU with Vulkan.
DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
- // Clear |destroy_after_swap_| if we CopyOutput without SwapBuffers.
- base::ScopedClosureRunner cleanup(
- base::BindOnce([](std::vector<std::unique_ptr<SkDeferredDisplayList>>) {},
- std::move(destroy_after_swap_)));
+ if (deferred_framebuffer_draw_closure) {
+    // Run() returns false if the context could not be made current, i.e. it
+    // is lost.
+ if (!std::move(deferred_framebuffer_draw_closure).Run())
+ return false;
+ DCHECK(context_state_->IsCurrent(nullptr /* surface */));
+ } else {
+ if (!MakeCurrent(true /* need_fbo0 */))
+ return false;
+ }
if (use_gl_renderer_copier_)
gpu::ContextUrl::SetActiveUrl(copier_active_url_);
@@ -1163,8 +1194,6 @@ void SkiaOutputSurfaceImplOnGpu::CopyOutput(
// Lazy initialize GLRendererCopier before draw because
// DirectContextProvider ctor the backbuffer.
if (use_gl_renderer_copier_ && !copier_) {
- if (!MakeCurrent(true /* need_fbo0 */))
- return;
auto client = std::make_unique<DirectContextProviderDelegateImpl>(
gpu_preferences_, dependency_->GetGpuDriverBugWorkarounds(),
dependency_->GetGpuFeatureInfo(), context_state_.get(),
@@ -1178,7 +1207,7 @@ void SkiaOutputSurfaceImplOnGpu::CopyOutput(
if (result != gpu::ContextResult::kSuccess) {
DLOG(ERROR) << "Couldn't initialize GLRendererCopier";
context_provider_ = nullptr;
- return;
+ return false;
}
context_current_task_runner_ =
base::MakeRefCounted<ContextCurrentTaskRunner>(weak_ptr_);
@@ -1193,15 +1222,6 @@ void SkiaOutputSurfaceImplOnGpu::CopyOutput(
gr_context()->resetContext();
}
- if (deferred_framebuffer_draw_closure) {
- // returns false if context not set to current, i.e lost
- if (!std::move(deferred_framebuffer_draw_closure).Run())
- return;
- DCHECK(context_state_->IsCurrent(nullptr /* surface */));
- } else {
- if (!MakeCurrent(true /* need_fbo0 */))
- return;
- }
bool from_fbo0 = !id;
DCHECK(scoped_output_device_paint_ || !from_fbo0);
@@ -1227,11 +1247,11 @@ void SkiaOutputSurfaceImplOnGpu::CopyOutput(
surface->getCanvas()->drawPaint(paint);
gl::ScopedProgressReporter scoped_progress_reporter(
context_state_->progress_reporter());
- surface->flush();
+ surface->flush(SkSurface::BackendSurfaceAccess::kNoAccess, {});
}
if (use_gl_renderer_copier_) {
- surface->flush();
+ surface->flush(SkSurface::BackendSurfaceAccess::kNoAccess, {});
GLuint gl_id = 0;
GLenum internal_format = supports_alpha_ ? GL_RGBA : GL_RGB;
@@ -1262,7 +1282,7 @@ void SkiaOutputSurfaceImplOnGpu::CopyOutput(
if (decoder()->HasMoreIdleWork() || decoder()->HasPendingQueries())
ScheduleDelayedWork();
- return;
+ return true;
}
base::Optional<gpu::raster::GrShaderCache::ScopedCacheUse> cache_use;
@@ -1307,7 +1327,6 @@ void SkiaOutputSurfaceImplOnGpu::CopyOutput(
SkIRect src_rect =
SkIRect::MakeXYWH(source_selection.x(), source_selection.y(),
source_selection.width(), source_selection.height());
-
if (request->result_format() ==
CopyOutputRequest::ResultFormat::I420_PLANES) {
std::unique_ptr<ReadPixelsContext> context =
@@ -1338,6 +1357,7 @@ void SkiaOutputSurfaceImplOnGpu::CopyOutput(
NOTIMPLEMENTED(); // ResultFormat::RGBA_TEXTURE
}
ScheduleCheckReadbackCompletion();
+ return true;
}
gpu::DecoderContext* SkiaOutputSurfaceImplOnGpu::decoder() {
@@ -1399,7 +1419,7 @@ void SkiaOutputSurfaceImplOnGpu::BeginAccessImages(
}
void SkiaOutputSurfaceImplOnGpu::EndAccessImages(
- const std::vector<ImageContextImpl*>& image_contexts) {
+ const base::flat_set<ImageContextImpl*>& image_contexts) {
TRACE_EVENT0("viz", "SkiaOutputSurfaceImplOnGpu::EndAccessImages");
DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
for (auto* context : image_contexts)
@@ -1457,7 +1477,7 @@ void SkiaOutputSurfaceImplOnGpu::SetCapabilitiesForTesting(
output_device_ = std::make_unique<SkiaOutputDeviceOffscreen>(
context_state_, capabilities.output_surface_origin,
renderer_settings_.requires_alpha_channel, memory_tracker_.get(),
- did_swap_buffer_complete_callback_);
+ GetDidSwapBuffersCompleteCallback());
}
bool SkiaOutputSurfaceImplOnGpu::Initialize() {
@@ -1465,11 +1485,13 @@ bool SkiaOutputSurfaceImplOnGpu::Initialize() {
"is_using_vulkan", is_using_vulkan());
DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
#if defined(USE_OZONE)
- gpu::SurfaceHandle surface_handle = dependency_->GetSurfaceHandle();
- if (surface_handle != gpu::kNullSurfaceHandle) {
- window_surface_ = ui::OzonePlatform::GetInstance()
- ->GetSurfaceFactoryOzone()
- ->CreatePlatformWindowSurface(surface_handle);
+ if (features::IsUsingOzonePlatform()) {
+ gpu::SurfaceHandle surface_handle = dependency_->GetSurfaceHandle();
+ if (surface_handle != gpu::kNullSurfaceHandle) {
+ window_surface_ = ui::OzonePlatform::GetInstance()
+ ->GetSurfaceFactoryOzone()
+ ->CreatePlatformWindowSurface(surface_handle);
+ }
}
#endif
@@ -1516,7 +1538,7 @@ bool SkiaOutputSurfaceImplOnGpu::InitializeForGL() {
output_device_ = std::make_unique<SkiaOutputDeviceOffscreen>(
context_state_, gfx::SurfaceOrigin::kTopLeft,
renderer_settings_.requires_alpha_channel, memory_tracker_.get(),
- did_swap_buffer_complete_callback_);
+ GetDidSwapBuffersCompleteCallback());
supports_alpha_ = renderer_settings_.requires_alpha_channel;
} else {
gl_surface_ =
@@ -1529,8 +1551,10 @@ bool SkiaOutputSurfaceImplOnGpu::InitializeForGL() {
if (gl_surface_->IsSurfaceless()) {
std::unique_ptr<SkiaOutputDeviceBufferQueue> onscreen_device =
std::make_unique<SkiaOutputDeviceBufferQueue>(
- gl_surface_, dependency_, memory_tracker_.get(),
- did_swap_buffer_complete_callback_);
+ std::make_unique<OutputPresenterGL>(gl_surface_, dependency_,
+ memory_tracker_.get()),
+ dependency_, memory_tracker_.get(),
+ GetDidSwapBuffersCompleteCallback());
supports_alpha_ = onscreen_device->supports_alpha();
output_device_ = std::move(onscreen_device);
@@ -1539,7 +1563,7 @@ bool SkiaOutputSurfaceImplOnGpu::InitializeForGL() {
std::unique_ptr<SkiaOutputDeviceWebView> onscreen_device =
std::make_unique<SkiaOutputDeviceWebView>(
context_state_.get(), gl_surface_, memory_tracker_.get(),
- did_swap_buffer_complete_callback_);
+ GetDidSwapBuffersCompleteCallback());
supports_alpha_ = onscreen_device->supports_alpha();
output_device_ = std::move(onscreen_device);
} else {
@@ -1547,7 +1571,7 @@ bool SkiaOutputSurfaceImplOnGpu::InitializeForGL() {
std::make_unique<SkiaOutputDeviceGL>(
dependency_->GetMailboxManager(), context_state_.get(),
gl_surface_, feature_info_, memory_tracker_.get(),
- did_swap_buffer_complete_callback_);
+ GetDidSwapBuffersCompleteCallback());
supports_alpha_ = onscreen_device->supports_alpha();
output_device_ = std::move(onscreen_device);
}
@@ -1571,44 +1595,56 @@ bool SkiaOutputSurfaceImplOnGpu::InitializeForVulkan() {
output_device_ = std::make_unique<SkiaOutputDeviceOffscreen>(
context_state_, gfx::SurfaceOrigin::kBottomLeft,
renderer_settings_.requires_alpha_channel, memory_tracker_.get(),
- did_swap_buffer_complete_callback_);
+ GetDidSwapBuffersCompleteCallback());
supports_alpha_ = renderer_settings_.requires_alpha_channel;
} else {
#if defined(USE_X11)
- supports_alpha_ = true;
- if (!gpu_preferences_.disable_vulkan_surface) {
- output_device_ = SkiaOutputDeviceVulkan::Create(
- vulkan_context_provider_, dependency_->GetSurfaceHandle(),
- memory_tracker_.get(), did_swap_buffer_complete_callback_);
+ if (!features::IsUsingOzonePlatform()) {
+ supports_alpha_ = true;
+ if (!gpu_preferences_.disable_vulkan_surface) {
+ output_device_ = SkiaOutputDeviceVulkan::Create(
+ vulkan_context_provider_, dependency_->GetSurfaceHandle(),
+ memory_tracker_.get(), GetDidSwapBuffersCompleteCallback());
+ }
+ if (!output_device_) {
+ output_device_ = std::make_unique<SkiaOutputDeviceX11>(
+ context_state_, dependency_->GetSurfaceHandle(),
+ memory_tracker_.get(), GetDidSwapBuffersCompleteCallback());
+ }
}
+#endif
if (!output_device_) {
- output_device_ = std::make_unique<SkiaOutputDeviceX11>(
- context_state_, dependency_->GetSurfaceHandle(),
- memory_tracker_.get(), did_swap_buffer_complete_callback_);
- }
+#if defined(OS_FUCHSIA)
+ auto output_presenter = OutputPresenterFuchsia::Create(
+ window_surface_.get(), dependency_, memory_tracker_.get());
#else
- auto output_device = SkiaOutputDeviceBufferQueue::Create(
- dependency_, memory_tracker_.get(), did_swap_buffer_complete_callback_);
- if (output_device) {
- // TODO(https://crbug.com/1012401): don't depend on GL.
- gl_surface_ = output_device->gl_surface();
- output_device_ = std::move(output_device);
- } else {
- auto output_device = SkiaOutputDeviceVulkan::Create(
- vulkan_context_provider_, dependency_->GetSurfaceHandle(),
- memory_tracker_.get(), did_swap_buffer_complete_callback_);
-#if defined(OS_WIN)
- gpu::SurfaceHandle child_surface =
- output_device ? output_device->GetChildSurfaceHandle()
- : gpu::kNullSurfaceHandle;
- if (child_surface != gpu::kNullSurfaceHandle) {
- DidCreateAcceleratedSurfaceChildWindow(dependency_->GetSurfaceHandle(),
- child_surface);
+ auto output_presenter =
+ OutputPresenterGL::Create(dependency_, memory_tracker_.get());
+ if (output_presenter) {
+ // TODO(https://crbug.com/1012401): don't depend on GL.
+ gl_surface_ = output_presenter->gl_surface();
}
#endif
- output_device_ = std::move(output_device);
- }
+ if (output_presenter) {
+ output_device_ = std::make_unique<SkiaOutputDeviceBufferQueue>(
+ std::move(output_presenter), dependency_, memory_tracker_.get(),
+ GetDidSwapBuffersCompleteCallback());
+ } else {
+ auto output_device = SkiaOutputDeviceVulkan::Create(
+ vulkan_context_provider_, dependency_->GetSurfaceHandle(),
+ memory_tracker_.get(), GetDidSwapBuffersCompleteCallback());
+#if defined(OS_WIN)
+ gpu::SurfaceHandle child_surface =
+ output_device ? output_device->GetChildSurfaceHandle()
+ : gpu::kNullSurfaceHandle;
+ if (child_surface != gpu::kNullSurfaceHandle) {
+ DidCreateAcceleratedSurfaceChildWindow(
+ dependency_->GetSurfaceHandle(), child_surface);
+ }
#endif
+ output_device_ = std::move(output_device);
+ }
+ }
}
#endif
return !!output_device_;
@@ -1622,20 +1658,33 @@ bool SkiaOutputSurfaceImplOnGpu::InitializeForDawn() {
output_device_ = std::make_unique<SkiaOutputDeviceOffscreen>(
context_state_, gfx::SurfaceOrigin::kBottomLeft,
renderer_settings_.requires_alpha_channel, memory_tracker_.get(),
- did_swap_buffer_complete_callback_);
+ GetDidSwapBuffersCompleteCallback());
supports_alpha_ = renderer_settings_.requires_alpha_channel;
} else {
#if defined(USE_X11)
// TODO(sgilhuly): Set up a Vulkan swapchain so that Linux can also use
// SkiaOutputDeviceDawn.
- output_device_ = std::make_unique<SkiaOutputDeviceX11>(
- context_state_, dependency_->GetSurfaceHandle(), memory_tracker_.get(),
- did_swap_buffer_complete_callback_);
+ if (!features::IsUsingOzonePlatform()) {
+ output_device_ = std::make_unique<SkiaOutputDeviceX11>(
+ context_state_, dependency_->GetSurfaceHandle(),
+ memory_tracker_.get(), GetDidSwapBuffersCompleteCallback());
+ } else {
+ return false;
+ }
+#elif defined(OS_WIN)
+ std::unique_ptr<SkiaOutputDeviceDawn> output_device =
+ std::make_unique<SkiaOutputDeviceDawn>(
+ dawn_context_provider_, dependency_->GetSurfaceHandle(),
+ gfx::SurfaceOrigin::kTopLeft, memory_tracker_.get(),
+ GetDidSwapBuffersCompleteCallback());
+ const gpu::SurfaceHandle child_surface_handle =
+ output_device->GetChildSurfaceHandle();
+ DidCreateAcceleratedSurfaceChildWindow(dependency_->GetSurfaceHandle(),
+ child_surface_handle);
+ output_device_ = std::move(output_device);
#else
- output_device_ = std::make_unique<SkiaOutputDeviceDawn>(
- dawn_context_provider_, dependency_->GetSurfaceHandle(),
- gfx::SurfaceOrigin::kTopLeft, memory_tracker_.get(),
- did_swap_buffer_complete_callback_);
+ NOTREACHED();
+ return false;
#endif
}
#endif
@@ -1651,9 +1700,10 @@ bool SkiaOutputSurfaceImplOnGpu::MakeCurrent(bool need_fbo0) {
if (!context_state_->MakeCurrent(gl_surface, need_gl)) {
LOG(ERROR) << "Failed to make current.";
dependency_->DidLoseContext(
- gpu::error::kMakeCurrentFailed,
+ *context_state_->context_lost_reason(),
GURL("chrome://gpu/SkiaOutputSurfaceImplOnGpu::MakeCurrent"));
- MarkContextLost(CONTEXT_LOST_MAKECURRENT_FAILED);
+ MarkContextLost(GetContextLostReason(
+ gpu::error::kLostContext, *context_state_->context_lost_reason()));
return false;
}
context_state_->set_need_context_state_reset(true);
@@ -1734,6 +1784,28 @@ void SkiaOutputSurfaceImplOnGpu::BufferPresented(
// Handled by SkiaOutputDevice already.
}
+void SkiaOutputSurfaceImplOnGpu::DidSwapBuffersCompleteInternal(
+ gpu::SwapBuffersCompleteParams params,
+ const gfx::Size& pixel_size) {
+ if (params.swap_response.result == gfx::SwapResult::SWAP_FAILED) {
+ DLOG(ERROR) << "Context lost on SWAP_FAILED";
+ if (!context_state_->IsCurrent(nullptr) ||
+ !context_state_->CheckResetStatus(false)) {
+ // Mark the context lost if not already lost.
+ MarkContextLost(ContextLostReason::CONTEXT_LOST_SWAP_FAILED);
+ }
+ }
+
+ PostTaskToClientThread(
+ base::BindOnce(did_swap_buffer_complete_callback_, params, pixel_size));
+}
+
+SkiaOutputSurfaceImplOnGpu::DidSwapBufferCompleteCallback
+SkiaOutputSurfaceImplOnGpu::GetDidSwapBuffersCompleteCallback() {
+ return base::BindRepeating(
+ &SkiaOutputSurfaceImplOnGpu::DidSwapBuffersCompleteInternal, weak_ptr_);
+}
+
void SkiaOutputSurfaceImplOnGpu::MarkContextLost(ContextLostReason reason) {
// This function potentially can be re-entered during from
// SharedContextState::MarkContextLost(). This guards against it.
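DidSwapBuffersCompleteInternal() above intercepts the swap result on the GPU thread, marks the context lost on SWAP_FAILED if it is not already lost, and only then forwards the parameters to the client-thread callback. The sketch below restates that ordering with std::function and illustrative types; it is not the Chromium implementation.

#include <functional>
#include <iostream>

enum class SwapResult { kAck, kFailed };

struct SwapParams {
  SwapResult result = SwapResult::kAck;
};

class OnGpu {
 public:
  OnGpu(std::function<void(std::function<void()>)> post_to_client_thread,
        std::function<void(const SwapParams&)> client_callback)
      : post_to_client_thread_(std::move(post_to_client_thread)),
        client_callback_(std::move(client_callback)) {}

  void DidSwapBuffersCompleteInternal(SwapParams params) {
    if (params.result == SwapResult::kFailed && !context_lost_) {
      context_lost_ = true;  // Mark the context lost once, on the GPU thread.
      std::cout << "context lost on SWAP_FAILED\n";
    }
    // The client-visible callback never runs directly on the GPU thread.
    post_to_client_thread_([cb = client_callback_, params] { cb(params); });
  }

 private:
  std::function<void(std::function<void()>)> post_to_client_thread_;
  std::function<void(const SwapParams&)> client_callback_;
  bool context_lost_ = false;
};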
diff --git a/chromium/components/viz/service/display_embedder/skia_output_surface_impl_on_gpu.h b/chromium/components/viz/service/display_embedder/skia_output_surface_impl_on_gpu.h
index b9ab673914a..c6919214b9c 100644
--- a/chromium/components/viz/service/display_embedder/skia_output_surface_impl_on_gpu.h
+++ b/chromium/components/viz/service/display_embedder/skia_output_surface_impl_on_gpu.h
@@ -9,7 +9,6 @@
#include <utility>
#include <vector>
-#include "base/containers/circular_deque.h"
#include "base/macros.h"
#include "base/optional.h"
#include "base/threading/thread_checker.h"
@@ -153,7 +152,7 @@ class SkiaOutputSurfaceImplOnGpu : public gpu::ImageTransportSurfaceDelegate {
void RemoveRenderPassResource(
std::vector<RenderPassId> ids,
std::vector<std::unique_ptr<ImageContextImpl>> image_contexts);
- void CopyOutput(RenderPassId id,
+ bool CopyOutput(RenderPassId id,
copy_output::RenderPassGeometry geometry,
const gfx::ColorSpace& color_space,
std::unique_ptr<CopyOutputRequest> request,
@@ -162,7 +161,7 @@ class SkiaOutputSurfaceImplOnGpu : public gpu::ImageTransportSurfaceDelegate {
void BeginAccessImages(const std::vector<ImageContextImpl*>& image_contexts,
std::vector<GrBackendSemaphore>* begin_semaphores,
std::vector<GrBackendSemaphore>* end_semaphores);
- void EndAccessImages(const std::vector<ImageContextImpl*>& image_contexts);
+ void EndAccessImages(const base::flat_set<ImageContextImpl*>& image_contexts);
sk_sp<GrContextThreadSafeProxy> GetGrContextThreadSafeProxy();
const gl::GLVersionInfo* gl_version_info() const { return gl_version_info_; }
@@ -211,7 +210,6 @@ class SkiaOutputSurfaceImplOnGpu : public gpu::ImageTransportSurfaceDelegate {
gpu::MemoryTracker* GetMemoryTracker() { return memory_tracker_.get(); }
private:
- class ScopedPromiseImageAccess;
class OffscreenSurface;
class DisplayContext;
@@ -220,6 +218,12 @@ class SkiaOutputSurfaceImplOnGpu : public gpu::ImageTransportSurfaceDelegate {
bool InitializeForVulkan();
bool InitializeForDawn();
+  // Provided as a callback to |output_device_|.
+ void DidSwapBuffersCompleteInternal(gpu::SwapBuffersCompleteParams params,
+ const gfx::Size& pixel_size);
+
+ DidSwapBufferCompleteCallback GetDidSwapBuffersCompleteCallback();
+
// Make context current for GL, and return false if the context is lost.
// It will do nothing when Vulkan is used.
bool MakeCurrent(bool need_fbo0);
@@ -286,6 +290,7 @@ class SkiaOutputSurfaceImplOnGpu : public gpu::ImageTransportSurfaceDelegate {
// readback using GLRendererCopier.
// TODO(samans): Remove |sequence_id| once readback always uses Skia.
const gpu::SequenceId sequence_id_;
+ // Should only be run on the client thread with PostTaskToClientThread().
DidSwapBufferCompleteCallback did_swap_buffer_complete_callback_;
BufferPresentedCallback buffer_presented_callback_;
ContextLostCallback context_lost_callback_;
@@ -308,6 +313,24 @@ class SkiaOutputSurfaceImplOnGpu : public gpu::ImageTransportSurfaceDelegate {
std::unique_ptr<DisplayContext> display_context_;
bool context_is_lost_ = false;
+ class PromiseImageAccessHelper {
+ public:
+ explicit PromiseImageAccessHelper(SkiaOutputSurfaceImplOnGpu* impl_on_gpu);
+ ~PromiseImageAccessHelper();
+
+ void BeginAccess(std::vector<ImageContextImpl*> image_contexts,
+ std::vector<GrBackendSemaphore>* begin_semaphores,
+ std::vector<GrBackendSemaphore>* end_semaphores);
+ void EndAccess();
+
+ private:
+ SkiaOutputSurfaceImplOnGpu* const impl_on_gpu_;
+ base::flat_set<ImageContextImpl*> image_contexts_;
+
+ DISALLOW_COPY_AND_ASSIGN(PromiseImageAccessHelper);
+ };
+ PromiseImageAccessHelper promise_image_access_helper_{this};
+
std::unique_ptr<SkiaOutputDevice> output_device_;
base::Optional<SkiaOutputDevice::ScopedPaint> scoped_output_device_paint_;
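PromiseImageAccessHelper, declared above, replaces the old scoped RAII access: BeginAccess() collects wait/signal semaphores and accumulates the accessed image contexts across paints, and EndAccess() releases them only after the frame's GrContext submit. A minimal sketch with stand-in types (the real code uses GrBackendSemaphore and ImageContextImpl):

#include <cassert>
#include <set>
#include <vector>

struct ImageContext { int id; };
struct Semaphore { int value; };

class PromiseImageAccessHelper {
 public:
  void BeginAccess(std::vector<ImageContext*> contexts,
                   std::vector<Semaphore>* begin_semaphores,
                   std::vector<Semaphore>* end_semaphores) {
    begin_semaphores->reserve(contexts.size());
    end_semaphores->reserve(contexts.size() + 1);  // +1 for the swap semaphore.
    for (ImageContext* c : contexts) {
      accessed_.insert(c);
      begin_semaphores->push_back({c->id});  // Wait before reading the image.
      end_semaphores->push_back({-c->id});   // Signal once drawing is flushed.
    }
  }

  // Must be called after the GrContext submit for the frame.
  void EndAccess() { accessed_.clear(); }

  // Mirrors the CHECK in the real helper: every access must have been ended.
  ~PromiseImageAccessHelper() { assert(accessed_.empty()); }

 private:
  std::set<ImageContext*> accessed_;
};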
diff --git a/chromium/components/viz/service/display_embedder/skia_output_surface_impl_unittest.cc b/chromium/components/viz/service/display_embedder/skia_output_surface_impl_unittest.cc
index 67800b78170..8f5b7e6e077 100644
--- a/chromium/components/viz/service/display_embedder/skia_output_surface_impl_unittest.cc
+++ b/chromium/components/viz/service/display_embedder/skia_output_surface_impl_unittest.cc
@@ -156,6 +156,7 @@ TEST_F(SkiaOutputSurfaceImplTest, SubmitPaint) {
geometry.readback_offset = gfx::Vector2d(0, 0);
output_surface_->CopyOutput(0, geometry, color_space, std::move(request));
+ output_surface_->SwapBuffersSkipped();
BlockMainThread();
// SubmitPaint draw is deferred until CopyOutput.