diff options
author | Zeno Albisser <zeno.albisser@digia.com> | 2013-08-15 21:46:11 +0200 |
---|---|---|
committer | Zeno Albisser <zeno.albisser@digia.com> | 2013-08-15 21:46:11 +0200 |
commit | 679147eead574d186ebf3069647b4c23e8ccace6 (patch) | |
tree | fc247a0ac8ff119f7c8550879ebb6d3dd8d1ff69 /chromium/content/browser/gpu | |
download | qtwebengine-chromium-679147eead574d186ebf3069647b4c23e8ccace6.tar.gz |
Initial import.
Diffstat (limited to 'chromium/content/browser/gpu')
30 files changed, 9697 insertions, 0 deletions
diff --git a/chromium/content/browser/gpu/OWNERS b/chromium/content/browser/gpu/OWNERS new file mode 100644 index 00000000000..92ac85812fe --- /dev/null +++ b/chromium/content/browser/gpu/OWNERS @@ -0,0 +1,4 @@ +apatrick@chromium.org +kbr@chromium.org +piman@chromium.org +zmo@chromium.org diff --git a/chromium/content/browser/gpu/browser_gpu_channel_host_factory.cc b/chromium/content/browser/gpu/browser_gpu_channel_host_factory.cc new file mode 100644 index 00000000000..7bc6fca71e6 --- /dev/null +++ b/chromium/content/browser/gpu/browser_gpu_channel_host_factory.cc @@ -0,0 +1,321 @@ +// Copyright (c) 2012 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "content/browser/gpu/browser_gpu_channel_host_factory.h" + +#include "base/bind.h" +#include "base/threading/thread_restrictions.h" +#include "content/browser/gpu/gpu_data_manager_impl.h" +#include "content/browser/gpu/gpu_process_host.h" +#include "content/browser/gpu/gpu_surface_tracker.h" +#include "content/common/gpu/gpu_messages.h" +#include "content/common/child_process_host_impl.h" +#include "content/public/browser/browser_thread.h" +#include "content/public/common/content_client.h" +#include "ipc/ipc_forwarding_message_filter.h" + +namespace content { + +BrowserGpuChannelHostFactory* BrowserGpuChannelHostFactory::instance_ = NULL; + +BrowserGpuChannelHostFactory::CreateRequest::CreateRequest() + : event(false, false), + gpu_host_id(0), + route_id(MSG_ROUTING_NONE) { +} + +BrowserGpuChannelHostFactory::CreateRequest::~CreateRequest() { +} + +BrowserGpuChannelHostFactory::EstablishRequest::EstablishRequest( + CauseForGpuLaunch cause) + : event(false, false), + cause_for_gpu_launch(cause), + gpu_host_id(0), + reused_gpu_process(true) { +} + +BrowserGpuChannelHostFactory::EstablishRequest::~EstablishRequest() { +} + +void BrowserGpuChannelHostFactory::Initialize() { + instance_ = new 
BrowserGpuChannelHostFactory(); +} + +void BrowserGpuChannelHostFactory::Terminate() { + delete instance_; + instance_ = NULL; +} + +BrowserGpuChannelHostFactory::BrowserGpuChannelHostFactory() + : gpu_client_id_(ChildProcessHostImpl::GenerateChildProcessUniqueId()), + shutdown_event_(new base::WaitableEvent(true, false)), + gpu_host_id_(0) { +} + +BrowserGpuChannelHostFactory::~BrowserGpuChannelHostFactory() { + shutdown_event_->Signal(); +} + +bool BrowserGpuChannelHostFactory::IsMainThread() { + return BrowserThread::CurrentlyOn(BrowserThread::UI); +} + +base::MessageLoop* BrowserGpuChannelHostFactory::GetMainLoop() { + return BrowserThread::UnsafeGetMessageLoopForThread(BrowserThread::UI); +} + +scoped_refptr<base::MessageLoopProxy> +BrowserGpuChannelHostFactory::GetIOLoopProxy() { + return BrowserThread::GetMessageLoopProxyForThread(BrowserThread::IO); +} + +base::WaitableEvent* BrowserGpuChannelHostFactory::GetShutDownEvent() { + return shutdown_event_.get(); +} + +scoped_ptr<base::SharedMemory> +BrowserGpuChannelHostFactory::AllocateSharedMemory(size_t size) { + scoped_ptr<base::SharedMemory> shm(new base::SharedMemory()); + if (!shm->CreateAnonymous(size)) + return scoped_ptr<base::SharedMemory>(); + return shm.Pass(); +} + +void BrowserGpuChannelHostFactory::CreateViewCommandBufferOnIO( + CreateRequest* request, + int32 surface_id, + const GPUCreateCommandBufferConfig& init_params) { + GpuProcessHost* host = GpuProcessHost::FromID(gpu_host_id_); + if (!host) { + request->event.Signal(); + return; + } + + gfx::GLSurfaceHandle surface = + GpuSurfaceTracker::Get()->GetSurfaceHandle(surface_id); + + host->CreateViewCommandBuffer( + surface, + surface_id, + gpu_client_id_, + init_params, + base::Bind(&BrowserGpuChannelHostFactory::CommandBufferCreatedOnIO, + request)); +} + +// static +void BrowserGpuChannelHostFactory::CommandBufferCreatedOnIO( + CreateRequest* request, int32 route_id) { + request->route_id = route_id; + request->event.Signal(); +} + +int32 
BrowserGpuChannelHostFactory::CreateViewCommandBuffer( + int32 surface_id, + const GPUCreateCommandBufferConfig& init_params) { + CreateRequest request; + GetIOLoopProxy()->PostTask(FROM_HERE, base::Bind( + &BrowserGpuChannelHostFactory::CreateViewCommandBufferOnIO, + base::Unretained(this), + &request, + surface_id, + init_params)); + // We're blocking the UI thread, which is generally undesirable. + // In this case we need to wait for this before we can show any UI /anyway/, + // so it won't cause additional jank. + // TODO(piman): Make this asynchronous (http://crbug.com/125248). + base::ThreadRestrictions::ScopedAllowWait allow_wait; + request.event.Wait(); + return request.route_id; +} + +void BrowserGpuChannelHostFactory::CreateImageOnIO( + gfx::PluginWindowHandle window, + int32 image_id, + const CreateImageCallback& callback) { + GpuProcessHost* host = GpuProcessHost::FromID(gpu_host_id_); + if (!host) { + ImageCreatedOnIO(callback, gfx::Size()); + return; + } + + host->CreateImage( + window, + gpu_client_id_, + image_id, + base::Bind(&BrowserGpuChannelHostFactory::ImageCreatedOnIO, callback)); +} + +// static +void BrowserGpuChannelHostFactory::ImageCreatedOnIO( + const CreateImageCallback& callback, const gfx::Size size) { + BrowserThread::PostTask( + BrowserThread::UI, + FROM_HERE, + base::Bind(&BrowserGpuChannelHostFactory::OnImageCreated, + callback, size)); +} + +// static +void BrowserGpuChannelHostFactory::OnImageCreated( + const CreateImageCallback& callback, const gfx::Size size) { + callback.Run(size); +} + +void BrowserGpuChannelHostFactory::CreateImage( + gfx::PluginWindowHandle window, + int32 image_id, + const CreateImageCallback& callback) { + DCHECK(BrowserThread::CurrentlyOn(BrowserThread::UI)); + GetIOLoopProxy()->PostTask(FROM_HERE, base::Bind( + &BrowserGpuChannelHostFactory::CreateImageOnIO, + base::Unretained(this), + window, + image_id, + callback)); +} + +void BrowserGpuChannelHostFactory::DeleteImageOnIO( + int32 image_id, int32 
sync_point) { + GpuProcessHost* host = GpuProcessHost::FromID(gpu_host_id_); + if (!host) { + return; + } + + host->DeleteImage(gpu_client_id_, image_id, sync_point); +} + +void BrowserGpuChannelHostFactory::DeleteImage( + int32 image_id, int32 sync_point) { + DCHECK(BrowserThread::CurrentlyOn(BrowserThread::UI)); + GetIOLoopProxy()->PostTask(FROM_HERE, base::Bind( + &BrowserGpuChannelHostFactory::DeleteImageOnIO, + base::Unretained(this), + image_id, + sync_point)); +} + +void BrowserGpuChannelHostFactory::EstablishGpuChannelOnIO( + EstablishRequest* request) { + GpuProcessHost* host = GpuProcessHost::FromID(gpu_host_id_); + if (!host) { + host = GpuProcessHost::Get(GpuProcessHost::GPU_PROCESS_KIND_SANDBOXED, + request->cause_for_gpu_launch); + if (!host) { + request->event.Signal(); + return; + } + gpu_host_id_ = host->host_id(); + request->reused_gpu_process = false; + } else { + if (host->host_id() == request->gpu_host_id) { + // We come here if we retried to establish the channel because of a + // failure in GpuChannelEstablishedOnIO, but we ended up with the same + // process ID, meaning the failure was not because of a channel error, but + // another reason. So fail now. + request->event.Signal(); + return; + } + request->reused_gpu_process = true; + } + request->gpu_host_id = gpu_host_id_; + + host->EstablishGpuChannel( + gpu_client_id_, + true, + base::Bind(&BrowserGpuChannelHostFactory::GpuChannelEstablishedOnIO, + base::Unretained(this), + request)); +} + +void BrowserGpuChannelHostFactory::GpuChannelEstablishedOnIO( + EstablishRequest* request, + const IPC::ChannelHandle& channel_handle, + const gpu::GPUInfo& gpu_info) { + if (channel_handle.name.empty() && request->reused_gpu_process) { + // We failed after re-using the GPU process, but it may have died in the + // mean time. Retry to have a chance to create a fresh GPU process. 
+ EstablishGpuChannelOnIO(request); + } else { + request->channel_handle = channel_handle; + request->gpu_info = gpu_info; + request->event.Signal(); + } +} + +GpuChannelHost* BrowserGpuChannelHostFactory::EstablishGpuChannelSync( + CauseForGpuLaunch cause_for_gpu_launch) { + if (gpu_channel_.get()) { + // Recreate the channel if it has been lost. + if (gpu_channel_->IsLost()) + gpu_channel_ = NULL; + else + return gpu_channel_.get(); + } + // Ensure initialization on the main thread. + GpuDataManagerImpl::GetInstance(); + + EstablishRequest request(cause_for_gpu_launch); + GetIOLoopProxy()->PostTask( + FROM_HERE, + base::Bind( + &BrowserGpuChannelHostFactory::EstablishGpuChannelOnIO, + base::Unretained(this), + &request)); + + { + // We're blocking the UI thread, which is generally undesirable. + // In this case we need to wait for this before we can show any UI /anyway/, + // so it won't cause additional jank. + // TODO(piman): Make this asynchronous (http://crbug.com/125248). + base::ThreadRestrictions::ScopedAllowWait allow_wait; + request.event.Wait(); + } + + if (request.channel_handle.name.empty()) + return NULL; + + GetContentClient()->SetGpuInfo(request.gpu_info); + gpu_channel_ = GpuChannelHost::Create( + this, request.gpu_host_id, gpu_client_id_, + request.gpu_info, request.channel_handle); + return gpu_channel_.get(); +} + +// static +void BrowserGpuChannelHostFactory::AddFilterOnIO( + int host_id, + scoped_refptr<IPC::ChannelProxy::MessageFilter> filter) { + DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); + + GpuProcessHost* host = GpuProcessHost::FromID(host_id); + if (host) + host->AddFilter(filter.get()); +} + +void BrowserGpuChannelHostFactory::SetHandlerForControlMessages( + const uint32* message_ids, + size_t num_messages, + const base::Callback<void(const IPC::Message&)>& handler, + base::TaskRunner* target_task_runner) { + DCHECK(gpu_host_id_) + << "Do not call" + << " BrowserGpuChannelHostFactory::SetHandlerForControlMessages()" + << " 
until the GpuProcessHost has been set up."; + + scoped_refptr<IPC::ForwardingMessageFilter> filter = + new IPC::ForwardingMessageFilter(message_ids, + num_messages, + target_task_runner); + filter->AddRoute(MSG_ROUTING_CONTROL, handler); + + GetIOLoopProxy()->PostTask( + FROM_HERE, + base::Bind(&BrowserGpuChannelHostFactory::AddFilterOnIO, + gpu_host_id_, + filter)); +} + +} // namespace content diff --git a/chromium/content/browser/gpu/browser_gpu_channel_host_factory.h b/chromium/content/browser/gpu/browser_gpu_channel_host_factory.h new file mode 100644 index 00000000000..ca311839d9c --- /dev/null +++ b/chromium/content/browser/gpu/browser_gpu_channel_host_factory.h @@ -0,0 +1,110 @@ +// Copyright (c) 2012 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef CONTENT_BROWSER_GPU_BROWSER_GPU_CHANNEL_HOST_FACTORY_H_ +#define CONTENT_BROWSER_GPU_BROWSER_GPU_CHANNEL_HOST_FACTORY_H_ + +#include "base/memory/ref_counted.h" +#include "base/memory/scoped_ptr.h" +#include "base/process/process.h" +#include "base/synchronization/waitable_event.h" +#include "content/common/gpu/client/gpu_channel_host.h" +#include "ipc/ipc_channel_handle.h" + +namespace content { + +class CONTENT_EXPORT BrowserGpuChannelHostFactory + : public GpuChannelHostFactory { + public: + static void Initialize(); + static void Terminate(); + static BrowserGpuChannelHostFactory* instance() { return instance_; } + + // GpuChannelHostFactory implementation. 
+ virtual bool IsMainThread() OVERRIDE; + virtual base::MessageLoop* GetMainLoop() OVERRIDE; + virtual scoped_refptr<base::MessageLoopProxy> GetIOLoopProxy() OVERRIDE; + virtual base::WaitableEvent* GetShutDownEvent() OVERRIDE; + virtual scoped_ptr<base::SharedMemory> AllocateSharedMemory( + size_t size) OVERRIDE; + virtual int32 CreateViewCommandBuffer( + int32 surface_id, + const GPUCreateCommandBufferConfig& init_params) OVERRIDE; + virtual void CreateImage( + gfx::PluginWindowHandle window, + int32 image_id, + const CreateImageCallback& callback) OVERRIDE; + virtual void DeleteImage(int32 image_idu, int32 sync_point) OVERRIDE; + virtual GpuChannelHost* EstablishGpuChannelSync( + CauseForGpuLaunch cause_for_gpu_launch) OVERRIDE; + + // Specify a task runner and callback to be used for a set of messages. The + // callback will be set up on the current GpuProcessHost, identified by + // GpuProcessHostId(). + virtual void SetHandlerForControlMessages( + const uint32* message_ids, + size_t num_messages, + const base::Callback<void(const IPC::Message&)>& handler, + base::TaskRunner* target_task_runner); + int GpuProcessHostId() { return gpu_host_id_; } + + private: + struct CreateRequest { + CreateRequest(); + ~CreateRequest(); + base::WaitableEvent event; + int gpu_host_id; + int32 route_id; + }; + + struct EstablishRequest { + explicit EstablishRequest(CauseForGpuLaunch); + ~EstablishRequest(); + base::WaitableEvent event; + CauseForGpuLaunch cause_for_gpu_launch; + int gpu_host_id; + bool reused_gpu_process; + IPC::ChannelHandle channel_handle; + gpu::GPUInfo gpu_info; + }; + + BrowserGpuChannelHostFactory(); + virtual ~BrowserGpuChannelHostFactory(); + + void CreateViewCommandBufferOnIO( + CreateRequest* request, + int32 surface_id, + const GPUCreateCommandBufferConfig& init_params); + static void CommandBufferCreatedOnIO(CreateRequest* request, int32 route_id); + void CreateImageOnIO( + gfx::PluginWindowHandle window, + int32 image_id, + const 
CreateImageCallback& callback); + static void ImageCreatedOnIO( + const CreateImageCallback& callback, const gfx::Size size); + static void OnImageCreated( + const CreateImageCallback& callback, const gfx::Size size); + void DeleteImageOnIO(int32 image_id, int32 sync_point); + void EstablishGpuChannelOnIO(EstablishRequest* request); + void GpuChannelEstablishedOnIO( + EstablishRequest* request, + const IPC::ChannelHandle& channel_handle, + const gpu::GPUInfo& gpu_info); + static void AddFilterOnIO( + int gpu_host_id, + scoped_refptr<IPC::ChannelProxy::MessageFilter> filter); + + int gpu_client_id_; + scoped_ptr<base::WaitableEvent> shutdown_event_; + scoped_refptr<GpuChannelHost> gpu_channel_; + int gpu_host_id_; + + static BrowserGpuChannelHostFactory* instance_; + + DISALLOW_COPY_AND_ASSIGN(BrowserGpuChannelHostFactory); +}; + +} // namespace content + +#endif // CONTENT_BROWSER_GPU_BROWSER_GPU_CHANNEL_HOST_FACTORY_H_ diff --git a/chromium/content/browser/gpu/compositor_util.cc b/chromium/content/browser/gpu/compositor_util.cc new file mode 100644 index 00000000000..a0cf1e21a2b --- /dev/null +++ b/chromium/content/browser/gpu/compositor_util.cc @@ -0,0 +1,117 @@ +// Copyright (c) 2012 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "content/public/browser/compositor_util.h" + +#include "base/command_line.h" +#include "base/metrics/field_trial.h" +#include "content/public/browser/gpu_data_manager.h" +#include "content/public/common/content_constants.h" +#include "content/public/common/content_switches.h" +#include "gpu/config/gpu_feature_type.h" + +namespace content { + +namespace { + +bool CanDoAcceleratedCompositing() { + const GpuDataManager* manager = GpuDataManager::GetInstance(); + + // Don't run the field trial if gpu access has been blocked or + // accelerated compositing is blacklisted. 
+ if (!manager->GpuAccessAllowed(NULL) || + manager->IsFeatureBlacklisted( + gpu::GPU_FEATURE_TYPE_ACCELERATED_COMPOSITING)) + return false; + + // Check for SwiftShader. + if (manager->ShouldUseSwiftShader()) + return false; + + const CommandLine& command_line = *CommandLine::ForCurrentProcess(); + if (command_line.HasSwitch(switches::kDisableAcceleratedCompositing)) + return false; + + return true; +} + +bool IsForceCompositingModeBlacklisted() { + return GpuDataManager::GetInstance()->IsFeatureBlacklisted( + gpu::GPU_FEATURE_TYPE_FORCE_COMPOSITING_MODE); +} + +} // namespace + +bool IsThreadedCompositingEnabled() { +#if defined(OS_WIN) && defined(USE_AURA) + // We always want compositing on Aura Windows. + return true; +#endif + + if (!CanDoAcceleratedCompositing()) + return false; + + const CommandLine& command_line = *CommandLine::ForCurrentProcess(); + + // Command line switches take precedence over blacklist and field trials. + if (command_line.HasSwitch(switches::kDisableForceCompositingMode) || + command_line.HasSwitch(switches::kDisableThreadedCompositing)) + return false; + +#if defined(OS_CHROMEOS) + // We always want threaded compositing on ChromeOS unless it's explicitly + // disabled above. + return true; +#endif + + if (command_line.HasSwitch(switches::kEnableThreadedCompositing)) + return true; + + if (IsForceCompositingModeBlacklisted()) + return false; + + base::FieldTrial* trial = + base::FieldTrialList::Find(kGpuCompositingFieldTrialName); + return trial && + trial->group_name() == kGpuCompositingFieldTrialThreadEnabledName; +} + +bool IsForceCompositingModeEnabled() { +#if defined(OS_WIN) && defined(USE_AURA) + // We always want compositing on Aura Windows. + return true; +#endif + + if (!CanDoAcceleratedCompositing()) + return false; + + const CommandLine& command_line = *CommandLine::ForCurrentProcess(); + + // Command line switches take precedence over blacklisting and field trials. 
+ if (command_line.HasSwitch(switches::kDisableForceCompositingMode)) + return false; + +#if defined(OS_CHROMEOS) + // We always want compositing ChromeOS unless it's explicitly disabled above. + return true; +#endif + + if (command_line.HasSwitch(switches::kForceCompositingMode)) + return true; + + if (IsForceCompositingModeBlacklisted()) + return false; + + base::FieldTrial* trial = + base::FieldTrialList::Find(kGpuCompositingFieldTrialName); + + // Force compositing is enabled in both the force compositing + // and threaded compositing mode field trials. + return trial && + (trial->group_name() == + kGpuCompositingFieldTrialForceCompositingEnabledName || + trial->group_name() == kGpuCompositingFieldTrialThreadEnabledName); +} + +} // namespace content diff --git a/chromium/content/browser/gpu/generate_webgl_conformance_test_list.py b/chromium/content/browser/gpu/generate_webgl_conformance_test_list.py new file mode 100755 index 00000000000..8d5854ab52d --- /dev/null +++ b/chromium/content/browser/gpu/generate_webgl_conformance_test_list.py @@ -0,0 +1,108 @@ +#!/usr/bin/env python +# Copyright (c) 2012 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +"""Auto-generates the WebGL conformance test list header file. + +Parses the WebGL conformance test *.txt file, which contains a list of URLs +for individual conformance tests (each on a new line). It recursively parses +*.txt files. For each test URL, the matching gtest call is created and +sent to the C++ header file. +""" + +import getopt +import os +import re +import sys + +COPYRIGHT = """\ +// Copyright (c) 2012 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +""" +WARNING = """\ +// DO NOT EDIT! 
This file is auto-generated by +// generate_webgl_conformance_test_list.py +// It is included by webgl_conformance_test.cc + +""" +HEADER_GUARD = """\ +#ifndef CONTENT_TEST_GPU_WEBGL_CONFORMANCE_TEST_LIST_AUTOGEN_H_ +#define CONTENT_TEST_GPU_WEBGL_CONFORMANCE_TEST_LIST_AUTOGEN_H_ + +""" +HEADER_GUARD_END = """ +#endif // CONTENT_TEST_GPU_WEBGL_CONFORMANCE_TEST_LIST_AUTOGEN_H_ + +""" + +# Assume this script is run from the src/content/test/gpu directory. +INPUT_DIR = "../../../third_party/webgl_conformance" +INPUT_FILE = "00_test_list.txt" +OUTPUT_FILE = "webgl_conformance_test_list_autogen.h" + +def main(argv): + """Main function for the WebGL conformance test list generator. + """ + if not os.path.exists(os.path.join(INPUT_DIR, INPUT_FILE)): + print >> sys.stderr, "ERROR: WebGL conformance tests do not exist." + print >> sys.stderr, "Run the script from the directory containing it." + return 1 + + output = open(OUTPUT_FILE, "w") + output.write(COPYRIGHT) + output.write(WARNING) + output.write(HEADER_GUARD) + + test_prefix = {} + + unparsed_files = [INPUT_FILE] + while unparsed_files: + filename = unparsed_files.pop(0) + try: + input = open(os.path.join(INPUT_DIR, filename)) + except IOError: + print >> sys.stderr, "WARNING: %s does not exist (skipped)." % filename + continue + + for url in input: + url = re.sub("//.*", "", url) + url = re.sub("#.*", "", url) + url = url.strip() + # Some filename has options before them, for example, + # --min-version 1.0.2 testname.html + pos = url.rfind(" ") + if pos != -1: + url = url[pos+1:] + + if not url: + continue + + # Cannot use os.path.join() because Windows with use "\\" but this path + # is sent through javascript. + if os.path.dirname(filename): + url = "%s/%s" % (os.path.dirname(filename), url) + + # Queue all text files for parsing, because test list URLs are nested + # through .txt files. 
+ if re.match(".+\.txt\s*$", url): + unparsed_files.append(url) + + # Convert the filename to a valid test name and output the gtest code. + else: + name = os.path.splitext(url)[0] + name = re.sub("\W+", "_", name) + if os.path.exists(os.path.join(INPUT_DIR, url)): + output.write('CONFORMANCE_TEST(%s,\n "%s");\n' % (name, url)) + else: + print >> sys.stderr, "WARNING: %s does not exist (skipped)." % url + input.close() + + output.write(HEADER_GUARD_END) + output.close() + return 0 + +if __name__ == "__main__": + sys.exit(main(sys.argv[1:])) diff --git a/chromium/content/browser/gpu/gpu_crash_browsertest.cc b/chromium/content/browser/gpu/gpu_crash_browsertest.cc new file mode 100644 index 00000000000..eafbc689cd0 --- /dev/null +++ b/chromium/content/browser/gpu/gpu_crash_browsertest.cc @@ -0,0 +1,76 @@ +// Copyright (c) 2012 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "base/path_service.h" +#include "content/browser/gpu/gpu_data_manager_impl.h" +#include "content/browser/gpu/gpu_process_host_ui_shim.h" +#include "content/public/browser/notification_service.h" +#include "content/public/browser/notification_types.h" +#include "content/public/common/content_paths.h" +#include "content/public/test/browser_test_utils.h" +#include "content/public/test/test_utils.h" +#include "content/shell/shell.h" +#include "content/test/content_browser_test.h" +#include "content/test/content_browser_test_utils.h" + +namespace content { +class GpuCrashTest : public ContentBrowserTest { + protected: + virtual void SetUpInProcessBrowserTestFixture() OVERRIDE { + base::FilePath test_dir; + ASSERT_TRUE(PathService::Get(DIR_TEST_DATA, &test_dir)); + gpu_test_dir_ = test_dir.AppendASCII("gpu"); + } + base::FilePath gpu_test_dir_; +}; + +#if defined(OS_LINUX) && !defined(NDEBUG) +// http://crbug.com/254724 +#define IF_NOT_DEBUG_LINUX(x) DISABLED_ ## x +#else +#define 
IF_NOT_DEBUG_LINUX(x) x +#endif + +IN_PROC_BROWSER_TEST_F(GpuCrashTest, IF_NOT_DEBUG_LINUX(MANUAL_Kill)) { + DOMMessageQueue message_queue; + + content::GpuDataManagerImpl::GetInstance()-> + DisableDomainBlockingFor3DAPIsForTesting(); + + // Load page and wait for it to load. + content::WindowedNotificationObserver observer( + content::NOTIFICATION_LOAD_STOP, + content::NotificationService::AllSources()); + NavigateToURL( + shell(), + GetFileUrlWithQuery( + gpu_test_dir_.AppendASCII("webgl.html"), "query=kill")); + observer.Wait(); + + GpuProcessHostUIShim* host = + GpuProcessHostUIShim::GetOneInstance(); + ASSERT_TRUE(host); + host->SimulateCrash(); + + std::string m; + ASSERT_TRUE(message_queue.WaitForMessage(&m)); + EXPECT_EQ("\"SUCCESS\"", m); +} + +IN_PROC_BROWSER_TEST_F(GpuCrashTest, + IF_NOT_DEBUG_LINUX(MANUAL_WebkitLoseContext)) { + DOMMessageQueue message_queue; + + NavigateToURL( + shell(), + GetFileUrlWithQuery( + gpu_test_dir_.AppendASCII("webgl.html"), + "query=WEBGL_lose_context")); + + std::string m; + ASSERT_TRUE(message_queue.WaitForMessage(&m)); + EXPECT_EQ("\"SUCCESS\"", m); +} + +} // namespace content diff --git a/chromium/content/browser/gpu/gpu_data_manager_impl.cc b/chromium/content/browser/gpu/gpu_data_manager_impl.cc new file mode 100644 index 00000000000..747503cedfe --- /dev/null +++ b/chromium/content/browser/gpu/gpu_data_manager_impl.cc @@ -0,0 +1,272 @@ +// Copyright (c) 2013 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "content/browser/gpu/gpu_data_manager_impl.h" + +#include "content/browser/gpu/gpu_data_manager_impl_private.h" + +namespace content { + +// static +GpuDataManager* GpuDataManager::GetInstance() { + return GpuDataManagerImpl::GetInstance(); +} + +// static +GpuDataManagerImpl* GpuDataManagerImpl::GetInstance() { + return Singleton<GpuDataManagerImpl>::get(); +} + +void GpuDataManagerImpl::InitializeForTesting( + const std::string& gpu_blacklist_json, const gpu::GPUInfo& gpu_info) { + base::AutoLock auto_lock(lock_); + private_->InitializeForTesting(gpu_blacklist_json, gpu_info); +} + +bool GpuDataManagerImpl::IsFeatureBlacklisted(int feature) const { + base::AutoLock auto_lock(lock_); + return private_->IsFeatureBlacklisted(feature); +} + +gpu::GPUInfo GpuDataManagerImpl::GetGPUInfo() const { + base::AutoLock auto_lock(lock_); + return private_->GetGPUInfo(); +} + +void GpuDataManagerImpl::GetGpuProcessHandles( + const GetGpuProcessHandlesCallback& callback) const { + base::AutoLock auto_lock(lock_); + private_->GetGpuProcessHandles(callback); +} + +bool GpuDataManagerImpl::GpuAccessAllowed(std::string* reason) const { + base::AutoLock auto_lock(lock_); + return private_->GpuAccessAllowed(reason); +} + +void GpuDataManagerImpl::RequestCompleteGpuInfoIfNeeded() { + base::AutoLock auto_lock(lock_); + private_->RequestCompleteGpuInfoIfNeeded(); +} + +bool GpuDataManagerImpl::IsCompleteGpuInfoAvailable() const { + base::AutoLock auto_lock(lock_); + return private_->IsCompleteGpuInfoAvailable(); +} + +void GpuDataManagerImpl::RequestVideoMemoryUsageStatsUpdate() const { + base::AutoLock auto_lock(lock_); + private_->RequestVideoMemoryUsageStatsUpdate(); +} + +bool GpuDataManagerImpl::ShouldUseSwiftShader() const { + base::AutoLock auto_lock(lock_); + return private_->ShouldUseSwiftShader(); +} + +void GpuDataManagerImpl::RegisterSwiftShaderPath( + const base::FilePath& path) { + base::AutoLock auto_lock(lock_); + private_->RegisterSwiftShaderPath(path); +} + 
+void GpuDataManagerImpl::AddObserver( + GpuDataManagerObserver* observer) { + base::AutoLock auto_lock(lock_); + private_->AddObserver(observer); +} + +void GpuDataManagerImpl::RemoveObserver( + GpuDataManagerObserver* observer) { + base::AutoLock auto_lock(lock_); + private_->RemoveObserver(observer); +} + +void GpuDataManagerImpl::UnblockDomainFrom3DAPIs(const GURL& url) { + base::AutoLock auto_lock(lock_); + private_->UnblockDomainFrom3DAPIs(url); +} + +void GpuDataManagerImpl::DisableGpuWatchdog() { + base::AutoLock auto_lock(lock_); + private_->DisableGpuWatchdog(); +} + +void GpuDataManagerImpl::SetGLStrings(const std::string& gl_vendor, + const std::string& gl_renderer, + const std::string& gl_version) { + base::AutoLock auto_lock(lock_); + private_->SetGLStrings(gl_vendor, gl_renderer, gl_version); +} + +void GpuDataManagerImpl::GetGLStrings(std::string* gl_vendor, + std::string* gl_renderer, + std::string* gl_version) { + base::AutoLock auto_lock(lock_); + private_->GetGLStrings(gl_vendor, gl_renderer, gl_version); +} + +void GpuDataManagerImpl::DisableHardwareAcceleration() { + base::AutoLock auto_lock(lock_); + private_->DisableHardwareAcceleration(); +} + +void GpuDataManagerImpl::Initialize() { + base::AutoLock auto_lock(lock_); + private_->Initialize(); +} + +void GpuDataManagerImpl::UpdateGpuInfo(const gpu::GPUInfo& gpu_info) { + base::AutoLock auto_lock(lock_); + private_->UpdateGpuInfo(gpu_info); +} + +void GpuDataManagerImpl::UpdateVideoMemoryUsageStats( + const GPUVideoMemoryUsageStats& video_memory_usage_stats) { + base::AutoLock auto_lock(lock_); + private_->UpdateVideoMemoryUsageStats(video_memory_usage_stats); +} + +void GpuDataManagerImpl::AppendRendererCommandLine( + CommandLine* command_line) const { + base::AutoLock auto_lock(lock_); + private_->AppendRendererCommandLine(command_line); +} + +void GpuDataManagerImpl::AppendGpuCommandLine( + CommandLine* command_line) const { + base::AutoLock auto_lock(lock_); + 
private_->AppendGpuCommandLine(command_line); +} + +void GpuDataManagerImpl::AppendPluginCommandLine( + CommandLine* command_line) const { + base::AutoLock auto_lock(lock_); + private_->AppendPluginCommandLine(command_line); +} + +void GpuDataManagerImpl::UpdateRendererWebPrefs( + WebPreferences* prefs) const { + base::AutoLock auto_lock(lock_); + private_->UpdateRendererWebPrefs(prefs); +} + +gpu::GpuSwitchingOption GpuDataManagerImpl::GetGpuSwitchingOption() const { + base::AutoLock auto_lock(lock_); + return private_->GetGpuSwitchingOption(); +} + +std::string GpuDataManagerImpl::GetBlacklistVersion() const { + base::AutoLock auto_lock(lock_); + return private_->GetBlacklistVersion(); +} + +std::string GpuDataManagerImpl::GetDriverBugListVersion() const { + base::AutoLock auto_lock(lock_); + return private_->GetDriverBugListVersion(); +} + +void GpuDataManagerImpl::GetBlacklistReasons(base::ListValue* reasons) const { + base::AutoLock auto_lock(lock_); + private_->GetBlacklistReasons(reasons); +} + +void GpuDataManagerImpl::GetDriverBugWorkarounds( + base::ListValue* workarounds) const { + base::AutoLock auto_lock(lock_); + private_->GetDriverBugWorkarounds(workarounds); +} + +void GpuDataManagerImpl::AddLogMessage(int level, + const std::string& header, + const std::string& message) { + base::AutoLock auto_lock(lock_); + private_->AddLogMessage(level, header, message); +} + +void GpuDataManagerImpl::ProcessCrashed( + base::TerminationStatus exit_code) { + base::AutoLock auto_lock(lock_); + private_->ProcessCrashed(exit_code); +} + +base::ListValue* GpuDataManagerImpl::GetLogMessages() const { + base::AutoLock auto_lock(lock_); + return private_->GetLogMessages(); +} + +void GpuDataManagerImpl::HandleGpuSwitch() { + base::AutoLock auto_lock(lock_); + private_->HandleGpuSwitch(); +} + +#if defined(OS_WIN) +bool GpuDataManagerImpl::IsUsingAcceleratedSurface() const { + base::AutoLock auto_lock(lock_); + return private_->IsUsingAcceleratedSurface(); +} +#endif + 
+bool GpuDataManagerImpl::CanUseGpuBrowserCompositor() const { + base::AutoLock auto_lock(lock_); + return private_->CanUseGpuBrowserCompositor(); +} + +void GpuDataManagerImpl::BlockDomainFrom3DAPIs( + const GURL& url, DomainGuilt guilt) { + base::AutoLock auto_lock(lock_); + private_->BlockDomainFrom3DAPIs(url, guilt); +} + +bool GpuDataManagerImpl::Are3DAPIsBlocked(const GURL& url, + int render_process_id, + int render_view_id, + ThreeDAPIType requester) { + base::AutoLock auto_lock(lock_); + return private_->Are3DAPIsBlocked( + url, render_process_id, render_view_id, requester); +} + +void GpuDataManagerImpl::DisableDomainBlockingFor3DAPIsForTesting() { + base::AutoLock auto_lock(lock_); + private_->DisableDomainBlockingFor3DAPIsForTesting(); +} + +size_t GpuDataManagerImpl::GetBlacklistedFeatureCount() const { + base::AutoLock auto_lock(lock_); + return private_->GetBlacklistedFeatureCount(); +} + +void GpuDataManagerImpl::SetDisplayCount(unsigned int display_count) { + base::AutoLock auto_lock(lock_); + private_->SetDisplayCount(display_count); +} + +unsigned int GpuDataManagerImpl::GetDisplayCount() const { + base::AutoLock auto_lock(lock_); + return private_->GetDisplayCount(); +} + +void GpuDataManagerImpl::Notify3DAPIBlocked(const GURL& url, + int render_process_id, + int render_view_id, + ThreeDAPIType requester) { + base::AutoLock auto_lock(lock_); + private_->Notify3DAPIBlocked( + url, render_process_id, render_view_id, requester); +} + +void GpuDataManagerImpl::OnGpuProcessInitFailure() { + base::AutoLock auto_lock(lock_); + private_->OnGpuProcessInitFailure(); +} + +GpuDataManagerImpl::GpuDataManagerImpl() + : private_(GpuDataManagerImplPrivate::Create(this)) { +} + +GpuDataManagerImpl::~GpuDataManagerImpl() { +} + +} // namespace content diff --git a/chromium/content/browser/gpu/gpu_data_manager_impl.h b/chromium/content/browser/gpu/gpu_data_manager_impl.h new file mode 100644 index 00000000000..497d3bdf40b --- /dev/null +++ 
b/chromium/content/browser/gpu/gpu_data_manager_impl.h @@ -0,0 +1,220 @@ +// Copyright (c) 2013 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef CONTENT_BROWSER_GPU_GPU_DATA_MANAGER_IMPL_H_ +#define CONTENT_BROWSER_GPU_GPU_DATA_MANAGER_IMPL_H_ + +#include <string> + +#include "base/compiler_specific.h" +#include "base/files/file_path.h" +#include "base/gtest_prod_util.h" +#include "base/logging.h" +#include "base/memory/scoped_ptr.h" +#include "base/memory/singleton.h" +#include "base/process/kill.h" +#include "base/synchronization/lock.h" +#include "base/time/time.h" +#include "base/values.h" +#include "content/public/browser/gpu_data_manager.h" +#include "content/public/common/gpu_memory_stats.h" +#include "content/public/common/three_d_api_types.h" +#include "gpu/config/gpu_info.h" +#include "gpu/config/gpu_switching_option.h" + +class CommandLine; +class GURL; +struct WebPreferences; + +namespace content { + +class GpuDataManagerImplPrivate; + +class CONTENT_EXPORT GpuDataManagerImpl + : public NON_EXPORTED_BASE(GpuDataManager) { + public: + // Indicates the guilt level of a domain which caused a GPU reset. + // If a domain is 100% known to be guilty of resetting the GPU, then + // it will generally not cause other domains' use of 3D APIs to be + // blocked, unless system stability would be compromised. + enum DomainGuilt { + DOMAIN_GUILT_KNOWN, + DOMAIN_GUILT_UNKNOWN + }; + + // Indicates the reason that access to a given client API (like + // WebGL or Pepper 3D) was blocked or not. This state is distinct + // from blacklisting of an entire feature. + enum DomainBlockStatus { + DOMAIN_BLOCK_STATUS_BLOCKED, + DOMAIN_BLOCK_STATUS_ALL_DOMAINS_BLOCKED, + DOMAIN_BLOCK_STATUS_NOT_BLOCKED + }; + + // Getter for the singleton. This will return NULL on failure. + static GpuDataManagerImpl* GetInstance(); + + // GpuDataManager implementation. 
+ virtual void InitializeForTesting( + const std::string& gpu_blacklist_json, + const gpu::GPUInfo& gpu_info) OVERRIDE; + virtual bool IsFeatureBlacklisted(int feature) const OVERRIDE; + virtual gpu::GPUInfo GetGPUInfo() const OVERRIDE; + virtual void GetGpuProcessHandles( + const GetGpuProcessHandlesCallback& callback) const OVERRIDE; + virtual bool GpuAccessAllowed(std::string* reason) const OVERRIDE; + virtual void RequestCompleteGpuInfoIfNeeded() OVERRIDE; + virtual bool IsCompleteGpuInfoAvailable() const OVERRIDE; + virtual void RequestVideoMemoryUsageStatsUpdate() const OVERRIDE; + virtual bool ShouldUseSwiftShader() const OVERRIDE; + virtual void RegisterSwiftShaderPath(const base::FilePath& path) OVERRIDE; + virtual void AddObserver(GpuDataManagerObserver* observer) OVERRIDE; + virtual void RemoveObserver(GpuDataManagerObserver* observer) OVERRIDE; + virtual void UnblockDomainFrom3DAPIs(const GURL& url) OVERRIDE; + virtual void DisableGpuWatchdog() OVERRIDE; + virtual void SetGLStrings(const std::string& gl_vendor, + const std::string& gl_renderer, + const std::string& gl_version) OVERRIDE; + virtual void GetGLStrings(std::string* gl_vendor, + std::string* gl_renderer, + std::string* gl_version) OVERRIDE; + virtual void DisableHardwareAcceleration() OVERRIDE; + + // This collects preliminary GPU info, load GpuBlacklist, and compute the + // preliminary blacklisted features; it should only be called at browser + // startup time in UI thread before the IO restriction is turned on. + void Initialize(); + + // Only update if the current GPUInfo is not finalized. If blacklist is + // loaded, run through blacklist and update blacklisted features. + void UpdateGpuInfo(const gpu::GPUInfo& gpu_info); + + void UpdateVideoMemoryUsageStats( + const GPUVideoMemoryUsageStats& video_memory_usage_stats); + + // Insert disable-feature switches corresponding to preliminary gpu feature + // flags into the renderer process command line. 
+ void AppendRendererCommandLine(CommandLine* command_line) const; + + // Insert switches into gpu process command line: kUseGL, + // kDisableGLMultisampling. + void AppendGpuCommandLine(CommandLine* command_line) const; + + // Insert switches into plugin process command line: + // kDisableCoreAnimationPlugins. + void AppendPluginCommandLine(CommandLine* command_line) const; + + // Update WebPreferences for renderer based on blacklisting decisions. + void UpdateRendererWebPrefs(WebPreferences* prefs) const; + + gpu::GpuSwitchingOption GetGpuSwitchingOption() const; + + std::string GetBlacklistVersion() const; + std::string GetDriverBugListVersion() const; + + // Returns the reasons for the latest run of blacklisting decisions. + // For the structure of returned value, see documentation for + // GpuBlacklist::GetBlacklistedReasons(). + void GetBlacklistReasons(base::ListValue* reasons) const; + + // Returns the workarounds that are applied to the current system as + // a list of strings. + void GetDriverBugWorkarounds(base::ListValue* workarounds) const; + + void AddLogMessage(int level, + const std::string& header, + const std::string& message); + + void ProcessCrashed(base::TerminationStatus exit_code); + + // Returns a new copy of the ListValue. Caller is responsible to release + // the returned value. + base::ListValue* GetLogMessages() const; + + // Called when switching gpu. + void HandleGpuSwitch(); + +#if defined(OS_WIN) + // Is the GPU process using the accelerated surface to present, instead of + // presenting by itself. + bool IsUsingAcceleratedSurface() const; +#endif + + bool CanUseGpuBrowserCompositor() const; + + // Maintenance of domains requiring explicit user permission before + // using client-facing 3D APIs (WebGL, Pepper 3D), either because + // the domain has caused the GPU to reset, or because too many GPU + // resets have been observed globally recently, and system stability + // might be compromised. 
+ // + // The given URL may be a partial URL (including at least the host) + // or a full URL to a page. + // + // Note that the unblocking API must be part of the content API + // because it is called from Chrome side code. + void BlockDomainFrom3DAPIs(const GURL& url, DomainGuilt guilt); + bool Are3DAPIsBlocked(const GURL& url, + int render_process_id, + int render_view_id, + ThreeDAPIType requester); + + // Disables domain blocking for 3D APIs. For use only in tests. + void DisableDomainBlockingFor3DAPIsForTesting(); + + void Notify3DAPIBlocked(const GURL& url, + int render_process_id, + int render_view_id, + ThreeDAPIType requester); + + // Get number of features being blacklisted. + size_t GetBlacklistedFeatureCount() const; + + void SetDisplayCount(unsigned int display_count); + unsigned int GetDisplayCount() const; + + // Called when GPU process initialization failed. + void OnGpuProcessInitFailure(); + + private: + friend class GpuDataManagerImplPrivate; + friend class GpuDataManagerImplPrivateTest; + friend struct DefaultSingletonTraits<GpuDataManagerImpl>; + + // It's similar to AutoUnlock, but we want to make it a no-op + // if the owner GpuDataManagerImpl is null. + // This should only be used by GpuDataManagerImplPrivate where + // callbacks are called, during which re-entering + // GpuDataManagerImpl is possible. 
+ class UnlockedSession { + public: + explicit UnlockedSession(GpuDataManagerImpl* owner) + : owner_(owner) { + DCHECK(owner_); + owner_->lock_.AssertAcquired(); + owner_->lock_.Release(); + } + + ~UnlockedSession() { + DCHECK(owner_); + owner_->lock_.Acquire(); + } + + private: + GpuDataManagerImpl* owner_; + DISALLOW_COPY_AND_ASSIGN(UnlockedSession); + }; + + GpuDataManagerImpl(); + virtual ~GpuDataManagerImpl(); + + mutable base::Lock lock_; + scoped_ptr<GpuDataManagerImplPrivate> private_; + + DISALLOW_COPY_AND_ASSIGN(GpuDataManagerImpl); +}; + +} // namespace content + +#endif // CONTENT_BROWSER_GPU_GPU_DATA_MANAGER_IMPL_H_ diff --git a/chromium/content/browser/gpu/gpu_data_manager_impl_private.cc b/chromium/content/browser/gpu/gpu_data_manager_impl_private.cc new file mode 100644 index 00000000000..8491508667d --- /dev/null +++ b/chromium/content/browser/gpu/gpu_data_manager_impl_private.cc @@ -0,0 +1,1254 @@ +// Copyright (c) 2013 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "content/browser/gpu/gpu_data_manager_impl_private.h" + +#include "base/bind.h" +#include "base/bind_helpers.h" +#include "base/command_line.h" +#include "base/debug/trace_event.h" +#include "base/metrics/field_trial.h" +#include "base/metrics/histogram.h" +#include "base/metrics/sparse_histogram.h" +#include "base/strings/string_number_conversions.h" +#include "base/strings/stringprintf.h" +#include "base/sys_info.h" +#include "base/version.h" +#include "cc/base/switches.h" +#include "content/browser/gpu/gpu_process_host.h" +#include "content/common/gpu/gpu_messages.h" +#include "content/public/browser/browser_thread.h" +#include "content/public/browser/gpu_data_manager_observer.h" +#include "content/public/common/content_client.h" +#include "content/public/common/content_constants.h" +#include "content/public/common/content_switches.h" +#include "gpu/command_buffer/service/gpu_switches.h" +#include "gpu/config/gpu_control_list_jsons.h" +#include "gpu/config/gpu_driver_bug_workaround_type.h" +#include "gpu/config/gpu_feature_type.h" +#include "gpu/config/gpu_info_collector.h" +#include "gpu/config/gpu_util.h" +#include "ui/base/ui_base_switches.h" +#include "ui/gl/gl_implementation.h" +#include "ui/gl/gl_switches.h" +#include "ui/gl/gpu_switching_manager.h" +#include "webkit/common/webpreferences.h" + +#if defined(OS_MACOSX) +#include <ApplicationServices/ApplicationServices.h> +#endif // OS_MACOSX +#if defined(OS_WIN) +#include "base/win/windows_version.h" +#endif // OS_WIN +#if defined(OS_ANDROID) +#include "ui/gfx/android/device_display_info.h" +#endif // OS_ANDROID + +namespace content { + +namespace { + +enum GpuFeatureStatus { + kGpuFeatureEnabled = 0, + kGpuFeatureBlacklisted = 1, + kGpuFeatureDisabled = 2, // disabled by user but not blacklisted + kGpuFeatureNumStatus +}; + +#if defined(OS_WIN) + +enum WinSubVersion { + kWinOthers = 0, + kWinXP, + kWinVista, + kWin7, + kWin8, + kNumWinSubVersions +}; + +int 
GetGpuBlacklistHistogramValueWin(GpuFeatureStatus status) { + static WinSubVersion sub_version = kNumWinSubVersions; + if (sub_version == kNumWinSubVersions) { + sub_version = kWinOthers; + std::string version_str = base::SysInfo::OperatingSystemVersion(); + size_t pos = version_str.find_first_not_of("0123456789."); + if (pos != std::string::npos) + version_str = version_str.substr(0, pos); + Version os_version(version_str); + if (os_version.IsValid() && os_version.components().size() >= 2) { + const std::vector<uint16>& version_numbers = os_version.components(); + if (version_numbers[0] == 5) + sub_version = kWinXP; + else if (version_numbers[0] == 6 && version_numbers[1] == 0) + sub_version = kWinVista; + else if (version_numbers[0] == 6 && version_numbers[1] == 1) + sub_version = kWin7; + else if (version_numbers[0] == 6 && version_numbers[1] == 2) + sub_version = kWin8; + } + } + int entry_index = static_cast<int>(sub_version) * kGpuFeatureNumStatus; + switch (status) { + case kGpuFeatureEnabled: + break; + case kGpuFeatureBlacklisted: + entry_index++; + break; + case kGpuFeatureDisabled: + entry_index += 2; + break; + } + return entry_index; +} +#endif // OS_WIN + +// Send UMA histograms about the enabled features and GPU properties. +void UpdateStats(const gpu::GPUInfo& gpu_info, + const gpu::GpuBlacklist* blacklist, + const std::set<int>& blacklisted_features) { + uint32 max_entry_id = blacklist->max_entry_id(); + if (max_entry_id == 0) { + // GPU Blacklist was not loaded. No need to go further. + return; + } + + const CommandLine& command_line = *CommandLine::ForCurrentProcess(); + bool disabled = false; + + // Use entry 0 to capture the total number of times that data + // was recorded in this histogram in order to have a convenient + // denominator to compute blacklist percentages for the rest of the + // entries. 
+ UMA_HISTOGRAM_ENUMERATION("GPU.BlacklistTestResultsPerEntry", + 0, max_entry_id + 1); + + if (blacklisted_features.size() != 0) { + std::vector<uint32> flag_entries; + blacklist->GetDecisionEntries(&flag_entries, disabled); + DCHECK_GT(flag_entries.size(), 0u); + for (size_t i = 0; i < flag_entries.size(); ++i) { + UMA_HISTOGRAM_ENUMERATION("GPU.BlacklistTestResultsPerEntry", + flag_entries[i], max_entry_id + 1); + } + } + + // This counts how many users are affected by a disabled entry - this allows + // us to understand the impact of an entry before enable it. + std::vector<uint32> flag_disabled_entries; + disabled = true; + blacklist->GetDecisionEntries(&flag_disabled_entries, disabled); + for (size_t i = 0; i < flag_disabled_entries.size(); ++i) { + UMA_HISTOGRAM_ENUMERATION("GPU.BlacklistTestResultsPerDisabledEntry", + flag_disabled_entries[i], max_entry_id + 1); + } + + const gpu::GpuFeatureType kGpuFeatures[] = { + gpu::GPU_FEATURE_TYPE_ACCELERATED_2D_CANVAS, + gpu::GPU_FEATURE_TYPE_ACCELERATED_COMPOSITING, + gpu::GPU_FEATURE_TYPE_WEBGL, + gpu::GPU_FEATURE_TYPE_TEXTURE_SHARING + }; + const std::string kGpuBlacklistFeatureHistogramNames[] = { + "GPU.BlacklistFeatureTestResults.Accelerated2dCanvas", + "GPU.BlacklistFeatureTestResults.AcceleratedCompositing", + "GPU.BlacklistFeatureTestResults.Webgl", + "GPU.BlacklistFeatureTestResults.TextureSharing" + }; + const bool kGpuFeatureUserFlags[] = { + command_line.HasSwitch(switches::kDisableAccelerated2dCanvas), + command_line.HasSwitch(switches::kDisableAcceleratedCompositing), + command_line.HasSwitch(switches::kDisableExperimentalWebGL), + command_line.HasSwitch(switches::kDisableImageTransportSurface) + }; +#if defined(OS_WIN) + const std::string kGpuBlacklistFeatureHistogramNamesWin[] = { + "GPU.BlacklistFeatureTestResultsWindows.Accelerated2dCanvas", + "GPU.BlacklistFeatureTestResultsWindows.AcceleratedCompositing", + "GPU.BlacklistFeatureTestResultsWindows.Webgl", + 
"GPU.BlacklistFeatureTestResultsWindows.TextureSharing" + }; +#endif + const size_t kNumFeatures = + sizeof(kGpuFeatures) / sizeof(gpu::GpuFeatureType); + for (size_t i = 0; i < kNumFeatures; ++i) { + // We can't use UMA_HISTOGRAM_ENUMERATION here because the same name is + // expected if the macro is used within a loop. + GpuFeatureStatus value = kGpuFeatureEnabled; + if (blacklisted_features.count(kGpuFeatures[i])) + value = kGpuFeatureBlacklisted; + else if (kGpuFeatureUserFlags[i]) + value = kGpuFeatureDisabled; + base::HistogramBase* histogram_pointer = base::LinearHistogram::FactoryGet( + kGpuBlacklistFeatureHistogramNames[i], + 1, kGpuFeatureNumStatus, kGpuFeatureNumStatus + 1, + base::HistogramBase::kUmaTargetedHistogramFlag); + histogram_pointer->Add(value); +#if defined(OS_WIN) + histogram_pointer = base::LinearHistogram::FactoryGet( + kGpuBlacklistFeatureHistogramNamesWin[i], + 1, kNumWinSubVersions * kGpuFeatureNumStatus, + kNumWinSubVersions * kGpuFeatureNumStatus + 1, + base::HistogramBase::kUmaTargetedHistogramFlag); + histogram_pointer->Add(GetGpuBlacklistHistogramValueWin(value)); +#endif + } + + UMA_HISTOGRAM_SPARSE_SLOWLY("GPU.GLResetNotificationStrategy", + gpu_info.gl_reset_notification_strategy); +} + +// Strip out the non-digital info; if after that, we get an empty string, +// return "0". +std::string ProcessVersionString(const std::string& raw_string) { + const std::string valid_set = "0123456789."; + size_t start_pos = raw_string.find_first_of(valid_set); + if (start_pos == std::string::npos) + return "0"; + size_t end_pos = raw_string.find_first_not_of(raw_string, start_pos); + std::string version_string = raw_string.substr( + start_pos, end_pos - start_pos); + if (version_string.empty()) + return "0"; + return version_string; +} + +// Combine the integers into a string, seperated by ','. 
+std::string IntSetToString(const std::set<int>& list) { + std::string rt; + for (std::set<int>::const_iterator it = list.begin(); + it != list.end(); ++it) { + if (!rt.empty()) + rt += ","; + rt += base::IntToString(*it); + } + return rt; +} + +#if defined(OS_MACOSX) +void DisplayReconfigCallback(CGDirectDisplayID display, + CGDisplayChangeSummaryFlags flags, + void* gpu_data_manager) { + if(flags == kCGDisplayBeginConfigurationFlag) + return; // This call contains no information about the display change + + GpuDataManagerImpl* manager = + reinterpret_cast<GpuDataManagerImpl*>(gpu_data_manager); + DCHECK(manager); + + uint32_t displayCount; + CGGetActiveDisplayList(0, NULL, &displayCount); + + bool fireGpuSwitch = flags & kCGDisplayAddFlag; + + if (displayCount != manager->GetDisplayCount()) { + manager->SetDisplayCount(displayCount); + fireGpuSwitch = true; + } + + if (fireGpuSwitch) + manager->HandleGpuSwitch(); +} +#endif // OS_MACOSX + +#if defined(OS_ANDROID) +void ApplyAndroidWorkarounds(const gpu::GPUInfo& gpu_info, + CommandLine* command_line) { + std::string vendor(StringToLowerASCII(gpu_info.gl_vendor)); + std::string renderer(StringToLowerASCII(gpu_info.gl_renderer)); + bool is_img = + gpu_info.gl_vendor.find("Imagination") != std::string::npos; + bool is_arm = + gpu_info.gl_vendor.find("ARM") != std::string::npos; + bool is_qualcomm = + gpu_info.gl_vendor.find("Qualcomm") != std::string::npos; + bool is_broadcom = + gpu_info.gl_vendor.find("Broadcom") != std::string::npos; + bool is_mali_t604 = is_arm && + gpu_info.gl_renderer.find("Mali-T604") != std::string::npos; + bool is_nvidia = + gpu_info.gl_vendor.find("NVIDIA") != std::string::npos; + + bool is_vivante = + gpu_info.gl_extensions.find("GL_VIV_shader_binary") != + std::string::npos; + + bool is_nexus7 = + gpu_info.machine_model.find("Nexus 7") != std::string::npos; + bool is_nexus10 = + gpu_info.machine_model.find("Nexus 10") != std::string::npos; + + // IMG: avoid context switching perf 
problems, crashes with share groups + // Mali-T604: http://crbug.com/154715 + // QualComm, NVIDIA: Crashes with share groups + if (is_vivante || is_img || is_mali_t604 || is_nvidia || is_qualcomm || + is_broadcom) + command_line->AppendSwitch(switches::kEnableVirtualGLContexts); + + gfx::DeviceDisplayInfo info; + int default_tile_size = 256; + + // For very high resolution displays (eg. Nexus 10), set the default + // tile size to be 512. This should be removed in favour of a generic + // hueristic that works across all platforms and devices, once that + // exists: http://crbug.com/159524. This switches to 512 for screens + // containing 40 or more 256x256 tiles, such that 1080p devices do + // not use 512x512 tiles (eg. 1920x1280 requires 37.5 tiles) + int numTiles = (info.GetDisplayWidth() * + info.GetDisplayHeight()) / (256 * 256); + if (numTiles >= 40) + default_tile_size = 512; + + // IMG: Fast async texture uploads only work with non-power-of-two, + // but still multiple-of-eight sizes. + // http://crbug.com/168099 + if (is_img) + default_tile_size -= 8; + + // If we are using the MapImage API double the tile size to reduce + // the number of zero-copy buffers being used. + if (command_line->HasSwitch(cc::switches::kUseMapImage)) + default_tile_size *= 2; + + // Set the command line if it isn't already set and we changed + // the default tile size. + if (default_tile_size != 256 && + !command_line->HasSwitch(switches::kDefaultTileWidth) && + !command_line->HasSwitch(switches::kDefaultTileHeight)) { + std::stringstream size; + size << default_tile_size; + command_line->AppendSwitchASCII( + switches::kDefaultTileWidth, size.str()); + command_line->AppendSwitchASCII( + switches::kDefaultTileHeight, size.str()); + } + + // Increase the resolution of low resolution tiles for Nexus tablets. 
+ if ((is_nexus7 || is_nexus10) && + !command_line->HasSwitch( + cc::switches::kLowResolutionContentsScaleFactor)) { + command_line->AppendSwitchASCII( + cc::switches::kLowResolutionContentsScaleFactor, "0.25"); + } +} +#endif // OS_ANDROID + +// Block all domains' use of 3D APIs for this many milliseconds if +// approaching a threshold where system stability might be compromised. +const int64 kBlockAllDomainsMs = 10000; +const int kNumResetsWithinDuration = 1; + +// Enums for UMA histograms. +enum BlockStatusHistogram { + BLOCK_STATUS_NOT_BLOCKED, + BLOCK_STATUS_SPECIFIC_DOMAIN_BLOCKED, + BLOCK_STATUS_ALL_DOMAINS_BLOCKED, + BLOCK_STATUS_MAX +}; + +} // namespace anonymous + +void GpuDataManagerImplPrivate::InitializeForTesting( + const std::string& gpu_blacklist_json, + const gpu::GPUInfo& gpu_info) { + // This function is for testing only, so disable histograms. + update_histograms_ = false; + + InitializeImpl(gpu_blacklist_json, std::string(), std::string(), gpu_info); +} + +bool GpuDataManagerImplPrivate::IsFeatureBlacklisted(int feature) const { + if (use_swiftshader_) { + // Skia's software rendering is probably more efficient than going through + // software emulation of the GPU, so use that. 
+ if (feature == gpu::GPU_FEATURE_TYPE_ACCELERATED_2D_CANVAS) + return true; + return false; + } + + return (blacklisted_features_.count(feature) == 1); +} + +size_t GpuDataManagerImplPrivate::GetBlacklistedFeatureCount() const { + if (use_swiftshader_) + return 1; + return blacklisted_features_.size(); +} + +void GpuDataManagerImplPrivate::SetDisplayCount(unsigned int display_count) { + display_count_ = display_count; +} + +unsigned int GpuDataManagerImplPrivate::GetDisplayCount() const { + return display_count_; +} + +gpu::GPUInfo GpuDataManagerImplPrivate::GetGPUInfo() const { + return gpu_info_; +} + +void GpuDataManagerImplPrivate::GetGpuProcessHandles( + const GpuDataManager::GetGpuProcessHandlesCallback& callback) const { + GpuProcessHost::GetProcessHandles(callback); +} + +bool GpuDataManagerImplPrivate::GpuAccessAllowed( + std::string* reason) const { + if (use_swiftshader_) + return true; + + if (!gpu_process_accessible_) { + if (reason) { + *reason = "GPU process launch failed."; + } + return false; + } + + if (card_blacklisted_) { + if (reason) { + *reason = "GPU access is disabled "; + CommandLine* command_line = CommandLine::ForCurrentProcess(); + if (command_line->HasSwitch(switches::kDisableGpu)) + *reason += "through commandline switch --disable-gpu."; + else + *reason += "in chrome://settings."; + } + return false; + } + + // We only need to block GPU process if more features are disallowed other + // than those in the preliminary gpu feature flags because the latter work + // through renderer commandline switches. 
+ std::set<int> features = preliminary_blacklisted_features_; + gpu::MergeFeatureSets(&features, blacklisted_features_); + if (features.size() > preliminary_blacklisted_features_.size()) { + if (reason) { + *reason = "Features are disabled upon full but not preliminary GPU info."; + } + return false; + } + + if (blacklisted_features_.size() == gpu::NUMBER_OF_GPU_FEATURE_TYPES) { + // On Linux, we use cached GL strings to make blacklist decsions at browser + // startup time. We need to launch the GPU process to validate these + // strings even if all features are blacklisted. If all GPU features are + // disabled, the GPU process will only initialize GL bindings, create a GL + // context, and collect full GPU info. +#if !defined(OS_LINUX) + if (reason) { + *reason = "All GPU features are blacklisted."; + } + return false; +#endif + } + + return true; +} + +void GpuDataManagerImplPrivate::RequestCompleteGpuInfoIfNeeded() { + if (complete_gpu_info_already_requested_ || gpu_info_.finalized) + return; + complete_gpu_info_already_requested_ = true; + + GpuProcessHost::SendOnIO( +#if defined(OS_WIN) + GpuProcessHost::GPU_PROCESS_KIND_UNSANDBOXED, +#else + GpuProcessHost::GPU_PROCESS_KIND_SANDBOXED, +#endif + CAUSE_FOR_GPU_LAUNCH_GPUDATAMANAGER_REQUESTCOMPLETEGPUINFOIFNEEDED, + new GpuMsg_CollectGraphicsInfo()); +} + +bool GpuDataManagerImplPrivate::IsCompleteGpuInfoAvailable() const { + return gpu_info_.finalized; +} + +void GpuDataManagerImplPrivate::RequestVideoMemoryUsageStatsUpdate() const { + GpuProcessHost::SendOnIO( + GpuProcessHost::GPU_PROCESS_KIND_SANDBOXED, + CAUSE_FOR_GPU_LAUNCH_NO_LAUNCH, + new GpuMsg_GetVideoMemoryUsageStats()); +} + +bool GpuDataManagerImplPrivate::ShouldUseSwiftShader() const { + return use_swiftshader_; +} + +void GpuDataManagerImplPrivate::RegisterSwiftShaderPath( + const base::FilePath& path) { + swiftshader_path_ = path; + EnableSwiftShaderIfNecessary(); +} + +void GpuDataManagerImplPrivate::AddObserver(GpuDataManagerObserver* 
observer) { + GpuDataManagerImpl::UnlockedSession session(owner_); + observer_list_->AddObserver(observer); +} + +void GpuDataManagerImplPrivate::RemoveObserver( + GpuDataManagerObserver* observer) { + GpuDataManagerImpl::UnlockedSession session(owner_); + observer_list_->RemoveObserver(observer); +} + +void GpuDataManagerImplPrivate::UnblockDomainFrom3DAPIs(const GURL& url) { + // This method must do two things: + // + // 1. If the specific domain is blocked, then unblock it. + // + // 2. Reset our notion of how many GPU resets have occurred recently. + // This is necessary even if the specific domain was blocked. + // Otherwise, if we call Are3DAPIsBlocked with the same domain right + // after unblocking it, it will probably still be blocked because of + // the recent GPU reset caused by that domain. + // + // These policies could be refined, but at a certain point the behavior + // will become difficult to explain. + std::string domain = GetDomainFromURL(url); + + blocked_domains_.erase(domain); + timestamps_of_gpu_resets_.clear(); +} + +void GpuDataManagerImplPrivate::DisableGpuWatchdog() { + GpuProcessHost::SendOnIO( + GpuProcessHost::GPU_PROCESS_KIND_SANDBOXED, + CAUSE_FOR_GPU_LAUNCH_NO_LAUNCH, + new GpuMsg_DisableWatchdog); +} + +void GpuDataManagerImplPrivate::SetGLStrings(const std::string& gl_vendor, + const std::string& gl_renderer, + const std::string& gl_version) { + if (gl_vendor.empty() && gl_renderer.empty() && gl_version.empty()) + return; + + // If GPUInfo already got GL strings, do nothing. This is for the rare + // situation where GPU process collected GL strings before this call. 
+ if (!gpu_info_.gl_vendor.empty() || + !gpu_info_.gl_renderer.empty() || + !gpu_info_.gl_version_string.empty()) + return; + + gpu::GPUInfo gpu_info = gpu_info_; + + gpu_info.gl_vendor = gl_vendor; + gpu_info.gl_renderer = gl_renderer; + gpu_info.gl_version_string = gl_version; + + gpu::CollectDriverInfoGL(&gpu_info); + + UpdateGpuInfo(gpu_info); + UpdateGpuSwitchingManager(gpu_info); + UpdatePreliminaryBlacklistedFeatures(); +} + +void GpuDataManagerImplPrivate::GetGLStrings(std::string* gl_vendor, + std::string* gl_renderer, + std::string* gl_version) { + DCHECK(gl_vendor && gl_renderer && gl_version); + + *gl_vendor = gpu_info_.gl_vendor; + *gl_renderer = gpu_info_.gl_renderer; + *gl_version = gpu_info_.gl_version_string; +} + +void GpuDataManagerImplPrivate::Initialize() { + TRACE_EVENT0("startup", "GpuDataManagerImpl::Initialize"); + CommandLine* command_line = CommandLine::ForCurrentProcess(); + if (command_line->HasSwitch(switches::kSkipGpuDataLoading) && + !command_line->HasSwitch(switches::kUseGpuInTests)) + return; + + gpu::GPUInfo gpu_info; + { + TRACE_EVENT0("startup", + "GpuDataManagerImpl::Initialize:CollectBasicGraphicsInfo"); + gpu::CollectBasicGraphicsInfo(&gpu_info); + } +#if defined(ARCH_CPU_X86_FAMILY) + if (!gpu_info.gpu.vendor_id || !gpu_info.gpu.device_id) + gpu_info.finalized = true; +#endif + + std::string gpu_blacklist_string; + std::string gpu_switching_list_string; + std::string gpu_driver_bug_list_string; + if (!command_line->HasSwitch(switches::kIgnoreGpuBlacklist) && + !command_line->HasSwitch(switches::kUseGpuInTests)) { + gpu_blacklist_string = gpu::kSoftwareRenderingListJson; + gpu_switching_list_string = gpu::kGpuSwitchingListJson; + } + if (!command_line->HasSwitch(switches::kDisableGpuDriverBugWorkarounds)) { + gpu_driver_bug_list_string = gpu::kGpuDriverBugListJson; + } + InitializeImpl(gpu_blacklist_string, + gpu_switching_list_string, + gpu_driver_bug_list_string, + gpu_info); +} + +void 
GpuDataManagerImplPrivate::UpdateGpuInfo(const gpu::GPUInfo& gpu_info) { + // No further update of gpu_info if falling back to SwiftShader. + if (use_swiftshader_) + return; + + gpu::MergeGPUInfo(&gpu_info_, gpu_info); + complete_gpu_info_already_requested_ = + complete_gpu_info_already_requested_ || gpu_info_.finalized; + + GetContentClient()->SetGpuInfo(gpu_info_); + + if (gpu_blacklist_) { + std::set<int> features = gpu_blacklist_->MakeDecision( + gpu::GpuControlList::kOsAny, std::string(), gpu_info_); + if (update_histograms_) + UpdateStats(gpu_info_, gpu_blacklist_.get(), features); + + UpdateBlacklistedFeatures(features); + } + if (gpu_switching_list_) { + std::set<int> option = gpu_switching_list_->MakeDecision( + gpu::GpuControlList::kOsAny, std::string(), gpu_info_); + if (option.size() == 1) { + // Blacklist decision should not overwrite commandline switch from users. + CommandLine* command_line = CommandLine::ForCurrentProcess(); + if (!command_line->HasSwitch(switches::kGpuSwitching)) { + gpu_switching_ = static_cast<gpu::GpuSwitchingOption>( + *(option.begin())); + } + } + } + if (gpu_driver_bug_list_) { + gpu_driver_bugs_ = gpu_driver_bug_list_->MakeDecision( + gpu::GpuControlList::kOsAny, std::string(), gpu_info_); + } + + // We have to update GpuFeatureType before notify all the observers. 
+ NotifyGpuInfoUpdate(); +} + +void GpuDataManagerImplPrivate::UpdateVideoMemoryUsageStats( + const GPUVideoMemoryUsageStats& video_memory_usage_stats) { + GpuDataManagerImpl::UnlockedSession session(owner_); + observer_list_->Notify(&GpuDataManagerObserver::OnVideoMemoryUsageStatsUpdate, + video_memory_usage_stats); +} + +void GpuDataManagerImplPrivate::AppendRendererCommandLine( + CommandLine* command_line) const { + DCHECK(command_line); + + if (IsFeatureBlacklisted(gpu::GPU_FEATURE_TYPE_WEBGL)) { + if (!command_line->HasSwitch(switches::kDisableExperimentalWebGL)) + command_line->AppendSwitch(switches::kDisableExperimentalWebGL); + if (!command_line->HasSwitch(switches::kDisablePepper3d)) + command_line->AppendSwitch(switches::kDisablePepper3d); + } + if (IsFeatureBlacklisted(gpu::GPU_FEATURE_TYPE_MULTISAMPLING) && + !command_line->HasSwitch(switches::kDisableGLMultisampling)) + command_line->AppendSwitch(switches::kDisableGLMultisampling); + if (IsFeatureBlacklisted(gpu::GPU_FEATURE_TYPE_ACCELERATED_COMPOSITING) && + !command_line->HasSwitch(switches::kDisableAcceleratedCompositing)) + command_line->AppendSwitch(switches::kDisableAcceleratedCompositing); + if (IsFeatureBlacklisted(gpu::GPU_FEATURE_TYPE_ACCELERATED_2D_CANVAS) && + !command_line->HasSwitch(switches::kDisableAccelerated2dCanvas)) + command_line->AppendSwitch(switches::kDisableAccelerated2dCanvas); + if (IsFeatureBlacklisted(gpu::GPU_FEATURE_TYPE_ACCELERATED_VIDEO_DECODE) && + !command_line->HasSwitch(switches::kDisableAcceleratedVideoDecode)) + command_line->AppendSwitch(switches::kDisableAcceleratedVideoDecode); + + if (use_software_compositor_ && + !command_line->HasSwitch(switches::kEnableSoftwareCompositing)) + command_line->AppendSwitch(switches::kEnableSoftwareCompositing); + +#if defined(USE_AURA) + if (!CanUseGpuBrowserCompositor()) { + command_line->AppendSwitch(switches::kDisableGpuCompositing); + command_line->AppendSwitch(switches::kDisablePepper3d); + } +#endif +} + +void 
GpuDataManagerImplPrivate::AppendGpuCommandLine( + CommandLine* command_line) const { + DCHECK(command_line); + + bool reduce_sandbox = false; + + std::string use_gl = + CommandLine::ForCurrentProcess()->GetSwitchValueASCII(switches::kUseGL); + base::FilePath swiftshader_path = + CommandLine::ForCurrentProcess()->GetSwitchValuePath( + switches::kSwiftShaderPath); + if (IsFeatureBlacklisted(gpu::GPU_FEATURE_TYPE_MULTISAMPLING) && + !command_line->HasSwitch(switches::kDisableGLMultisampling)) { + command_line->AppendSwitch(switches::kDisableGLMultisampling); + } + if (IsFeatureBlacklisted(gpu::GPU_FEATURE_TYPE_TEXTURE_SHARING)) { + command_line->AppendSwitch(switches::kDisableImageTransportSurface); + reduce_sandbox = true; + } + if (gpu_driver_bugs_.find(gpu::DISABLE_D3D11) != gpu_driver_bugs_.end()) + command_line->AppendSwitch(switches::kDisableD3D11); + if (use_swiftshader_) { + command_line->AppendSwitchASCII(switches::kUseGL, "swiftshader"); + if (swiftshader_path.empty()) + swiftshader_path = swiftshader_path_; + } else if ((IsFeatureBlacklisted(gpu::GPU_FEATURE_TYPE_WEBGL) || + IsFeatureBlacklisted( + gpu::GPU_FEATURE_TYPE_ACCELERATED_COMPOSITING) || + IsFeatureBlacklisted( + gpu::GPU_FEATURE_TYPE_ACCELERATED_2D_CANVAS)) && + (use_gl == "any")) { + command_line->AppendSwitchASCII( + switches::kUseGL, gfx::kGLImplementationOSMesaName); + } else if (!use_gl.empty()) { + command_line->AppendSwitchASCII(switches::kUseGL, use_gl); + } + if (ui::GpuSwitchingManager::GetInstance()->SupportsDualGpus()) { + command_line->AppendSwitchASCII(switches::kSupportsDualGpus, "true"); + switch (gpu_switching_) { + case gpu::GPU_SWITCHING_OPTION_FORCE_DISCRETE: + command_line->AppendSwitchASCII(switches::kGpuSwitching, + switches::kGpuSwitchingOptionNameForceDiscrete); + break; + case gpu::GPU_SWITCHING_OPTION_FORCE_INTEGRATED: + command_line->AppendSwitchASCII(switches::kGpuSwitching, + switches::kGpuSwitchingOptionNameForceIntegrated); + break; + case 
gpu::GPU_SWITCHING_OPTION_AUTOMATIC: + case gpu::GPU_SWITCHING_OPTION_UNKNOWN: + break; + } + } else { + command_line->AppendSwitchASCII(switches::kSupportsDualGpus, "false"); + } + + if (!swiftshader_path.empty()) { + command_line->AppendSwitchPath(switches::kSwiftShaderPath, + swiftshader_path); + } + + if (!gpu_driver_bugs_.empty()) { + command_line->AppendSwitchASCII(switches::kGpuDriverBugWorkarounds, + IntSetToString(gpu_driver_bugs_)); + } + + if (IsFeatureBlacklisted(gpu::GPU_FEATURE_TYPE_ACCELERATED_VIDEO_DECODE) && + !command_line->HasSwitch(switches::kDisableAcceleratedVideoDecode)) { + command_line->AppendSwitch(switches::kDisableAcceleratedVideoDecode); + } + +#if defined(OS_WIN) + // DisplayLink 7.1 and earlier can cause the GPU process to crash on startup. + // http://crbug.com/177611 + // Thinkpad USB Port Replicator driver causes GPU process to crash when the + // sandbox is enabled. http://crbug.com/181665. + if ((gpu_info_.display_link_version.IsValid() + && gpu_info_.display_link_version.IsOlderThan("7.2")) || + gpu_info_.lenovo_dcute) { + reduce_sandbox = true; + } +#endif + + if (gpu_info_.optimus) + reduce_sandbox = true; + + if (reduce_sandbox) + command_line->AppendSwitch(switches::kReduceGpuSandbox); + + // Pass GPU and driver information to GPU process. We try to avoid full GPU + // info collection at GPU process startup, but we need gpu vendor_id, + // device_id, driver_vendor, driver_version for deciding whether we need to + // collect full info (on Linux) and for crash reporting purpose. 
+ command_line->AppendSwitchASCII(switches::kGpuVendorID, + base::StringPrintf("0x%04x", gpu_info_.gpu.vendor_id)); + command_line->AppendSwitchASCII(switches::kGpuDeviceID, + base::StringPrintf("0x%04x", gpu_info_.gpu.device_id)); + command_line->AppendSwitchASCII(switches::kGpuDriverVendor, + gpu_info_.driver_vendor); + command_line->AppendSwitchASCII(switches::kGpuDriverVersion, + gpu_info_.driver_version); +} + +void GpuDataManagerImplPrivate::AppendPluginCommandLine( + CommandLine* command_line) const { + DCHECK(command_line); + +#if defined(OS_MACOSX) + // TODO(jbauman): Add proper blacklist support for core animation plugins so + // special-casing this video card won't be necessary. See + // http://crbug.com/134015 + if (IsFeatureBlacklisted(gpu::GPU_FEATURE_TYPE_ACCELERATED_COMPOSITING) || + CommandLine::ForCurrentProcess()->HasSwitch( + switches::kDisableAcceleratedCompositing)) { + if (!command_line->HasSwitch( + switches::kDisableCoreAnimationPlugins)) + command_line->AppendSwitch( + switches::kDisableCoreAnimationPlugins); + } +#endif +} + +void GpuDataManagerImplPrivate::UpdateRendererWebPrefs( + WebPreferences* prefs) const { + DCHECK(prefs); + + if (IsFeatureBlacklisted(gpu::GPU_FEATURE_TYPE_ACCELERATED_COMPOSITING)) + prefs->accelerated_compositing_enabled = false; + if (IsFeatureBlacklisted(gpu::GPU_FEATURE_TYPE_WEBGL)) + prefs->experimental_webgl_enabled = false; + if (IsFeatureBlacklisted(gpu::GPU_FEATURE_TYPE_FLASH3D)) + prefs->flash_3d_enabled = false; + if (IsFeatureBlacklisted(gpu::GPU_FEATURE_TYPE_FLASH_STAGE3D)) { + prefs->flash_stage3d_enabled = false; + prefs->flash_stage3d_baseline_enabled = false; + } + if (IsFeatureBlacklisted(gpu::GPU_FEATURE_TYPE_FLASH_STAGE3D_BASELINE)) + prefs->flash_stage3d_baseline_enabled = false; + if (IsFeatureBlacklisted(gpu::GPU_FEATURE_TYPE_ACCELERATED_2D_CANVAS)) + prefs->accelerated_2d_canvas_enabled = false; + if (IsFeatureBlacklisted(gpu::GPU_FEATURE_TYPE_MULTISAMPLING) + || display_count_ > 1) + 
prefs->gl_multisampling_enabled = false; + if (IsFeatureBlacklisted(gpu::GPU_FEATURE_TYPE_3D_CSS)) { + prefs->accelerated_compositing_for_3d_transforms_enabled = false; + prefs->accelerated_compositing_for_animation_enabled = false; + } + if (IsFeatureBlacklisted(gpu::GPU_FEATURE_TYPE_ACCELERATED_VIDEO)) + prefs->accelerated_compositing_for_video_enabled = false; + + // Accelerated video and animation are slower than regular when using + // SwiftShader. 3D CSS may also be too slow to be worthwhile. + if (ShouldUseSwiftShader()) { + prefs->accelerated_compositing_for_video_enabled = false; + prefs->accelerated_compositing_for_animation_enabled = false; + prefs->accelerated_compositing_for_3d_transforms_enabled = false; + prefs->accelerated_compositing_for_plugins_enabled = false; + } + + if (use_software_compositor_) { + prefs->force_compositing_mode = true; + prefs->accelerated_compositing_enabled = true; + prefs->accelerated_compositing_for_3d_transforms_enabled = true; + prefs->accelerated_compositing_for_plugins_enabled = true; + } + +#if defined(USE_AURA) + if (!CanUseGpuBrowserCompositor()) + prefs->accelerated_2d_canvas_enabled = false; +#endif +} + +gpu::GpuSwitchingOption +GpuDataManagerImplPrivate::GetGpuSwitchingOption() const { + if (!ui::GpuSwitchingManager::GetInstance()->SupportsDualGpus()) + return gpu::GPU_SWITCHING_OPTION_UNKNOWN; + return gpu_switching_; +} + +void GpuDataManagerImplPrivate::DisableHardwareAcceleration() { + card_blacklisted_ = true; + + for (int i = 0; i < gpu::NUMBER_OF_GPU_FEATURE_TYPES; ++i) + blacklisted_features_.insert(i); + + EnableSwiftShaderIfNecessary(); + NotifyGpuInfoUpdate(); +} + +std::string GpuDataManagerImplPrivate::GetBlacklistVersion() const { + if (gpu_blacklist_) + return gpu_blacklist_->version(); + return "0"; +} + +std::string GpuDataManagerImplPrivate::GetDriverBugListVersion() const { + if (gpu_driver_bug_list_) + return gpu_driver_bug_list_->version(); + return "0"; +} + +void 
GpuDataManagerImplPrivate::GetBlacklistReasons( + base::ListValue* reasons) const { + if (gpu_blacklist_) + gpu_blacklist_->GetReasons(reasons); +} + +void GpuDataManagerImplPrivate::GetDriverBugWorkarounds( + base::ListValue* workarounds) const { + for (std::set<int>::const_iterator it = gpu_driver_bugs_.begin(); + it != gpu_driver_bugs_.end(); ++it) { + workarounds->AppendString( + gpu::GpuDriverBugWorkaroundTypeToString( + static_cast<gpu::GpuDriverBugWorkaroundType>(*it))); + } +} + +void GpuDataManagerImplPrivate::AddLogMessage( + int level, const std::string& header, const std::string& message) { + base::DictionaryValue* dict = new base::DictionaryValue(); + dict->SetInteger("level", level); + dict->SetString("header", header); + dict->SetString("message", message); + log_messages_.Append(dict); +} + +void GpuDataManagerImplPrivate::ProcessCrashed( + base::TerminationStatus exit_code) { + if (!BrowserThread::CurrentlyOn(BrowserThread::UI)) { + // Unretained is ok, because it's posted to UI thread, the thread + // where the singleton GpuDataManagerImpl lives until the end. 
+ BrowserThread::PostTask( + BrowserThread::UI, + FROM_HERE, + base::Bind(&GpuDataManagerImpl::ProcessCrashed, + base::Unretained(owner_), + exit_code)); + return; + } + { + GpuDataManagerImpl::UnlockedSession session(owner_); + observer_list_->Notify( + &GpuDataManagerObserver::OnGpuProcessCrashed, exit_code); + } +} + +base::ListValue* GpuDataManagerImplPrivate::GetLogMessages() const { + base::ListValue* value; + value = log_messages_.DeepCopy(); + return value; +} + +void GpuDataManagerImplPrivate::HandleGpuSwitch() { + GpuDataManagerImpl::UnlockedSession session(owner_); + observer_list_->Notify(&GpuDataManagerObserver::OnGpuSwitching); +} + +#if defined(OS_WIN) +bool GpuDataManagerImplPrivate::IsUsingAcceleratedSurface() const { + if (base::win::GetVersion() < base::win::VERSION_VISTA) + return false; + + if (use_swiftshader_) + return false; + CommandLine* command_line = CommandLine::ForCurrentProcess(); + if (command_line->HasSwitch(switches::kDisableImageTransportSurface)) + return false; + return !IsFeatureBlacklisted(gpu::GPU_FEATURE_TYPE_TEXTURE_SHARING); +} +#endif + +bool GpuDataManagerImplPrivate::CanUseGpuBrowserCompositor() const { + return !ShouldUseSwiftShader() && + !IsFeatureBlacklisted(gpu::GPU_FEATURE_TYPE_ACCELERATED_COMPOSITING) && + !IsFeatureBlacklisted(gpu::GPU_FEATURE_TYPE_FORCE_COMPOSITING_MODE); +} + +void GpuDataManagerImplPrivate::BlockDomainFrom3DAPIs( + const GURL& url, GpuDataManagerImpl::DomainGuilt guilt) { + BlockDomainFrom3DAPIsAtTime(url, guilt, base::Time::Now()); +} + +bool GpuDataManagerImplPrivate::Are3DAPIsBlocked(const GURL& url, + int render_process_id, + int render_view_id, + ThreeDAPIType requester) { + bool blocked = Are3DAPIsBlockedAtTime(url, base::Time::Now()) != + GpuDataManagerImpl::DOMAIN_BLOCK_STATUS_NOT_BLOCKED; + if (blocked) { + // Unretained is ok, because it's posted to UI thread, the thread + // where the singleton GpuDataManagerImpl lives until the end. 
+ BrowserThread::PostTask( + BrowserThread::UI, FROM_HERE, + base::Bind(&GpuDataManagerImpl::Notify3DAPIBlocked, + base::Unretained(owner_), url, render_process_id, + render_view_id, requester)); + } + + return blocked; +} + +void GpuDataManagerImplPrivate::DisableDomainBlockingFor3DAPIsForTesting() { + domain_blocking_enabled_ = false; +} + +// static +GpuDataManagerImplPrivate* GpuDataManagerImplPrivate::Create( + GpuDataManagerImpl* owner) { + return new GpuDataManagerImplPrivate(owner); +} + +GpuDataManagerImplPrivate::GpuDataManagerImplPrivate( + GpuDataManagerImpl* owner) + : complete_gpu_info_already_requested_(false), + gpu_switching_(gpu::GPU_SWITCHING_OPTION_AUTOMATIC), + observer_list_(new GpuDataManagerObserverList), + use_swiftshader_(false), + card_blacklisted_(false), + update_histograms_(true), + window_count_(0), + domain_blocking_enabled_(true), + owner_(owner), + display_count_(0), + gpu_process_accessible_(true), + use_software_compositor_(false) { + DCHECK(owner_); + CommandLine* command_line = CommandLine::ForCurrentProcess(); + if (command_line->HasSwitch(switches::kDisableAcceleratedCompositing)) { + command_line->AppendSwitch(switches::kDisableAccelerated2dCanvas); + command_line->AppendSwitch(switches::kDisableAcceleratedLayers); + } + if (command_line->HasSwitch(switches::kDisableGpu)) + DisableHardwareAcceleration(); + if (command_line->HasSwitch(switches::kEnableSoftwareCompositing)) + use_software_compositor_ = true; + //TODO(jbauman): enable for Chrome OS and Linux +#if defined(USE_AURA) && defined(OS_WIN) + use_software_compositor_ = true; +#endif + if (command_line->HasSwitch(switches::kGpuSwitching)) { + std::string option_string = command_line->GetSwitchValueASCII( + switches::kGpuSwitching); + gpu::GpuSwitchingOption option = + gpu::StringToGpuSwitchingOption(option_string); + if (option != gpu::GPU_SWITCHING_OPTION_UNKNOWN) + gpu_switching_ = option; + } + +#if defined(OS_MACOSX) + CGGetActiveDisplayList (0, NULL, 
&display_count_); + CGDisplayRegisterReconfigurationCallback(DisplayReconfigCallback, owner_); +#endif // OS_MACOSX +} + +GpuDataManagerImplPrivate::~GpuDataManagerImplPrivate() { +#if defined(OS_MACOSX) + CGDisplayRemoveReconfigurationCallback(DisplayReconfigCallback, owner_); +#endif +} + +void GpuDataManagerImplPrivate::InitializeImpl( + const std::string& gpu_blacklist_json, + const std::string& gpu_switching_list_json, + const std::string& gpu_driver_bug_list_json, + const gpu::GPUInfo& gpu_info) { + std::string browser_version_string = ProcessVersionString( + GetContentClient()->GetProduct()); + CHECK(!browser_version_string.empty()); + + if (!gpu_blacklist_json.empty()) { + gpu_blacklist_.reset(gpu::GpuBlacklist::Create()); + gpu_blacklist_->LoadList( + browser_version_string, gpu_blacklist_json, + gpu::GpuControlList::kCurrentOsOnly); + } + if (!gpu_switching_list_json.empty()) { + gpu_switching_list_.reset(gpu::GpuSwitchingList::Create()); + gpu_switching_list_->LoadList( + browser_version_string, gpu_switching_list_json, + gpu::GpuControlList::kCurrentOsOnly); + } + if (!gpu_driver_bug_list_json.empty()) { + gpu_driver_bug_list_.reset(gpu::GpuDriverBugList::Create()); + gpu_driver_bug_list_->LoadList( + browser_version_string, gpu_driver_bug_list_json, + gpu::GpuControlList::kCurrentOsOnly); + } + + gpu_info_ = gpu_info; + UpdateGpuInfo(gpu_info); + UpdateGpuSwitchingManager(gpu_info); + UpdatePreliminaryBlacklistedFeatures(); + + CommandLine* command_line = CommandLine::ForCurrentProcess(); + // We pass down the list to GPU command buffer through commandline + // switches at GPU process launch. However, in situations where we don't + // have a GPU process, we append the browser process commandline. 
+ if (command_line->HasSwitch(switches::kSingleProcess) || + command_line->HasSwitch(switches::kInProcessGPU)) { + if (!gpu_driver_bugs_.empty()) { + command_line->AppendSwitchASCII(switches::kGpuDriverBugWorkarounds, + IntSetToString(gpu_driver_bugs_)); + } + } +#if defined(OS_ANDROID) + ApplyAndroidWorkarounds(gpu_info, command_line); +#endif // OS_ANDROID +} + +void GpuDataManagerImplPrivate::UpdateBlacklistedFeatures( + const std::set<int>& features) { + CommandLine* command_line = CommandLine::ForCurrentProcess(); + blacklisted_features_ = features; + + // Force disable using the GPU for these features, even if they would + // otherwise be allowed. + if (card_blacklisted_ || + command_line->HasSwitch(switches::kBlacklistAcceleratedCompositing)) { + blacklisted_features_.insert( + gpu::GPU_FEATURE_TYPE_ACCELERATED_COMPOSITING); + } + if (card_blacklisted_ || + command_line->HasSwitch(switches::kBlacklistWebGL)) { + blacklisted_features_.insert(gpu::GPU_FEATURE_TYPE_WEBGL); + } + + EnableSwiftShaderIfNecessary(); +} + +void GpuDataManagerImplPrivate::UpdatePreliminaryBlacklistedFeatures() { + preliminary_blacklisted_features_ = blacklisted_features_; +} + +void GpuDataManagerImplPrivate::UpdateGpuSwitchingManager( + const gpu::GPUInfo& gpu_info) { + ui::GpuSwitchingManager::GetInstance()->SetGpuCount( + gpu_info.secondary_gpus.size() + 1); + + if (ui::GpuSwitchingManager::GetInstance()->SupportsDualGpus()) { + switch (gpu_switching_) { + case gpu::GPU_SWITCHING_OPTION_FORCE_DISCRETE: + ui::GpuSwitchingManager::GetInstance()->ForceUseOfDiscreteGpu(); + break; + case gpu::GPU_SWITCHING_OPTION_FORCE_INTEGRATED: + ui::GpuSwitchingManager::GetInstance()->ForceUseOfIntegratedGpu(); + break; + case gpu::GPU_SWITCHING_OPTION_AUTOMATIC: + case gpu::GPU_SWITCHING_OPTION_UNKNOWN: + break; + } + } +} + +void GpuDataManagerImplPrivate::NotifyGpuInfoUpdate() { + observer_list_->Notify(&GpuDataManagerObserver::OnGpuInfoUpdate); +} + +void 
GpuDataManagerImplPrivate::EnableSwiftShaderIfNecessary() { + if (!GpuAccessAllowed(NULL) || + blacklisted_features_.count(gpu::GPU_FEATURE_TYPE_WEBGL)) { + if (!swiftshader_path_.empty() && + !CommandLine::ForCurrentProcess()->HasSwitch( + switches::kDisableSoftwareRasterizer)) + use_swiftshader_ = true; + } +} + +std::string GpuDataManagerImplPrivate::GetDomainFromURL( + const GURL& url) const { + // For the moment, we just use the host, or its IP address, as the + // entry in the set, rather than trying to figure out the top-level + // domain. This does mean that a.foo.com and b.foo.com will be + // treated independently in the blocking of a given domain, but it + // would require a third-party library to reliably figure out the + // top-level domain from a URL. + if (!url.has_host()) { + return std::string(); + } + + return url.host(); +} + +void GpuDataManagerImplPrivate::BlockDomainFrom3DAPIsAtTime( + const GURL& url, + GpuDataManagerImpl::DomainGuilt guilt, + base::Time at_time) { + if (!domain_blocking_enabled_) + return; + + std::string domain = GetDomainFromURL(url); + + DomainBlockEntry& entry = blocked_domains_[domain]; + entry.last_guilt = guilt; + timestamps_of_gpu_resets_.push_back(at_time); +} + +GpuDataManagerImpl::DomainBlockStatus +GpuDataManagerImplPrivate::Are3DAPIsBlockedAtTime( + const GURL& url, base::Time at_time) const { + if (!domain_blocking_enabled_) + return GpuDataManagerImpl::DOMAIN_BLOCK_STATUS_NOT_BLOCKED; + + // Note: adjusting the policies in this code will almost certainly + // require adjusting the associated unit tests. + std::string domain = GetDomainFromURL(url); + + DomainBlockMap::const_iterator iter = blocked_domains_.find(domain); + if (iter != blocked_domains_.end()) { + // Err on the side of caution, and assume that if a particular + // domain shows up in the block map, it's there for a good + // reason and don't let its presence there automatically expire. 
+ + UMA_HISTOGRAM_ENUMERATION("GPU.BlockStatusForClient3DAPIs", + BLOCK_STATUS_SPECIFIC_DOMAIN_BLOCKED, + BLOCK_STATUS_MAX); + + return GpuDataManagerImpl::DOMAIN_BLOCK_STATUS_BLOCKED; + } + + // Look at the timestamps of the recent GPU resets to see if there are + // enough within the threshold which would cause us to blacklist all + // domains. This doesn't need to be overly precise -- if time goes + // backward due to a system clock adjustment, that's fine. + // + // TODO(kbr): make this pay attention to the TDR thresholds in the + // Windows registry, but make sure it continues to be testable. + { + std::list<base::Time>::iterator iter = timestamps_of_gpu_resets_.begin(); + int num_resets_within_timeframe = 0; + while (iter != timestamps_of_gpu_resets_.end()) { + base::Time time = *iter; + base::TimeDelta delta_t = at_time - time; + + // If this entry has "expired", just remove it. + if (delta_t.InMilliseconds() > kBlockAllDomainsMs) { + iter = timestamps_of_gpu_resets_.erase(iter); + continue; + } + + ++num_resets_within_timeframe; + ++iter; + } + + if (num_resets_within_timeframe >= kNumResetsWithinDuration) { + UMA_HISTOGRAM_ENUMERATION("GPU.BlockStatusForClient3DAPIs", + BLOCK_STATUS_ALL_DOMAINS_BLOCKED, + BLOCK_STATUS_MAX); + + return GpuDataManagerImpl::DOMAIN_BLOCK_STATUS_ALL_DOMAINS_BLOCKED; + } + } + + UMA_HISTOGRAM_ENUMERATION("GPU.BlockStatusForClient3DAPIs", + BLOCK_STATUS_NOT_BLOCKED, + BLOCK_STATUS_MAX); + + return GpuDataManagerImpl::DOMAIN_BLOCK_STATUS_NOT_BLOCKED; +} + +int64 GpuDataManagerImplPrivate::GetBlockAllDomainsDurationInMs() const { + return kBlockAllDomainsMs; +} + +void GpuDataManagerImplPrivate::Notify3DAPIBlocked(const GURL& url, + int render_process_id, + int render_view_id, + ThreeDAPIType requester) { + GpuDataManagerImpl::UnlockedSession session(owner_); + observer_list_->Notify(&GpuDataManagerObserver::DidBlock3DAPIs, + url, render_process_id, render_view_id, requester); +} + +void 
GpuDataManagerImplPrivate::OnGpuProcessInitFailure() { + gpu_process_accessible_ = false; + gpu_info_.finalized = true; + complete_gpu_info_already_requested_ = true; + // Some observers might be waiting. + NotifyGpuInfoUpdate(); +} + +} // namespace content + diff --git a/chromium/content/browser/gpu/gpu_data_manager_impl_private.h b/chromium/content/browser/gpu/gpu_data_manager_impl_private.h new file mode 100644 index 00000000000..eb226e863dc --- /dev/null +++ b/chromium/content/browser/gpu/gpu_data_manager_impl_private.h @@ -0,0 +1,255 @@ +// Copyright (c) 2013 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef CONTENT_BROWSER_GPU_GPU_DATA_MANAGER_IMPL_PRIVATE_H_ +#define CONTENT_BROWSER_GPU_GPU_DATA_MANAGER_IMPL_PRIVATE_H_ + +#include <list> +#include <map> +#include <set> +#include <string> + +#include "base/memory/ref_counted.h" +#include "base/memory/singleton.h" +#include "base/observer_list_threadsafe.h" +#include "content/browser/gpu/gpu_data_manager_impl.h" +#include "gpu/config/gpu_blacklist.h" +#include "gpu/config/gpu_driver_bug_list.h" +#include "gpu/config/gpu_switching_list.h" + +namespace content { + +class CONTENT_EXPORT GpuDataManagerImplPrivate { + public: + static GpuDataManagerImplPrivate* Create(GpuDataManagerImpl* owner); + + void InitializeForTesting( + const std::string& gpu_blacklist_json, + const gpu::GPUInfo& gpu_info); + bool IsFeatureBlacklisted(int feature) const; + gpu::GPUInfo GetGPUInfo() const; + void GetGpuProcessHandles( + const GpuDataManager::GetGpuProcessHandlesCallback& callback) const; + bool GpuAccessAllowed(std::string* reason) const; + void RequestCompleteGpuInfoIfNeeded(); + bool IsCompleteGpuInfoAvailable() const; + void RequestVideoMemoryUsageStatsUpdate() const; + bool ShouldUseSwiftShader() const; + void RegisterSwiftShaderPath(const base::FilePath& path); + void AddObserver(GpuDataManagerObserver* 
observer); + void RemoveObserver(GpuDataManagerObserver* observer); + void UnblockDomainFrom3DAPIs(const GURL& url); + void DisableGpuWatchdog(); + void SetGLStrings(const std::string& gl_vendor, + const std::string& gl_renderer, + const std::string& gl_version); + void GetGLStrings(std::string* gl_vendor, + std::string* gl_renderer, + std::string* gl_version); + void DisableHardwareAcceleration(); + + void Initialize(); + + void UpdateGpuInfo(const gpu::GPUInfo& gpu_info); + + void UpdateVideoMemoryUsageStats( + const GPUVideoMemoryUsageStats& video_memory_usage_stats); + + void AppendRendererCommandLine(CommandLine* command_line) const; + + void AppendGpuCommandLine(CommandLine* command_line) const; + + void AppendPluginCommandLine(CommandLine* command_line) const; + + void UpdateRendererWebPrefs(WebPreferences* prefs) const; + + gpu::GpuSwitchingOption GetGpuSwitchingOption() const; + + std::string GetBlacklistVersion() const; + std::string GetDriverBugListVersion() const; + + void GetBlacklistReasons(base::ListValue* reasons) const; + + void GetDriverBugWorkarounds(base::ListValue* workarounds) const; + + void AddLogMessage(int level, + const std::string& header, + const std::string& message); + + void ProcessCrashed(base::TerminationStatus exit_code); + + base::ListValue* GetLogMessages() const; + + void HandleGpuSwitch(); + +#if defined(OS_WIN) + // Is the GPU process using the accelerated surface to present, instead of + // presenting by itself. 
+ bool IsUsingAcceleratedSurface() const; +#endif + + bool CanUseGpuBrowserCompositor() const; + + void BlockDomainFrom3DAPIs( + const GURL& url, GpuDataManagerImpl::DomainGuilt guilt); + bool Are3DAPIsBlocked(const GURL& url, + int render_process_id, + int render_view_id, + ThreeDAPIType requester); + + void DisableDomainBlockingFor3DAPIsForTesting(); + + void Notify3DAPIBlocked(const GURL& url, + int render_process_id, + int render_view_id, + ThreeDAPIType requester); + + size_t GetBlacklistedFeatureCount() const; + + void SetDisplayCount(unsigned int display_count); + unsigned int GetDisplayCount() const; + + void OnGpuProcessInitFailure(); + + virtual ~GpuDataManagerImplPrivate(); + + private: + friend class GpuDataManagerImplPrivateTest; + + FRIEND_TEST_ALL_PREFIXES(GpuDataManagerImplPrivateTest, + GpuSideBlacklisting); + FRIEND_TEST_ALL_PREFIXES(GpuDataManagerImplPrivateTest, + GpuSideExceptions); + FRIEND_TEST_ALL_PREFIXES(GpuDataManagerImplPrivateTest, + DisableHardwareAcceleration); + FRIEND_TEST_ALL_PREFIXES(GpuDataManagerImplPrivateTest, + SwiftShaderRendering); + FRIEND_TEST_ALL_PREFIXES(GpuDataManagerImplPrivateTest, + SwiftShaderRendering2); + FRIEND_TEST_ALL_PREFIXES(GpuDataManagerImplPrivateTest, + GpuInfoUpdate); + FRIEND_TEST_ALL_PREFIXES(GpuDataManagerImplPrivateTest, + NoGpuInfoUpdateWithSwiftShader); + FRIEND_TEST_ALL_PREFIXES(GpuDataManagerImplPrivateTest, + GPUVideoMemoryUsageStatsUpdate); + FRIEND_TEST_ALL_PREFIXES(GpuDataManagerImplPrivateTest, + BlockAllDomainsFrom3DAPIs); + FRIEND_TEST_ALL_PREFIXES(GpuDataManagerImplPrivateTest, + UnblockGuiltyDomainFrom3DAPIs); + FRIEND_TEST_ALL_PREFIXES(GpuDataManagerImplPrivateTest, + UnblockDomainOfUnknownGuiltFrom3DAPIs); + FRIEND_TEST_ALL_PREFIXES(GpuDataManagerImplPrivateTest, + UnblockOtherDomainFrom3DAPIs); + FRIEND_TEST_ALL_PREFIXES(GpuDataManagerImplPrivateTest, + UnblockThisDomainFrom3DAPIs); +#if defined(OS_LINUX) + FRIEND_TEST_ALL_PREFIXES(GpuDataManagerImplPrivateTest, + SetGLStrings); + 
FRIEND_TEST_ALL_PREFIXES(GpuDataManagerImplPrivateTest, + SetGLStringsNoEffects); +#endif + FRIEND_TEST_ALL_PREFIXES(GpuDataManagerImplPrivateTest, + GpuDriverBugListSingle); + FRIEND_TEST_ALL_PREFIXES(GpuDataManagerImplPrivateTest, + GpuDriverBugListMultiple); + FRIEND_TEST_ALL_PREFIXES(GpuDataManagerImplPrivateTest, + BlacklistAllFeatures); + + struct DomainBlockEntry { + GpuDataManagerImpl::DomainGuilt last_guilt; + }; + + typedef std::map<std::string, DomainBlockEntry> DomainBlockMap; + + typedef ObserverListThreadSafe<GpuDataManagerObserver> + GpuDataManagerObserverList; + + explicit GpuDataManagerImplPrivate(GpuDataManagerImpl* owner); + + void InitializeImpl(const std::string& gpu_blacklist_json, + const std::string& gpu_switching_list_json, + const std::string& gpu_driver_bug_list_json, + const gpu::GPUInfo& gpu_info); + + void UpdateBlacklistedFeatures(const std::set<int>& features); + + // This should only be called once at initialization time, when preliminary + // gpu info is collected. + void UpdatePreliminaryBlacklistedFeatures(); + + // Update the GPU switching status. + // This should only be called once at initialization time. + void UpdateGpuSwitchingManager(const gpu::GPUInfo& gpu_info); + + // Notify all observers whenever there is a GPU info update. + void NotifyGpuInfoUpdate(); + + // Try to switch to SwiftShader rendering, if possible and necessary. + void EnableSwiftShaderIfNecessary(); + + // Helper to extract the domain from a given URL. + std::string GetDomainFromURL(const GURL& url) const; + + // Implementation functions for blocking of 3D graphics APIs, used + // for unit testing. 
+ void BlockDomainFrom3DAPIsAtTime(const GURL& url, + GpuDataManagerImpl::DomainGuilt guilt, + base::Time at_time); + GpuDataManagerImpl::DomainBlockStatus Are3DAPIsBlockedAtTime( + const GURL& url, base::Time at_time) const; + int64 GetBlockAllDomainsDurationInMs() const; + + bool complete_gpu_info_already_requested_; + + std::set<int> blacklisted_features_; + std::set<int> preliminary_blacklisted_features_; + + gpu::GpuSwitchingOption gpu_switching_; + + std::set<int> gpu_driver_bugs_; + + gpu::GPUInfo gpu_info_; + + scoped_ptr<gpu::GpuBlacklist> gpu_blacklist_; + scoped_ptr<gpu::GpuSwitchingList> gpu_switching_list_; + scoped_ptr<gpu::GpuDriverBugList> gpu_driver_bug_list_; + + const scoped_refptr<GpuDataManagerObserverList> observer_list_; + + base::ListValue log_messages_; + + bool use_swiftshader_; + + base::FilePath swiftshader_path_; + + // Current card force-blacklisted due to GPU crashes, or disabled through + // the --disable-gpu commandline switch. + bool card_blacklisted_; + + // We disable histogram stuff in testing, especially in unit tests because + // they cause random failures. + bool update_histograms_; + + // Number of currently open windows, to be used in gpu memory allocation. 
+ int window_count_; + + DomainBlockMap blocked_domains_; + mutable std::list<base::Time> timestamps_of_gpu_resets_; + bool domain_blocking_enabled_; + + GpuDataManagerImpl* owner_; + + unsigned int display_count_; + + bool gpu_process_accessible_; + + bool use_software_compositor_; + + DISALLOW_COPY_AND_ASSIGN(GpuDataManagerImplPrivate); +}; + +} // namespace content + +#endif // CONTENT_BROWSER_GPU_GPU_DATA_MANAGER_IMPL_PRIVATE_H_ + diff --git a/chromium/content/browser/gpu/gpu_data_manager_impl_private_unittest.cc b/chromium/content/browser/gpu/gpu_data_manager_impl_private_unittest.cc new file mode 100644 index 00000000000..2d70b734c4e --- /dev/null +++ b/chromium/content/browser/gpu/gpu_data_manager_impl_private_unittest.cc @@ -0,0 +1,662 @@ +// Copyright (c) 2013 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "base/command_line.h" +#include "base/message_loop/message_loop.h" +#include "base/run_loop.h" +#include "base/time/time.h" +#include "content/browser/gpu/gpu_data_manager_impl_private.h" +#include "content/public/browser/gpu_data_manager_observer.h" +#include "content/public/common/gpu_feature_type.h" +#include "content/public/common/gpu_info.h" +#include "gpu/command_buffer/service/gpu_switches.h" +#include "testing/gtest/include/gtest/gtest.h" +#include "url/gurl.h" + +#define LONG_STRING_CONST(...) 
#__VA_ARGS__ + +namespace content { +namespace { + +class TestObserver : public GpuDataManagerObserver { + public: + TestObserver() + : gpu_info_updated_(false), + video_memory_usage_stats_updated_(false) { + } + virtual ~TestObserver() { } + + bool gpu_info_updated() const { return gpu_info_updated_; } + bool video_memory_usage_stats_updated() const { + return video_memory_usage_stats_updated_; + } + + virtual void OnGpuInfoUpdate() OVERRIDE { + gpu_info_updated_ = true; + } + + virtual void OnVideoMemoryUsageStatsUpdate( + const GPUVideoMemoryUsageStats& stats) OVERRIDE { + video_memory_usage_stats_updated_ = true; + } + + private: + bool gpu_info_updated_; + bool video_memory_usage_stats_updated_; +}; + +static base::Time GetTimeForTesting() { + return base::Time::FromDoubleT(1000); +} + +static GURL GetDomain1ForTesting() { + return GURL("http://foo.com/"); +} + +static GURL GetDomain2ForTesting() { + return GURL("http://bar.com/"); +} + +} // namespace anonymous + +class GpuDataManagerImplPrivateTest : public testing::Test { + public: + GpuDataManagerImplPrivateTest() { } + + virtual ~GpuDataManagerImplPrivateTest() { } + + protected: + // scoped_ptr doesn't work with GpuDataManagerImpl because its + // destructor is private. GpuDataManagerImplPrivateTest is however a friend + // so we can make a little helper class here. + class ScopedGpuDataManagerImpl { + public: + ScopedGpuDataManagerImpl() : impl_(new GpuDataManagerImpl()) { + EXPECT_TRUE(impl_); + EXPECT_TRUE(impl_->private_.get()); + } + ~ScopedGpuDataManagerImpl() { delete impl_; } + + GpuDataManagerImpl* get() const { return impl_; } + + GpuDataManagerImpl* operator->() const { return impl_; } + + private: + GpuDataManagerImpl* impl_; + DISALLOW_COPY_AND_ASSIGN(ScopedGpuDataManagerImpl); + }; + + // We want to test the code path where GpuDataManagerImplPrivate is created + // in the GpuDataManagerImpl constructor. 
+ class ScopedGpuDataManagerImplPrivate { + public: + ScopedGpuDataManagerImplPrivate() : impl_(new GpuDataManagerImpl()) { + EXPECT_TRUE(impl_); + EXPECT_TRUE(impl_->private_.get()); + } + ~ScopedGpuDataManagerImplPrivate() { delete impl_; } + + GpuDataManagerImplPrivate* get() const { + return impl_->private_.get(); + } + + GpuDataManagerImplPrivate* operator->() const { + return impl_->private_.get(); + } + + private: + GpuDataManagerImpl* impl_; + DISALLOW_COPY_AND_ASSIGN(ScopedGpuDataManagerImplPrivate); + }; + + virtual void SetUp() { + } + + virtual void TearDown() { + } + + base::Time JustBeforeExpiration(const GpuDataManagerImplPrivate* manager); + base::Time JustAfterExpiration(const GpuDataManagerImplPrivate* manager); + void TestBlockingDomainFrom3DAPIs( + GpuDataManagerImpl::DomainGuilt guilt_level); + void TestUnblockingDomainFrom3DAPIs( + GpuDataManagerImpl::DomainGuilt guilt_level); + + base::MessageLoop message_loop_; +}; + +// We use new method instead of GetInstance() method because we want +// each test to be independent of each other. + +TEST_F(GpuDataManagerImplPrivateTest, GpuSideBlacklisting) { + // If a feature is allowed in preliminary step (browser side), but + // disabled when GPU process launches and collects full GPU info, + // it's too late to let renderer know, so we basically block all GPU + // access, to be on the safe side. 
+ ScopedGpuDataManagerImplPrivate manager; + EXPECT_EQ(0u, manager->GetBlacklistedFeatureCount()); + std::string reason; + EXPECT_TRUE(manager->GpuAccessAllowed(&reason)); + EXPECT_TRUE(reason.empty()); + + const std::string blacklist_json = LONG_STRING_CONST( + { + "name": "gpu blacklist", + "version": "0.1", + "entries": [ + { + "id": 1, + "features": [ + "webgl" + ] + }, + { + "id": 2, + "gl_renderer": { + "op": "contains", + "value": "GeForce" + }, + "features": [ + "accelerated_2d_canvas" + ] + } + ] + } + ); + + GPUInfo gpu_info; + gpu_info.gpu.vendor_id = 0x10de; + gpu_info.gpu.device_id = 0x0640; + manager->InitializeForTesting(blacklist_json, gpu_info); + + EXPECT_TRUE(manager->GpuAccessAllowed(&reason)); + EXPECT_TRUE(reason.empty()); + EXPECT_EQ(1u, manager->GetBlacklistedFeatureCount()); + EXPECT_TRUE(manager->IsFeatureBlacklisted(GPU_FEATURE_TYPE_WEBGL)); + + gpu_info.gl_vendor = "NVIDIA"; + gpu_info.gl_renderer = "NVIDIA GeForce GT 120"; + manager->UpdateGpuInfo(gpu_info); + EXPECT_FALSE(manager->GpuAccessAllowed(&reason)); + EXPECT_FALSE(reason.empty()); + EXPECT_EQ(2u, manager->GetBlacklistedFeatureCount()); + EXPECT_TRUE(manager->IsFeatureBlacklisted(GPU_FEATURE_TYPE_WEBGL)); + EXPECT_TRUE(manager->IsFeatureBlacklisted( + GPU_FEATURE_TYPE_ACCELERATED_2D_CANVAS)); +} + +TEST_F(GpuDataManagerImplPrivateTest, GpuSideExceptions) { + ScopedGpuDataManagerImplPrivate manager; + EXPECT_EQ(0u, manager->GetBlacklistedFeatureCount()); + EXPECT_TRUE(manager->GpuAccessAllowed(NULL)); + + const std::string blacklist_json = LONG_STRING_CONST( + { + "name": "gpu blacklist", + "version": "0.1", + "entries": [ + { + "id": 1, + "exceptions": [ + { + "gl_renderer": { + "op": "contains", + "value": "GeForce" + } + } + ], + "features": [ + "webgl" + ] + } + ] + } + ); + GPUInfo gpu_info; + gpu_info.gpu.vendor_id = 0x10de; + gpu_info.gpu.device_id = 0x0640; + manager->InitializeForTesting(blacklist_json, gpu_info); + + EXPECT_TRUE(manager->GpuAccessAllowed(NULL)); + 
EXPECT_EQ(0u, manager->GetBlacklistedFeatureCount()); + + // Now assume gpu process launches and full GPU info is collected. + gpu_info.gl_renderer = "NVIDIA GeForce GT 120"; + manager->UpdateGpuInfo(gpu_info); + EXPECT_TRUE(manager->GpuAccessAllowed(NULL)); + EXPECT_EQ(0u, manager->GetBlacklistedFeatureCount()); +} + +TEST_F(GpuDataManagerImplPrivateTest, DisableHardwareAcceleration) { + ScopedGpuDataManagerImplPrivate manager; + EXPECT_EQ(0u, manager->GetBlacklistedFeatureCount()); + std::string reason; + EXPECT_TRUE(manager->GpuAccessAllowed(&reason)); + EXPECT_TRUE(reason.empty()); + + manager->DisableHardwareAcceleration(); + EXPECT_FALSE(manager->GpuAccessAllowed(&reason)); + EXPECT_FALSE(reason.empty()); + EXPECT_EQ(static_cast<size_t>(NUMBER_OF_GPU_FEATURE_TYPES), + manager->GetBlacklistedFeatureCount()); +} + +TEST_F(GpuDataManagerImplPrivateTest, SwiftShaderRendering) { + // Blacklist, then register SwiftShader. + ScopedGpuDataManagerImplPrivate manager; + EXPECT_EQ(0u, manager->GetBlacklistedFeatureCount()); + EXPECT_TRUE(manager->GpuAccessAllowed(NULL)); + EXPECT_FALSE(manager->ShouldUseSwiftShader()); + + manager->DisableHardwareAcceleration(); + EXPECT_FALSE(manager->GpuAccessAllowed(NULL)); + EXPECT_FALSE(manager->ShouldUseSwiftShader()); + + // If SwiftShader is enabled, even if we blacklist GPU, + // GPU process is still allowed. + const base::FilePath test_path(FILE_PATH_LITERAL("AnyPath")); + manager->RegisterSwiftShaderPath(test_path); + EXPECT_TRUE(manager->ShouldUseSwiftShader()); + EXPECT_TRUE(manager->GpuAccessAllowed(NULL)); + EXPECT_EQ(1u, manager->GetBlacklistedFeatureCount()); + EXPECT_TRUE( + manager->IsFeatureBlacklisted(GPU_FEATURE_TYPE_ACCELERATED_2D_CANVAS)); +} + +TEST_F(GpuDataManagerImplPrivateTest, SwiftShaderRendering2) { + // Register SwiftShader, then blacklist. 
+ ScopedGpuDataManagerImplPrivate manager; + EXPECT_EQ(0u, manager->GetBlacklistedFeatureCount()); + EXPECT_TRUE(manager->GpuAccessAllowed(NULL)); + EXPECT_FALSE(manager->ShouldUseSwiftShader()); + + const base::FilePath test_path(FILE_PATH_LITERAL("AnyPath")); + manager->RegisterSwiftShaderPath(test_path); + EXPECT_EQ(0u, manager->GetBlacklistedFeatureCount()); + EXPECT_TRUE(manager->GpuAccessAllowed(NULL)); + EXPECT_FALSE(manager->ShouldUseSwiftShader()); + + manager->DisableHardwareAcceleration(); + EXPECT_TRUE(manager->GpuAccessAllowed(NULL)); + EXPECT_TRUE(manager->ShouldUseSwiftShader()); + EXPECT_EQ(1u, manager->GetBlacklistedFeatureCount()); + EXPECT_TRUE( + manager->IsFeatureBlacklisted(GPU_FEATURE_TYPE_ACCELERATED_2D_CANVAS)); +} + +TEST_F(GpuDataManagerImplPrivateTest, GpuInfoUpdate) { + ScopedGpuDataManagerImpl manager; + + TestObserver observer; + manager->AddObserver(&observer); + + { + base::RunLoop run_loop; + run_loop.RunUntilIdle(); + } + EXPECT_FALSE(observer.gpu_info_updated()); + + GPUInfo gpu_info; + manager->UpdateGpuInfo(gpu_info); + { + base::RunLoop run_loop; + run_loop.RunUntilIdle(); + } + EXPECT_TRUE(observer.gpu_info_updated()); +} + +TEST_F(GpuDataManagerImplPrivateTest, NoGpuInfoUpdateWithSwiftShader) { + ScopedGpuDataManagerImpl manager; + + manager->DisableHardwareAcceleration(); + const base::FilePath test_path(FILE_PATH_LITERAL("AnyPath")); + manager->RegisterSwiftShaderPath(test_path); + EXPECT_TRUE(manager->ShouldUseSwiftShader()); + EXPECT_TRUE(manager->GpuAccessAllowed(NULL)); + + { + base::RunLoop run_loop; + run_loop.RunUntilIdle(); + } + + TestObserver observer; + manager->AddObserver(&observer); + { + base::RunLoop run_loop; + run_loop.RunUntilIdle(); + } + EXPECT_FALSE(observer.gpu_info_updated()); + + GPUInfo gpu_info; + manager->UpdateGpuInfo(gpu_info); + { + base::RunLoop run_loop; + run_loop.RunUntilIdle(); + } + EXPECT_FALSE(observer.gpu_info_updated()); +} + +TEST_F(GpuDataManagerImplPrivateTest, 
GPUVideoMemoryUsageStatsUpdate) { + ScopedGpuDataManagerImpl manager; + + TestObserver observer; + manager->AddObserver(&observer); + + { + base::RunLoop run_loop; + run_loop.RunUntilIdle(); + } + EXPECT_FALSE(observer.video_memory_usage_stats_updated()); + + GPUVideoMemoryUsageStats vram_stats; + manager->UpdateVideoMemoryUsageStats(vram_stats); + { + base::RunLoop run_loop; + run_loop.RunUntilIdle(); + } + EXPECT_TRUE(observer.video_memory_usage_stats_updated()); +} + +base::Time GpuDataManagerImplPrivateTest::JustBeforeExpiration( + const GpuDataManagerImplPrivate* manager) { + return GetTimeForTesting() + base::TimeDelta::FromMilliseconds( + manager->GetBlockAllDomainsDurationInMs()) - + base::TimeDelta::FromMilliseconds(3); +} + +base::Time GpuDataManagerImplPrivateTest::JustAfterExpiration( + const GpuDataManagerImplPrivate* manager) { + return GetTimeForTesting() + base::TimeDelta::FromMilliseconds( + manager->GetBlockAllDomainsDurationInMs()) + + base::TimeDelta::FromMilliseconds(3); +} + +void GpuDataManagerImplPrivateTest::TestBlockingDomainFrom3DAPIs( + GpuDataManagerImpl::DomainGuilt guilt_level) { + ScopedGpuDataManagerImplPrivate manager; + + manager->BlockDomainFrom3DAPIsAtTime(GetDomain1ForTesting(), + guilt_level, + GetTimeForTesting()); + + // This domain should be blocked no matter what. 
+ EXPECT_EQ(GpuDataManagerImpl::DOMAIN_BLOCK_STATUS_BLOCKED, + manager->Are3DAPIsBlockedAtTime(GetDomain1ForTesting(), + GetTimeForTesting())); + EXPECT_EQ(GpuDataManagerImpl::DOMAIN_BLOCK_STATUS_BLOCKED, + manager->Are3DAPIsBlockedAtTime( + GetDomain1ForTesting(), JustBeforeExpiration(manager.get()))); + EXPECT_EQ(GpuDataManagerImpl::DOMAIN_BLOCK_STATUS_BLOCKED, + manager->Are3DAPIsBlockedAtTime( + GetDomain1ForTesting(), JustAfterExpiration(manager.get()))); +} + +void GpuDataManagerImplPrivateTest::TestUnblockingDomainFrom3DAPIs( + GpuDataManagerImpl::DomainGuilt guilt_level) { + ScopedGpuDataManagerImplPrivate manager; + + manager->BlockDomainFrom3DAPIsAtTime(GetDomain1ForTesting(), + guilt_level, + GetTimeForTesting()); + + // Unblocking the domain should work. + manager->UnblockDomainFrom3DAPIs(GetDomain1ForTesting()); + EXPECT_EQ(GpuDataManagerImpl::DOMAIN_BLOCK_STATUS_NOT_BLOCKED, + manager->Are3DAPIsBlockedAtTime(GetDomain1ForTesting(), + GetTimeForTesting())); + EXPECT_EQ(GpuDataManagerImpl::DOMAIN_BLOCK_STATUS_NOT_BLOCKED, + manager->Are3DAPIsBlockedAtTime( + GetDomain1ForTesting(), JustBeforeExpiration(manager.get()))); + EXPECT_EQ(GpuDataManagerImpl::DOMAIN_BLOCK_STATUS_NOT_BLOCKED, + manager->Are3DAPIsBlockedAtTime( + GetDomain1ForTesting(), JustAfterExpiration(manager.get()))); +} + +TEST_F(GpuDataManagerImplPrivateTest, BlockGuiltyDomainFrom3DAPIs) { + TestBlockingDomainFrom3DAPIs(GpuDataManagerImpl::DOMAIN_GUILT_KNOWN); +} + +TEST_F(GpuDataManagerImplPrivateTest, BlockDomainOfUnknownGuiltFrom3DAPIs) { + TestBlockingDomainFrom3DAPIs(GpuDataManagerImpl::DOMAIN_GUILT_UNKNOWN); +} + +TEST_F(GpuDataManagerImplPrivateTest, BlockAllDomainsFrom3DAPIs) { + ScopedGpuDataManagerImplPrivate manager; + + manager->BlockDomainFrom3DAPIsAtTime(GetDomain1ForTesting(), + GpuDataManagerImpl::DOMAIN_GUILT_UNKNOWN, + GetTimeForTesting()); + + // Blocking of other domains should expire. 
+ EXPECT_EQ(GpuDataManagerImpl::DOMAIN_BLOCK_STATUS_ALL_DOMAINS_BLOCKED, + manager->Are3DAPIsBlockedAtTime( + GetDomain2ForTesting(), JustBeforeExpiration(manager.get()))); + EXPECT_EQ(GpuDataManagerImpl::DOMAIN_BLOCK_STATUS_NOT_BLOCKED, + manager->Are3DAPIsBlockedAtTime( + GetDomain2ForTesting(), JustAfterExpiration(manager.get()))); +} + +TEST_F(GpuDataManagerImplPrivateTest, UnblockGuiltyDomainFrom3DAPIs) { + TestUnblockingDomainFrom3DAPIs(GpuDataManagerImpl::DOMAIN_GUILT_KNOWN); +} + +TEST_F(GpuDataManagerImplPrivateTest, UnblockDomainOfUnknownGuiltFrom3DAPIs) { + TestUnblockingDomainFrom3DAPIs(GpuDataManagerImpl::DOMAIN_GUILT_UNKNOWN); +} + +TEST_F(GpuDataManagerImplPrivateTest, UnblockOtherDomainFrom3DAPIs) { + ScopedGpuDataManagerImplPrivate manager; + + manager->BlockDomainFrom3DAPIsAtTime(GetDomain1ForTesting(), + GpuDataManagerImpl::DOMAIN_GUILT_UNKNOWN, + GetTimeForTesting()); + + manager->UnblockDomainFrom3DAPIs(GetDomain2ForTesting()); + + EXPECT_EQ(GpuDataManagerImpl::DOMAIN_BLOCK_STATUS_NOT_BLOCKED, + manager->Are3DAPIsBlockedAtTime( + GetDomain2ForTesting(), JustBeforeExpiration(manager.get()))); + + // The original domain should still be blocked. + EXPECT_EQ(GpuDataManagerImpl::DOMAIN_BLOCK_STATUS_BLOCKED, + manager->Are3DAPIsBlockedAtTime( + GetDomain1ForTesting(), JustBeforeExpiration(manager.get()))); +} + +TEST_F(GpuDataManagerImplPrivateTest, UnblockThisDomainFrom3DAPIs) { + ScopedGpuDataManagerImplPrivate manager; + + manager->BlockDomainFrom3DAPIsAtTime(GetDomain1ForTesting(), + GpuDataManagerImpl::DOMAIN_GUILT_UNKNOWN, + GetTimeForTesting()); + + manager->UnblockDomainFrom3DAPIs(GetDomain1ForTesting()); + + // This behavior is debatable. Perhaps the GPU reset caused by + // domain 1 should still cause other domains to be blocked. 
+ EXPECT_EQ(GpuDataManagerImpl::DOMAIN_BLOCK_STATUS_NOT_BLOCKED, + manager->Are3DAPIsBlockedAtTime( + GetDomain2ForTesting(), JustBeforeExpiration(manager.get()))); +} + +#if defined(OS_LINUX) +TEST_F(GpuDataManagerImplPrivateTest, SetGLStrings) { + const char* kGLVendorMesa = "Tungsten Graphics, Inc"; + const char* kGLRendererMesa = "Mesa DRI Intel(R) G41"; + const char* kGLVersionMesa801 = "2.1 Mesa 8.0.1-DEVEL"; + + ScopedGpuDataManagerImplPrivate manager; + EXPECT_EQ(0u, manager->GetBlacklistedFeatureCount()); + EXPECT_TRUE(manager->GpuAccessAllowed(NULL)); + + const std::string blacklist_json = LONG_STRING_CONST( + { + "name": "gpu blacklist", + "version": "0.1", + "entries": [ + { + "id": 1, + "vendor_id": "0x8086", + "exceptions": [ + { + "device_id": ["0x0042"], + "driver_version": { + "op": ">=", + "number": "8.0.2" + } + } + ], + "features": [ + "webgl" + ] + } + ] + } + ); + GPUInfo gpu_info; + gpu_info.gpu.vendor_id = 0x8086; + gpu_info.gpu.device_id = 0x0042; + manager->InitializeForTesting(blacklist_json, gpu_info); + + // Not enough GPUInfo. + EXPECT_TRUE(manager->GpuAccessAllowed(NULL)); + EXPECT_EQ(0u, manager->GetBlacklistedFeatureCount()); + + // Now assume browser gets GL strings from local state. + // The entry applies, blacklist more features than from the preliminary step. + // However, GPU process is not blocked because this is all browser side and + // happens before renderer launching. 
+ manager->SetGLStrings(kGLVendorMesa, kGLRendererMesa, kGLVersionMesa801); + EXPECT_TRUE(manager->GpuAccessAllowed(NULL)); + EXPECT_EQ(1u, manager->GetBlacklistedFeatureCount()); + EXPECT_TRUE(manager->IsFeatureBlacklisted(GPU_FEATURE_TYPE_WEBGL)); +} + +TEST_F(GpuDataManagerImplPrivateTest, SetGLStringsNoEffects) { + const char* kGLVendorMesa = "Tungsten Graphics, Inc"; + const char* kGLRendererMesa = "Mesa DRI Intel(R) G41"; + const char* kGLVersionMesa801 = "2.1 Mesa 8.0.1-DEVEL"; + const char* kGLVersionMesa802 = "2.1 Mesa 8.0.2-DEVEL"; + + ScopedGpuDataManagerImplPrivate manager; + EXPECT_EQ(0u, manager->GetBlacklistedFeatureCount()); + EXPECT_TRUE(manager->GpuAccessAllowed(NULL)); + + const std::string blacklist_json = LONG_STRING_CONST( + { + "name": "gpu blacklist", + "version": "0.1", + "entries": [ + { + "id": 1, + "vendor_id": "0x8086", + "exceptions": [ + { + "device_id": ["0x0042"], + "driver_version": { + "op": ">=", + "number": "8.0.2" + } + } + ], + "features": [ + "webgl" + ] + } + ] + } + ); + GPUInfo gpu_info; + gpu_info.gpu.vendor_id = 0x8086; + gpu_info.gpu.device_id = 0x0042; + gpu_info.gl_vendor = kGLVendorMesa; + gpu_info.gl_renderer = kGLRendererMesa; + gpu_info.gl_version = kGLVersionMesa801; + gpu_info.driver_vendor = "Mesa"; + gpu_info.driver_version = "8.0.1"; + manager->InitializeForTesting(blacklist_json, gpu_info); + + // Full GPUInfo, the entry applies. + EXPECT_TRUE(manager->GpuAccessAllowed(NULL)); + EXPECT_EQ(1u, manager->GetBlacklistedFeatureCount()); + EXPECT_TRUE(manager->IsFeatureBlacklisted(GPU_FEATURE_TYPE_WEBGL)); + + // Now assume browser gets GL strings from local state. + // SetGLStrings() has no effects because GPUInfo already got these strings. + // (Otherwise the entry should not apply.) 
+ manager->SetGLStrings(kGLVendorMesa, kGLRendererMesa, kGLVersionMesa802); + EXPECT_TRUE(manager->GpuAccessAllowed(NULL)); + EXPECT_EQ(1u, manager->GetBlacklistedFeatureCount()); + EXPECT_TRUE(manager->IsFeatureBlacklisted(GPU_FEATURE_TYPE_WEBGL)); +} +#endif // OS_LINUX + +TEST_F(GpuDataManagerImplPrivateTest, GpuDriverBugListSingle) { + ScopedGpuDataManagerImplPrivate manager; + manager->gpu_driver_bugs_.insert(5); + + CommandLine command_line(0, NULL); + manager->AppendGpuCommandLine(&command_line); + + EXPECT_TRUE(command_line.HasSwitch(switches::kGpuDriverBugWorkarounds)); + std::string args = command_line.GetSwitchValueASCII( + switches::kGpuDriverBugWorkarounds); + EXPECT_STREQ("5", args.c_str()); +} + +TEST_F(GpuDataManagerImplPrivateTest, GpuDriverBugListMultiple) { + ScopedGpuDataManagerImplPrivate manager; + manager->gpu_driver_bugs_.insert(5); + manager->gpu_driver_bugs_.insert(7); + + CommandLine command_line(0, NULL); + manager->AppendGpuCommandLine(&command_line); + + EXPECT_TRUE(command_line.HasSwitch(switches::kGpuDriverBugWorkarounds)); + std::string args = command_line.GetSwitchValueASCII( + switches::kGpuDriverBugWorkarounds); + EXPECT_STREQ("5,7", args.c_str()); +} + +TEST_F(GpuDataManagerImplPrivateTest, BlacklistAllFeatures) { + ScopedGpuDataManagerImplPrivate manager; + EXPECT_EQ(0u, manager->GetBlacklistedFeatureCount()); + std::string reason; + EXPECT_TRUE(manager->GpuAccessAllowed(&reason)); + EXPECT_TRUE(reason.empty()); + + const std::string blacklist_json = LONG_STRING_CONST( + { + "name": "gpu blacklist", + "version": "0.1", + "entries": [ + { + "id": 1, + "features": [ + "all" + ] + } + ] + } + ); + + GPUInfo gpu_info; + gpu_info.gpu.vendor_id = 0x10de; + gpu_info.gpu.device_id = 0x0640; + manager->InitializeForTesting(blacklist_json, gpu_info); + + EXPECT_EQ(static_cast<size_t>(NUMBER_OF_GPU_FEATURE_TYPES), + manager->GetBlacklistedFeatureCount()); + // TODO(zmo): remove the Linux specific behavior once we fix + // 
crbug.com/238466. +#if defined(OS_LINUX) + EXPECT_TRUE(manager->GpuAccessAllowed(&reason)); + EXPECT_TRUE(reason.empty()); +#else + EXPECT_FALSE(manager->GpuAccessAllowed(&reason)); + EXPECT_FALSE(reason.empty()); +#endif +} + +} // namespace content diff --git a/chromium/content/browser/gpu/gpu_functional_browsertest.cc b/chromium/content/browser/gpu/gpu_functional_browsertest.cc new file mode 100644 index 00000000000..b3c2a57319f --- /dev/null +++ b/chromium/content/browser/gpu/gpu_functional_browsertest.cc @@ -0,0 +1,143 @@ +// Copyright (c) 2013 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "base/command_line.h" +#include "base/path_service.h" +#include "content/browser/gpu/gpu_process_host.h" +#include "content/public/browser/browser_thread.h" +#include "content/public/common/content_paths.h" +#include "content/public/common/content_switches.h" +#include "content/public/common/url_constants.h" +#include "content/public/test/browser_test_utils.h" +#include "content/shell/shell.h" +#include "content/test/content_browser_test.h" +#include "content/test/content_browser_test_utils.h" + +namespace content { + +namespace { + void VerifyGPUProcessLaunch(bool* result) { + GpuProcessHost* host = + GpuProcessHost::Get(GpuProcessHost::GPU_PROCESS_KIND_SANDBOXED, + content::CAUSE_FOR_GPU_LAUNCH_NO_LAUNCH); + *result = !!host; + } +} + +class GpuFunctionalTest : public ContentBrowserTest { + protected: + virtual void SetUpInProcessBrowserTestFixture() OVERRIDE { + base::FilePath test_dir; + ASSERT_TRUE(PathService::Get(DIR_TEST_DATA, &test_dir)); + gpu_test_dir_ = test_dir.AppendASCII("gpu"); + } + + virtual void SetUpCommandLine(CommandLine* command_line) OVERRIDE { + command_line->AppendSwitch(switches::kDisableGpuProcessPrelaunch); + } + + void VerifyHardwareAccelerated(const std::string& feature) { + NavigateToURL(shell(), + 
GURL(std::string(chrome::kChromeUIScheme). + append("://"). + append(kChromeUIGpuHost))); + + { + // Verify that the given feature is hardware accelerated.. + std::string javascript = + "function VerifyHardwareAccelerated(feature) {" + " var list = document.querySelector(\".feature-status-list\");" + " for (var i=0; i < list.childElementCount; i++) {" + " var span_list = list.children[i].getElementsByTagName('span');" + " var feature_str = span_list[0].textContent;" + " var value_str = span_list[1].textContent;" + " if ((feature_str == feature) &&" + " (value_str == 'Hardware accelerated')) {" + " domAutomationController.send(\"success\");" + " }" + " }" + "};"; + javascript.append("VerifyHardwareAccelerated(\""); + javascript.append(feature); + javascript.append("\");"); + std::string result; + EXPECT_TRUE(ExecuteScriptAndExtractString(shell()->web_contents(), + javascript, + &result)); + EXPECT_EQ(result, "success"); + } + } + + void VerifyGPUProcessOnPage(std::string filename, bool wait) { + Shell::Initialize(); + ASSERT_TRUE(test_server()->Start()); + DOMMessageQueue message_queue; + + std::string url("files/gpu/"); + GURL full_url = test_server()->GetURL(url.append(filename)); + NavigateToURL(shell(), full_url); + + if (wait) { + std::string result_string; + ASSERT_TRUE(message_queue.WaitForMessage(&result_string)); + } + + bool result = false; + BrowserThread::PostTaskAndReply( + BrowserThread::IO, + FROM_HERE, + base::Bind(&VerifyGPUProcessLaunch, &result), + base::MessageLoop::QuitClosure()); + base::MessageLoop::current()->Run(); + EXPECT_TRUE(result); + } + + base::FilePath gpu_test_dir_; +}; + +#if defined(OS_LINUX) && !defined(NDEBUG) +// http://crbug.com/254724 +#define IF_NOT_DEBUG_LINUX(x) DISABLED_ ## x +#else +#define IF_NOT_DEBUG_LINUX(x) x +#endif + +IN_PROC_BROWSER_TEST_F( + GpuFunctionalTest, + IF_NOT_DEBUG_LINUX(MANUAL_TestFeatureHardwareAccelerated)) { + VerifyHardwareAccelerated("WebGL: "); + VerifyHardwareAccelerated("Canvas: "); + 
VerifyHardwareAccelerated("3D CSS: "); +} + +// Verify that gpu process is spawned in webgl example. +IN_PROC_BROWSER_TEST_F(GpuFunctionalTest, + IF_NOT_DEBUG_LINUX(MANUAL_TestWebGL)) { + VerifyGPUProcessOnPage("functional_webgl.html", false); +} + +// Verify that gpu process is spawned when viewing a 2D canvas. +IN_PROC_BROWSER_TEST_F(GpuFunctionalTest, + IF_NOT_DEBUG_LINUX(MANUAL_Test2dCanvas)) { + VerifyGPUProcessOnPage("functional_canvas_demo.html", false); +} + +// Verify that gpu process is spawned when viewing a 3D CSS page. +IN_PROC_BROWSER_TEST_F(GpuFunctionalTest, + IF_NOT_DEBUG_LINUX(MANUAL_Test3dCss)) { + VerifyGPUProcessOnPage("functional_3d_css.html", false); +} + +#if defined(OS_LINUX) +// crbug.com/257109 +#define MANUAL_TestGpuWithVideo DISABLED_TestGpuWithVideo +#endif + +// Verify that gpu process is started when viewing video. +IN_PROC_BROWSER_TEST_F(GpuFunctionalTest, + MANUAL_TestGpuWithVideo) { + VerifyGPUProcessOnPage("functional_video.html", true); +} + +} // namespace content diff --git a/chromium/content/browser/gpu/gpu_info_browsertest.cc b/chromium/content/browser/gpu/gpu_info_browsertest.cc new file mode 100644 index 00000000000..d555d1d05f0 --- /dev/null +++ b/chromium/content/browser/gpu/gpu_info_browsertest.cc @@ -0,0 +1,110 @@ +// Copyright (c) 2013 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "base/command_line.h" +#include "base/logging.h" +#include "base/message_loop/message_loop.h" +#include "base/strings/stringprintf.h" +#include "base/sys_info.h" +#include "content/browser/gpu/gpu_data_manager_impl.h" +#include "content/public/browser/gpu_data_manager_observer.h" +#include "content/public/common/content_switches.h" +#include "content/test/content_browser_test.h" + +namespace content { + +namespace { + +class TestObserver : public GpuDataManagerObserver { + public: + explicit TestObserver(base::MessageLoop* message_loop) + : message_loop_(message_loop) { + } + + virtual ~TestObserver() { } + + virtual void OnGpuInfoUpdate() OVERRIDE { + // Display GPU/Driver information. + gpu::GPUInfo gpu_info = + GpuDataManagerImpl::GetInstance()->GetGPUInfo(); + std::string vendor_id = base::StringPrintf( + "0x%04x", gpu_info.gpu.vendor_id); + std::string device_id = base::StringPrintf( + "0x%04x", gpu_info.gpu.device_id); + LOG(INFO) << "GPU[0]: vendor_id = " << vendor_id + << ", device_id = " << device_id; + for (size_t i = 0; i < gpu_info.secondary_gpus.size(); ++i) { + gpu::GPUInfo::GPUDevice gpu = gpu_info.secondary_gpus[i]; + vendor_id = base::StringPrintf("0x%04x", gpu.vendor_id); + device_id = base::StringPrintf("0x%04x", gpu.device_id); + LOG(INFO) << "GPU[" << (i + 1) + << "]: vendor_id = " << vendor_id + << ", device_od = " << device_id; + } + LOG(INFO) << "GPU Driver: vendor = " << gpu_info.driver_vendor + << ", version = " << gpu_info.driver_version + << ", date = " << gpu_info.driver_date; + + // Display GL information. + LOG(INFO) << "GL: vendor = " << gpu_info.gl_vendor + << ", renderer = " << gpu_info.gl_renderer; + + // Display GL window system binding information. + LOG(INFO) << "GL Window System: vendor = " << gpu_info.gl_ws_vendor + << ", version = " << gpu_info.gl_ws_version; + + // Display OS information. 
+ LOG(INFO) << "OS = " << base::SysInfo::OperatingSystemName() + << " " << base::SysInfo::OperatingSystemVersion(); + + message_loop_->Quit(); + } + + private: + base::MessageLoop* message_loop_; +}; + +} // namespace anonymous + +class GpuInfoBrowserTest : public ContentBrowserTest { + public: + GpuInfoBrowserTest() + : message_loop_(base::MessageLoop::TYPE_UI) { + } + + virtual void SetUp() { + // We expect real pixel output for these tests. + UseRealGLContexts(); + + ContentBrowserTest::SetUp(); + } + + base::MessageLoop* GetMessageLoop() { return &message_loop_; } + + private: + base::MessageLoop message_loop_; + + DISALLOW_COPY_AND_ASSIGN(GpuInfoBrowserTest); +}; + +IN_PROC_BROWSER_TEST_F(GpuInfoBrowserTest, MANUAL_DisplayGpuInfo) { + // crbug.com/262287 +#if defined(OS_MACOSX) + // TODO(zmo): crashing on Mac, and also we don't have the full info + // collected. + return; +#endif +#if defined(OS_LINUX) && !defined(NDEBUG) + // TODO(zmo): crashing on Linux Debug. + return; +#endif + TestObserver observer(GetMessageLoop()); + GpuDataManagerImpl::GetInstance()->AddObserver(&observer); + GpuDataManagerImpl::GetInstance()->RequestCompleteGpuInfoIfNeeded(); + + GetMessageLoop()->Run(); +} + +} // namespace content + diff --git a/chromium/content/browser/gpu/gpu_internals_ui.cc b/chromium/content/browser/gpu/gpu_internals_ui.cc new file mode 100644 index 00000000000..0e778c277b9 --- /dev/null +++ b/chromium/content/browser/gpu/gpu_internals_ui.cc @@ -0,0 +1,661 @@ +// Copyright (c) 2012 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "content/browser/gpu/gpu_internals_ui.h" + +#include <string> + +#include "base/bind.h" +#include "base/bind_helpers.h" +#include "base/command_line.h" +#include "base/i18n/time_formatting.h" +#include "base/strings/string_number_conversions.h" +#include "base/strings/stringprintf.h" +#include "base/sys_info.h" +#include "base/values.h" +#include "cc/base/switches.h" +#include "content/browser/gpu/gpu_data_manager_impl.h" +#include "content/public/browser/browser_thread.h" +#include "content/public/browser/compositor_util.h" +#include "content/public/browser/gpu_data_manager_observer.h" +#include "content/public/browser/web_contents.h" +#include "content/public/browser/web_ui.h" +#include "content/public/browser/web_ui_data_source.h" +#include "content/public/browser/web_ui_message_handler.h" +#include "content/public/common/content_client.h" +#include "content/public/common/content_switches.h" +#include "content/public/common/url_constants.h" +#include "gpu/config/gpu_feature_type.h" +#include "gpu/config/gpu_info.h" +#include "grit/content_resources.h" +#include "third_party/angle_dx11/src/common/version.h" + +namespace content { +namespace { + +struct GpuFeatureInfo { + std::string name; + uint32 blocked; + bool disabled; + std::string disabled_description; + bool fallback_to_software; +}; + +WebUIDataSource* CreateGpuHTMLSource() { + WebUIDataSource* source = WebUIDataSource::Create(kChromeUIGpuHost); + + source->SetJsonPath("strings.js"); + source->AddResourcePath("gpu_internals.js", IDR_GPU_INTERNALS_JS); + source->SetDefaultResource(IDR_GPU_INTERNALS_HTML); + return source; +} + +base::DictionaryValue* NewDescriptionValuePair(const std::string& desc, + const std::string& value) { + base::DictionaryValue* dict = new base::DictionaryValue(); + dict->SetString("description", desc); + dict->SetString("value", value); + return dict; +} + +base::DictionaryValue* NewDescriptionValuePair(const std::string& desc, + base::Value* value) { + 
base::DictionaryValue* dict = new base::DictionaryValue(); + dict->SetString("description", desc); + dict->Set("value", value); + return dict; +} + +base::Value* NewStatusValue(const char* name, const char* status) { + base::DictionaryValue* value = new base::DictionaryValue(); + value->SetString("name", name); + value->SetString("status", status); + return value; +} + +#if defined(OS_WIN) +// Output DxDiagNode tree as nested array of {description,value} pairs +base::ListValue* DxDiagNodeToList(const gpu::DxDiagNode& node) { + base::ListValue* list = new base::ListValue(); + for (std::map<std::string, std::string>::const_iterator it = + node.values.begin(); + it != node.values.end(); + ++it) { + list->Append(NewDescriptionValuePair(it->first, it->second)); + } + + for (std::map<std::string, gpu::DxDiagNode>::const_iterator it = + node.children.begin(); + it != node.children.end(); + ++it) { + base::ListValue* sublist = DxDiagNodeToList(it->second); + list->Append(NewDescriptionValuePair(it->first, sublist)); + } + return list; +} +#endif + +std::string GPUDeviceToString(const gpu::GPUInfo::GPUDevice& gpu) { + std::string vendor = base::StringPrintf("0x%04x", gpu.vendor_id); + if (!gpu.vendor_string.empty()) + vendor += " [" + gpu.vendor_string + "]"; + std::string device = base::StringPrintf("0x%04x", gpu.device_id); + if (!gpu.device_string.empty()) + device += " [" + gpu.device_string + "]"; + return base::StringPrintf( + "VENDOR = %s, DEVICE= %s", vendor.c_str(), device.c_str()); +} + +base::DictionaryValue* GpuInfoAsDictionaryValue() { + gpu::GPUInfo gpu_info = GpuDataManagerImpl::GetInstance()->GetGPUInfo(); + base::ListValue* basic_info = new base::ListValue(); + basic_info->Append(NewDescriptionValuePair( + "Initialization time", + base::Int64ToString(gpu_info.initialization_time.InMilliseconds()))); + basic_info->Append(NewDescriptionValuePair( + "Sandboxed", new base::FundamentalValue(gpu_info.sandboxed))); + basic_info->Append(NewDescriptionValuePair( + 
"GPU0", GPUDeviceToString(gpu_info.gpu))); + for (size_t i = 0; i < gpu_info.secondary_gpus.size(); ++i) { + basic_info->Append(NewDescriptionValuePair( + base::StringPrintf("GPU%d", static_cast<int>(i + 1)), + GPUDeviceToString(gpu_info.secondary_gpus[i]))); + } + basic_info->Append(NewDescriptionValuePair( + "Optimus", new base::FundamentalValue(gpu_info.optimus))); + basic_info->Append(NewDescriptionValuePair( + "AMD switchable", new base::FundamentalValue(gpu_info.amd_switchable))); + if (gpu_info.lenovo_dcute) { + basic_info->Append(NewDescriptionValuePair( + "Lenovo dCute", new base::FundamentalValue(true))); + } + if (gpu_info.display_link_version.IsValid()) { + basic_info->Append(NewDescriptionValuePair( + "DisplayLink Version", gpu_info.display_link_version.GetString())); + } + basic_info->Append(NewDescriptionValuePair("Driver vendor", + gpu_info.driver_vendor)); + basic_info->Append(NewDescriptionValuePair("Driver version", + gpu_info.driver_version)); + basic_info->Append(NewDescriptionValuePair("Driver date", + gpu_info.driver_date)); + basic_info->Append(NewDescriptionValuePair("Pixel shader version", + gpu_info.pixel_shader_version)); + basic_info->Append(NewDescriptionValuePair("Vertex shader version", + gpu_info.vertex_shader_version)); + basic_info->Append(NewDescriptionValuePair("Machine model", + gpu_info.machine_model)); + basic_info->Append(NewDescriptionValuePair("GL version", + gpu_info.gl_version)); + basic_info->Append(NewDescriptionValuePair("GL_VENDOR", + gpu_info.gl_vendor)); + basic_info->Append(NewDescriptionValuePair("GL_RENDERER", + gpu_info.gl_renderer)); + basic_info->Append(NewDescriptionValuePair("GL_VERSION", + gpu_info.gl_version_string)); + basic_info->Append(NewDescriptionValuePair("GL_EXTENSIONS", + gpu_info.gl_extensions)); + basic_info->Append(NewDescriptionValuePair("Window system binding vendor", + gpu_info.gl_ws_vendor)); + basic_info->Append(NewDescriptionValuePair("Window system binding version", + 
gpu_info.gl_ws_version)); + basic_info->Append(NewDescriptionValuePair("Window system binding extensions", + gpu_info.gl_ws_extensions)); + std::string reset_strategy = + base::StringPrintf("0x%04x", gpu_info.gl_reset_notification_strategy); + basic_info->Append(NewDescriptionValuePair( + "Reset notification strategy", reset_strategy)); + + base::DictionaryValue* info = new base::DictionaryValue(); + info->Set("basic_info", basic_info); + +#if defined(OS_WIN) + base::ListValue* perf_info = new base::ListValue(); + perf_info->Append(NewDescriptionValuePair( + "Graphics", + base::StringPrintf("%.1f", gpu_info.performance_stats.graphics))); + perf_info->Append(NewDescriptionValuePair( + "Gaming", + base::StringPrintf("%.1f", gpu_info.performance_stats.gaming))); + perf_info->Append(NewDescriptionValuePair( + "Overall", + base::StringPrintf("%.1f", gpu_info.performance_stats.overall))); + info->Set("performance_info", perf_info); + + base::Value* dx_info = gpu_info.dx_diagnostics.children.size() ? + DxDiagNodeToList(gpu_info.dx_diagnostics) : + base::Value::CreateNullValue(); + info->Set("diagnostics", dx_info); +#endif + + return info; +} + +// Determine if accelerated-2d-canvas is supported, which depends on whether +// lose_context could happen. 
+bool SupportsAccelerated2dCanvas() { + if (GpuDataManagerImpl::GetInstance()->GetGPUInfo().can_lose_context) + return false; + return true; +} + +base::Value* GetFeatureStatus() { + const CommandLine& command_line = *CommandLine::ForCurrentProcess(); + GpuDataManagerImpl* manager = GpuDataManagerImpl::GetInstance(); + std::string gpu_access_blocked_reason; + bool gpu_access_blocked = + !manager->GpuAccessAllowed(&gpu_access_blocked_reason); + + base::DictionaryValue* status = new base::DictionaryValue(); + + const GpuFeatureInfo kGpuFeatureInfo[] = { + { + "2d_canvas", + manager->IsFeatureBlacklisted( + gpu::GPU_FEATURE_TYPE_ACCELERATED_2D_CANVAS), + command_line.HasSwitch(switches::kDisableAccelerated2dCanvas) || + !SupportsAccelerated2dCanvas(), + "Accelerated 2D canvas is unavailable: either disabled at the command" + " line or not supported by the current system.", + true + }, + { + "compositing", + manager->IsFeatureBlacklisted( + gpu::GPU_FEATURE_TYPE_ACCELERATED_COMPOSITING), + command_line.HasSwitch(switches::kDisableAcceleratedCompositing), + "Accelerated compositing has been disabled, either via about:flags or" + " command line. 
This adversely affects performance of all hardware" + " accelerated features.", + true + }, + { + "3d_css", + manager->IsFeatureBlacklisted( + gpu::GPU_FEATURE_TYPE_ACCELERATED_COMPOSITING) || + manager->IsFeatureBlacklisted(gpu::GPU_FEATURE_TYPE_3D_CSS), + command_line.HasSwitch(switches::kDisableAcceleratedLayers), + "Accelerated layers have been disabled at the command line.", + false + }, + { + "css_animation", + manager->IsFeatureBlacklisted( + gpu::GPU_FEATURE_TYPE_ACCELERATED_COMPOSITING) || + manager->IsFeatureBlacklisted(gpu::GPU_FEATURE_TYPE_3D_CSS), + command_line.HasSwitch(cc::switches::kDisableThreadedAnimation) || + command_line.HasSwitch(switches::kDisableAcceleratedCompositing) || + command_line.HasSwitch(switches::kDisableAcceleratedLayers), + "Accelerated CSS animation has been disabled at the command line.", + true + }, + { + "webgl", + manager->IsFeatureBlacklisted(gpu::GPU_FEATURE_TYPE_WEBGL), + command_line.HasSwitch(switches::kDisableExperimentalWebGL), + "WebGL has been disabled, either via about:flags or command line.", + false + }, + { + "multisampling", + manager->IsFeatureBlacklisted(gpu::GPU_FEATURE_TYPE_MULTISAMPLING), + command_line.HasSwitch(switches::kDisableGLMultisampling), + "Multisampling has been disabled, either via about:flags or command" + " line.", + false + }, + { + "flash_3d", + manager->IsFeatureBlacklisted(gpu::GPU_FEATURE_TYPE_FLASH3D), + command_line.HasSwitch(switches::kDisableFlash3d), + "Using 3d in flash has been disabled, either via about:flags or" + " command line.", + false + }, + { + "flash_stage3d", + manager->IsFeatureBlacklisted(gpu::GPU_FEATURE_TYPE_FLASH_STAGE3D), + command_line.HasSwitch(switches::kDisableFlashStage3d), + "Using Stage3d in Flash has been disabled, either via about:flags or" + " command line.", + false + }, + { + "flash_stage3d_baseline", + manager->IsFeatureBlacklisted( + gpu::GPU_FEATURE_TYPE_FLASH_STAGE3D_BASELINE) || + 
manager->IsFeatureBlacklisted(gpu::GPU_FEATURE_TYPE_FLASH_STAGE3D), + command_line.HasSwitch(switches::kDisableFlashStage3d), + "Using Stage3d Baseline profile in Flash has been disabled, either" + " via about:flags or command line.", + false + }, + { + "texture_sharing", + manager->IsFeatureBlacklisted(gpu::GPU_FEATURE_TYPE_TEXTURE_SHARING), + command_line.HasSwitch(switches::kDisableImageTransportSurface), + "Sharing textures between processes has been disabled, either via" + " about:flags or command line.", + false + }, + { + "video_decode", + manager->IsFeatureBlacklisted( + gpu::GPU_FEATURE_TYPE_ACCELERATED_VIDEO_DECODE), + command_line.HasSwitch(switches::kDisableAcceleratedVideoDecode), + "Accelerated video decode has been disabled, either via about:flags" + " or command line.", + true + }, + { + "video", + manager->IsFeatureBlacklisted( + gpu::GPU_FEATURE_TYPE_ACCELERATED_VIDEO), + command_line.HasSwitch(switches::kDisableAcceleratedVideo) || + command_line.HasSwitch(switches::kDisableAcceleratedCompositing), + "Accelerated video presentation has been disabled, either via" + " about:flags or command line.", + true + }, +#if defined(OS_CHROMEOS) + { + "panel_fitting", + manager->IsFeatureBlacklisted(gpu::GPU_FEATURE_TYPE_PANEL_FITTING), + command_line.HasSwitch(switches::kDisablePanelFitting), + "Panel fitting has been disabled, either via about:flags or command" + " line.", + false + }, +#endif + { + "force_compositing_mode", + manager->IsFeatureBlacklisted( + gpu::GPU_FEATURE_TYPE_FORCE_COMPOSITING_MODE) && + !IsForceCompositingModeEnabled(), + !IsForceCompositingModeEnabled() && + !manager->IsFeatureBlacklisted( + gpu::GPU_FEATURE_TYPE_FORCE_COMPOSITING_MODE), + "Force compositing mode is off, either disabled at the command" + " line or not supported by the current system.", + false + }, + }; + const size_t kNumFeatures = sizeof(kGpuFeatureInfo) / sizeof(GpuFeatureInfo); + + // Build the feature_status field. 
+ { + base::ListValue* feature_status_list = new base::ListValue(); + + for (size_t i = 0; i < kNumFeatures; ++i) { + // force_compositing_mode status is part of the compositing status. + if (kGpuFeatureInfo[i].name == "force_compositing_mode") + continue; + + std::string status; + if (kGpuFeatureInfo[i].disabled) { + status = "disabled"; + if (kGpuFeatureInfo[i].name == "css_animation") { + status += "_software_animated"; + } else if (kGpuFeatureInfo[i].name == "raster") { + if (cc::switches::IsImplSidePaintingEnabled()) + status += "_software_multithreaded"; + else + status += "_software"; + } else { + if (kGpuFeatureInfo[i].fallback_to_software) + status += "_software"; + else + status += "_off"; + } + } else if (GpuDataManagerImpl::GetInstance()->ShouldUseSwiftShader()) { + status = "unavailable_software"; + } else if (kGpuFeatureInfo[i].blocked || + gpu_access_blocked) { + status = "unavailable"; + if (kGpuFeatureInfo[i].fallback_to_software) + status += "_software"; + else + status += "_off"; + } else { + status = "enabled"; + if (kGpuFeatureInfo[i].name == "webgl" && + (command_line.HasSwitch(switches::kDisableAcceleratedCompositing) || + manager->IsFeatureBlacklisted( + gpu::GPU_FEATURE_TYPE_ACCELERATED_COMPOSITING))) + status += "_readback"; + bool has_thread = IsThreadedCompositingEnabled(); + if (kGpuFeatureInfo[i].name == "compositing") { + bool force_compositing = IsForceCompositingModeEnabled(); + if (force_compositing) + status += "_force"; + if (has_thread) + status += "_threaded"; + } + if (kGpuFeatureInfo[i].name == "css_animation") { + if (has_thread) + status = "accelerated_threaded"; + else + status = "accelerated"; + } + } + // TODO(reveman): Remove this when crbug.com/223286 has been fixed. 
+ if (kGpuFeatureInfo[i].name == "raster" && + cc::switches::IsImplSidePaintingEnabled()) { + status = "disabled_software_multithreaded"; + } + feature_status_list->Append( + NewStatusValue(kGpuFeatureInfo[i].name.c_str(), status.c_str())); + } + gpu::GpuSwitchingOption gpu_switching_option = + GpuDataManagerImpl::GetInstance()->GetGpuSwitchingOption(); + if (gpu_switching_option != gpu::GPU_SWITCHING_OPTION_UNKNOWN) { + std::string gpu_switching; + switch (gpu_switching_option) { + case gpu::GPU_SWITCHING_OPTION_AUTOMATIC: + gpu_switching = "gpu_switching_automatic"; + break; + case gpu::GPU_SWITCHING_OPTION_FORCE_DISCRETE: + gpu_switching = "gpu_switching_force_discrete"; + break; + case gpu::GPU_SWITCHING_OPTION_FORCE_INTEGRATED: + gpu_switching = "gpu_switching_force_integrated"; + break; + default: + break; + } + feature_status_list->Append( + NewStatusValue("gpu_switching", gpu_switching.c_str())); + } + status->Set("featureStatus", feature_status_list); + } + + // Build the problems list. + { + base::ListValue* problem_list = new base::ListValue(); + GpuDataManagerImpl::GetInstance()->GetBlacklistReasons(problem_list); + + if (gpu_access_blocked) { + base::DictionaryValue* problem = new base::DictionaryValue(); + problem->SetString("description", + "GPU process was unable to boot: " + gpu_access_blocked_reason); + problem->Set("crBugs", new base::ListValue()); + problem->Set("webkitBugs", new base::ListValue()); + problem_list->Insert(0, problem); + } + + for (size_t i = 0; i < kNumFeatures; ++i) { + if (kGpuFeatureInfo[i].disabled) { + base::DictionaryValue* problem = new base::DictionaryValue(); + problem->SetString( + "description", kGpuFeatureInfo[i].disabled_description); + problem->Set("crBugs", new base::ListValue()); + problem->Set("webkitBugs", new base::ListValue()); + problem_list->Append(problem); + } + } + + status->Set("problems", problem_list); + } + + // Build driver bug workaround list. 
+ { + base::ListValue* workaround_list = new base::ListValue(); + GpuDataManagerImpl::GetInstance()->GetDriverBugWorkarounds(workaround_list); + status->Set("workarounds", workaround_list); + } + + return status; +} + +// This class receives javascript messages from the renderer. +// Note that the WebUI infrastructure runs on the UI thread, therefore all of +// this class's methods are expected to run on the UI thread. +class GpuMessageHandler + : public WebUIMessageHandler, + public base::SupportsWeakPtr<GpuMessageHandler>, + public GpuDataManagerObserver { + public: + GpuMessageHandler(); + virtual ~GpuMessageHandler(); + + // WebUIMessageHandler implementation. + virtual void RegisterMessages() OVERRIDE; + + // GpuDataManagerObserver implementation. + virtual void OnGpuInfoUpdate() OVERRIDE; + virtual void OnGpuSwitching() OVERRIDE; + + // Messages + void OnBrowserBridgeInitialized(const base::ListValue* list); + void OnCallAsync(const base::ListValue* list); + + // Submessages dispatched from OnCallAsync + base::Value* OnRequestClientInfo(const base::ListValue* list); + base::Value* OnRequestLogMessages(const base::ListValue* list); + + private: + // True if observing the GpuDataManager (re-attaching as observer would + // DCHECK). + bool observing_; + + DISALLOW_COPY_AND_ASSIGN(GpuMessageHandler); +}; + +//////////////////////////////////////////////////////////////////////////////// +// +// GpuMessageHandler +// +//////////////////////////////////////////////////////////////////////////////// + +GpuMessageHandler::GpuMessageHandler() + : observing_(false) { +} + +GpuMessageHandler::~GpuMessageHandler() { + GpuDataManagerImpl::GetInstance()->RemoveObserver(this); +} + +/* BrowserBridge.callAsync prepends a requestID to these messages. 
*/ +void GpuMessageHandler::RegisterMessages() { + DCHECK(BrowserThread::CurrentlyOn(BrowserThread::UI)); + + web_ui()->RegisterMessageCallback("browserBridgeInitialized", + base::Bind(&GpuMessageHandler::OnBrowserBridgeInitialized, + base::Unretained(this))); + web_ui()->RegisterMessageCallback("callAsync", + base::Bind(&GpuMessageHandler::OnCallAsync, + base::Unretained(this))); +} + +void GpuMessageHandler::OnCallAsync(const base::ListValue* args) { + DCHECK_GE(args->GetSize(), static_cast<size_t>(2)); + // unpack args into requestId, submessage and submessageArgs + bool ok; + const base::Value* requestId; + ok = args->Get(0, &requestId); + DCHECK(ok); + + std::string submessage; + ok = args->GetString(1, &submessage); + DCHECK(ok); + + base::ListValue* submessageArgs = new base::ListValue(); + for (size_t i = 2; i < args->GetSize(); ++i) { + const base::Value* arg; + ok = args->Get(i, &arg); + DCHECK(ok); + + base::Value* argCopy = arg->DeepCopy(); + submessageArgs->Append(argCopy); + } + + // call the submessage handler + base::Value* ret = NULL; + if (submessage == "requestClientInfo") { + ret = OnRequestClientInfo(submessageArgs); + } else if (submessage == "requestLogMessages") { + ret = OnRequestLogMessages(submessageArgs); + } else { // unrecognized submessage + NOTREACHED(); + delete submessageArgs; + return; + } + delete submessageArgs; + + // call BrowserBridge.onCallAsyncReply with result + if (ret) { + web_ui()->CallJavascriptFunction("browserBridge.onCallAsyncReply", + *requestId, + *ret); + delete ret; + } else { + web_ui()->CallJavascriptFunction("browserBridge.onCallAsyncReply", + *requestId); + } +} + +void GpuMessageHandler::OnBrowserBridgeInitialized( + const base::ListValue* args) { + DCHECK(BrowserThread::CurrentlyOn(BrowserThread::UI)); + + // Watch for changes in GPUInfo + if (!observing_) + GpuDataManagerImpl::GetInstance()->AddObserver(this); + observing_ = true; + + // Tell GpuDataManager it should have full GpuInfo. 
If the + // Gpu process has not run yet, this will trigger its launch. + GpuDataManagerImpl::GetInstance()->RequestCompleteGpuInfoIfNeeded(); + + // Run callback immediately in case the info is ready and no update in the + // future. + OnGpuInfoUpdate(); +} + +base::Value* GpuMessageHandler::OnRequestClientInfo( + const base::ListValue* list) { + DCHECK(BrowserThread::CurrentlyOn(BrowserThread::UI)); + + base::DictionaryValue* dict = new base::DictionaryValue(); + + dict->SetString("version", GetContentClient()->GetProduct()); + dict->SetString("command_line", + CommandLine::ForCurrentProcess()->GetCommandLineString()); + dict->SetString("operating_system", + base::SysInfo::OperatingSystemName() + " " + + base::SysInfo::OperatingSystemVersion()); + dict->SetString("angle_revision", base::UintToString(BUILD_REVISION)); + dict->SetString("graphics_backend", "Skia"); + dict->SetString("blacklist_version", + GpuDataManagerImpl::GetInstance()->GetBlacklistVersion()); + dict->SetString("driver_bug_list_version", + GpuDataManagerImpl::GetInstance()->GetDriverBugListVersion()); + + return dict; +} + +base::Value* GpuMessageHandler::OnRequestLogMessages(const base::ListValue*) { + DCHECK(BrowserThread::CurrentlyOn(BrowserThread::UI)); + + return GpuDataManagerImpl::GetInstance()->GetLogMessages(); +} + +void GpuMessageHandler::OnGpuInfoUpdate() { + // Get GPU Info. + scoped_ptr<base::DictionaryValue> gpu_info_val(GpuInfoAsDictionaryValue()); + + // Add in blacklisting features + base::Value* feature_status = GetFeatureStatus(); + if (feature_status) + gpu_info_val->Set("featureStatus", feature_status); + + // Send GPU Info to javascript. 
+ web_ui()->CallJavascriptFunction("browserBridge.onGpuInfoUpdate", + *(gpu_info_val.get())); +} + +void GpuMessageHandler::OnGpuSwitching() { + GpuDataManagerImpl::GetInstance()->RequestCompleteGpuInfoIfNeeded(); +} + +} // namespace + + +//////////////////////////////////////////////////////////////////////////////// +// +// GpuInternalsUI +// +//////////////////////////////////////////////////////////////////////////////// + +GpuInternalsUI::GpuInternalsUI(WebUI* web_ui) + : WebUIController(web_ui) { + web_ui->AddMessageHandler(new GpuMessageHandler()); + + // Set up the chrome://gpu/ source. + BrowserContext* browser_context = + web_ui->GetWebContents()->GetBrowserContext(); + WebUIDataSource::Add(browser_context, CreateGpuHTMLSource()); +} + +} // namespace content diff --git a/chromium/content/browser/gpu/gpu_internals_ui.h b/chromium/content/browser/gpu/gpu_internals_ui.h new file mode 100644 index 00000000000..45912e30703 --- /dev/null +++ b/chromium/content/browser/gpu/gpu_internals_ui.h @@ -0,0 +1,23 @@ +// Copyright (c) 2011 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef CHROME_BROWSER_UI_WEBUI_GPU_INTERNALS_UI_H_ +#define CHROME_BROWSER_UI_WEBUI_GPU_INTERNALS_UI_H_ + +#include "content/public/browser/web_ui_controller.h" + +namespace content { + +class GpuInternalsUI : public WebUIController { + public: + explicit GpuInternalsUI(WebUI* web_ui); + + private: + DISALLOW_COPY_AND_ASSIGN(GpuInternalsUI); +}; + +} // namespace content + +#endif // CHROME_BROWSER_UI_WEBUI_GPU_INTERNALS_UI_H_ + diff --git a/chromium/content/browser/gpu/gpu_ipc_browsertests.cc b/chromium/content/browser/gpu/gpu_ipc_browsertests.cc new file mode 100644 index 00000000000..0ee93dba806 --- /dev/null +++ b/chromium/content/browser/gpu/gpu_ipc_browsertests.cc @@ -0,0 +1,43 @@ +// Copyright 2013 The Chromium Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "base/command_line.h" +#include "content/browser/gpu/browser_gpu_channel_host_factory.h" +#include "content/common/gpu/client/webgraphicscontext3d_command_buffer_impl.h" +#include "content/public/common/content_switches.h" +#include "content/test/content_browser_test.h" +#include "ui/gl/gl_switches.h" +#include "webkit/common/gpu/webgraphicscontext3d_in_process_command_buffer_impl.h" + +namespace { + +class ContextTestBase : public content::ContentBrowserTest { + public: + virtual void SetUpOnMainThread() OVERRIDE { + CHECK(content::BrowserGpuChannelHostFactory::instance()); + context_.reset( + content::WebGraphicsContext3DCommandBufferImpl::CreateOffscreenContext( + content::BrowserGpuChannelHostFactory::instance(), + WebKit::WebGraphicsContext3D::Attributes(), + GURL())); + CHECK(context_.get()); + context_->makeContextCurrent(); + ContentBrowserTest::SetUpOnMainThread(); + } + + virtual void TearDownOnMainThread() OVERRIDE { + // Must delete the context first. + context_.reset(NULL); + ContentBrowserTest::TearDownOnMainThread(); + } + + protected: + scoped_ptr<WebKit::WebGraphicsContext3D> context_; +}; + +} // namespace + +// Include the actual tests. +#define CONTEXT_TEST_F IN_PROC_BROWSER_TEST_F +#include "content/common/gpu/client/gpu_context_tests.h" diff --git a/chromium/content/browser/gpu/gpu_memory_test.cc b/chromium/content/browser/gpu/gpu_memory_test.cc new file mode 100644 index 00000000000..cb74a629299 --- /dev/null +++ b/chromium/content/browser/gpu/gpu_memory_test.cc @@ -0,0 +1,241 @@ +// Copyright (c) 2012 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "base/callback.h" +#include "base/command_line.h" +#include "base/path_service.h" +#include "content/public/browser/gpu_data_manager.h" +#include "content/public/browser/gpu_data_manager_observer.h" +#include "content/public/browser/web_contents.h" +#include "content/public/common/content_paths.h" +#include "content/public/common/content_switches.h" +#include "content/public/test/browser_test_utils.h" +#include "content/public/test/test_utils.h" +#include "content/shell/shell.h" +#include "content/test/content_browser_test.h" +#include "content/test/content_browser_test_utils.h" +#include "gpu/command_buffer/service/gpu_switches.h" +#include "gpu/config/gpu_test_config.h" +#include "net/base/net_util.h" + +namespace content { + +// Run the tests with a memory limit of 256MB, and give +// and extra 4MB of wiggle-room for over-allocation. +const char* kMemoryLimitMBSwitch = "256"; +const size_t kMemoryLimitMB = 256; +const size_t kSingleTabLimitMB = 128; +const size_t kWiggleRoomMB = 4; + +// Observer to report GPU memory usage when requested. 
+class GpuMemoryBytesAllocatedObserver : public GpuDataManagerObserver { + public: + GpuMemoryBytesAllocatedObserver() + : bytes_allocated_(0) { + } + + virtual ~GpuMemoryBytesAllocatedObserver() { + } + + virtual void OnVideoMemoryUsageStatsUpdate( + const GPUVideoMemoryUsageStats& video_memory_usage_stats) OVERRIDE { + bytes_allocated_ = video_memory_usage_stats.bytes_allocated; + message_loop_runner_->Quit(); + } + + size_t GetBytesAllocated() { + message_loop_runner_ = new MessageLoopRunner; + GpuDataManager::GetInstance()->AddObserver(this); + GpuDataManager::GetInstance()->RequestVideoMemoryUsageStatsUpdate(); + message_loop_runner_->Run(); + GpuDataManager::GetInstance()->RemoveObserver(this); + message_loop_runner_ = NULL; + return bytes_allocated_; + } + + private: + size_t bytes_allocated_; + scoped_refptr<MessageLoopRunner> message_loop_runner_; +}; + +class GpuMemoryTest : public ContentBrowserTest { + public: + GpuMemoryTest() + : allow_tests_to_run_(false), + has_used_first_shell_(false) { + } + virtual ~GpuMemoryTest() { + } + + virtual void SetUpInProcessBrowserTestFixture() OVERRIDE { + base::FilePath test_dir; + ASSERT_TRUE(PathService::Get(DIR_TEST_DATA, &test_dir)); + gpu_test_dir_ = test_dir.AppendASCII("gpu"); + } + + virtual void SetUpCommandLine(CommandLine* command_line) OVERRIDE { + command_line->AppendSwitch(switches::kEnableLogging); + command_line->AppendSwitch(switches::kForceCompositingMode); + command_line->AppendSwitchASCII(switches::kForceGpuMemAvailableMb, + kMemoryLimitMBSwitch); + // Only run this on GPU bots for now. These tests should work with + // any GPU process, but may be slow. + if (command_line->HasSwitch(switches::kUseGpuInTests)) { + allow_tests_to_run_ = true; + } + // Don't enable these tests on Android just yet (they use lots of memory and + // may not be stable). 
+#if defined(OS_ANDROID) + allow_tests_to_run_ = false; +#endif + } + + enum PageType { + PAGE_CSS3D, + PAGE_WEBGL, + }; + + // Load a page and consume a specified amount of GPU memory. + void LoadPage(Shell* tab_to_load, + PageType page_type, + size_t mb_to_use) { + base::FilePath url; + switch (page_type) { + case PAGE_CSS3D: + url = gpu_test_dir_.AppendASCII("mem_css3d.html"); + break; + case PAGE_WEBGL: + url = gpu_test_dir_.AppendASCII("mem_webgl.html"); + break; + } + + NavigateToURL(tab_to_load, net::FilePathToFileURL(url)); + std::ostringstream js_call; + js_call << "useGpuMemory("; + js_call << mb_to_use; + js_call << ");"; + std::string message; + ASSERT_TRUE(ExecuteScriptInFrameAndExtractString( + tab_to_load->web_contents(), std::string(), js_call.str(), &message)); + EXPECT_EQ("DONE_USE_GPU_MEMORY", message); + } + + // Create a new tab. + Shell* CreateNewTab() { + // The ContentBrowserTest will create one shell by default, use that one + // first so that we don't confuse the memory manager into thinking there + // are more windows than there are. + Shell* new_tab = has_used_first_shell_ ? CreateBrowser() : shell(); + has_used_first_shell_ = true; + tabs_.insert(new_tab); + visible_tabs_.insert(new_tab); + return new_tab; + } + + void SetTabBackgrounded(Shell* tab_to_background) { + ASSERT_TRUE( + visible_tabs_.find(tab_to_background) != visible_tabs_.end()); + visible_tabs_.erase(tab_to_background); + tab_to_background->web_contents()->WasHidden(); + } + + bool MemoryUsageInRange(size_t low, size_t high) { + FinishGpuMemoryChanges(); + size_t memory_usage_bytes = GetMemoryUsageMbytes(); + + // If it's not immediately the case that low <= usage <= high, then + // allow + // Because we haven't implemented the full delay in FinishGpuMemoryChanges, + // keep re-reading the GPU memory usage for 2 seconds before declaring + // failure. 
+ base::Time start_time = base::Time::Now(); + while (low > memory_usage_bytes || memory_usage_bytes > high) { + memory_usage_bytes = GetMemoryUsageMbytes(); + base::TimeDelta delta = base::Time::Now() - start_time; + if (delta.InMilliseconds() >= 2000) + break; + } + + return (low <= memory_usage_bytes && memory_usage_bytes <= high); + } + + bool AllowTestsToRun() const { + return allow_tests_to_run_; + } + + private: + void FinishGpuMemoryChanges() { + // This should wait until all effects of memory management complete. + // We will need to wait until all + // 1. pending commits from the main thread to the impl thread in the + // compositor complete (for visible compositors). + // 2. allocations that the renderer's impl thread will make due to the + // compositor and WebGL are completed. + // 3. pending GpuMemoryManager::Manage() calls to manage are made. + // 4. renderers' OnMemoryAllocationChanged callbacks in response to + // manager are made. + // Each step in this sequence can cause trigger the next (as a 1-2-3-4-1 + // cycle), so we will need to pump this cycle until it stabilizes. + + // Pump the cycle 8 times (in principle it could take an infinite number + // of iterations to settle). + for (size_t pump_it = 0; pump_it < 8; ++pump_it) { + // Wait for a RequestAnimationFrame to complete from all visible tabs + // for stage 1 of the cycle. 
+ for (std::set<Shell*>::iterator it = visible_tabs_.begin(); + it != visible_tabs_.end(); + ++it) { + std::string js_call( + "window.webkitRequestAnimationFrame(function() {" + " domAutomationController.setAutomationId(1);" + " domAutomationController.send(\"DONE_RAF\");" + "})"); + std::string message; + ASSERT_TRUE(ExecuteScriptInFrameAndExtractString( + (*it)->web_contents(), std::string(), js_call, &message)); + EXPECT_EQ("DONE_RAF", message); + } + // TODO(ccameron): send an IPC from Browser -> Renderer (delay it until + // painting finishes) -> GPU process (delay it until any pending manages + // happen) -> All Renderers -> Browser to flush parts 2, 3, and 4. + } + } + + size_t GetMemoryUsageMbytes() { + GpuMemoryBytesAllocatedObserver observer; + observer.GetBytesAllocated(); + return observer.GetBytesAllocated() / 1048576; + } + + bool allow_tests_to_run_; + std::set<Shell*> tabs_; + std::set<Shell*> visible_tabs_; + bool has_used_first_shell_; + base::FilePath gpu_test_dir_; +}; + +#if defined(OS_LINUX) && !defined(NDEBUG) +// http://crbug.com/254724 +#define IF_NOT_DEBUG_LINUX(x) DISABLED_ ## x +#else +#define IF_NOT_DEBUG_LINUX(x) x +#endif + +// When trying to load something that doesn't fit into our total GPU memory +// limit, we shouldn't exceed that limit. +IN_PROC_BROWSER_TEST_F(GpuMemoryTest, + IF_NOT_DEBUG_LINUX(SingleWindowDoesNotExceedLimit)) { + if (!AllowTestsToRun()) + return; + + Shell* tab = CreateNewTab(); + LoadPage(tab, PAGE_CSS3D, kMemoryLimitMB); + // Make sure that the CSS3D page maxes out a single tab's budget (otherwise + // the test doesn't test anything) but still stays under the limit. 
+ EXPECT_TRUE(MemoryUsageInRange( + kSingleTabLimitMB - kWiggleRoomMB, + kMemoryLimitMB + kWiggleRoomMB)); +} + +} // namespace content diff --git a/chromium/content/browser/gpu/gpu_pixel_browsertest.cc b/chromium/content/browser/gpu/gpu_pixel_browsertest.cc new file mode 100644 index 00000000000..5909321377f --- /dev/null +++ b/chromium/content/browser/gpu/gpu_pixel_browsertest.cc @@ -0,0 +1,568 @@ +// Copyright (c) 2013 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "base/command_line.h" +#include "base/file_util.h" +#include "base/files/file_enumerator.h" +#include "base/files/file_path.h" +#include "base/path_service.h" +#include "base/strings/string_number_conversions.h" +#include "base/strings/string_util.h" +#include "base/strings/stringprintf.h" +#include "content/public/browser/render_view_host.h" +#include "content/public/browser/render_widget_host_view.h" +#include "content/public/browser/web_contents.h" +#include "content/public/common/content_paths.h" +#include "content/public/common/content_switches.h" +#include "content/public/test/browser_test_utils.h" +#include "content/shell/shell.h" +#include "content/test/content_browser_test.h" +#include "content/test/content_browser_test_utils.h" +#include "gpu/config/gpu_test_config.h" +#include "net/base/net_util.h" +#include "testing/gtest/include/gtest/gtest.h" +#include "third_party/skia/include/core/SkBitmap.h" +#include "third_party/skia/include/core/SkColor.h" +#include "ui/gfx/codec/png_codec.h" +#include "ui/gfx/size.h" +#include "ui/gl/gl_switches.h" +#include "ui/snapshot/snapshot.h" + +namespace { + +enum ReferenceImageOption { + kReferenceImageLocal, + kReferenceImageCheckedIn, + kReferenceImageNone // Only check a few key pixels. 
+}; + +struct ReferencePixel { + int x, y; + unsigned char r, g, b; +}; + +// Command line flag for overriding the default location for putting generated +// test images that do not match references. +const char kGeneratedDir[] = "generated-dir"; +// Command line flag for overriding the default location for reference images. +const char kReferenceDir[] = "reference-dir"; +// Command line flag for Chromium build revision. +const char kBuildRevision[] = "build-revision"; + +// Reads and decodes a PNG image to a bitmap. Returns true on success. The PNG +// should have been encoded using |gfx::PNGCodec::Encode|. +bool ReadPNGFile(const base::FilePath& file_path, SkBitmap* bitmap) { + DCHECK(bitmap); + base::FilePath abs_path(base::MakeAbsoluteFilePath(file_path)); + if (abs_path.empty()) + return false; + + std::string png_data; + return file_util::ReadFileToString(abs_path, &png_data) && + gfx::PNGCodec::Decode(reinterpret_cast<unsigned char*>(&png_data[0]), + png_data.length(), + bitmap); +} + +// Encodes a bitmap into a PNG and write to disk. Returns true on success. The +// parent directory does not have to exist. +bool WritePNGFile(const SkBitmap& bitmap, const base::FilePath& file_path) { + std::vector<unsigned char> png_data; + if (gfx::PNGCodec::EncodeBGRASkBitmap(bitmap, true, &png_data) && + file_util::CreateDirectory(file_path.DirName())) { + int bytes_written = file_util::WriteFile( + file_path, reinterpret_cast<char*>(&png_data[0]), png_data.size()); + if (bytes_written == static_cast<int>(png_data.size())) + return true; + } + return false; +} + +// Write an empty file, whose name indicates the chrome revision when the ref +// image was generated. 
+bool WriteREVFile(const base::FilePath& file_path) { + if (file_util::CreateDirectory(file_path.DirName())) { + char one_byte = 0; + int bytes_written = file_util::WriteFile(file_path, &one_byte, 1); + if (bytes_written == 1) + return true; + } + return false; +} + +} // namespace anonymous + +namespace content { + +// Test fixture for GPU image comparison tests. +// TODO(kkania): Document how to add to/modify these tests. +class GpuPixelBrowserTest : public ContentBrowserTest { + public: + GpuPixelBrowserTest() + : ref_img_revision_(0), + ref_img_revision_no_older_than_(0), + ref_img_option_(kReferenceImageNone) { + } + + virtual void SetUp() { + // We expect real pixel output for these tests. + UseRealGLContexts(); + + ContentBrowserTest::SetUp(); + } + + virtual void SetUpCommandLine(CommandLine* command_line) OVERRIDE { + command_line->AppendSwitchASCII(switches::kTestGLLib, + "libllvmpipe.so"); + } + + virtual void SetUpInProcessBrowserTestFixture() OVERRIDE { + ContentBrowserTest::SetUpInProcessBrowserTestFixture(); + + CommandLine* command_line = CommandLine::ForCurrentProcess(); + if (command_line->HasSwitch(switches::kUseGpuInTests)) + ref_img_option_ = kReferenceImageLocal; + + if (command_line->HasSwitch(kBuildRevision)) + build_revision_ = command_line->GetSwitchValueASCII(kBuildRevision); + + ASSERT_TRUE(PathService::Get(DIR_TEST_DATA, &test_data_dir_)); + test_data_dir_ = test_data_dir_.AppendASCII("gpu"); + + if (command_line->HasSwitch(kGeneratedDir)) + generated_img_dir_ = command_line->GetSwitchValuePath(kGeneratedDir); + else + generated_img_dir_ = test_data_dir_.AppendASCII("generated"); + + switch (ref_img_option_) { + case kReferenceImageLocal: + if (command_line->HasSwitch(kReferenceDir)) + ref_img_dir_ = command_line->GetSwitchValuePath(kReferenceDir); + else + ref_img_dir_ = test_data_dir_.AppendASCII("gpu_reference"); + break; + case kReferenceImageCheckedIn: + ref_img_dir_ = test_data_dir_.AppendASCII("llvmpipe_reference"); + break; + 
default: + break; + } + + test_name_ = testing::UnitTest::GetInstance()->current_test_info()->name(); + const char* test_status_prefixes[] = { + "DISABLED_", "FLAKY_", "FAILS_", "MANUAL_"}; + for (size_t i = 0; i < arraysize(test_status_prefixes); ++i) { + ReplaceFirstSubstringAfterOffset( + &test_name_, 0, test_status_prefixes[i], std::string()); + } + } + + // If the existing ref image was saved from an revision older than the + // ref_img_update_revision, refresh the ref image. + void RunPixelTest(const gfx::Size& tab_container_size, + const base::FilePath& url, + int64 ref_img_update_revision, + const ReferencePixel* ref_pixels, + size_t ref_pixel_count) { + if (ref_img_option_ == kReferenceImageLocal) { + ref_img_revision_no_older_than_ = ref_img_update_revision; + ObtainLocalRefImageRevision(); + } + + DOMMessageQueue message_queue; + NavigateToURL(shell(), net::FilePathToFileURL(url)); + + std::string message; + // Wait for notification that page is loaded. + ASSERT_TRUE(message_queue.WaitForMessage(&message)); + EXPECT_STREQ("\"SUCCESS\"", message.c_str()) << message; + + SkBitmap bitmap; + ASSERT_TRUE(TabSnapShotToImage(&bitmap, tab_container_size)); + bool same_pixels = true; + if (ref_img_option_ == kReferenceImageNone && ref_pixels && ref_pixel_count) + same_pixels = ComparePixels(bitmap, ref_pixels, ref_pixel_count); + else + same_pixels = CompareImages(bitmap); + EXPECT_TRUE(same_pixels); + } + + const base::FilePath& test_data_dir() const { + return test_data_dir_; + } + + private: + base::FilePath test_data_dir_; + base::FilePath generated_img_dir_; + base::FilePath ref_img_dir_; + int64 ref_img_revision_; + std::string build_revision_; + // The name of the test, with any special prefixes dropped. + std::string test_name_; + + // Any local ref image generated from older revision is ignored. + int64 ref_img_revision_no_older_than_; + + // Whether use locally generated ref images, or checked in ref images, or + // simply check a few key pixels. 
+ ReferenceImageOption ref_img_option_; + + // Compares the generated bitmap with the appropriate reference image on disk. + // Returns true iff the images were the same. + // + // If no valid reference image exists, save the generated bitmap to the disk. + // The image format is: + // <test_name>_<revision>.png + // E.g., + // WebGLTeapot_19762.png + // The number is the chromium revision that generated the image. + // + // On failure or on ref image generation, the image and diff image will be + // written to disk. The formats are: + // FAIL_<ref_image_name>, DIFF_<ref_image_name> + // E.g., + // FAIL_WebGLTeapot_19762.png, DIFF_WebGLTeapot_19762.png + bool CompareImages(const SkBitmap& gen_bmp) { + SkBitmap ref_bmp_on_disk; + + base::FilePath img_path = ref_img_dir_.AppendASCII(test_name_ + ".png"); + bool found_ref_img = ReadPNGFile(img_path, &ref_bmp_on_disk); + + if (!found_ref_img && ref_img_option_ == kReferenceImageCheckedIn) { + LOG(ERROR) << "Couldn't find reference image: " + << img_path.value(); + // No image to compare to, exit early. 
+ return false; + } + + const SkBitmap* ref_bmp; + bool save_gen = false; + bool save_diff = true; + bool rt = true; + + if ((ref_img_revision_ <= 0 && ref_img_option_ == kReferenceImageLocal) || + !found_ref_img) { + base::FilePath rev_path = ref_img_dir_.AppendASCII( + test_name_ + "_" + build_revision_ + ".rev"); + if (!WritePNGFile(gen_bmp, img_path)) { + LOG(ERROR) << "Can't save generated image to: " + << img_path.value() + << " as future reference."; + rt = false; + } else { + LOG(INFO) << "Saved reference image to: " + << img_path.value(); + } + if (rt) { + if (!WriteREVFile(rev_path)) { + LOG(ERROR) << "Can't save revision file to: " + << rev_path.value(); + rt = false; + base::DeleteFile(img_path, false); + } else { + LOG(INFO) << "Saved revision file to: " + << rev_path.value(); + } + } + if (ref_img_revision_ > 0) { + LOG(ERROR) << "Can't read the local ref image: " + << img_path.value() + << ", reset it."; + rt = false; + } + // If we re-generate the ref image, we save the gen and diff images so + // the ref image can be uploaded to the server and be viewed later. + save_gen = true; + save_diff = true; + ref_bmp = &gen_bmp; + } else { + ref_bmp = &ref_bmp_on_disk; + } + + SkBitmap diff_bmp; + if (ref_bmp->width() != gen_bmp.width() || + ref_bmp->height() != gen_bmp.height()) { + LOG(ERROR) + << "Dimensions do not match (Expected) vs (Actual):" + << "(" << ref_bmp->width() << "x" << ref_bmp->height() + << ") vs. " + << "(" << gen_bmp.width() << "x" << gen_bmp.height() << ")"; + if (ref_img_option_ == kReferenceImageLocal) + save_gen = true; + rt = false; + } else { + // Compare pixels and create a simple diff image. 
+ int diff_pixels_count = 0; + diff_bmp.setConfig(SkBitmap::kARGB_8888_Config, + gen_bmp.width(), gen_bmp.height()); + diff_bmp.allocPixels(); + diff_bmp.eraseColor(SK_ColorWHITE); + SkAutoLockPixels lock_bmp(gen_bmp); + SkAutoLockPixels lock_ref_bmp(*ref_bmp); + SkAutoLockPixels lock_diff_bmp(diff_bmp); + // The reference images were saved with no alpha channel. Use the mask to + // set alpha to 0. + uint32_t kAlphaMask = 0x00FFFFFF; + for (int x = 0; x < gen_bmp.width(); ++x) { + for (int y = 0; y < gen_bmp.height(); ++y) { + if ((*gen_bmp.getAddr32(x, y) & kAlphaMask) != + (*ref_bmp->getAddr32(x, y) & kAlphaMask)) { + ++diff_pixels_count; + *diff_bmp.getAddr32(x, y) = 192 << 16; // red + } + } + } + if (diff_pixels_count > 0) { + LOG(ERROR) << diff_pixels_count + << " pixels do not match."; + if (ref_img_option_ == kReferenceImageLocal) { + save_gen = true; + save_diff = true; + } + rt = false; + } + } + + std::string ref_img_filename = img_path.BaseName().MaybeAsASCII(); + if (save_gen) { + base::FilePath img_fail_path = generated_img_dir_.AppendASCII( + "FAIL_" + ref_img_filename); + if (!WritePNGFile(gen_bmp, img_fail_path)) { + LOG(ERROR) << "Can't save generated image to: " + << img_fail_path.value(); + } else { + LOG(INFO) << "Saved generated image to: " + << img_fail_path.value(); + } + } + if (save_diff) { + base::FilePath img_diff_path = generated_img_dir_.AppendASCII( + "DIFF_" + ref_img_filename); + if (!WritePNGFile(diff_bmp, img_diff_path)) { + LOG(ERROR) << "Can't save generated diff image to: " + << img_diff_path.value(); + } else { + LOG(INFO) << "Saved difference image to: " + << img_diff_path.value(); + } + } + return rt; + } + + bool ComparePixels(const SkBitmap& gen_bmp, + const ReferencePixel* ref_pixels, + size_t ref_pixel_count) { + SkAutoLockPixels lock_bmp(gen_bmp); + + for (size_t i = 0; i < ref_pixel_count; ++i) { + int x = ref_pixels[i].x; + int y = ref_pixels[i].y; + unsigned char r = ref_pixels[i].r; + unsigned char g = 
ref_pixels[i].g; + unsigned char b = ref_pixels[i].b; + + DCHECK(x >= 0 && x < gen_bmp.width() && y >= 0 && y < gen_bmp.height()); + + unsigned char* rgba = reinterpret_cast<unsigned char*>( + gen_bmp.getAddr32(x, y)); + DCHECK(rgba); + if (rgba[0] != b || rgba[1] != g || rgba[2] != r) { + std::string error_message = base::StringPrintf( + "pixel(%d,%d) expects [%u,%u,%u], but gets [%u,%u,%u] instead", + x, y, r, g, b, rgba[0], rgba[1], rgba[2]); + LOG(ERROR) << error_message.c_str(); + return false; + } + } + return true; + } + + // Take snapshot of the tab, encode it as PNG, and save to a SkBitmap. + bool TabSnapShotToImage(SkBitmap* bitmap, const gfx::Size& size) { + CHECK(bitmap); + std::vector<unsigned char> png; + + gfx::Rect snapshot_bounds(size); + RenderViewHost* view_host = shell()->web_contents()->GetRenderViewHost(); + if (!ui::GrabViewSnapshot(view_host->GetView()->GetNativeView(), + &png, snapshot_bounds)) { + LOG(ERROR) << "ui::GrabViewSnapShot() failed"; + return false; + } + + if (!gfx::PNGCodec::Decode(reinterpret_cast<unsigned char*>(&*png.begin()), + png.size(), bitmap)) { + LOG(ERROR) << "Decode PNG to a SkBitmap failed"; + return false; + } + return true; + } + + // If no valid local revision file is located, the ref_img_revision_ is 0. 
+ void ObtainLocalRefImageRevision() { + base::FilePath filter; + filter = filter.AppendASCII(test_name_ + "_*.rev"); + base::FileEnumerator locator(ref_img_dir_, + false, // non recursive + base::FileEnumerator::FILES, + filter.value()); + int64 max_revision = 0; + std::vector<base::FilePath> outdated_revs; + for (base::FilePath full_path = locator.Next(); + !full_path.empty(); + full_path = locator.Next()) { + std::string filename = + full_path.BaseName().RemoveExtension().MaybeAsASCII(); + std::string revision_string = + filename.substr(test_name_.length() + 1); + int64 revision = 0; + bool converted = base::StringToInt64(revision_string, &revision); + if (!converted) + continue; + if (revision < ref_img_revision_no_older_than_ || + revision < max_revision) { + outdated_revs.push_back(full_path); + continue; + } + max_revision = revision; + } + ref_img_revision_ = max_revision; + for (size_t i = 0; i < outdated_revs.size(); ++i) + base::DeleteFile(outdated_revs[i], false); + } + + DISALLOW_COPY_AND_ASSIGN(GpuPixelBrowserTest); +}; + +IN_PROC_BROWSER_TEST_F(GpuPixelBrowserTest, MANUAL_WebGLGreenTriangle) { + // If test baseline needs to be updated after a given revision, update the + // following number. If no revision requirement, then 0. + const int64 ref_img_revision_update = 123489; + + const ReferencePixel ref_pixels[] = { + // x, y, r, g, b + {50, 100, 0, 0, 0}, + {100, 100, 0, 255, 0}, + {150, 100, 0, 0, 0}, + {50, 150, 0, 255, 0}, + {100, 150, 0, 255, 0}, + {150, 150, 0, 255, 0} + }; + const size_t ref_pixel_count = sizeof(ref_pixels) / sizeof(ReferencePixel); + + gfx::Size container_size(400, 300); + base::FilePath url = + test_data_dir().AppendASCII("pixel_webgl.html"); + RunPixelTest(container_size, url, ref_img_revision_update, + ref_pixels, ref_pixel_count); +} + +IN_PROC_BROWSER_TEST_F(GpuPixelBrowserTest, MANUAL_CSS3DBlueBox) { + // If test baseline needs to be updated after a given revision, update the + // following number. 
If no revision requirement, then 0. + const int64 ref_img_revision_update = 209827; + + const ReferencePixel ref_pixels[] = { + // x, y, r, g, b + {70, 50, 0, 0, 255}, + {150, 50, 0, 0, 0}, + {70, 90, 0, 0, 255}, + {150, 90, 0, 0, 255}, + {70, 125, 0, 0, 255}, + {150, 125, 0, 0, 0} + }; + const size_t ref_pixel_count = sizeof(ref_pixels) / sizeof(ReferencePixel); + + gfx::Size container_size(400, 300); + base::FilePath url = + test_data_dir().AppendASCII("pixel_css3d.html"); + RunPixelTest(container_size, url, ref_img_revision_update, + ref_pixels, ref_pixel_count); +} + +IN_PROC_BROWSER_TEST_F(GpuPixelBrowserTest, MANUAL_Canvas2DRedBoxHD) { + // If test baseline needs to be updated after a given revision, update the + // following number. If no revision requirement, then 0. + const int64 ref_img_revision_update = 123489; + + const ReferencePixel ref_pixels[] = { + // x, y, r, g, b + {40, 100, 0, 0, 0}, + {60, 100, 127, 0, 0}, + {140, 100, 127, 0, 0}, + {160, 100, 0, 0, 0} + }; + const size_t ref_pixel_count = sizeof(ref_pixels) / sizeof(ReferencePixel); + + gfx::Size container_size(400, 300); + base::FilePath url = + test_data_dir().AppendASCII("pixel_canvas2d.html"); + RunPixelTest(container_size, url, ref_img_revision_update, + ref_pixels, ref_pixel_count); +} + +class GpuPixelTestCanvas2DSD : public GpuPixelBrowserTest { + public: + virtual void SetUpCommandLine(CommandLine* command_line) OVERRIDE { + GpuPixelBrowserTest::SetUpCommandLine(command_line); + command_line->AppendSwitch(switches::kDisableAccelerated2dCanvas); + } +}; + +IN_PROC_BROWSER_TEST_F(GpuPixelTestCanvas2DSD, MANUAL_Canvas2DRedBoxSD) { + // If test baseline needs to be updated after a given revision, update the + // following number. If no revision requirement, then 0. 
+ const int64 ref_img_revision_update = 123489; + + const ReferencePixel ref_pixels[] = { + // x, y, r, g, b + {40, 100, 0, 0, 0}, + {60, 100, 127, 0, 0}, + {140, 100, 127, 0, 0}, + {160, 100, 0, 0, 0} + }; + const size_t ref_pixel_count = sizeof(ref_pixels) / sizeof(ReferencePixel); + + gfx::Size container_size(400, 300); + base::FilePath url = + test_data_dir().AppendASCII("pixel_canvas2d.html"); + RunPixelTest(container_size, url, ref_img_revision_update, + ref_pixels, ref_pixel_count); +} + +class GpuPixelTestBrowserPlugin : public GpuPixelBrowserTest { + public: + virtual void SetUpCommandLine(CommandLine* command_line) OVERRIDE { + GpuPixelBrowserTest::SetUpCommandLine(command_line); + command_line->AppendSwitch(switches::kEnableBrowserPluginForAllViewTypes); + } +}; + +// TODO(fsamuel): re-enable as MANUAL_BrowserPluginBlueBox: crbug.com/166165 +IN_PROC_BROWSER_TEST_F(GpuPixelTestBrowserPlugin, + DISABLED_BrowserPluginBlueBox) { + // If test baseline needs to be updated after a given revision, update the + // following number. If no revision requirement, then 0. + const int64 ref_img_revision_update = 209445; + + const ReferencePixel ref_pixels[] = { + // x, y, r, g, b + {70, 50, 0, 0, 255}, + {150, 50, 0, 0, 0}, + {70, 90, 0, 0, 255}, + {150, 90, 0, 0, 255}, + {70, 125, 0, 0, 255}, + {150, 125, 0, 0, 0} + }; + const size_t ref_pixel_count = sizeof(ref_pixels) / sizeof(ReferencePixel); + + gfx::Size container_size(400, 300); + base::FilePath url = + test_data_dir().AppendASCII("pixel_browser_plugin.html"); + RunPixelTest(container_size, url, ref_img_revision_update, + ref_pixels, ref_pixel_count); +} + +} // namespace content + diff --git a/chromium/content/browser/gpu/gpu_process_host.cc b/chromium/content/browser/gpu/gpu_process_host.cc new file mode 100644 index 00000000000..8cbb7aa1d82 --- /dev/null +++ b/chromium/content/browser/gpu/gpu_process_host.cc @@ -0,0 +1,1285 @@ +// Copyright (c) 2012 The Chromium Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "content/browser/gpu/gpu_process_host.h" + +#include "base/base64.h" +#include "base/base_switches.h" +#include "base/bind.h" +#include "base/bind_helpers.h" +#include "base/command_line.h" +#include "base/debug/trace_event.h" +#include "base/logging.h" +#include "base/memory/ref_counted.h" +#include "base/metrics/histogram.h" +#include "base/sha1.h" +#include "base/threading/thread.h" +#include "content/browser/browser_child_process_host_impl.h" +#include "content/browser/gpu/gpu_data_manager_impl.h" +#include "content/browser/gpu/gpu_process_host_ui_shim.h" +#include "content/browser/gpu/shader_disk_cache.h" +#include "content/browser/renderer_host/render_widget_helper.h" +#include "content/browser/renderer_host/render_widget_host_impl.h" +#include "content/common/child_process_host_impl.h" +#include "content/common/gpu/gpu_messages.h" +#include "content/common/view_messages.h" +#include "content/gpu/gpu_child_thread.h" +#include "content/gpu/gpu_process.h" +#include "content/port/browser/render_widget_host_view_frame_subscriber.h" +#include "content/public/browser/browser_thread.h" +#include "content/public/browser/content_browser_client.h" +#include "content/public/browser/render_process_host.h" +#include "content/public/browser/render_widget_host_view.h" +#include "content/public/common/content_client.h" +#include "content/public/common/content_switches.h" +#include "content/public/common/result_codes.h" +#include "gpu/command_buffer/service/gpu_switches.h" +#include "ipc/ipc_channel_handle.h" +#include "ipc/ipc_switches.h" +#include "ui/base/latency_info.h" +#include "ui/gl/gl_switches.h" + + +#if defined(OS_WIN) +#include "base/win/windows_version.h" +#include "content/common/sandbox_win.h" +#include "content/public/common/sandboxed_process_launcher_delegate.h" +#include "sandbox/win/src/sandbox_policy.h" +#include 
"ui/surface/accelerated_surface_win.h" +#endif + +namespace content { + +bool GpuProcessHost::gpu_enabled_ = true; +bool GpuProcessHost::hardware_gpu_enabled_ = true; + +namespace { + +enum GPUProcessLifetimeEvent { + LAUNCHED, + DIED_FIRST_TIME, + DIED_SECOND_TIME, + DIED_THIRD_TIME, + DIED_FOURTH_TIME, + GPU_PROCESS_LIFETIME_EVENT_MAX = 100 +}; + +// Indexed by GpuProcessKind. There is one of each kind maximum. This array may +// only be accessed from the IO thread. +GpuProcessHost* g_gpu_process_hosts[GpuProcessHost::GPU_PROCESS_KIND_COUNT]; + + +void SendGpuProcessMessage(GpuProcessHost::GpuProcessKind kind, + CauseForGpuLaunch cause, + IPC::Message* message) { + GpuProcessHost* host = GpuProcessHost::Get(kind, cause); + if (host) { + host->Send(message); + } else { + delete message; + } +} + +void AcceleratedSurfaceBuffersSwappedCompletedForGPU(int host_id, + int route_id, + bool alive) { + if (!BrowserThread::CurrentlyOn(BrowserThread::IO)) { + BrowserThread::PostTask( + BrowserThread::IO, + FROM_HERE, + base::Bind(&AcceleratedSurfaceBuffersSwappedCompletedForGPU, + host_id, + route_id, + alive)); + return; + } + + GpuProcessHost* host = GpuProcessHost::FromID(host_id); + if (host) { + if (alive) { + AcceleratedSurfaceMsg_BufferPresented_Params ack_params; + ack_params.sync_point = 0; + host->Send( + new AcceleratedSurfaceMsg_BufferPresented(route_id, ack_params)); + } else { + host->ForceShutdown(); + } + } +} + +#if defined(OS_WIN) +// This sends a ViewMsg_SwapBuffers_ACK directly to the renderer process +// (RenderWidget). 
+void AcceleratedSurfaceBuffersSwappedCompletedForRenderer( + int surface_id, + base::TimeTicks timebase, + base::TimeDelta interval, + const ui::LatencyInfo& latency_info) { + if (!BrowserThread::CurrentlyOn(BrowserThread::UI)) { + BrowserThread::PostTask( + BrowserThread::UI, + FROM_HERE, + base::Bind(&AcceleratedSurfaceBuffersSwappedCompletedForRenderer, + surface_id, timebase, interval, latency_info)); + return; + } + + int render_process_id = 0; + int render_widget_id = 0; + if (!GpuSurfaceTracker::Get()->GetRenderWidgetIDForSurface( + surface_id, &render_process_id, &render_widget_id)) { + RenderWidgetHostImpl::CompositorFrameDrawn(latency_info); + return; + } + RenderWidgetHost* rwh = + RenderWidgetHost::FromID(render_process_id, render_widget_id); + if (!rwh) + return; + RenderWidgetHostImpl::From(rwh)->AcknowledgeSwapBuffersToRenderer(); + if (interval != base::TimeDelta()) + RenderWidgetHostImpl::From(rwh)->UpdateVSyncParameters(timebase, interval); + RenderWidgetHostImpl::From(rwh)->FrameSwapped(latency_info); + RenderWidgetHostImpl::From(rwh)->DidReceiveRendererFrame(); +} + +void AcceleratedSurfaceBuffersSwappedCompleted( + int host_id, + int route_id, + int surface_id, + bool alive, + base::TimeTicks timebase, + base::TimeDelta interval, + const ui::LatencyInfo& latency_info) { + AcceleratedSurfaceBuffersSwappedCompletedForGPU(host_id, route_id, + alive); + AcceleratedSurfaceBuffersSwappedCompletedForRenderer(surface_id, timebase, + interval, latency_info); +} + +// NOTE: changes to this class need to be reviewed by the security team. 
+class GpuSandboxedProcessLauncherDelegate + : public SandboxedProcessLauncherDelegate { + public: + explicit GpuSandboxedProcessLauncherDelegate(CommandLine* cmd_line) + : cmd_line_(cmd_line) {} + virtual ~GpuSandboxedProcessLauncherDelegate() {} + + virtual void ShouldSandbox(bool* in_sandbox) OVERRIDE { + if (cmd_line_->HasSwitch(switches::kDisableGpuSandbox)) { + *in_sandbox = false; + DVLOG(1) << "GPU sandbox is disabled"; + } + } + + virtual void PreSandbox(bool* disable_default_policy, + base::FilePath* exposed_dir) OVERRIDE { + *disable_default_policy = true; + } + + // For the GPU process we gotten as far as USER_LIMITED. The next level + // which is USER_RESTRICTED breaks both the DirectX backend and the OpenGL + // backend. Note that the GPU process is connected to the interactive + // desktop. + virtual void PreSpawnTarget(sandbox::TargetPolicy* policy, + bool* success) { + if (base::win::GetVersion() > base::win::VERSION_XP) { + if (cmd_line_->GetSwitchValueASCII(switches::kUseGL) == + gfx::kGLImplementationDesktopName) { + // Open GL path. + policy->SetTokenLevel(sandbox::USER_RESTRICTED_SAME_ACCESS, + sandbox::USER_LIMITED); + SetJobLevel(*cmd_line_, sandbox::JOB_UNPROTECTED, 0, policy); + policy->SetDelayedIntegrityLevel(sandbox::INTEGRITY_LEVEL_LOW); + } else { + if (cmd_line_->GetSwitchValueASCII(switches::kUseGL) == + gfx::kGLImplementationSwiftShaderName || + cmd_line_->HasSwitch(switches::kReduceGpuSandbox) || + cmd_line_->HasSwitch(switches::kDisableImageTransportSurface)) { + // Swiftshader path. + policy->SetTokenLevel(sandbox::USER_RESTRICTED_SAME_ACCESS, + sandbox::USER_LIMITED); + } else { + // Angle + DirectX path. + policy->SetTokenLevel(sandbox::USER_RESTRICTED_SAME_ACCESS, + sandbox::USER_RESTRICTED); + // This is a trick to keep the GPU out of low-integrity processes. It + // starts at low-integrity for UIPI to work, then drops below + // low-integrity after warm-up. 
+ policy->SetDelayedIntegrityLevel(sandbox::INTEGRITY_LEVEL_UNTRUSTED); + } + + // UI restrictions break when we access Windows from outside our job. + // However, we don't want a proxy window in this process because it can + // introduce deadlocks where the renderer blocks on the gpu, which in + // turn blocks on the browser UI thread. So, instead we forgo a window + // message pump entirely and just add job restrictions to prevent child + // processes. + SetJobLevel(*cmd_line_, + sandbox::JOB_LIMITED_USER, + JOB_OBJECT_UILIMIT_SYSTEMPARAMETERS | + JOB_OBJECT_UILIMIT_DESKTOP | + JOB_OBJECT_UILIMIT_EXITWINDOWS | + JOB_OBJECT_UILIMIT_DISPLAYSETTINGS, + policy); + + policy->SetIntegrityLevel(sandbox::INTEGRITY_LEVEL_LOW); + } + } else { + SetJobLevel(*cmd_line_, sandbox::JOB_UNPROTECTED, 0, policy); + policy->SetTokenLevel(sandbox::USER_UNPROTECTED, + sandbox::USER_LIMITED); + } + + // Allow the server side of GPU sockets, which are pipes that have + // the "chrome.gpu" namespace and an arbitrary suffix. + sandbox::ResultCode result = policy->AddRule( + sandbox::TargetPolicy::SUBSYS_NAMED_PIPES, + sandbox::TargetPolicy::NAMEDPIPES_ALLOW_ANY, + L"\\\\.\\pipe\\chrome.gpu.*"); + if (result != sandbox::SBOX_ALL_OK) { + *success = false; + return; + } + + // Block this DLL even if it is not loaded by the browser process. + policy->AddDllToUnload(L"cmsetac.dll"); + +#ifdef USE_AURA + // GPU also needs to add sections to the browser for aura + // TODO(jschuh): refactor the GPU channel to remove this. 
crbug.com/128786 + result = policy->AddRule(sandbox::TargetPolicy::SUBSYS_HANDLES, + sandbox::TargetPolicy::HANDLES_DUP_BROKER, + L"Section"); + if (result != sandbox::SBOX_ALL_OK) { + *success = false; + return; + } +#endif + + if (cmd_line_->HasSwitch(switches::kEnableLogging)) { + string16 log_file_path = logging::GetLogFileFullPath(); + if (!log_file_path.empty()) { + result = policy->AddRule(sandbox::TargetPolicy::SUBSYS_FILES, + sandbox::TargetPolicy::FILES_ALLOW_ANY, + log_file_path.c_str()); + if (result != sandbox::SBOX_ALL_OK) { + *success = false; + return; + } + } + } + } + + private: + CommandLine* cmd_line_; +}; +#endif // defined(OS_WIN) + +} // anonymous namespace + +// Single process not supported in multiple dll mode currently. +#if !defined(CHROME_MULTIPLE_DLL) +// This class creates a GPU thread (instead of a GPU process), when running +// with --in-process-gpu or --single-process. +class GpuMainThread : public base::Thread { + public: + explicit GpuMainThread(const std::string& channel_id) + : base::Thread("Chrome_InProcGpuThread"), + channel_id_(channel_id), + gpu_process_(NULL) { + } + + virtual ~GpuMainThread() { + Stop(); + } + + protected: + virtual void Init() OVERRIDE { + gpu_process_ = new GpuProcess(); + // The process object takes ownership of the thread object, so do not + // save and delete the pointer. + gpu_process_->set_main_thread(new GpuChildThread(channel_id_)); + } + + virtual void CleanUp() OVERRIDE { + delete gpu_process_; + } + + private: + std::string channel_id_; + // Deleted in CleanUp() on the gpu thread, so don't use smart pointers. + GpuProcess* gpu_process_; + + DISALLOW_COPY_AND_ASSIGN(GpuMainThread); +}; +#endif // !CHROME_MULTIPLE_DLL + +// static +bool GpuProcessHost::ValidateHost(GpuProcessHost* host) { + if (!host) + return false; + + // The Gpu process is invalid if it's not using SwiftShader, the card is + // blacklisted, and we can kill it and start over. 
+ if (CommandLine::ForCurrentProcess()->HasSwitch(switches::kSingleProcess) || + CommandLine::ForCurrentProcess()->HasSwitch(switches::kInProcessGPU) || + (host->valid_ && + (host->swiftshader_rendering_ || + !GpuDataManagerImpl::GetInstance()->ShouldUseSwiftShader()))) { + return true; + } + + host->ForceShutdown(); + return false; +} + +// static +GpuProcessHost* GpuProcessHost::Get(GpuProcessKind kind, + CauseForGpuLaunch cause) { + DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); + + // Don't grant further access to GPU if it is not allowed. + GpuDataManagerImpl* gpu_data_manager = GpuDataManagerImpl::GetInstance(); + DCHECK(gpu_data_manager); + if (!gpu_data_manager->GpuAccessAllowed(NULL)) + return NULL; + + if (g_gpu_process_hosts[kind] && ValidateHost(g_gpu_process_hosts[kind])) + return g_gpu_process_hosts[kind]; + + if (cause == CAUSE_FOR_GPU_LAUNCH_NO_LAUNCH) + return NULL; + + static int last_host_id = 0; + int host_id; + host_id = ++last_host_id; + + UMA_HISTOGRAM_ENUMERATION("GPU.GPUProcessLaunchCause", + cause, + CAUSE_FOR_GPU_LAUNCH_MAX_ENUM); + + GpuProcessHost* host = new GpuProcessHost(host_id, kind); + if (host->Init()) + return host; + + delete host; + return NULL; +} + +// static +void GpuProcessHost::GetProcessHandles( + const GpuDataManager::GetGpuProcessHandlesCallback& callback) { + if (!BrowserThread::CurrentlyOn(BrowserThread::IO)) { + BrowserThread::PostTask( + BrowserThread::IO, + FROM_HERE, + base::Bind(&GpuProcessHost::GetProcessHandles, callback)); + return; + } + std::list<base::ProcessHandle> handles; + for (size_t i = 0; i < arraysize(g_gpu_process_hosts); ++i) { + GpuProcessHost* host = g_gpu_process_hosts[i]; + if (host && ValidateHost(host)) + handles.push_back(host->process_->GetHandle()); + } + BrowserThread::PostTask( + BrowserThread::UI, + FROM_HERE, + base::Bind(callback, handles)); +} + +// static +void GpuProcessHost::SendOnIO(GpuProcessKind kind, + CauseForGpuLaunch cause, + IPC::Message* message) { + if 
(!BrowserThread::PostTask( + BrowserThread::IO, FROM_HERE, + base::Bind( + &SendGpuProcessMessage, kind, cause, message))) { + delete message; + } +} + +// static +GpuProcessHost* GpuProcessHost::FromID(int host_id) { + DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); + + for (int i = 0; i < GPU_PROCESS_KIND_COUNT; ++i) { + GpuProcessHost* host = g_gpu_process_hosts[i]; + if (host && host->host_id_ == host_id && ValidateHost(host)) + return host; + } + + return NULL; +} + +GpuProcessHost::GpuProcessHost(int host_id, GpuProcessKind kind) + : host_id_(host_id), + valid_(true), + in_process_(false), + swiftshader_rendering_(false), + kind_(kind), + process_launched_(false), + initialized_(false), + uma_memory_stats_received_(false) { + if (CommandLine::ForCurrentProcess()->HasSwitch(switches::kSingleProcess) || + CommandLine::ForCurrentProcess()->HasSwitch(switches::kInProcessGPU)) { + in_process_ = true; + } + + // If the 'single GPU process' policy ever changes, we still want to maintain + // it for 'gpu thread' mode and only create one instance of host and thread. + DCHECK(!in_process_ || g_gpu_process_hosts[kind] == NULL); + + g_gpu_process_hosts[kind] = this; + + // Post a task to create the corresponding GpuProcessHostUIShim. The + // GpuProcessHostUIShim will be destroyed if either the browser exits, + // in which case it calls GpuProcessHostUIShim::DestroyAll, or the + // GpuProcessHost is destroyed, which happens when the corresponding GPU + // process terminates or fails to launch. + BrowserThread::PostTask( + BrowserThread::UI, + FROM_HERE, + base::Bind(base::IgnoreResult(&GpuProcessHostUIShim::Create), host_id)); + + process_.reset(new BrowserChildProcessHostImpl(PROCESS_TYPE_GPU, this)); +} + +GpuProcessHost::~GpuProcessHost() { + DCHECK(CalledOnValidThread()); + + SendOutstandingReplies(); + + // Maximum number of times the gpu process is allowed to crash in a session. 
+ // Once this limit is reached, any request to launch the gpu process will + // fail. + const int kGpuMaxCrashCount = 3; + + // Number of times the gpu process has crashed in the current browser session. + static int gpu_crash_count = 0; + static int gpu_recent_crash_count = 0; + static base::Time last_gpu_crash_time; + static bool crashed_before = false; + static int swiftshader_crash_count = 0; + + // Ending only acts as a failure if the GPU process was actually started and + // was intended for actual rendering (and not just checking caps or other + // options). + if (process_launched_ && kind_ == GPU_PROCESS_KIND_SANDBOXED) { + if (swiftshader_rendering_) { + UMA_HISTOGRAM_ENUMERATION("GPU.SwiftShaderLifetimeEvents", + DIED_FIRST_TIME + swiftshader_crash_count, + GPU_PROCESS_LIFETIME_EVENT_MAX); + + if (++swiftshader_crash_count >= kGpuMaxCrashCount) { + // SwiftShader is too unstable to use. Disable it for current session. + gpu_enabled_ = false; + } + } else { + ++gpu_crash_count; + UMA_HISTOGRAM_ENUMERATION("GPU.GPUProcessLifetimeEvents", + std::min(DIED_FIRST_TIME + gpu_crash_count, + GPU_PROCESS_LIFETIME_EVENT_MAX - 1), + GPU_PROCESS_LIFETIME_EVENT_MAX); + + // Allow about 1 GPU crash per hour to be removed from the crash count, + // so very occasional crashes won't eventually add up and prevent the + // GPU process from launching. + ++gpu_recent_crash_count; + base::Time current_time = base::Time::Now(); + if (crashed_before) { + int hours_different = (current_time - last_gpu_crash_time).InHours(); + gpu_recent_crash_count = + std::max(0, gpu_recent_crash_count - hours_different); + } + + crashed_before = true; + last_gpu_crash_time = current_time; + + if (gpu_recent_crash_count >= kGpuMaxCrashCount || + !initialized_) { +#if !defined(OS_CHROMEOS) + // The gpu process is too unstable to use. Disable it for current + // session. 
+ hardware_gpu_enabled_ = false; + GpuDataManagerImpl::GetInstance()->DisableHardwareAcceleration(); +#endif + } + } + } + + // In case we never started, clean up. + while (!queued_messages_.empty()) { + delete queued_messages_.front(); + queued_messages_.pop(); + } + + // This is only called on the IO thread so no race against the constructor + // for another GpuProcessHost. + if (g_gpu_process_hosts[kind_] == this) + g_gpu_process_hosts[kind_] = NULL; + + // If there are any remaining offscreen contexts at the point the + // GPU process exits, assume something went wrong, and block their + // URLs from accessing client 3D APIs without prompting. + BlockLiveOffscreenContexts(); + + UMA_HISTOGRAM_COUNTS_100("GPU.AtExitSurfaceCount", + GpuSurfaceTracker::Get()->GetSurfaceCount()); + UMA_HISTOGRAM_BOOLEAN("GPU.AtExitReceivedMemoryStats", + uma_memory_stats_received_); + + if (uma_memory_stats_received_) { + UMA_HISTOGRAM_COUNTS_100("GPU.AtExitManagedMemoryClientCount", + uma_memory_stats_.client_count); + UMA_HISTOGRAM_COUNTS_100("GPU.AtExitContextGroupCount", + uma_memory_stats_.context_group_count); + UMA_HISTOGRAM_CUSTOM_COUNTS( + "GPU.AtExitMBytesAllocated", + uma_memory_stats_.bytes_allocated_current / 1024 / 1024, 1, 2000, 50); + UMA_HISTOGRAM_CUSTOM_COUNTS( + "GPU.AtExitMBytesAllocatedMax", + uma_memory_stats_.bytes_allocated_max / 1024 / 1024, 1, 2000, 50); + UMA_HISTOGRAM_CUSTOM_COUNTS( + "GPU.AtExitMBytesLimit", + uma_memory_stats_.bytes_limit / 1024 / 1024, 1, 2000, 50); + } + + std::string message; + if (!in_process_) { + int exit_code; + base::TerminationStatus status = process_->GetTerminationStatus(&exit_code); + UMA_HISTOGRAM_ENUMERATION("GPU.GPUProcessTerminationStatus", + status, + base::TERMINATION_STATUS_MAX_ENUM); + + if (status == base::TERMINATION_STATUS_NORMAL_TERMINATION || + status == base::TERMINATION_STATUS_ABNORMAL_TERMINATION) { + UMA_HISTOGRAM_ENUMERATION("GPU.GPUProcessExitCode", + exit_code, + RESULT_CODE_LAST_CODE); + } + + switch 
(status) { + case base::TERMINATION_STATUS_NORMAL_TERMINATION: + message = "The GPU process exited normally. Everything is okay."; + break; + case base::TERMINATION_STATUS_ABNORMAL_TERMINATION: + message = base::StringPrintf( + "The GPU process exited with code %d.", + exit_code); + break; + case base::TERMINATION_STATUS_PROCESS_WAS_KILLED: + message = "You killed the GPU process! Why?"; + break; + case base::TERMINATION_STATUS_PROCESS_CRASHED: + message = "The GPU process crashed!"; + break; + default: + break; + } + } + + BrowserThread::PostTask(BrowserThread::UI, + FROM_HERE, + base::Bind(&GpuProcessHostUIShim::Destroy, + host_id_, + message)); +} + +bool GpuProcessHost::Init() { + init_start_time_ = base::TimeTicks::Now(); + + TRACE_EVENT_INSTANT0("gpu", "LaunchGpuProcess", TRACE_EVENT_SCOPE_THREAD); + + std::string channel_id = process_->GetHost()->CreateChannel(); + if (channel_id.empty()) + return false; + + // Single process not supported in multiple dll mode currently. +#if !defined(CHROME_MULTIPLE_DLL) + if (in_process_) { + CommandLine::ForCurrentProcess()->AppendSwitch( + switches::kDisableGpuWatchdog); + + in_process_gpu_thread_.reset(new GpuMainThread(channel_id)); + in_process_gpu_thread_->Start(); + + OnProcessLaunched(); // Fake a callback that the process is ready. + } else +#endif // !CHROME_MULTIPLE_DLL + if (!LaunchGpuProcess(channel_id)) { + return false; + } + + if (!Send(new GpuMsg_Initialize())) + return false; + + return true; +} + +void GpuProcessHost::RouteOnUIThread(const IPC::Message& message) { + BrowserThread::PostTask( + BrowserThread::UI, + FROM_HERE, + base::Bind(&RouteToGpuProcessHostUIShimTask, host_id_, message)); +} + +bool GpuProcessHost::Send(IPC::Message* msg) { + DCHECK(CalledOnValidThread()); + if (process_->GetHost()->IsChannelOpening()) { + queued_messages_.push(msg); + return true; + } + + bool result = process_->Send(msg); + if (!result) { + // Channel is hosed, but we may not get destroyed for a while. 
Send + // outstanding channel creation failures now so that the caller can restart + // with a new process/channel without waiting. + SendOutstandingReplies(); + } + return result; +} + +void GpuProcessHost::AddFilter(IPC::ChannelProxy::MessageFilter* filter) { + DCHECK(CalledOnValidThread()); + process_->GetHost()->AddFilter(filter); +} + +bool GpuProcessHost::OnMessageReceived(const IPC::Message& message) { + DCHECK(CalledOnValidThread()); + IPC_BEGIN_MESSAGE_MAP(GpuProcessHost, message) + IPC_MESSAGE_HANDLER(GpuHostMsg_Initialized, OnInitialized) + IPC_MESSAGE_HANDLER(GpuHostMsg_ChannelEstablished, OnChannelEstablished) + IPC_MESSAGE_HANDLER(GpuHostMsg_CommandBufferCreated, OnCommandBufferCreated) + IPC_MESSAGE_HANDLER(GpuHostMsg_DestroyCommandBuffer, OnDestroyCommandBuffer) + IPC_MESSAGE_HANDLER(GpuHostMsg_ImageCreated, OnImageCreated) + IPC_MESSAGE_HANDLER(GpuHostMsg_DidCreateOffscreenContext, + OnDidCreateOffscreenContext) + IPC_MESSAGE_HANDLER(GpuHostMsg_DidLoseContext, OnDidLoseContext) + IPC_MESSAGE_HANDLER(GpuHostMsg_DidDestroyOffscreenContext, + OnDidDestroyOffscreenContext) + IPC_MESSAGE_HANDLER(GpuHostMsg_GpuMemoryUmaStats, + OnGpuMemoryUmaStatsReceived) +#if defined(OS_MACOSX) + IPC_MESSAGE_HANDLER(GpuHostMsg_AcceleratedSurfaceBuffersSwapped, + OnAcceleratedSurfaceBuffersSwapped) +#endif +#if defined(OS_WIN) + IPC_MESSAGE_HANDLER(GpuHostMsg_AcceleratedSurfaceBuffersSwapped, + OnAcceleratedSurfaceBuffersSwapped) + IPC_MESSAGE_HANDLER(GpuHostMsg_AcceleratedSurfacePostSubBuffer, + OnAcceleratedSurfacePostSubBuffer) + IPC_MESSAGE_HANDLER(GpuHostMsg_AcceleratedSurfaceSuspend, + OnAcceleratedSurfaceSuspend) + IPC_MESSAGE_HANDLER(GpuHostMsg_AcceleratedSurfaceRelease, + OnAcceleratedSurfaceRelease) +#endif + IPC_MESSAGE_HANDLER(GpuHostMsg_DestroyChannel, + OnDestroyChannel) + IPC_MESSAGE_HANDLER(GpuHostMsg_CacheShader, + OnCacheShader) + + IPC_MESSAGE_UNHANDLED(RouteOnUIThread(message)) + IPC_END_MESSAGE_MAP() + + return true; +} + +void 
GpuProcessHost::OnChannelConnected(int32 peer_pid) { + TRACE_EVENT0("gpu", "GpuProcessHost::OnChannelConnected"); + + while (!queued_messages_.empty()) { + Send(queued_messages_.front()); + queued_messages_.pop(); + } +} + +void GpuProcessHost::EstablishGpuChannel( + int client_id, + bool share_context, + const EstablishChannelCallback& callback) { + DCHECK(CalledOnValidThread()); + TRACE_EVENT0("gpu", "GpuProcessHost::EstablishGpuChannel"); + + // If GPU features are already blacklisted, no need to establish the channel. + if (!GpuDataManagerImpl::GetInstance()->GpuAccessAllowed(NULL)) { + callback.Run(IPC::ChannelHandle(), gpu::GPUInfo()); + return; + } + + if (Send(new GpuMsg_EstablishChannel(client_id, share_context))) { + channel_requests_.push(callback); + } else { + callback.Run(IPC::ChannelHandle(), gpu::GPUInfo()); + } + + if (!CommandLine::ForCurrentProcess()->HasSwitch( + switches::kDisableGpuShaderDiskCache)) { + CreateChannelCache(client_id); + } +} + +void GpuProcessHost::CreateViewCommandBuffer( + const gfx::GLSurfaceHandle& compositing_surface, + int surface_id, + int client_id, + const GPUCreateCommandBufferConfig& init_params, + const CreateCommandBufferCallback& callback) { + TRACE_EVENT0("gpu", "GpuProcessHost::CreateViewCommandBuffer"); + + DCHECK(CalledOnValidThread()); + + if (!compositing_surface.is_null() && + Send(new GpuMsg_CreateViewCommandBuffer( + compositing_surface, surface_id, client_id, init_params))) { + create_command_buffer_requests_.push(callback); + surface_refs_.insert(std::make_pair(surface_id, + GpuSurfaceTracker::GetInstance()->GetSurfaceRefForSurface(surface_id))); + } else { + callback.Run(MSG_ROUTING_NONE); + } +} + +void GpuProcessHost::CreateImage(gfx::PluginWindowHandle window, + int client_id, + int image_id, + const CreateImageCallback& callback) { + TRACE_EVENT0("gpu", "GpuProcessHost::CreateImage"); + + DCHECK(CalledOnValidThread()); + + if (Send(new GpuMsg_CreateImage(window, client_id, image_id))) { + 
create_image_requests_.push(callback); + } else { + callback.Run(gfx::Size()); + } +} + +void GpuProcessHost::DeleteImage(int client_id, + int image_id, + int sync_point) { + TRACE_EVENT0("gpu", "GpuProcessHost::DeleteImage"); + + DCHECK(CalledOnValidThread()); + + Send(new GpuMsg_DeleteImage(client_id, image_id, sync_point)); +} + +void GpuProcessHost::OnInitialized(bool result, const gpu::GPUInfo& gpu_info) { + UMA_HISTOGRAM_BOOLEAN("GPU.GPUProcessInitialized", result); + initialized_ = result; + +#if defined(OS_WIN) + if (kind_ == GpuProcessHost::GPU_PROCESS_KIND_SANDBOXED) + AcceleratedPresenter::SetAdapterLUID(gpu_info.adapter_luid); +#endif + + if (!initialized_) + GpuDataManagerImpl::GetInstance()->OnGpuProcessInitFailure(); +} + +void GpuProcessHost::OnChannelEstablished( + const IPC::ChannelHandle& channel_handle) { + TRACE_EVENT0("gpu", "GpuProcessHost::OnChannelEstablished"); + + if (channel_requests_.empty()) { + // This happens when GPU process is compromised. + RouteOnUIThread(GpuHostMsg_OnLogMessage( + logging::LOG_WARNING, + "WARNING", + "Received a ChannelEstablished message but no requests in queue.")); + return; + } + EstablishChannelCallback callback = channel_requests_.front(); + channel_requests_.pop(); + + // Currently if any of the GPU features are blacklisted, we don't establish a + // GPU channel. 
+ if (!channel_handle.name.empty() && + !GpuDataManagerImpl::GetInstance()->GpuAccessAllowed(NULL)) { + Send(new GpuMsg_CloseChannel(channel_handle)); + callback.Run(IPC::ChannelHandle(), gpu::GPUInfo()); + RouteOnUIThread(GpuHostMsg_OnLogMessage( + logging::LOG_WARNING, + "WARNING", + "Hardware acceleration is unavailable.")); + return; + } + + callback.Run(channel_handle, + GpuDataManagerImpl::GetInstance()->GetGPUInfo()); +} + +void GpuProcessHost::OnCommandBufferCreated(const int32 route_id) { + TRACE_EVENT0("gpu", "GpuProcessHost::OnCommandBufferCreated"); + + if (create_command_buffer_requests_.empty()) + return; + + CreateCommandBufferCallback callback = + create_command_buffer_requests_.front(); + create_command_buffer_requests_.pop(); + callback.Run(route_id); +} + +void GpuProcessHost::OnDestroyCommandBuffer(int32 surface_id) { + TRACE_EVENT0("gpu", "GpuProcessHost::OnDestroyCommandBuffer"); + SurfaceRefMap::iterator it = surface_refs_.find(surface_id); + if (it != surface_refs_.end()) { + surface_refs_.erase(it); + } +} + +void GpuProcessHost::OnImageCreated(const gfx::Size size) { + TRACE_EVENT0("gpu", "GpuProcessHost::OnImageCreated"); + + if (create_image_requests_.empty()) + return; + + CreateImageCallback callback = create_image_requests_.front(); + create_image_requests_.pop(); + callback.Run(size); +} + +void GpuProcessHost::OnDidCreateOffscreenContext(const GURL& url) { + urls_with_live_offscreen_contexts_.insert(url); +} + +void GpuProcessHost::OnDidLoseContext(bool offscreen, + gpu::error::ContextLostReason reason, + const GURL& url) { + // TODO(kbr): would be nice to see the "offscreen" flag too. + TRACE_EVENT2("gpu", "GpuProcessHost::OnDidLoseContext", + "reason", reason, + "url", + url.possibly_invalid_spec()); + + if (!offscreen || url.is_empty()) { + // Assume that the loss of the compositor's or accelerated canvas' + // context is a serious event and blame the loss on all live + // offscreen contexts. 
This more robustly handles situations where + // the GPU process may not actually detect the context loss in the + // offscreen context. + BlockLiveOffscreenContexts(); + return; + } + + GpuDataManagerImpl::DomainGuilt guilt; + switch (reason) { + case gpu::error::kGuilty: + guilt = GpuDataManagerImpl::DOMAIN_GUILT_KNOWN; + break; + case gpu::error::kUnknown: + guilt = GpuDataManagerImpl::DOMAIN_GUILT_UNKNOWN; + break; + case gpu::error::kInnocent: + return; + default: + NOTREACHED(); + return; + } + + GpuDataManagerImpl::GetInstance()->BlockDomainFrom3DAPIs(url, guilt); +} + +void GpuProcessHost::OnDidDestroyOffscreenContext(const GURL& url) { + urls_with_live_offscreen_contexts_.erase(url); +} + +void GpuProcessHost::OnGpuMemoryUmaStatsReceived( + const GPUMemoryUmaStats& stats) { + TRACE_EVENT0("gpu", "GpuProcessHost::OnGpuMemoryUmaStatsReceived"); + uma_memory_stats_received_ = true; + uma_memory_stats_ = stats; +} + +#if defined(OS_MACOSX) +void GpuProcessHost::OnAcceleratedSurfaceBuffersSwapped( + const GpuHostMsg_AcceleratedSurfaceBuffersSwapped_Params& params) { + TRACE_EVENT0("gpu", "GpuProcessHost::OnAcceleratedSurfaceBuffersSwapped"); + + gfx::GLSurfaceHandle surface_handle = + GpuSurfaceTracker::Get()->GetSurfaceHandle(params.surface_id); + // Compositor window is always gfx::kNullPluginWindow. + // TODO(jbates) http://crbug.com/105344 This will be removed when there are no + // plugin windows. 
+ if (surface_handle.handle != gfx::kNullPluginWindow || + surface_handle.transport_type == gfx::TEXTURE_TRANSPORT) { + RouteOnUIThread(GpuHostMsg_AcceleratedSurfaceBuffersSwapped(params)); + return; + } + + base::ScopedClosureRunner scoped_completion_runner( + base::Bind(&AcceleratedSurfaceBuffersSwappedCompletedForGPU, + host_id_, params.route_id, + true /* alive */)); + + int render_process_id = 0; + int render_widget_id = 0; + if (!GpuSurfaceTracker::Get()->GetRenderWidgetIDForSurface( + params.surface_id, &render_process_id, &render_widget_id)) { + return; + } + RenderWidgetHelper* helper = + RenderWidgetHelper::FromProcessHostID(render_process_id); + if (!helper) + return; + + // Pass the SwapBuffers on to the RenderWidgetHelper to wake up the UI thread + // if the browser is waiting for a new frame. Otherwise the RenderWidgetHelper + // will forward to the RenderWidgetHostView via RenderProcessHostImpl and + // RenderWidgetHostImpl. + scoped_completion_runner.Release(); + + ViewHostMsg_CompositorSurfaceBuffersSwapped_Params view_params; + view_params.surface_id = params.surface_id; + view_params.surface_handle = params.surface_handle; + view_params.route_id = params.route_id; + view_params.size = params.size; + view_params.scale_factor = params.scale_factor; + view_params.gpu_process_host_id = host_id_; + view_params.latency_info = params.latency_info; + helper->DidReceiveBackingStoreMsg(ViewHostMsg_CompositorSurfaceBuffersSwapped( + render_widget_id, + view_params)); +} +#endif // OS_MACOSX + +#if defined(OS_WIN) +void GpuProcessHost::OnAcceleratedSurfaceBuffersSwapped( + const GpuHostMsg_AcceleratedSurfaceBuffersSwapped_Params& params) { + TRACE_EVENT0("gpu", "GpuProcessHost::OnAcceleratedSurfaceBuffersSwapped"); + + base::ScopedClosureRunner scoped_completion_runner( + base::Bind(&AcceleratedSurfaceBuffersSwappedCompleted, + host_id_, params.route_id, params.surface_id, + true, base::TimeTicks(), base::TimeDelta(), ui::LatencyInfo())); + + 
 gfx::GLSurfaceHandle handle = + GpuSurfaceTracker::Get()->GetSurfaceHandle(params.surface_id); + + if (handle.is_null()) + return; + + if (handle.transport_type == gfx::TEXTURE_TRANSPORT) { + TRACE_EVENT1("gpu", "SurfaceIDNotFound_RoutingToUI", + "surface_id", params.surface_id); + // This is a content area swap, send it on to the UI thread. + scoped_completion_runner.Release(); + RouteOnUIThread(GpuHostMsg_AcceleratedSurfaceBuffersSwapped(params)); + return; + } + + // Otherwise it's the UI swap. + + scoped_refptr<AcceleratedPresenter> presenter( + AcceleratedPresenter::GetForWindow(handle.handle)); + if (!presenter) { + TRACE_EVENT1("gpu", + "EarlyOut_NativeWindowNotFound", + "handle", + handle.handle); + scoped_completion_runner.Release(); + AcceleratedSurfaceBuffersSwappedCompleted(host_id_, + params.route_id, + params.surface_id, + true, + base::TimeTicks(), + base::TimeDelta(), + params.latency_info); + return; + } + + scoped_completion_runner.Release(); + presenter->AsyncPresentAndAcknowledge( + params.size, + params.surface_handle, + params.latency_info, + base::Bind(&AcceleratedSurfaceBuffersSwappedCompleted, + host_id_, + params.route_id, + params.surface_id)); + + FrameSubscriberMap::iterator it = frame_subscribers_.find(params.surface_id); + if (it != frame_subscribers_.end() && it->second) { + const base::Time present_time = base::Time::Now(); + scoped_refptr<media::VideoFrame> target_frame; + RenderWidgetHostViewFrameSubscriber::DeliverFrameCallback copy_callback; + if (it->second->ShouldCaptureFrame(present_time, + &target_frame, &copy_callback)) { + // It is a potential improvement to do the copy in present, but we use a + // simpler approach for now. 
+ presenter->AsyncCopyToVideoFrame( + gfx::Rect(params.size), target_frame, + base::Bind(copy_callback, present_time)); + } + } +} + +void GpuProcessHost::OnAcceleratedSurfacePostSubBuffer( + const GpuHostMsg_AcceleratedSurfacePostSubBuffer_Params& params) { + TRACE_EVENT0("gpu", "GpuProcessHost::OnAcceleratedSurfacePostSubBuffer"); + + NOTIMPLEMENTED(); +} + +void GpuProcessHost::OnAcceleratedSurfaceSuspend(int32 surface_id) { + TRACE_EVENT0("gpu", "GpuProcessHost::OnAcceleratedSurfaceSuspend"); + + gfx::PluginWindowHandle handle = + GpuSurfaceTracker::Get()->GetSurfaceHandle(surface_id).handle; + + if (!handle) { +#if defined(USE_AURA) + RouteOnUIThread(GpuHostMsg_AcceleratedSurfaceSuspend(surface_id)); +#endif + return; + } + + scoped_refptr<AcceleratedPresenter> presenter( + AcceleratedPresenter::GetForWindow(handle)); + if (!presenter) + return; + + presenter->Suspend(); +} + +void GpuProcessHost::OnAcceleratedSurfaceRelease( + const GpuHostMsg_AcceleratedSurfaceRelease_Params& params) { + TRACE_EVENT0("gpu", "GpuProcessHost::OnAcceleratedSurfaceRelease"); + + gfx::PluginWindowHandle handle = + GpuSurfaceTracker::Get()->GetSurfaceHandle(params.surface_id).handle; + if (!handle) { +#if defined(USE_AURA) + RouteOnUIThread(GpuHostMsg_AcceleratedSurfaceRelease(params)); + return; +#endif + } + + scoped_refptr<AcceleratedPresenter> presenter( + AcceleratedPresenter::GetForWindow(handle)); + if (!presenter) + return; + + presenter->ReleaseSurface(); +} + +#endif // OS_WIN + +void GpuProcessHost::OnProcessLaunched() { + UMA_HISTOGRAM_TIMES("GPU.GPUProcessLaunchTime", + base::TimeTicks::Now() - init_start_time_); +} + +void GpuProcessHost::OnProcessCrashed(int exit_code) { + SendOutstandingReplies(); + GpuDataManagerImpl::GetInstance()->ProcessCrashed( + process_->GetTerminationStatus(NULL)); +} + +GpuProcessHost::GpuProcessKind GpuProcessHost::kind() { + return kind_; +} + +void GpuProcessHost::ForceShutdown() { + // This is only called on the IO thread so no race 
against the constructor + // for another GpuProcessHost. + if (g_gpu_process_hosts[kind_] == this) + g_gpu_process_hosts[kind_] = NULL; + + process_->ForceShutdown(); +} + +void GpuProcessHost::BeginFrameSubscription( + int surface_id, + base::WeakPtr<RenderWidgetHostViewFrameSubscriber> subscriber) { + frame_subscribers_[surface_id] = subscriber; +} + +void GpuProcessHost::EndFrameSubscription(int surface_id) { + frame_subscribers_.erase(surface_id); +} + +bool GpuProcessHost::LaunchGpuProcess(const std::string& channel_id) { + if (!(gpu_enabled_ && + GpuDataManagerImpl::GetInstance()->ShouldUseSwiftShader()) && + !hardware_gpu_enabled_) { + SendOutstandingReplies(); + return false; + } + + const CommandLine& browser_command_line = *CommandLine::ForCurrentProcess(); + + CommandLine::StringType gpu_launcher = + browser_command_line.GetSwitchValueNative(switches::kGpuLauncher); + +#if defined(OS_LINUX) + int child_flags = gpu_launcher.empty() ? ChildProcessHost::CHILD_ALLOW_SELF : + ChildProcessHost::CHILD_NORMAL; +#else + int child_flags = ChildProcessHost::CHILD_NORMAL; +#endif + + base::FilePath exe_path = ChildProcessHost::GetChildPath(child_flags); + if (exe_path.empty()) + return false; + + CommandLine* cmd_line = new CommandLine(exe_path); + cmd_line->AppendSwitchASCII(switches::kProcessType, switches::kGpuProcess); + cmd_line->AppendSwitchASCII(switches::kProcessChannelID, channel_id); + + if (kind_ == GPU_PROCESS_KIND_UNSANDBOXED) + cmd_line->AppendSwitch(switches::kDisableGpuSandbox); + + // Propagate relevant command line switches. 
+ static const char* const kSwitchNames[] = { + switches::kDisableAcceleratedVideoDecode, + switches::kDisableBreakpad, + switches::kDisableGLMultisampling, + switches::kDisableGpuSandbox, + switches::kDisableGpuWatchdog, + switches::kDisableImageTransportSurface, + switches::kDisableLogging, + switches::kDisableSeccompFilterSandbox, + switches::kEnableLogging, + switches::kEnableShareGroupAsyncTextureUpload, + switches::kEnableVirtualGLContexts, + switches::kGpuStartupDialog, + switches::kGpuSandboxAllowSysVShm, + switches::kLoggingLevel, + switches::kNoSandbox, + switches::kReduceGpuSandbox, + switches::kTestGLLib, + switches::kTraceStartup, + switches::kV, + switches::kVModule, +#if defined(OS_MACOSX) + switches::kEnableSandboxLogging, +#endif +#if defined(USE_AURA) + switches::kUIPrioritizeInGpuProcess, +#endif + }; + cmd_line->CopySwitchesFrom(browser_command_line, kSwitchNames, + arraysize(kSwitchNames)); + cmd_line->CopySwitchesFrom( + browser_command_line, switches::kGpuSwitches, switches::kNumGpuSwitches); + cmd_line->CopySwitchesFrom( + browser_command_line, switches::kGLSwitchesCopiedFromGpuProcessHost, + switches::kGLSwitchesCopiedFromGpuProcessHostNumSwitches); + + GetContentClient()->browser()->AppendExtraCommandLineSwitches( + cmd_line, process_->GetData().id); + + GpuDataManagerImpl::GetInstance()->AppendGpuCommandLine(cmd_line); + + if (cmd_line->HasSwitch(switches::kUseGL)) { + swiftshader_rendering_ = + (cmd_line->GetSwitchValueASCII(switches::kUseGL) == "swiftshader"); + } + + UMA_HISTOGRAM_BOOLEAN("GPU.GPU.GPUProcessSoftwareRendering", + swiftshader_rendering_); + + // If specified, prepend a launcher program to the command line. 
+ if (!gpu_launcher.empty()) + cmd_line->PrependWrapper(gpu_launcher); + + process_->Launch( +#if defined(OS_WIN) + new GpuSandboxedProcessLauncherDelegate(cmd_line), +#elif defined(OS_POSIX) + false, + base::EnvironmentVector(), +#endif + cmd_line); + process_launched_ = true; + + UMA_HISTOGRAM_ENUMERATION("GPU.GPUProcessLifetimeEvents", + LAUNCHED, GPU_PROCESS_LIFETIME_EVENT_MAX); + return true; +} + +void GpuProcessHost::SendOutstandingReplies() { + valid_ = false; + // First send empty channel handles for all EstablishChannel requests. + while (!channel_requests_.empty()) { + EstablishChannelCallback callback = channel_requests_.front(); + channel_requests_.pop(); + callback.Run(IPC::ChannelHandle(), gpu::GPUInfo()); + } + + while (!create_command_buffer_requests_.empty()) { + CreateCommandBufferCallback callback = + create_command_buffer_requests_.front(); + create_command_buffer_requests_.pop(); + callback.Run(MSG_ROUTING_NONE); + } +} + +void GpuProcessHost::BlockLiveOffscreenContexts() { + for (std::multiset<GURL>::iterator iter = + urls_with_live_offscreen_contexts_.begin(); + iter != urls_with_live_offscreen_contexts_.end(); ++iter) { + GpuDataManagerImpl::GetInstance()->BlockDomainFrom3DAPIs( + *iter, GpuDataManagerImpl::DOMAIN_GUILT_UNKNOWN); + } +} + +std::string GpuProcessHost::GetShaderPrefixKey() { + if (shader_prefix_key_.empty()) { + gpu::GPUInfo info = GpuDataManagerImpl::GetInstance()->GetGPUInfo(); + + std::string in_str = GetContentClient()->GetProduct() + "-" + + info.gl_vendor + "-" + info.gl_renderer + "-" + + info.driver_version + "-" + info.driver_vendor; + + base::Base64Encode(base::SHA1HashString(in_str), &shader_prefix_key_); + } + + return shader_prefix_key_; +} + +void GpuProcessHost::LoadedShader(const std::string& key, + const std::string& data) { + std::string prefix = GetShaderPrefixKey(); + if (!key.compare(0, prefix.length(), prefix)) + Send(new GpuMsg_LoadedShader(data)); +} + +void GpuProcessHost::CreateChannelCache(int32 
client_id) { + TRACE_EVENT0("gpu", "GpuProcessHost::CreateChannelCache"); + + scoped_refptr<ShaderDiskCache> cache = + ShaderCacheFactory::GetInstance()->Get(client_id); + if (!cache.get()) + return; + + cache->set_host_id(host_id_); + + client_id_to_shader_cache_[client_id] = cache; +} + +void GpuProcessHost::OnDestroyChannel(int32 client_id) { + TRACE_EVENT0("gpu", "GpuProcessHost::OnDestroyChannel"); + client_id_to_shader_cache_.erase(client_id); +} + +void GpuProcessHost::OnCacheShader(int32 client_id, + const std::string& key, + const std::string& shader) { + TRACE_EVENT0("gpu", "GpuProcessHost::OnCacheShader"); + ClientIdToShaderCacheMap::iterator iter = + client_id_to_shader_cache_.find(client_id); + // If the cache doesn't exist then this is an off the record profile. + if (iter == client_id_to_shader_cache_.end()) + return; + iter->second->Cache(GetShaderPrefixKey() + ":" + key, shader); +} + +} // namespace content diff --git a/chromium/content/browser/gpu/gpu_process_host.h b/chromium/content/browser/gpu/gpu_process_host.h new file mode 100644 index 00000000000..8908a1035db --- /dev/null +++ b/chromium/content/browser/gpu/gpu_process_host.h @@ -0,0 +1,275 @@ +// Copyright (c) 2012 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#ifndef CONTENT_BROWSER_GPU_GPU_PROCESS_HOST_H_ +#define CONTENT_BROWSER_GPU_GPU_PROCESS_HOST_H_ + +#include <map> +#include <queue> +#include <set> +#include <string> + +#include "base/callback.h" +#include "base/containers/hash_tables.h" +#include "base/memory/weak_ptr.h" +#include "base/threading/non_thread_safe.h" +#include "base/time/time.h" +#include "content/browser/gpu/gpu_surface_tracker.h" +#include "content/common/content_export.h" +#include "content/common/gpu/gpu_memory_uma_stats.h" +#include "content/common/gpu/gpu_process_launch_causes.h" +#include "content/public/browser/browser_child_process_host_delegate.h" +#include "content/public/browser/gpu_data_manager.h" +#include "gpu/command_buffer/common/constants.h" +#include "gpu/config/gpu_info.h" +#include "ipc/ipc_channel_proxy.h" +#include "ipc/ipc_sender.h" +#include "ui/gfx/native_widget_types.h" +#include "ui/gfx/size.h" +#include "url/gurl.h" + +struct GPUCreateCommandBufferConfig; +struct GpuHostMsg_AcceleratedSurfaceBuffersSwapped_Params; +struct GpuHostMsg_AcceleratedSurfacePostSubBuffer_Params; +struct GpuHostMsg_AcceleratedSurfaceRelease_Params; + +namespace IPC { +struct ChannelHandle; +} + +namespace content { +class BrowserChildProcessHostImpl; +class GpuMainThread; +class RenderWidgetHostViewFrameSubscriber; +class ShaderDiskCache; + +class GpuProcessHost : public BrowserChildProcessHostDelegate, + public IPC::Sender, + public base::NonThreadSafe { + public: + enum GpuProcessKind { + GPU_PROCESS_KIND_UNSANDBOXED, + GPU_PROCESS_KIND_SANDBOXED, + GPU_PROCESS_KIND_COUNT + }; + + typedef base::Callback<void(const IPC::ChannelHandle&, const gpu::GPUInfo&)> + EstablishChannelCallback; + + typedef base::Callback<void(int32)> CreateCommandBufferCallback; + + typedef base::Callback<void(const gfx::Size)> CreateImageCallback; + + static bool gpu_enabled() { return gpu_enabled_; } + + // Creates a new GpuProcessHost or gets an existing one, resulting in the + // launching of a GPU process if 
required. Returns null on failure. It + // is not safe to store the pointer once control has returned to the message + // loop as it can be destroyed. Instead store the associated GPU host ID. + // This could return NULL if GPU access is not allowed (blacklisted). + CONTENT_EXPORT static GpuProcessHost* Get(GpuProcessKind kind, + CauseForGpuLaunch cause); + + // Retrieves a list of process handles for all gpu processes. + static void GetProcessHandles( + const GpuDataManager::GetGpuProcessHandlesCallback& callback); + + // Helper function to send the given message to the GPU process on the IO + // thread. Calls Get and if a host is returned, sends it. Can be called from + // any thread. Deletes the message if it cannot be sent. + CONTENT_EXPORT static void SendOnIO(GpuProcessKind kind, + CauseForGpuLaunch cause, + IPC::Message* message); + + // Get the GPU process host for the GPU process with the given ID. Returns + // null if the process no longer exists. + static GpuProcessHost* FromID(int host_id); + int host_id() const { return host_id_; } + + // IPC::Sender implementation. + virtual bool Send(IPC::Message* msg) OVERRIDE; + + // Adds a message filter to the GpuProcessHost's channel. + void AddFilter(IPC::ChannelProxy::MessageFilter* filter); + + // Tells the GPU process to create a new channel for communication with a + // client. Once the GPU process responds asynchronously with the IPC handle + // and GPUInfo, we call the callback. + void EstablishGpuChannel(int client_id, + bool share_context, + const EstablishChannelCallback& callback); + + // Tells the GPU process to create a new command buffer that draws into the + // given surface. + void CreateViewCommandBuffer( + const gfx::GLSurfaceHandle& compositing_surface, + int surface_id, + int client_id, + const GPUCreateCommandBufferConfig& init_params, + const CreateCommandBufferCallback& callback); + + // Tells the GPU process to create a new image using the given window. 
+ void CreateImage( + gfx::PluginWindowHandle window, + int client_id, + int image_id, + const CreateImageCallback& callback); + + // Tells the GPU process to delete image. + void DeleteImage(int client_id, int image_id, int sync_point); + + // What kind of GPU process, e.g. sandboxed or unsandboxed. + GpuProcessKind kind(); + + void ForceShutdown(); + + void BeginFrameSubscription( + int surface_id, + base::WeakPtr<RenderWidgetHostViewFrameSubscriber> subscriber); + void EndFrameSubscription(int surface_id); + void LoadedShader(const std::string& key, const std::string& data); + + private: + static bool ValidateHost(GpuProcessHost* host); + + GpuProcessHost(int host_id, GpuProcessKind kind); + virtual ~GpuProcessHost(); + + bool Init(); + + // Post an IPC message to the UI shim's message handler on the UI thread. + void RouteOnUIThread(const IPC::Message& message); + + // BrowserChildProcessHostDelegate implementation. + virtual bool OnMessageReceived(const IPC::Message& message) OVERRIDE; + virtual void OnChannelConnected(int32 peer_pid) OVERRIDE; + virtual void OnProcessLaunched() OVERRIDE; + virtual void OnProcessCrashed(int exit_code) OVERRIDE; + + // Message handlers. + void OnInitialized(bool result, const gpu::GPUInfo& gpu_info); + void OnChannelEstablished(const IPC::ChannelHandle& channel_handle); + void OnCommandBufferCreated(const int32 route_id); + void OnDestroyCommandBuffer(int32 surface_id); + void OnImageCreated(const gfx::Size size); + void OnDidCreateOffscreenContext(const GURL& url); + void OnDidLoseContext(bool offscreen, + gpu::error::ContextLostReason reason, + const GURL& url); + void OnDidDestroyOffscreenContext(const GURL& url); + void OnGpuMemoryUmaStatsReceived(const GPUMemoryUmaStats& stats); +#if defined(OS_MACOSX) + void OnAcceleratedSurfaceBuffersSwapped( + const GpuHostMsg_AcceleratedSurfaceBuffersSwapped_Params& params); +#endif + // Note: Different implementations depending on USE_AURA. 
+#if defined(OS_WIN) + void OnAcceleratedSurfaceBuffersSwapped( + const GpuHostMsg_AcceleratedSurfaceBuffersSwapped_Params& params); + void OnAcceleratedSurfacePostSubBuffer( + const GpuHostMsg_AcceleratedSurfacePostSubBuffer_Params& params); + void OnAcceleratedSurfaceSuspend(int32 surface_id); + void OnAcceleratedSurfaceRelease( + const GpuHostMsg_AcceleratedSurfaceRelease_Params& params); +#endif + + void CreateChannelCache(int32 client_id); + void OnDestroyChannel(int32 client_id); + void OnCacheShader(int32 client_id, const std::string& key, + const std::string& shader); + + bool LaunchGpuProcess(const std::string& channel_id); + + void SendOutstandingReplies(); + + void BlockLiveOffscreenContexts(); + + std::string GetShaderPrefixKey(); + + // The serial number of the GpuProcessHost / GpuProcessHostUIShim pair. + int host_id_; + + // These are the channel requests that we have already sent to + // the GPU process, but haven't heard back about yet. + std::queue<EstablishChannelCallback> channel_requests_; + + // The pending create command buffer requests we need to reply to. + std::queue<CreateCommandBufferCallback> create_command_buffer_requests_; + + // The pending create image requests we need to reply to. + std::queue<CreateImageCallback> create_image_requests_; + + + // Queued messages to send when the process launches. + std::queue<IPC::Message*> queued_messages_; + + // Whether the GPU process is valid, set to false after Send() failed. + bool valid_; + + // Whether we are running a GPU thread inside the browser process instead + // of a separate GPU process. + bool in_process_; + + bool swiftshader_rendering_; + GpuProcessKind kind_; + +#if !defined(CHROME_MULTIPLE_DLL) + scoped_ptr<GpuMainThread> in_process_gpu_thread_; +#endif + + // Whether we actually launched a GPU process. + bool process_launched_; + + // Whether the GPU process successfully initialized. + bool initialized_; + + // Time Init started. 
Used to log total GPU process startup time to UMA. + base::TimeTicks init_start_time_; + + // Master switch for enabling/disabling GPU acceleration for the current + // browser session. It does not change the acceleration settings for + // existing tabs, just the future ones. + static bool gpu_enabled_; + + static bool hardware_gpu_enabled_; + + scoped_ptr<BrowserChildProcessHostImpl> process_; + + // Track the URLs of the pages which have live offscreen contexts, + // assumed to be associated with untrusted content such as WebGL. + // For best robustness, when any context lost notification is + // received, assume all of these URLs are guilty, and block + // automatic execution of 3D content from those domains. + std::multiset<GURL> urls_with_live_offscreen_contexts_; + + // Statics kept around to send to UMA histograms on GPU process lost. + bool uma_memory_stats_received_; + GPUMemoryUmaStats uma_memory_stats_; + + // This map of frame subscribers are listening for frame presentation events. + // The key is the surface id and value is the subscriber. + typedef base::hash_map<int, + base::WeakPtr<RenderWidgetHostViewFrameSubscriber> > + FrameSubscriberMap; + FrameSubscriberMap frame_subscribers_; + + typedef std::map<int32, scoped_refptr<ShaderDiskCache> > + ClientIdToShaderCacheMap; + ClientIdToShaderCacheMap client_id_to_shader_cache_; + + std::string shader_prefix_key_; + + // Keep an extra reference to the SurfaceRef stored in the GpuSurfaceTracker + // in this map so that we don't destroy it whilst the GPU process is + // drawing to it. 
+ typedef std::multimap<int, scoped_refptr<GpuSurfaceTracker::SurfaceRef> > + SurfaceRefMap; + SurfaceRefMap surface_refs_; + + DISALLOW_COPY_AND_ASSIGN(GpuProcessHost); +}; + +} // namespace content + +#endif // CONTENT_BROWSER_GPU_GPU_PROCESS_HOST_H_ diff --git a/chromium/content/browser/gpu/gpu_process_host_ui_shim.cc b/chromium/content/browser/gpu/gpu_process_host_ui_shim.cc new file mode 100644 index 00000000000..50810427388 --- /dev/null +++ b/chromium/content/browser/gpu/gpu_process_host_ui_shim.cc @@ -0,0 +1,401 @@ +// Copyright (c) 2012 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "content/browser/gpu/gpu_process_host_ui_shim.h" + +#include <algorithm> + +#include "base/bind.h" +#include "base/command_line.h" +#include "base/debug/trace_event.h" +#include "base/id_map.h" +#include "base/lazy_instance.h" +#include "base/strings/string_number_conversions.h" +#include "content/browser/gpu/gpu_data_manager_impl.h" +#include "content/browser/gpu/gpu_process_host.h" +#include "content/browser/gpu/gpu_surface_tracker.h" +#include "content/browser/renderer_host/render_process_host_impl.h" +#include "content/browser/renderer_host/render_view_host_impl.h" +#include "content/common/gpu/gpu_messages.h" +#include "content/port/browser/render_widget_host_view_port.h" +#include "content/public/browser/browser_thread.h" +#include "ui/gl/gl_switches.h" + +#if defined(TOOLKIT_GTK) +// These two #includes need to come after gpu_messages.h. +#include "ui/base/x/x11_util.h" +#include "ui/gfx/size.h" +#include <gdk/gdk.h> // NOLINT +#include <gdk/gdkx.h> // NOLINT +#endif + +// From gl2/gl2ext.h. +#ifndef GL_MAILBOX_SIZE_CHROMIUM +#define GL_MAILBOX_SIZE_CHROMIUM 64 +#endif + +namespace content { + +namespace { + +// One of the linux specific headers defines this as a macro. 
+#ifdef DestroyAll
+#undef DestroyAll
+#endif
+
+// UI-thread registry of live shims, keyed by GpuProcessHost id.
+base::LazyInstance<IDMap<GpuProcessHostUIShim> > g_hosts_by_id =
+    LAZY_INSTANCE_INITIALIZER;
+
+// Forwards |msg| to the GpuProcessHost identified by |host_id| on the IO
+// thread, or deletes the message if that host is already gone.
+void SendOnIOThreadTask(int host_id, IPC::Message* msg) {
+  GpuProcessHost* host = GpuProcessHost::FromID(host_id);
+  if (host)
+    host->Send(msg);
+  else
+    delete msg;
+}
+
+// Sends its message on the IO thread when it goes out of scope unless
+// Cancel() was called first. The handlers below use it to guarantee the GPU
+// process receives an ACK even on early-return error paths.
+class ScopedSendOnIOThread {
+ public:
+  ScopedSendOnIOThread(int host_id, IPC::Message* msg)
+      : host_id_(host_id),
+        msg_(msg),
+        cancelled_(false) {
+  }
+
+  ~ScopedSendOnIOThread() {
+    if (!cancelled_) {
+      BrowserThread::PostTask(BrowserThread::IO,
+                              FROM_HERE,
+                              base::Bind(&SendOnIOThreadTask,
+                                         host_id_,
+                                         msg_.release()));
+    }
+  }
+
+  void Cancel() { cancelled_ = true; }
+
+ private:
+  int host_id_;
+  scoped_ptr<IPC::Message> msg_;
+  bool cancelled_;
+};
+
+// Maps |surface_id| back to the RenderWidgetHostView that owns the surface
+// via the GpuSurfaceTracker. Returns NULL if the surface is not associated
+// with a render widget, or the widget no longer exists.
+RenderWidgetHostViewPort* GetRenderWidgetHostViewFromSurfaceID(
+    int surface_id) {
+  int render_process_id = 0;
+  int render_widget_id = 0;
+  if (!GpuSurfaceTracker::Get()->GetRenderWidgetIDForSurface(
+      surface_id, &render_process_id, &render_widget_id))
+    return NULL;
+
+  RenderWidgetHost* host =
+      RenderWidgetHost::FromID(render_process_id, render_widget_id);
+  return host ? RenderWidgetHostViewPort::FromRWHV(host->GetView()) : NULL;
+}
+
+}  // namespace
+
+void RouteToGpuProcessHostUIShimTask(int host_id, const IPC::Message& msg) {
+  GpuProcessHostUIShim* ui_shim = GpuProcessHostUIShim::FromID(host_id);
+  if (ui_shim)
+    ui_shim->OnMessageReceived(msg);
+}
+
+GpuProcessHostUIShim::GpuProcessHostUIShim(int host_id)
+    : host_id_(host_id) {
+  g_hosts_by_id.Pointer()->AddWithID(this, host_id_);
+}
+
+// static
+GpuProcessHostUIShim* GpuProcessHostUIShim::Create(int host_id) {
+  DCHECK(!FromID(host_id));
+  return new GpuProcessHostUIShim(host_id);
+}
+
+// static
+void GpuProcessHostUIShim::Destroy(int host_id, const std::string& message) {
+  DCHECK(BrowserThread::CurrentlyOn(BrowserThread::UI));
+
+  // Record why the shim went away before deleting it.
+  GpuDataManagerImpl::GetInstance()->AddLogMessage(
+      logging::LOG_ERROR, "GpuProcessHostUIShim",
+      message);
+
+  delete FromID(host_id);
+}
+
+// static
+void GpuProcessHostUIShim::DestroyAll() {
+  DCHECK(BrowserThread::CurrentlyOn(BrowserThread::UI));
+  // Each delete removes the entry from g_hosts_by_id (see the destructor),
+  // so keep restarting iteration until the map is empty.
+  while (!g_hosts_by_id.Pointer()->IsEmpty()) {
+    IDMap<GpuProcessHostUIShim>::iterator it(g_hosts_by_id.Pointer());
+    delete it.GetCurrentValue();
+  }
+}
+
+// static
+GpuProcessHostUIShim* GpuProcessHostUIShim::FromID(int host_id) {
+  DCHECK(BrowserThread::CurrentlyOn(BrowserThread::UI));
+  return g_hosts_by_id.Pointer()->Lookup(host_id);
+}
+
+// static
+GpuProcessHostUIShim* GpuProcessHostUIShim::GetOneInstance() {
+  DCHECK(BrowserThread::CurrentlyOn(BrowserThread::UI));
+  if (g_hosts_by_id.Pointer()->IsEmpty())
+    return NULL;
+  IDMap<GpuProcessHostUIShim>::iterator it(g_hosts_by_id.Pointer());
+  return it.GetCurrentValue();
+}
+
+bool GpuProcessHostUIShim::Send(IPC::Message* msg) {
+  DCHECK(CalledOnValidThread());
+  // Messages to the GPU process must be sent from the IO thread.
+  return BrowserThread::PostTask(BrowserThread::IO,
+                                 FROM_HERE,
+                                 base::Bind(&SendOnIOThreadTask,
+                                            host_id_,
+                                            msg));
+}
+
+bool GpuProcessHostUIShim::OnMessageReceived(const IPC::Message& message) {
+  DCHECK(CalledOnValidThread());
+
+  if (message.routing_id() != MSG_ROUTING_CONTROL)
+    return false;
+
+  return OnControlMessageReceived(message);
+}
+
+void GpuProcessHostUIShim::SimulateRemoveAllContext() {
+  Send(new GpuMsg_Clean());
+}
+
+void GpuProcessHostUIShim::SimulateCrash() {
+  Send(new GpuMsg_Crash());
+}
+
+void GpuProcessHostUIShim::SimulateHang() {
+  Send(new GpuMsg_Hang());
+}
+
+GpuProcessHostUIShim::~GpuProcessHostUIShim() {
+  DCHECK(CalledOnValidThread());
+  g_hosts_by_id.Pointer()->Remove(host_id_);
+}
+
+bool GpuProcessHostUIShim::OnControlMessageReceived(
+    const IPC::Message& message) {
+  DCHECK(CalledOnValidThread());
+
+  IPC_BEGIN_MESSAGE_MAP(GpuProcessHostUIShim, message)
+    IPC_MESSAGE_HANDLER(GpuHostMsg_OnLogMessage,
+                        OnLogMessage)
+
+    IPC_MESSAGE_HANDLER(GpuHostMsg_AcceleratedSurfaceBuffersSwapped,
+                        OnAcceleratedSurfaceBuffersSwapped)
+    IPC_MESSAGE_HANDLER(GpuHostMsg_AcceleratedSurfacePostSubBuffer,
+                        OnAcceleratedSurfacePostSubBuffer)
+    IPC_MESSAGE_HANDLER(GpuHostMsg_AcceleratedSurfaceSuspend,
+                        OnAcceleratedSurfaceSuspend)
+    IPC_MESSAGE_HANDLER(GpuHostMsg_GraphicsInfoCollected,
+                        OnGraphicsInfoCollected)
+    IPC_MESSAGE_HANDLER(GpuHostMsg_AcceleratedSurfaceRelease,
+                        OnAcceleratedSurfaceRelease)
+    IPC_MESSAGE_HANDLER(GpuHostMsg_VideoMemoryUsageStats,
+                        OnVideoMemoryUsageStatsReceived);
+    IPC_MESSAGE_HANDLER(GpuHostMsg_UpdateVSyncParameters,
+                        OnUpdateVSyncParameters)
+    IPC_MESSAGE_HANDLER(GpuHostMsg_FrameDrawn, OnFrameDrawn)
+
+#if defined(TOOLKIT_GTK) || defined(OS_WIN)
+    IPC_MESSAGE_HANDLER(GpuHostMsg_ResizeView, OnResizeView)
+#endif
+
+    IPC_MESSAGE_UNHANDLED_ERROR()
+  IPC_END_MESSAGE_MAP()
+
+  return true;
+}
+
+void GpuProcessHostUIShim::OnUpdateVSyncParameters(int surface_id,
+                                                   base::TimeTicks timebase,
+                                                   base::TimeDelta interval) {
+
+  int render_process_id = 0;
+  int render_widget_id = 0;
+  if (!GpuSurfaceTracker::Get()->GetRenderWidgetIDForSurface(
+      surface_id, &render_process_id, &render_widget_id)) {
+    return;
+  }
+  RenderWidgetHost* rwh =
+      RenderWidgetHost::FromID(render_process_id, render_widget_id);
+  if (!rwh)
+    return;
+  RenderWidgetHostImpl::From(rwh)->UpdateVSyncParameters(timebase, interval);
+}
+
+void GpuProcessHostUIShim::OnLogMessage(
+    int level,
+    const std::string& header,
+    const std::string& message) {
+  GpuDataManagerImpl::GetInstance()->AddLogMessage(
+      level, header, message);
+}
+
+void GpuProcessHostUIShim::OnGraphicsInfoCollected(
+    const gpu::GPUInfo& gpu_info) {
+  // OnGraphicsInfoCollected is sent back after the GPU process successfully
+  // initializes GL.
+  TRACE_EVENT0("test_gpu", "OnGraphicsInfoCollected");
+
+  GpuDataManagerImpl::GetInstance()->UpdateGpuInfo(gpu_info);
+}
+
+#if defined(TOOLKIT_GTK) || defined(OS_WIN)
+
+void GpuProcessHostUIShim::OnResizeView(int32 surface_id,
+                                        int32 route_id,
+                                        gfx::Size size) {
+  // Always respond even if the window no longer exists. The GPU process cannot
+  // make progress on the resizing command buffer until it receives the
+  // response.
+  ScopedSendOnIOThread delayed_send(
+      host_id_,
+      new AcceleratedSurfaceMsg_ResizeViewACK(route_id));
+
+  RenderWidgetHostViewPort* view =
+      GetRenderWidgetHostViewFromSurfaceID(surface_id);
+  if (!view)
+    return;
+
+  gfx::GLSurfaceHandle surface = view->GetCompositingSurface();
+
+  // Resize the window synchronously. The GPU process must not issue GL
+  // calls on the command buffer until the window is the size it expects it
+  // to be.
+#if defined(TOOLKIT_GTK)
+  GdkWindow* window = reinterpret_cast<GdkWindow*>(
+      gdk_xid_table_lookup(surface.handle));
+  if (window) {
+    Display* display = GDK_WINDOW_XDISPLAY(window);
+    gdk_window_resize(window, size.width(), size.height());
+    // Block until the X server has actually applied the resize.
+    XSync(display, False);
+  }
+#elif defined(OS_WIN)
+  // Ensure window does not have zero area because D3D cannot create a zero
+  // area swap chain.
+  SetWindowPos(surface.handle,
+               NULL,
+               0, 0,
+               std::max(1, size.width()),
+               std::max(1, size.height()),
+               SWP_NOSENDCHANGING | SWP_NOCOPYBITS | SWP_NOZORDER |
+                   SWP_NOACTIVATE | SWP_DEFERERASE | SWP_NOMOVE);
+#endif
+}
+
+#endif
+
+// Reads the --gpu-swap-delay switch (milliseconds); 0 when absent.
+static base::TimeDelta GetSwapDelay() {
+  CommandLine* cmd_line = CommandLine::ForCurrentProcess();
+  int delay = 0;
+  if (cmd_line->HasSwitch(switches::kGpuSwapDelay)) {
+    base::StringToInt(cmd_line->GetSwitchValueNative(
+        switches::kGpuSwapDelay).c_str(), &delay);
+  }
+  return base::TimeDelta::FromMilliseconds(delay);
+}
+
+void GpuProcessHostUIShim::OnAcceleratedSurfaceBuffersSwapped(
+    const GpuHostMsg_AcceleratedSurfaceBuffersSwapped_Params& params) {
+  TRACE_EVENT0("renderer",
+      "GpuProcessHostUIShim::OnAcceleratedSurfaceBuffersSwapped");
+  AcceleratedSurfaceMsg_BufferPresented_Params ack_params;
+  ack_params.mailbox_name = params.mailbox_name;
+  ack_params.sync_point = 0;
+  ScopedSendOnIOThread delayed_send(
+      host_id_,
+      new AcceleratedSurfaceMsg_BufferPresented(params.route_id,
+                                                ack_params));
+
+  // A non-empty mailbox name must be exactly GL_MAILBOX_SIZE_CHROMIUM bytes;
+  // anything else is malformed and only gets the automatic ACK above.
+  if (!params.mailbox_name.empty() &&
+      params.mailbox_name.length() != GL_MAILBOX_SIZE_CHROMIUM)
+    return;
+
+  RenderWidgetHostViewPort* view = GetRenderWidgetHostViewFromSurfaceID(
+      params.surface_id);
+  if (!view)
+    return;
+
+  delayed_send.Cancel();
+
+  static const base::TimeDelta swap_delay = GetSwapDelay();
+  if (swap_delay.ToInternalValue())
+    base::PlatformThread::Sleep(swap_delay);
+
+  // View must send ACK message after next composite.
+  view->AcceleratedSurfaceBuffersSwapped(params, host_id_);
+  view->DidReceiveRendererFrame();
+}
+
+void GpuProcessHostUIShim::OnFrameDrawn(const ui::LatencyInfo& latency_info) {
+  RenderWidgetHostImpl::CompositorFrameDrawn(latency_info);
+}
+
+void GpuProcessHostUIShim::OnAcceleratedSurfacePostSubBuffer(
+    const GpuHostMsg_AcceleratedSurfacePostSubBuffer_Params& params) {
+  TRACE_EVENT0("renderer",
+      "GpuProcessHostUIShim::OnAcceleratedSurfacePostSubBuffer");
+
+  AcceleratedSurfaceMsg_BufferPresented_Params ack_params;
+  ack_params.mailbox_name = params.mailbox_name;
+  ack_params.sync_point = 0;
+  ScopedSendOnIOThread delayed_send(
+      host_id_,
+      new AcceleratedSurfaceMsg_BufferPresented(params.route_id,
+                                                ack_params));
+
+  if (!params.mailbox_name.empty() &&
+      params.mailbox_name.length() != GL_MAILBOX_SIZE_CHROMIUM)
+    return;
+
+  RenderWidgetHostViewPort* view =
+      GetRenderWidgetHostViewFromSurfaceID(params.surface_id);
+  if (!view)
+    return;
+
+  delayed_send.Cancel();
+
+  // View must send ACK message after next composite.
+  view->AcceleratedSurfacePostSubBuffer(params, host_id_);
+  view->DidReceiveRendererFrame();
+}
+
+void GpuProcessHostUIShim::OnAcceleratedSurfaceSuspend(int32 surface_id) {
+  TRACE_EVENT0("renderer",
+      "GpuProcessHostUIShim::OnAcceleratedSurfaceSuspend");
+
+  RenderWidgetHostViewPort* view =
+      GetRenderWidgetHostViewFromSurfaceID(surface_id);
+  if (!view)
+    return;
+
+  view->AcceleratedSurfaceSuspend();
+}
+
+void GpuProcessHostUIShim::OnAcceleratedSurfaceRelease(
+    const GpuHostMsg_AcceleratedSurfaceRelease_Params& params) {
+  RenderWidgetHostViewPort* view = GetRenderWidgetHostViewFromSurfaceID(
+      params.surface_id);
+  if (!view)
+    return;
+  view->AcceleratedSurfaceRelease();
+}
+
+void GpuProcessHostUIShim::OnVideoMemoryUsageStatsReceived(
+    const GPUVideoMemoryUsageStats& video_memory_usage_stats) {
+  GpuDataManagerImpl::GetInstance()->UpdateVideoMemoryUsageStats(
+      video_memory_usage_stats);
+}
+
+}  // namespace content
diff --git a/chromium/content/browser/gpu/gpu_process_host_ui_shim.h b/chromium/content/browser/gpu/gpu_process_host_ui_shim.h
new file mode 100644
index 00000000000..9f4329a10a5
--- /dev/null
+++ b/chromium/content/browser/gpu/gpu_process_host_ui_shim.h
@@ -0,0 +1,118 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CONTENT_BROWSER_GPU_GPU_PROCESS_HOST_UI_SHIM_H_
+#define CONTENT_BROWSER_GPU_GPU_PROCESS_HOST_UI_SHIM_H_
+
+// This class lives on the UI thread and supports classes like the
+// BackingStoreProxy, which must live on the UI thread. The IO thread
+// portion of this class, the GpuProcessHost, is responsible for
+// shuttling messages between the browser and GPU processes.
+
+#include <string>
+
+#include "base/callback_forward.h"
+#include "base/compiler_specific.h"
+#include "base/memory/linked_ptr.h"
+#include "base/memory/ref_counted.h"
+#include "base/threading/non_thread_safe.h"
+#include "content/common/content_export.h"
+#include "content/common/message_router.h"
+#include "content/public/common/gpu_memory_stats.h"
+#include "gpu/config/gpu_info.h"
+#include "ipc/ipc_listener.h"
+#include "ipc/ipc_sender.h"
+
+struct GpuHostMsg_AcceleratedSurfaceBuffersSwapped_Params;
+struct GpuHostMsg_AcceleratedSurfacePostSubBuffer_Params;
+struct GpuHostMsg_AcceleratedSurfaceRelease_Params;
+
+namespace ui {
+struct LatencyInfo;
+}
+
+namespace gfx {
+class Size;
+}
+
+namespace IPC {
+class Message;
+}
+
+namespace content {
+// Routes |msg| to the shim with the given |host_id| on the UI thread; the
+// message is dropped if that shim no longer exists.
+void RouteToGpuProcessHostUIShimTask(int host_id, const IPC::Message& msg);
+
+class GpuProcessHostUIShim : public IPC::Listener,
+                             public IPC::Sender,
+                             public base::NonThreadSafe {
+ public:
+  // Create a GpuProcessHostUIShim with the given ID. The object can be found
+  // using FromID with the same id.
+  static GpuProcessHostUIShim* Create(int host_id);
+
+  // Destroy the GpuProcessHostUIShim with the given host ID. This can only
+  // be called on the UI thread. Only the GpuProcessHost should destroy the
+  // UI shim. |message| is logged as the reason for the destruction.
+  static void Destroy(int host_id, const std::string& message);
+
+  // Destroy all remaining GpuProcessHostUIShims.
+  CONTENT_EXPORT static void DestroyAll();
+
+  // Looks up the shim for |host_id|; returns NULL if it does not exist.
+  CONTENT_EXPORT static GpuProcessHostUIShim* FromID(int host_id);
+
+  // Get a GpuProcessHostUIShim instance; it doesn't matter which one.
+  // Return NULL if none has been created.
+  CONTENT_EXPORT static GpuProcessHostUIShim* GetOneInstance();
+
+  // IPC::Sender implementation. Posts |msg| to the GPU process via the
+  // corresponding GpuProcessHost on the IO thread.
+  virtual bool Send(IPC::Message* msg) OVERRIDE;
+
+  // IPC::Listener implementation.
+  // The GpuProcessHost causes this to be called on the UI thread to
+  // dispatch the incoming messages from the GPU process, which are
+  // actually received on the IO thread.
+  virtual bool OnMessageReceived(const IPC::Message& message) OVERRIDE;
+
+  // Test/diagnostic hooks: each sends the corresponding GpuMsg_* message
+  // (Clean / Crash / Hang) to the GPU process.
+  CONTENT_EXPORT void SimulateRemoveAllContext();
+  CONTENT_EXPORT void SimulateCrash();
+  CONTENT_EXPORT void SimulateHang();
+
+ private:
+  explicit GpuProcessHostUIShim(int host_id);
+  virtual ~GpuProcessHostUIShim();
+
+  // Message handlers.
+  bool OnControlMessageReceived(const IPC::Message& message);
+
+  void OnLogMessage(int level, const std::string& header,
+                    const std::string& message);
+#if defined(TOOLKIT_GTK) || defined(OS_WIN)
+  void OnResizeView(int32 surface_id,
+                    int32 route_id,
+                    gfx::Size size);
+#endif
+
+  void OnGraphicsInfoCollected(const gpu::GPUInfo& gpu_info);
+
+  void OnAcceleratedSurfaceBuffersSwapped(
+      const GpuHostMsg_AcceleratedSurfaceBuffersSwapped_Params& params);
+  void OnAcceleratedSurfacePostSubBuffer(
+      const GpuHostMsg_AcceleratedSurfacePostSubBuffer_Params& params);
+  void OnAcceleratedSurfaceSuspend(int32 surface_id);
+  void OnAcceleratedSurfaceRelease(
+      const GpuHostMsg_AcceleratedSurfaceRelease_Params& params);
+  void OnVideoMemoryUsageStatsReceived(
+      const GPUVideoMemoryUsageStats& video_memory_usage_stats);
+  void OnUpdateVSyncParameters(int surface_id,
+                               base::TimeTicks timebase,
+                               base::TimeDelta interval);
+  void OnFrameDrawn(const ui::LatencyInfo& latency_info);
+
+  // The serial number of the GpuProcessHost / GpuProcessHostUIShim pair.
+  int host_id_;
+};
+
+}  // namespace content
+
+#endif  // CONTENT_BROWSER_GPU_GPU_PROCESS_HOST_UI_SHIM_H_
diff --git a/chromium/content/browser/gpu/gpu_surface_tracker.cc b/chromium/content/browser/gpu/gpu_surface_tracker.cc
new file mode 100644
index 00000000000..350e6f3831c
--- /dev/null
+++ b/chromium/content/browser/gpu/gpu_surface_tracker.cc
@@ -0,0 +1,200 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "content/browser/gpu/gpu_surface_tracker.h"
+
+#if defined(OS_ANDROID)
+#include <android/native_window_jni.h>
+#endif  // defined(OS_ANDROID)
+
+#include "base/logging.h"
+
+#if defined(TOOLKIT_GTK)
+#include "base/bind.h"
+#include "content/public/browser/browser_thread.h"
+#include "ui/gfx/gtk_native_view_id_manager.h"
+#endif  // defined(TOOLKIT_GTK)
+
+namespace content {
+
+namespace {
+#if defined(TOOLKIT_GTK)
+
+// Releases the permanent XID for |surface|. Must run on the UI thread, so the
+// SurfaceRef destructor below posts to it.
+void ReleasePermanentXIDDispatcher(
+    const gfx::PluginWindowHandle& surface) {
+  DCHECK(BrowserThread::CurrentlyOn(BrowserThread::UI));
+
+  GtkNativeViewManager* manager = GtkNativeViewManager::GetInstance();
+  manager->ReleasePermanentXID(surface);
+}
+
+// Implementation of SurfaceRef that allows GTK to ref and unref the
+// surface with the GtkNativeViewManager.
+class SurfaceRefPluginWindow : public GpuSurfaceTracker::SurfaceRef {
+ public:
+  SurfaceRefPluginWindow(const gfx::PluginWindowHandle& surface_ref);
+ private:
+  virtual ~SurfaceRefPluginWindow();
+  gfx::PluginWindowHandle surface_;
+};
+
+SurfaceRefPluginWindow::SurfaceRefPluginWindow(
+    const gfx::PluginWindowHandle& surface)
+    : surface_(surface) {
+  if (surface_ != gfx::kNullPluginWindow) {
+    GtkNativeViewManager* manager = GtkNativeViewManager::GetInstance();
+    if (!manager->AddRefPermanentXID(surface_)) {
+      LOG(ERROR) << "Surface " << surface << " cannot be referenced.";
+    }
+  }
+}
+
+SurfaceRefPluginWindow::~SurfaceRefPluginWindow() {
+  if (surface_ != gfx::kNullPluginWindow) {
+    // May be destroyed on any thread; the XID release must happen on UI.
+    BrowserThread::PostTask(BrowserThread::UI,
+                            FROM_HERE,
+                            base::Bind(&ReleasePermanentXIDDispatcher,
+                                       surface_));
+  }
+}
+#endif  // defined(TOOLKIT_GTK)
+}  // anonymous
+
+GpuSurfaceTracker::GpuSurfaceTracker()
+    : next_surface_id_(1) {
+  // Register as the process-wide GpuSurfaceLookup.
+  GpuSurfaceLookup::InitInstance(this);
+}
+
+GpuSurfaceTracker::~GpuSurfaceTracker() {
+  GpuSurfaceLookup::InitInstance(NULL);
+}
+
+GpuSurfaceTracker* GpuSurfaceTracker::GetInstance() {
+  return Singleton<GpuSurfaceTracker>::get();
+}
+
+int GpuSurfaceTracker::AddSurfaceForRenderer(int renderer_id,
+                                             int render_widget_id) {
+  base::AutoLock lock(lock_);
+  int surface_id = next_surface_id_++;
+  surface_map_[surface_id] =
+      SurfaceInfo(renderer_id, render_widget_id, gfx::kNullAcceleratedWidget,
+                  gfx::GLSurfaceHandle(), NULL);
+  return surface_id;
+}
+
+int GpuSurfaceTracker::LookupSurfaceForRenderer(int renderer_id,
+                                               int render_widget_id) {
+  base::AutoLock lock(lock_);
+  // O(N): the map is keyed by surface id, not by (renderer, widget).
+  for (SurfaceMap::iterator it = surface_map_.begin(); it != surface_map_.end();
+       ++it) {
+    const SurfaceInfo& info = it->second;
+    if (info.renderer_id == renderer_id &&
+        info.render_widget_id == render_widget_id) {
+      return it->first;
+    }
+  }
+  return 0;
+}
+
+int GpuSurfaceTracker::AddSurfaceForNativeWidget(
+    gfx::AcceleratedWidget widget) {
+  base::AutoLock lock(lock_);
+  int surface_id = next_surface_id_++;
+  surface_map_[surface_id] =
+      SurfaceInfo(0, 0, widget, gfx::GLSurfaceHandle(), NULL);
+  return surface_id;
+}
+
+void GpuSurfaceTracker::RemoveSurface(int surface_id) {
+  base::AutoLock lock(lock_);
+  DCHECK(surface_map_.find(surface_id) != surface_map_.end());
+  surface_map_.erase(surface_id);
+}
+
+bool GpuSurfaceTracker::GetRenderWidgetIDForSurface(int surface_id,
+                                                    int* renderer_id,
+                                                    int* render_widget_id) {
+  base::AutoLock lock(lock_);
+  SurfaceMap::iterator it = surface_map_.find(surface_id);
+  if (it == surface_map_.end())
+    return false;
+  const SurfaceInfo& info = it->second;
+  // Only surfaces with a transport handle correspond to a RenderWidget.
+  if (!info.handle.is_transport())
+    return false;
+  *renderer_id = info.renderer_id;
+  *render_widget_id = info.render_widget_id;
+  return true;
+}
+
+void GpuSurfaceTracker::SetSurfaceHandle(int surface_id,
+                                         const gfx::GLSurfaceHandle& handle) {
+  base::AutoLock lock(lock_);
+  DCHECK(surface_map_.find(surface_id) != surface_map_.end());
+  SurfaceInfo& info = surface_map_[surface_id];
+  info.handle = handle;
+#if defined(TOOLKIT_GTK)
+  // Keep the XID alive for as long as this surface entry exists.
+  info.surface_ref = new SurfaceRefPluginWindow(handle.handle);
+#endif  // defined(TOOLKIT_GTK)
+}
+
+gfx::GLSurfaceHandle GpuSurfaceTracker::GetSurfaceHandle(int surface_id) {
+  base::AutoLock lock(lock_);
+  SurfaceMap::iterator it = surface_map_.find(surface_id);
+  if (it == surface_map_.end())
+    return gfx::GLSurfaceHandle();
+  return it->second.handle;
+}
+
+gfx::AcceleratedWidget GpuSurfaceTracker::AcquireNativeWidget(int surface_id) {
+  base::AutoLock lock(lock_);
+  SurfaceMap::iterator it = surface_map_.find(surface_id);
+  if (it == surface_map_.end())
+    return gfx::kNullAcceleratedWidget;
+
+#if defined(OS_ANDROID)
+  // On Android the returned ANativeWindow is ref'd here; the caller is
+  // responsible for the matching release.
+  if (it->second.native_widget != gfx::kNullAcceleratedWidget)
+    ANativeWindow_acquire(it->second.native_widget);
+#endif  // defined(OS_ANDROID)
+
+  return it->second.native_widget;
+}
+
+void GpuSurfaceTracker::SetNativeWidget(
+    int surface_id, gfx::AcceleratedWidget widget,
+    SurfaceRef* surface_ref) {
+  base::AutoLock lock(lock_);
+  SurfaceMap::iterator it = surface_map_.find(surface_id);
+  DCHECK(it != surface_map_.end());
+  SurfaceInfo& info = it->second;
+  info.native_widget = widget;
+  info.surface_ref = surface_ref;
+}
+
+std::size_t GpuSurfaceTracker::GetSurfaceCount() {
+  base::AutoLock lock(lock_);
+  return surface_map_.size();
+}
+
+GpuSurfaceTracker::SurfaceInfo::SurfaceInfo()
+    : renderer_id(0),
+      render_widget_id(0),
+      native_widget(gfx::kNullAcceleratedWidget) { }
+
+GpuSurfaceTracker::SurfaceInfo::SurfaceInfo(
+    int renderer_id,
+    int render_widget_id,
+    const gfx::AcceleratedWidget& native_widget,
+    const gfx::GLSurfaceHandle& handle,
+    const scoped_refptr<SurfaceRef>& surface_ref)
+    : renderer_id(renderer_id),
+      render_widget_id(render_widget_id),
+      native_widget(native_widget),
+      handle(handle),
+      surface_ref(surface_ref) { }
+
+GpuSurfaceTracker::SurfaceInfo::~SurfaceInfo() { }
+
+
+}  // namespace content
diff --git a/chromium/content/browser/gpu/gpu_surface_tracker.h b/chromium/content/browser/gpu/gpu_surface_tracker.h
new file mode 100644
index 00000000000..de9d666dbe8
--- /dev/null
+++ 
b/chromium/content/browser/gpu/gpu_surface_tracker.h @@ -0,0 +1,138 @@ +// Copyright (c) 2012 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef CONTENT_BROWSER_GPU_GPU_SURFACE_TRACKER_H_ +#define CONTENT_BROWSER_GPU_GPU_SURFACE_TRACKER_H_ + +#include <map> + +#include "base/basictypes.h" +#include "base/memory/ref_counted.h" +#include "base/memory/singleton.h" +#include "base/synchronization/lock.h" +#include "content/common/gpu/gpu_surface_lookup.h" +#include "ui/gfx/native_widget_types.h" +#include "ui/gfx/size.h" + +namespace content { + +// This class is responsible for managing rendering surfaces exposed to the +// GPU process. Every surface gets registered to this class, and gets an ID. +// All calls to and from the GPU process, with the exception of +// CreateViewCommandBuffer, refer to the rendering surface by its ID. +// This class is thread safe. +// +// Note: The ID can exist before the actual native handle for the surface is +// created, for example to allow giving a reference to it to a renderer, so that +// it is unamibiguously identified. +class GpuSurfaceTracker : public GpuSurfaceLookup { + public: + // Base class for reference counting surfaces. We store a + // reference to an instance of this class in the surface_map_ + // and GpuProcessHost (if the GPU process is drawing to + // the surface with a Command Buffer). The reference count ensures that + // we don't destroy the object until it's released from both places. + // + // This is especially important on Android and GTK where the surface must + // not be destroyed when the WebContents is closed if the GPU is still + // drawing to it. Those platforms extend this class with the functionality + // they need to implement on tear down (see SurfaceRefPluginWindow for GTK and + // SurfaceRefAndroid for Android). 
+ class SurfaceRef : public base::RefCountedThreadSafe<SurfaceRef> { + protected: + SurfaceRef() { } + virtual ~SurfaceRef() { } + + private: + friend class base::RefCountedThreadSafe<SurfaceRef>; + DISALLOW_COPY_AND_ASSIGN(SurfaceRef); + }; + + // GpuSurfaceLookup implementation: + // Returns the native widget associated with a given surface_id. + virtual gfx::AcceleratedWidget AcquireNativeWidget(int surface_id) OVERRIDE; + + // Gets the global instance of the surface tracker. + static GpuSurfaceTracker* Get() { return GetInstance(); } + + // Adds a surface for a given RenderWidgetHost. |renderer_id| is the renderer + // process ID, |render_widget_id| is the RenderWidgetHost route id within that + // renderer. Returns the surface ID. + int AddSurfaceForRenderer(int renderer_id, int render_widget_id); + + // Looks up a surface for a given RenderWidgetHost. Returns the surface + // ID, or 0 if not found. + // Note: This is an O(N) lookup. + int LookupSurfaceForRenderer(int renderer_id, int render_widget_id); + + // Adds a surface for a native widget. Returns the surface ID. + int AddSurfaceForNativeWidget(gfx::AcceleratedWidget widget); + + // Removes a given existing surface. + void RemoveSurface(int surface_id); + + // Gets the renderer process ID and RenderWidgetHost route id for a given + // surface, returning true if the surface is found (and corresponds to a + // RenderWidgetHost), or false if not. + bool GetRenderWidgetIDForSurface(int surface_id, + int* renderer_id, + int* render_widget_id); + + // Sets the native handle for the given surface. + // Note: This is an O(log N) lookup. + void SetSurfaceHandle(int surface_id, const gfx::GLSurfaceHandle& handle); + + // Sets the native widget associated with the surface_id. + void SetNativeWidget( + int surface_id, + gfx::AcceleratedWidget widget, + SurfaceRef* surface_ref); + + // Gets the native handle for the given surface. + // Note: This is an O(log N) lookup. 
+ gfx::GLSurfaceHandle GetSurfaceHandle(int surface_id); + + // Returns the number of surfaces currently registered with the tracker. + std::size_t GetSurfaceCount(); + + // Gets the global instance of the surface tracker. Identical to Get(), but + // named that way for the implementation of Singleton. + static GpuSurfaceTracker* GetInstance(); + + scoped_refptr<SurfaceRef> GetSurfaceRefForSurface(int surface_id) { + return surface_map_[surface_id].surface_ref; + } + + private: + struct SurfaceInfo { + SurfaceInfo(); + SurfaceInfo(int renderer_id, + int render_widget_id, + const gfx::AcceleratedWidget& native_widget, + const gfx::GLSurfaceHandle& handle, + const scoped_refptr<SurfaceRef>& surface_ref); + ~SurfaceInfo(); + int renderer_id; + int render_widget_id; + gfx::AcceleratedWidget native_widget; + gfx::GLSurfaceHandle handle; + scoped_refptr<SurfaceRef> surface_ref; + }; + typedef std::map<int, SurfaceInfo> SurfaceMap; + + friend struct DefaultSingletonTraits<GpuSurfaceTracker>; + + GpuSurfaceTracker(); + virtual ~GpuSurfaceTracker(); + + base::Lock lock_; + SurfaceMap surface_map_; + int next_surface_id_; + + DISALLOW_COPY_AND_ASSIGN(GpuSurfaceTracker); +}; + +} // namespace content + +#endif // CONTENT_BROWSER_GPU_GPU_SURFACE_TRACKER_H_ diff --git a/chromium/content/browser/gpu/shader_disk_cache.cc b/chromium/content/browser/gpu/shader_disk_cache.cc new file mode 100644 index 00000000000..fc578bc4bf0 --- /dev/null +++ b/chromium/content/browser/gpu/shader_disk_cache.cc @@ -0,0 +1,617 @@ +// Copyright (c) 2013 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "content/browser/gpu/shader_disk_cache.h" + +#include "base/threading/thread_checker.h" +#include "content/browser/gpu/gpu_process_host.h" +#include "content/public/browser/browser_thread.h" +#include "gpu/command_buffer/common/constants.h" +#include "net/base/cache_type.h" +#include "net/base/io_buffer.h" +#include "net/base/net_errors.h" + +namespace content { + +namespace { + +static const base::FilePath::CharType kGpuCachePath[] = + FILE_PATH_LITERAL("GPUCache"); + +void EntryCloser(disk_cache::Entry* entry) { + entry->Close(); +} + +} // namespace + +// ShaderDiskCacheEntry handles the work of caching/updating the cached +// shaders. +class ShaderDiskCacheEntry + : public base::ThreadChecker, + public base::RefCounted<ShaderDiskCacheEntry> { + public: + ShaderDiskCacheEntry(base::WeakPtr<ShaderDiskCache> cache, + const std::string& key, + const std::string& shader); + void Cache(); + + private: + friend class base::RefCounted<ShaderDiskCacheEntry>; + + enum OpType { + TERMINATE, + OPEN_ENTRY, + WRITE_DATA, + CREATE_ENTRY, + }; + + ~ShaderDiskCacheEntry(); + + void OnOpComplete(int rv); + + int OpenCallback(int rv); + int WriteCallback(int rv); + int IOComplete(int rv); + + base::WeakPtr<ShaderDiskCache> cache_; + OpType op_type_; + std::string key_; + std::string shader_; + disk_cache::Entry* entry_; + + DISALLOW_COPY_AND_ASSIGN(ShaderDiskCacheEntry); +}; + +// ShaderDiskReadHelper is used to load all of the cached shaders from the +// disk cache and send to the memory cache. 
+class ShaderDiskReadHelper + : public base::ThreadChecker, + public base::RefCounted<ShaderDiskReadHelper> { + public: + ShaderDiskReadHelper(base::WeakPtr<ShaderDiskCache> cache, int host_id); + void LoadCache(); + + private: + friend class base::RefCounted<ShaderDiskReadHelper>; + + enum OpType { + TERMINATE, + OPEN_NEXT, + OPEN_NEXT_COMPLETE, + READ_COMPLETE, + ITERATION_FINISHED + }; + + + ~ShaderDiskReadHelper(); + + void OnOpComplete(int rv); + + int OpenNextEntry(); + int OpenNextEntryComplete(int rv); + int ReadComplete(int rv); + int IterationComplete(int rv); + + base::WeakPtr<ShaderDiskCache> cache_; + OpType op_type_; + void* iter_; + scoped_refptr<net::IOBufferWithSize> buf_; + int host_id_; + disk_cache::Entry* entry_; + + DISALLOW_COPY_AND_ASSIGN(ShaderDiskReadHelper); +}; + +class ShaderClearHelper + : public base::RefCounted<ShaderClearHelper>, + public base::SupportsWeakPtr<ShaderClearHelper> { + public: + ShaderClearHelper(scoped_refptr<ShaderDiskCache> cache, + const base::FilePath& path, + const base::Time& delete_begin, + const base::Time& delete_end, + const base::Closure& callback); + void Clear(); + + private: + friend class base::RefCounted<ShaderClearHelper>; + + enum OpType { + TERMINATE, + VERIFY_CACHE_SETUP, + DELETE_CACHE + }; + + ~ShaderClearHelper(); + + void DoClearShaderCache(int rv); + + scoped_refptr<ShaderDiskCache> cache_; + OpType op_type_; + base::FilePath path_; + base::Time delete_begin_; + base::Time delete_end_; + base::Closure callback_; + + DISALLOW_COPY_AND_ASSIGN(ShaderClearHelper); +}; + +ShaderDiskCacheEntry::ShaderDiskCacheEntry(base::WeakPtr<ShaderDiskCache> cache, + const std::string& key, + const std::string& shader) + : cache_(cache), + op_type_(OPEN_ENTRY), + key_(key), + shader_(shader), + entry_(NULL) { +} + +ShaderDiskCacheEntry::~ShaderDiskCacheEntry() { + if (entry_) + BrowserThread::PostTask(BrowserThread::IO, FROM_HERE, + base::Bind(&EntryCloser, entry_)); +} + +void ShaderDiskCacheEntry::Cache() { + 
DCHECK(CalledOnValidThread()); + if (!cache_.get()) + return; + + int rv = cache_->backend()->OpenEntry( + key_, + &entry_, + base::Bind(&ShaderDiskCacheEntry::OnOpComplete, this)); + if (rv != net::ERR_IO_PENDING) + OnOpComplete(rv); +} + +void ShaderDiskCacheEntry::OnOpComplete(int rv) { + DCHECK(CalledOnValidThread()); + if (!cache_.get()) + return; + + do { + switch (op_type_) { + case OPEN_ENTRY: + rv = OpenCallback(rv); + break; + case CREATE_ENTRY: + rv = WriteCallback(rv); + break; + case WRITE_DATA: + rv = IOComplete(rv); + break; + case TERMINATE: + rv = net::ERR_IO_PENDING; // break the loop. + break; + default: + NOTREACHED(); // Invalid op_type_ provided. + break; + } + } while (rv != net::ERR_IO_PENDING); +} + +int ShaderDiskCacheEntry::OpenCallback(int rv) { + DCHECK(CalledOnValidThread()); + // Called through OnOpComplete, so we know |cache_| is valid. + if (rv == net::OK) { + cache_->backend()->OnExternalCacheHit(key_); + cache_->EntryComplete(this); + op_type_ = TERMINATE; + return rv; + } + + op_type_ = CREATE_ENTRY; + return cache_->backend()->CreateEntry( + key_, + &entry_, + base::Bind(&ShaderDiskCacheEntry::OnOpComplete, this)); +} + +int ShaderDiskCacheEntry::WriteCallback(int rv) { + DCHECK(CalledOnValidThread()); + // Called through OnOpComplete, so we know |cache_| is valid. + if (rv != net::OK) { + LOG(ERROR) << "Failed to create shader cache entry: " << rv; + cache_->EntryComplete(this); + op_type_ = TERMINATE; + return rv; + } + + op_type_ = WRITE_DATA; + scoped_refptr<net::StringIOBuffer> io_buf = new net::StringIOBuffer(shader_); + return entry_->WriteData( + 1, + 0, + io_buf.get(), + shader_.length(), + base::Bind(&ShaderDiskCacheEntry::OnOpComplete, this), + false); +} + +int ShaderDiskCacheEntry::IOComplete(int rv) { + DCHECK(CalledOnValidThread()); + // Called through OnOpComplete, so we know |cache_| is valid. 
+ cache_->EntryComplete(this); + op_type_ = TERMINATE; + return rv; +} + +ShaderDiskReadHelper::ShaderDiskReadHelper( + base::WeakPtr<ShaderDiskCache> cache, + int host_id) + : cache_(cache), + op_type_(OPEN_NEXT), + iter_(NULL), + buf_(NULL), + host_id_(host_id), + entry_(NULL) { +} + +void ShaderDiskReadHelper::LoadCache() { + DCHECK(CalledOnValidThread()); + if (!cache_.get()) + return; + OnOpComplete(net::OK); +} + +void ShaderDiskReadHelper::OnOpComplete(int rv) { + DCHECK(CalledOnValidThread()); + if (!cache_.get()) + return; + + do { + switch (op_type_) { + case OPEN_NEXT: + rv = OpenNextEntry(); + break; + case OPEN_NEXT_COMPLETE: + rv = OpenNextEntryComplete(rv); + break; + case READ_COMPLETE: + rv = ReadComplete(rv); + break; + case ITERATION_FINISHED: + rv = IterationComplete(rv); + break; + case TERMINATE: + cache_->ReadComplete(); + rv = net::ERR_IO_PENDING; // break the loop + break; + default: + NOTREACHED(); // Invalid state for read helper + rv = net::ERR_FAILED; + break; + } + } while (rv != net::ERR_IO_PENDING); +} + +int ShaderDiskReadHelper::OpenNextEntry() { + DCHECK(CalledOnValidThread()); + // Called through OnOpComplete, so we know |cache_| is valid. + op_type_ = OPEN_NEXT_COMPLETE; + return cache_->backend()->OpenNextEntry( + &iter_, + &entry_, + base::Bind(&ShaderDiskReadHelper::OnOpComplete, this)); +} + +int ShaderDiskReadHelper::OpenNextEntryComplete(int rv) { + DCHECK(CalledOnValidThread()); + // Called through OnOpComplete, so we know |cache_| is valid. + if (rv == net::ERR_FAILED) { + op_type_ = ITERATION_FINISHED; + return net::OK; + } + + if (rv < 0) + return rv; + + op_type_ = READ_COMPLETE; + buf_ = new net::IOBufferWithSize(entry_->GetDataSize(1)); + return entry_->ReadData( + 1, + 0, + buf_.get(), + buf_->size(), + base::Bind(&ShaderDiskReadHelper::OnOpComplete, this)); +} + +int ShaderDiskReadHelper::ReadComplete(int rv) { + DCHECK(CalledOnValidThread()); + // Called through OnOpComplete, so we know |cache_| is valid. 
+ if (rv && rv == buf_->size()) { + GpuProcessHost* host = GpuProcessHost::FromID(host_id_); + if (host) + host->LoadedShader(entry_->GetKey(), std::string(buf_->data(), + buf_->size())); + } + + buf_ = NULL; + entry_->Close(); + entry_ = NULL; + + op_type_ = OPEN_NEXT; + return net::OK; +} + +int ShaderDiskReadHelper::IterationComplete(int rv) { + DCHECK(CalledOnValidThread()); + // Called through OnOpComplete, so we know |cache_| is valid. + cache_->backend()->EndEnumeration(&iter_); + iter_ = NULL; + op_type_ = TERMINATE; + return net::OK; +} + +ShaderDiskReadHelper::~ShaderDiskReadHelper() { + if (entry_) + BrowserThread::PostTask(BrowserThread::IO, FROM_HERE, + base::Bind(&EntryCloser, entry_)); +} + +ShaderClearHelper::ShaderClearHelper(scoped_refptr<ShaderDiskCache> cache, + const base::FilePath& path, + const base::Time& delete_begin, + const base::Time& delete_end, + const base::Closure& callback) + : cache_(cache), + op_type_(VERIFY_CACHE_SETUP), + path_(path), + delete_begin_(delete_begin), + delete_end_(delete_end), + callback_(callback) { +} + +ShaderClearHelper::~ShaderClearHelper() { + DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); +} + +void ShaderClearHelper::Clear() { + DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); + DoClearShaderCache(net::OK); +} + +void ShaderClearHelper::DoClearShaderCache(int rv) { + DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); + + // Hold a ref to ourselves so when we do the CacheCleared call we don't get + // auto-deleted when our ref count drops to zero. 
+ scoped_refptr<ShaderClearHelper> helper = this; + + while (rv != net::ERR_IO_PENDING) { + switch (op_type_) { + case VERIFY_CACHE_SETUP: + rv = cache_->SetAvailableCallback( + base::Bind(&ShaderClearHelper::DoClearShaderCache, AsWeakPtr())); + op_type_ = DELETE_CACHE; + break; + case DELETE_CACHE: + rv = cache_->Clear( + delete_begin_, delete_end_, + base::Bind(&ShaderClearHelper::DoClearShaderCache, AsWeakPtr())); + op_type_ = TERMINATE; + break; + case TERMINATE: + ShaderCacheFactory::GetInstance()->CacheCleared(path_); + callback_.Run(); + rv = net::ERR_IO_PENDING; // Break the loop. + break; + default: + NOTREACHED(); // Invalid state provided. + op_type_ = TERMINATE; + break; + } + } +} + +// static +ShaderCacheFactory* ShaderCacheFactory::GetInstance() { + return Singleton<ShaderCacheFactory, + LeakySingletonTraits<ShaderCacheFactory> >::get(); +} + +ShaderCacheFactory::ShaderCacheFactory() { +} + +ShaderCacheFactory::~ShaderCacheFactory() { +} + +void ShaderCacheFactory::SetCacheInfo(int32 client_id, + const base::FilePath& path) { + client_id_to_path_map_[client_id] = path; +} + +void ShaderCacheFactory::RemoveCacheInfo(int32 client_id) { + client_id_to_path_map_.erase(client_id); +} + +scoped_refptr<ShaderDiskCache> ShaderCacheFactory::Get(int32 client_id) { + ClientIdToPathMap::iterator iter = + client_id_to_path_map_.find(client_id); + if (iter == client_id_to_path_map_.end()) + return NULL; + return ShaderCacheFactory::GetByPath(iter->second); +} + +scoped_refptr<ShaderDiskCache> ShaderCacheFactory::GetByPath( + const base::FilePath& path) { + ShaderCacheMap::iterator iter = shader_cache_map_.find(path); + if (iter != shader_cache_map_.end()) + return iter->second; + + ShaderDiskCache* cache = new ShaderDiskCache(path); + cache->Init(); + return cache; +} + +void ShaderCacheFactory::AddToCache(const base::FilePath& key, + ShaderDiskCache* cache) { + shader_cache_map_[key] = cache; +} + +void ShaderCacheFactory::RemoveFromCache(const base::FilePath& 
key) {
+  shader_cache_map_.erase(key);
+}
+
+void ShaderCacheFactory::ClearByPath(const base::FilePath& path,
+                                     const base::Time& delete_begin,
+                                     const base::Time& delete_end,
+                                     const base::Closure& callback) {
+  DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO));
+  DCHECK(!callback.is_null());
+
+  scoped_refptr<ShaderClearHelper> helper = new ShaderClearHelper(
+      GetByPath(path), path, delete_begin, delete_end, callback);
+
+  // We could receive requests to clear the same path with different
+  // begin/end times. So, we keep a list of requests. If we haven't seen this
+  // path before we kick off the clear and add it to the list. If we have seen
+  // it already, then we already have a clear running. We add this clear to the
+  // list and wait for any previous clears to finish.
+  ShaderClearMap::iterator iter = shader_clear_map_.find(path);
+  if (iter != shader_clear_map_.end()) {
+    iter->second.push(helper);
+    return;
+  }
+
+  shader_clear_map_.insert(
+      std::pair<base::FilePath, ShaderClearQueue>(path, ShaderClearQueue()));
+  shader_clear_map_[path].push(helper);
+  helper->Clear();
+}
+
+void ShaderCacheFactory::CacheCleared(const base::FilePath& path) {
+  DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO));
+
+  ShaderClearMap::iterator iter = shader_clear_map_.find(path);
+  if (iter == shader_clear_map_.end()) {
+    LOG(ERROR) << "Completed clear but missing clear helper.";
+    return;
+  }
+
+  iter->second.pop();
+
+  // If there are remaining items in the list we trigger the Clear on the
+  // next one. 
+ if (!iter->second.empty()) { + iter->second.front()->Clear(); + return; + } + + shader_clear_map_.erase(path); +} + +ShaderDiskCache::ShaderDiskCache(const base::FilePath& cache_path) + : cache_available_(false), + host_id_(0), + cache_path_(cache_path), + is_initialized_(false) { + ShaderCacheFactory::GetInstance()->AddToCache(cache_path_, this); +} + +ShaderDiskCache::~ShaderDiskCache() { + ShaderCacheFactory::GetInstance()->RemoveFromCache(cache_path_); +} + +void ShaderDiskCache::Init() { + if (is_initialized_) { + NOTREACHED(); // can't initialize disk cache twice. + return; + } + is_initialized_ = true; + + int rv = disk_cache::CreateCacheBackend( + net::SHADER_CACHE, + net::CACHE_BACKEND_BLOCKFILE, + cache_path_.Append(kGpuCachePath), + gpu::kDefaultMaxProgramCacheMemoryBytes, + true, + BrowserThread::GetMessageLoopProxyForThread(BrowserThread::CACHE).get(), + NULL, + &backend_, + base::Bind(&ShaderDiskCache::CacheCreatedCallback, this)); + + if (rv == net::OK) + cache_available_ = true; +} + +void ShaderDiskCache::Cache(const std::string& key, const std::string& shader) { + if (!cache_available_) + return; + + ShaderDiskCacheEntry* shim = + new ShaderDiskCacheEntry(AsWeakPtr(), key, shader); + shim->Cache(); + + entry_map_[shim] = shim; +} + +int ShaderDiskCache::Clear( + const base::Time begin_time, const base::Time end_time, + const net::CompletionCallback& completion_callback) { + int rv; + if (begin_time.is_null()) { + rv = backend_->DoomAllEntries(completion_callback); + } else { + rv = backend_->DoomEntriesBetween(begin_time, end_time, + completion_callback); + } + return rv; +} + +int32 ShaderDiskCache::Size() { + if (!cache_available_) + return -1; + return backend_->GetEntryCount(); +} + +int ShaderDiskCache::SetAvailableCallback( + const net::CompletionCallback& callback) { + if (cache_available_) + return net::OK; + available_callback_ = callback; + return net::ERR_IO_PENDING; +} + +void ShaderDiskCache::CacheCreatedCallback(int rv) { + if (rv 
!= net::OK) { + LOG(ERROR) << "Shader Cache Creation failed: " << rv; + return; + } + helper_ = new ShaderDiskReadHelper(AsWeakPtr(), host_id_); + helper_->LoadCache(); +} + +void ShaderDiskCache::EntryComplete(void* entry) { + entry_map_.erase(entry); + + if (entry_map_.empty() && !cache_complete_callback_.is_null()) + cache_complete_callback_.Run(net::OK); +} + +void ShaderDiskCache::ReadComplete() { + helper_ = NULL; + + // The cache is considered available after we have finished reading any + // of the old cache values off disk. This prevents a potential race where we + // are reading from disk and execute a cache clear at the same time. + cache_available_ = true; + if (!available_callback_.is_null()) { + available_callback_.Run(net::OK); + available_callback_.Reset(); + } +} + +int ShaderDiskCache::SetCacheCompleteCallback( + const net::CompletionCallback& callback) { + if (entry_map_.empty()) { + return net::OK; + } + cache_complete_callback_ = callback; + return net::ERR_IO_PENDING; +} + +} // namespace content + diff --git a/chromium/content/browser/gpu/shader_disk_cache.h b/chromium/content/browser/gpu/shader_disk_cache.h new file mode 100644 index 00000000000..051be5876ec --- /dev/null +++ b/chromium/content/browser/gpu/shader_disk_cache.h @@ -0,0 +1,154 @@ +// Copyright (c) 2013 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+
+#ifndef CONTENT_BROWSER_GPU_SHADER_DISK_CACHE_H_
+#define CONTENT_BROWSER_GPU_SHADER_DISK_CACHE_H_
+
+#include <map>
+#include <queue>
+#include <string>
+
+#include "base/files/file_path.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/singleton.h"
+#include "content/common/content_export.h"
+#include "net/disk_cache/disk_cache.h"
+
+namespace content {
+
+class ShaderDiskCacheEntry;
+class ShaderDiskReadHelper;
+class ShaderClearHelper;
+
+// ShaderDiskCache is the interface to the on disk cache for
+// GL shaders.
+//
+// While this class is both RefCounted and SupportsWeakPtr,
+// when using this class you should work with the RefCounting.
+// The WeakPtr is needed internally.
+class CONTENT_EXPORT ShaderDiskCache
+    : public base::RefCounted<ShaderDiskCache>,
+      public base::SupportsWeakPtr<ShaderDiskCache> {
+ public:
+  void Init();
+
+  void set_host_id(int host_id) { host_id_ = host_id; }
+
+  // Store the |shader| into the cache under |key|.
+  void Cache(const std::string& key, const std::string& shader);
+
+  // Clear a range of entries. This supports unbounded deletes in either
+  // direction by using null Time values for either |begin_time| or |end_time|.
+  // The return value is a net error code. If this method returns
+  // ERR_IO_PENDING, the |completion_callback| will be invoked when the
+  // operation completes.
+  int Clear(
+      const base::Time begin_time,
+      const base::Time end_time,
+      const net::CompletionCallback& completion_callback);
+
+  // Sets a callback for when the cache is available. If the cache is
+  // already available the callback will not be called and net::OK is returned.
+  // If the callback is set net::ERR_IO_PENDING is returned and the callback
+  // will be executed when the cache is available.
+  int SetAvailableCallback(const net::CompletionCallback& callback);
+
+  // Returns the number of elements currently in the cache. 
+ int32 Size(); + + // Set a callback notification for when all current entries have been + // written to the cache. + // The return value is a net error code. If this method returns + // ERR_IO_PENDING, the |callback| will be invoked when all entries have + // been written to the cache. + int SetCacheCompleteCallback(const net::CompletionCallback& callback); + + private: + friend class base::RefCounted<ShaderDiskCache>; + friend class ShaderDiskCacheEntry; + friend class ShaderDiskReadHelper; + friend class ShaderCacheFactory; + + explicit ShaderDiskCache(const base::FilePath& cache_path); + ~ShaderDiskCache(); + + void CacheCreatedCallback(int rv); + + disk_cache::Backend* backend() { return backend_.get(); } + + void EntryComplete(void* entry); + void ReadComplete(); + + bool cache_available_; + int host_id_; + base::FilePath cache_path_; + bool is_initialized_; + net::CompletionCallback available_callback_; + net::CompletionCallback cache_complete_callback_; + + scoped_ptr<disk_cache::Backend> backend_; + + scoped_refptr<ShaderDiskReadHelper> helper_; + std::map<void*, scoped_refptr<ShaderDiskCacheEntry> > entry_map_; + + DISALLOW_COPY_AND_ASSIGN(ShaderDiskCache); +}; + +// ShaderCacheFactory maintains a cache of ShaderDiskCache objects +// so we only create one per profile directory. +class CONTENT_EXPORT ShaderCacheFactory { + public: + static ShaderCacheFactory* GetInstance(); + + // Clear the shader disk cache for the given |path|. This supports unbounded + // deletes in either direction by using null Time values for either + // |begin_time| or |end_time|. The |callback| will be executed when the + // clear is complete. + void ClearByPath(const base::FilePath& path, + const base::Time& begin_time, + const base::Time& end_time, + const base::Closure& callback); + + // Retrieve the shader disk cache for the provided |client_id|. + scoped_refptr<ShaderDiskCache> Get(int32 client_id); + + // Set the |path| to be used for the disk cache for |client_id|. 
+ void SetCacheInfo(int32 client_id, const base::FilePath& path); + + // Remove the path mapping for |client_id|. + void RemoveCacheInfo(int32 client_id); + + // Set the provided |cache| into the cache map for the given |path|. + void AddToCache(const base::FilePath& path, ShaderDiskCache* cache); + + // Remove the provided |path| from our cache map. + void RemoveFromCache(const base::FilePath& path); + + private: + friend struct DefaultSingletonTraits<ShaderCacheFactory>; + friend class ShaderClearHelper; + + ShaderCacheFactory(); + ~ShaderCacheFactory(); + + scoped_refptr<ShaderDiskCache> GetByPath(const base::FilePath& path); + void CacheCleared(const base::FilePath& path); + + typedef std::map<base::FilePath, ShaderDiskCache*> ShaderCacheMap; + ShaderCacheMap shader_cache_map_; + + typedef std::map<int32, base::FilePath> ClientIdToPathMap; + ClientIdToPathMap client_id_to_path_map_; + + typedef std::queue<scoped_refptr<ShaderClearHelper> > ShaderClearQueue; + typedef std::map<base::FilePath, ShaderClearQueue> ShaderClearMap; + ShaderClearMap shader_clear_map_; + + DISALLOW_COPY_AND_ASSIGN(ShaderCacheFactory); +}; + +} // namespace content + +#endif // CONTENT_BROWSER_GPU_SHADER_DISK_CACHE_H_ + diff --git a/chromium/content/browser/gpu/shader_disk_cache_unittest.cc b/chromium/content/browser/gpu/shader_disk_cache_unittest.cc new file mode 100644 index 00000000000..9c7061b62bb --- /dev/null +++ b/chromium/content/browser/gpu/shader_disk_cache_unittest.cc @@ -0,0 +1,75 @@ +// Copyright 2013 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "base/files/scoped_temp_dir.h" +#include "base/threading/thread.h" +#include "content/browser/browser_thread_impl.h" +#include "content/browser/gpu/shader_disk_cache.h" +#include "content/public/test/test_browser_thread_bundle.h" +#include "net/base/test_completion_callback.h" +#include "testing/gtest/include/gtest/gtest.h" + +namespace content { +namespace { + +const int kDefaultClientId = 42; +const char kCacheKey[] = "key"; +const char kCacheValue[] = "cached value"; + +} // namespace + +class ShaderDiskCacheTest : public testing::Test { + public: + ShaderDiskCacheTest() + : thread_bundle_(content::TestBrowserThreadBundle::IO_MAINLOOP) { + } + + virtual ~ShaderDiskCacheTest() {} + + const base::FilePath& cache_path() { return temp_dir_.path(); } + + void InitCache() { + ASSERT_TRUE(temp_dir_.CreateUniqueTempDir()); + ShaderCacheFactory::GetInstance()->SetCacheInfo(kDefaultClientId, + cache_path()); + } + + private: + virtual void TearDown() OVERRIDE { + ShaderCacheFactory::GetInstance()->RemoveCacheInfo(kDefaultClientId); + } + + base::ScopedTempDir temp_dir_; + content::TestBrowserThreadBundle thread_bundle_; + + DISALLOW_COPY_AND_ASSIGN(ShaderDiskCacheTest); +}; + +TEST_F(ShaderDiskCacheTest, ClearsCache) { + InitCache(); + + scoped_refptr<ShaderDiskCache> cache = + ShaderCacheFactory::GetInstance()->Get(kDefaultClientId); + ASSERT_TRUE(cache.get() != NULL); + + net::TestCompletionCallback available_cb; + int rv = cache->SetAvailableCallback(available_cb.callback()); + ASSERT_EQ(net::OK, available_cb.GetResult(rv)); + EXPECT_EQ(0, cache->Size()); + + cache->Cache(kCacheKey, kCacheValue); + + net::TestCompletionCallback complete_cb; + rv = cache->SetCacheCompleteCallback(complete_cb.callback()); + ASSERT_EQ(net::OK, complete_cb.GetResult(rv)); + EXPECT_EQ(1, cache->Size()); + + base::Time time; + net::TestCompletionCallback clear_cb; + rv = cache->Clear(time, time, clear_cb.callback()); + ASSERT_EQ(net::OK, clear_cb.GetResult(rv)); + EXPECT_EQ(0, 
cache->Size()); +}; + +} // namespace content diff --git a/chromium/content/browser/gpu/test_support_gpu.gypi b/chromium/content/browser/gpu/test_support_gpu.gypi new file mode 100644 index 00000000000..4892e9a38e9 --- /dev/null +++ b/chromium/content/browser/gpu/test_support_gpu.gypi @@ -0,0 +1,70 @@ +# Copyright (c) 2012 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +# This file is meant to be included into targets which run gpu tests. +{ + 'variables': { + 'test_list_out_dir': '<(SHARED_INTERMEDIATE_DIR)/content/test/gpu', + 'src_dir': '../../..', + }, + 'defines': [ + 'HAS_OUT_OF_PROC_TEST_RUNNER', + ], + 'include_dirs': [ + '<(src_dir)', + '<(test_list_out_dir)', + ], + # hard_dependency is necessary for this target because it has actions + # that generate a header file included by dependent targets. The header + # file must be generated before the dependents are compiled. The usual + # semantics are to allow the two targets to build concurrently. + 'hard_dependency': 1, + 'conditions': [ + ['OS=="win"', { + 'include_dirs': [ + '<(DEPTH)/third_party/wtl/include', + ], + 'sources': [ + '<(SHARED_INTERMEDIATE_DIR)/content/content_resources.rc', + '<(SHARED_INTERMEDIATE_DIR)/net/net_resources.rc', + '<(SHARED_INTERMEDIATE_DIR)/webkit/blink_resources.rc', + ], + 'conditions': [ + ['win_use_allocator_shim==1', { + 'dependencies': [ + '../base/allocator/allocator.gyp:allocator', + ], + }], + ], + 'configurations': { + 'Debug': { + 'msvs_settings': { + 'VCLinkerTool': { + 'LinkIncremental': '<(msvs_large_module_debug_link_mode)', + }, + }, + }, + }, + }], + ['OS=="mac"', { + # See comments about "xcode_settings" elsewhere in this file. 
+ 'xcode_settings': {'OTHER_LDFLAGS': ['-Wl,-ObjC']}, + }], + ['toolkit_uses_gtk == 1', { + 'dependencies': [ + '<(src_dir)/build/linux/system.gyp:gtk', + ], + }], + ['toolkit_uses_gtk == 1 or chromeos==1 or (OS=="linux" and use_aura==1)', { + 'dependencies': [ + '<(src_dir)/build/linux/system.gyp:ssl', + ], + }], + ['toolkit_views==1', { + 'dependencies': [ + '<(src_dir)/ui/views/views.gyp:views', + ], + }], + ], +} diff --git a/chromium/content/browser/gpu/webgl_conformance_test.cc b/chromium/content/browser/gpu/webgl_conformance_test.cc new file mode 100644 index 00000000000..79f30782830 --- /dev/null +++ b/chromium/content/browser/gpu/webgl_conformance_test.cc @@ -0,0 +1,96 @@ +// Copyright (c) 2012 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "base/command_line.h" +#include "base/file_util.h" +#include "base/path_service.h" +#include "base/strings/utf_string_conversions.h" +#include "content/public/browser/web_contents.h" +#include "content/public/common/content_paths.h" +#include "content/public/common/content_switches.h" +#include "content/public/test/browser_test_utils.h" +#include "content/shell/shell.h" +#include "content/test/content_browser_test.h" +#include "content/test/content_browser_test_utils.h" +#include "gpu/config/gpu_test_config.h" +#include "gpu/config/gpu_test_expectations_parser.h" +#include "net/base/net_util.h" + +namespace content { + +class WebGLConformanceTest : public ContentBrowserTest { + public: + WebGLConformanceTest() {} + + virtual void SetUpCommandLine(CommandLine* command_line) OVERRIDE { + // Allow privileged WebGL extensions. 
+ command_line->AppendSwitch(switches::kEnablePrivilegedWebGLExtensions); +#if defined(OS_ANDROID) + command_line->AppendSwitch( + switches::kDisableGestureRequirementForMediaPlayback); +#endif + } + + virtual void SetUpInProcessBrowserTestFixture() OVERRIDE { + base::FilePath webgl_conformance_path; + PathService::Get(base::DIR_SOURCE_ROOT, &webgl_conformance_path); + webgl_conformance_path = webgl_conformance_path.Append( + FILE_PATH_LITERAL("third_party")); + webgl_conformance_path = webgl_conformance_path.Append( + FILE_PATH_LITERAL("webgl_conformance")); + ASSERT_TRUE(base::DirectoryExists(webgl_conformance_path)) + << "Missing conformance tests: " << webgl_conformance_path.value(); + + PathService::Get(DIR_TEST_DATA, &test_path_); + test_path_ = test_path_.Append(FILE_PATH_LITERAL("gpu")); + test_path_ = test_path_.Append(FILE_PATH_LITERAL("webgl_conformance.html")); + + ASSERT_TRUE(bot_config_.LoadCurrentConfig(NULL)) + << "Fail to load bot configuration"; + ASSERT_TRUE(bot_config_.IsValid()) + << "Invalid bot configuration"; + + base::FilePath path; + ASSERT_TRUE(PathService::Get(DIR_TEST_DATA, &path)); + path = path.Append(FILE_PATH_LITERAL("gpu")) + .Append(FILE_PATH_LITERAL("webgl_conformance_test_expectations.txt")); + ASSERT_TRUE(base::PathExists(path)); + ASSERT_TRUE(test_expectations_.LoadTestExpectations(path)); + } + + void RunTest(std::string url, std::string test_name) { + int32 expectation = + test_expectations_.GetTestExpectation(test_name, bot_config_); + if (expectation != gpu::GPUTestExpectationsParser::kGpuTestPass) { + LOG(WARNING) << "Test " << test_name << " is bypassed"; + return; + } + + DOMMessageQueue message_queue; + NavigateToURL(shell(), net::FilePathToFileURL(test_path_)); + + std::string message; + NavigateToURL(shell(), GURL("javascript:start('" + url + "');")); + ASSERT_TRUE(message_queue.WaitForMessage(&message)); + + EXPECT_STREQ("\"SUCCESS\"", message.c_str()) << message; + } + + private: + base::FilePath test_path_; + 
gpu::GPUTestBotConfig bot_config_; + gpu::GPUTestExpectationsParser test_expectations_; +}; + +#define CONFORMANCE_TEST(name, url) \ +IN_PROC_BROWSER_TEST_F(WebGLConformanceTest, MANUAL_##name) { \ + RunTest(url, #name); \ +} + +// The test declarations are located in webgl_conformance_test_list_autogen.h, +// because the list is automatically generated by a script. +// See: generate_webgl_conformance_test_list.py +#include "webgl_conformance_test_list_autogen.h" + +} // namespace content diff --git a/chromium/content/browser/gpu/webgl_conformance_test_list_autogen.h b/chromium/content/browser/gpu/webgl_conformance_test_list_autogen.h new file mode 100644 index 00000000000..d9a30c243c2 --- /dev/null +++ b/chromium/content/browser/gpu/webgl_conformance_test_list_autogen.h @@ -0,0 +1,1080 @@ +// Copyright (c) 2012 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// DO NOT EDIT! This file is auto-generated by +// generate_webgl_conformance_test_list.py +// It is included by webgl_conformance_test.cc + +#ifndef CONTENT_TEST_GPU_WEBGL_CONFORMANCE_TEST_LIST_AUTOGEN_H_ +#define CONTENT_TEST_GPU_WEBGL_CONFORMANCE_TEST_LIST_AUTOGEN_H_ + +CONFORMANCE_TEST(conformance_more_conformance_constants, + "conformance/more/conformance/constants.html"); +CONFORMANCE_TEST(conformance_more_conformance_getContext, + "conformance/more/conformance/getContext.html"); +CONFORMANCE_TEST(conformance_more_conformance_methods, + "conformance/more/conformance/methods.html"); +CONFORMANCE_TEST(conformance_more_conformance_quickCheckAPI_A, + "conformance/more/conformance/quickCheckAPI-A.html"); +CONFORMANCE_TEST(conformance_more_conformance_quickCheckAPI_B1, + "conformance/more/conformance/quickCheckAPI-B1.html"); +CONFORMANCE_TEST(conformance_more_conformance_quickCheckAPI_B2, + "conformance/more/conformance/quickCheckAPI-B2.html"); +CONFORMANCE_TEST(conformance_more_conformance_quickCheckAPI_B3, 
+ "conformance/more/conformance/quickCheckAPI-B3.html"); +CONFORMANCE_TEST(conformance_more_conformance_quickCheckAPI_B4, + "conformance/more/conformance/quickCheckAPI-B4.html"); +CONFORMANCE_TEST(conformance_more_conformance_quickCheckAPI_C, + "conformance/more/conformance/quickCheckAPI-C.html"); +CONFORMANCE_TEST(conformance_more_conformance_quickCheckAPI_D_G, + "conformance/more/conformance/quickCheckAPI-D_G.html"); +CONFORMANCE_TEST(conformance_more_conformance_quickCheckAPI_G_I, + "conformance/more/conformance/quickCheckAPI-G_I.html"); +CONFORMANCE_TEST(conformance_more_conformance_quickCheckAPI_L_S, + "conformance/more/conformance/quickCheckAPI-L_S.html"); +CONFORMANCE_TEST(conformance_more_conformance_quickCheckAPI_S_V, + "conformance/more/conformance/quickCheckAPI-S_V.html"); +CONFORMANCE_TEST(conformance_more_conformance_webGLArrays, + "conformance/more/conformance/webGLArrays.html"); +CONFORMANCE_TEST(conformance_more_functions_bindBuffer, + "conformance/more/functions/bindBuffer.html"); +CONFORMANCE_TEST(conformance_more_functions_bindBufferBadArgs, + "conformance/more/functions/bindBufferBadArgs.html"); +CONFORMANCE_TEST(conformance_more_functions_bindFramebufferLeaveNonZero, + "conformance/more/functions/bindFramebufferLeaveNonZero.html"); +CONFORMANCE_TEST(conformance_more_functions_bufferData, + "conformance/more/functions/bufferData.html"); +CONFORMANCE_TEST(conformance_more_functions_bufferDataBadArgs, + "conformance/more/functions/bufferDataBadArgs.html"); +CONFORMANCE_TEST(conformance_more_functions_bufferSubData, + "conformance/more/functions/bufferSubData.html"); +CONFORMANCE_TEST(conformance_more_functions_bufferSubDataBadArgs, + "conformance/more/functions/bufferSubDataBadArgs.html"); +CONFORMANCE_TEST(conformance_more_functions_copyTexImage2D, + "conformance/more/functions/copyTexImage2D.html"); +CONFORMANCE_TEST(conformance_more_functions_copyTexImage2DBadArgs, + "conformance/more/functions/copyTexImage2DBadArgs.html"); 
+CONFORMANCE_TEST(conformance_more_functions_copyTexSubImage2D, + "conformance/more/functions/copyTexSubImage2D.html"); +CONFORMANCE_TEST(conformance_more_functions_copyTexSubImage2DBadArgs, + "conformance/more/functions/copyTexSubImage2DBadArgs.html"); +CONFORMANCE_TEST(conformance_more_functions_deleteBufferBadArgs, + "conformance/more/functions/deleteBufferBadArgs.html"); +CONFORMANCE_TEST(conformance_more_functions_drawArrays, + "conformance/more/functions/drawArrays.html"); +CONFORMANCE_TEST(conformance_more_functions_drawArraysOutOfBounds, + "conformance/more/functions/drawArraysOutOfBounds.html"); +CONFORMANCE_TEST(conformance_more_functions_drawElements, + "conformance/more/functions/drawElements.html"); +CONFORMANCE_TEST(conformance_more_functions_drawElementsBadArgs, + "conformance/more/functions/drawElementsBadArgs.html"); +CONFORMANCE_TEST(conformance_more_functions_isTests, + "conformance/more/functions/isTests.html"); +CONFORMANCE_TEST(conformance_more_functions_isTestsBadArgs, + "conformance/more/functions/isTestsBadArgs.html"); +CONFORMANCE_TEST(conformance_more_functions_readPixels, + "conformance/more/functions/readPixels.html"); +CONFORMANCE_TEST(conformance_more_functions_readPixelsBadArgs, + "conformance/more/functions/readPixelsBadArgs.html"); +CONFORMANCE_TEST(conformance_more_functions_texImage2D, + "conformance/more/functions/texImage2D.html"); +CONFORMANCE_TEST(conformance_more_functions_texImage2DBadArgs, + "conformance/more/functions/texImage2DBadArgs.html"); +CONFORMANCE_TEST(conformance_more_functions_texImage2DHTML, + "conformance/more/functions/texImage2DHTML.html"); +CONFORMANCE_TEST(conformance_more_functions_texImage2DHTMLBadArgs, + "conformance/more/functions/texImage2DHTMLBadArgs.html"); +CONFORMANCE_TEST(conformance_more_functions_texSubImage2D, + "conformance/more/functions/texSubImage2D.html"); +CONFORMANCE_TEST(conformance_more_functions_texSubImage2DBadArgs, + "conformance/more/functions/texSubImage2DBadArgs.html"); 
// WebGL conformance-suite test registrations (continuation of a longer list).
// Each CONFORMANCE_TEST(test_id, page_path) entry pairs a unique gtest-style
// identifier with the relative path of the Khronos WebGL conformance-suite
// HTML page that the test runs. NOTE(review): the CONFORMANCE_TEST macro is
// defined outside this chunk -- presumably it expands to a browser-test
// registration that loads the page and checks its result; confirm against
// the macro definition before relying on that. Entries are grouped below to
// mirror the suite's directory layout; identifiers are the page path with
// separators and hyphens replaced by underscores.

// conformance/more/functions: argument-validation tests for texture,
// uniform, and vertex-attribute entry points.
CONFORMANCE_TEST(conformance_more_functions_texSubImage2DHTML,
    "conformance/more/functions/texSubImage2DHTML.html");
CONFORMANCE_TEST(conformance_more_functions_texSubImage2DHTMLBadArgs,
    "conformance/more/functions/texSubImage2DHTMLBadArgs.html");
CONFORMANCE_TEST(conformance_more_functions_uniformf,
    "conformance/more/functions/uniformf.html");
CONFORMANCE_TEST(conformance_more_functions_uniformfBadArgs,
    "conformance/more/functions/uniformfBadArgs.html");
CONFORMANCE_TEST(conformance_more_functions_uniformfArrayLen1,
    "conformance/more/functions/uniformfArrayLen1.html");
CONFORMANCE_TEST(conformance_more_functions_uniformi,
    "conformance/more/functions/uniformi.html");
CONFORMANCE_TEST(conformance_more_functions_uniformiBadArgs,
    "conformance/more/functions/uniformiBadArgs.html");
CONFORMANCE_TEST(conformance_more_functions_uniformMatrix,
    "conformance/more/functions/uniformMatrix.html");
CONFORMANCE_TEST(conformance_more_functions_uniformMatrixBadArgs,
    "conformance/more/functions/uniformMatrixBadArgs.html");
CONFORMANCE_TEST(conformance_more_functions_vertexAttrib,
    "conformance/more/functions/vertexAttrib.html");
CONFORMANCE_TEST(conformance_more_functions_vertexAttribBadArgs,
    "conformance/more/functions/vertexAttribBadArgs.html");
CONFORMANCE_TEST(conformance_more_functions_vertexAttribPointer,
    "conformance/more/functions/vertexAttribPointer.html");
CONFORMANCE_TEST(conformance_more_functions_vertexAttribPointerBadArgs,
    "conformance/more/functions/vertexAttribPointerBadArgs.html");

// conformance/more/glsl: out-of-bounds access behavior in shaders.
CONFORMANCE_TEST(conformance_more_glsl_arrayOutOfBounds,
    "conformance/more/glsl/arrayOutOfBounds.html");
CONFORMANCE_TEST(conformance_more_glsl_uniformOutOfBounds,
    "conformance/more/glsl/uniformOutOfBounds.html");

// conformance/attribs: vertex attribute state and pointer setup.
CONFORMANCE_TEST(conformance_attribs_gl_enable_vertex_attrib,
    "conformance/attribs/gl-enable-vertex-attrib.html");
CONFORMANCE_TEST(conformance_attribs_gl_vertex_attrib_render,
    "conformance/attribs/gl-vertex-attrib-render.html");
CONFORMANCE_TEST(conformance_attribs_gl_disabled_vertex_attrib,
    "conformance/attribs/gl-disabled-vertex-attrib.html");
CONFORMANCE_TEST(conformance_attribs_gl_vertex_attrib_zero_issues,
    "conformance/attribs/gl-vertex-attrib-zero-issues.html");
CONFORMANCE_TEST(conformance_attribs_gl_vertex_attrib,
    "conformance/attribs/gl-vertex-attrib.html");
CONFORMANCE_TEST(conformance_attribs_gl_vertexattribpointer_offsets,
    "conformance/attribs/gl-vertexattribpointer-offsets.html");
CONFORMANCE_TEST(conformance_attribs_gl_vertexattribpointer,
    "conformance/attribs/gl-vertexattribpointer.html");

// conformance/buffers: buffer object binding, data upload, and index
// validation.
CONFORMANCE_TEST(conformance_buffers_buffer_bind_test,
    "conformance/buffers/buffer-bind-test.html");
CONFORMANCE_TEST(conformance_buffers_buffer_data_array_buffer,
    "conformance/buffers/buffer-data-array-buffer.html");
CONFORMANCE_TEST(conformance_buffers_index_validation_copies_indices,
    "conformance/buffers/index-validation-copies-indices.html");
CONFORMANCE_TEST(conformance_buffers_index_validation_crash_with_buffer_sub_data,
    "conformance/buffers/index-validation-crash-with-buffer-sub-data.html");
CONFORMANCE_TEST(conformance_buffers_index_validation_verifies_too_many_indices,
    "conformance/buffers/index-validation-verifies-too-many-indices.html");
CONFORMANCE_TEST(conformance_buffers_index_validation_with_resized_buffer,
    "conformance/buffers/index-validation-with-resized-buffer.html");
CONFORMANCE_TEST(conformance_buffers_index_validation,
    "conformance/buffers/index-validation.html");

// conformance/canvas: canvas/drawing-buffer interaction and resize behavior.
CONFORMANCE_TEST(conformance_canvas_buffer_offscreen_test,
    "conformance/canvas/buffer-offscreen-test.html");
CONFORMANCE_TEST(conformance_canvas_buffer_preserve_test,
    "conformance/canvas/buffer-preserve-test.html");
CONFORMANCE_TEST(conformance_canvas_canvas_test,
    "conformance/canvas/canvas-test.html");
CONFORMANCE_TEST(conformance_canvas_canvas_zero_size,
    "conformance/canvas/canvas-zero-size.html");
CONFORMANCE_TEST(conformance_canvas_drawingbuffer_static_canvas_test,
    "conformance/canvas/drawingbuffer-static-canvas-test.html");
CONFORMANCE_TEST(conformance_canvas_drawingbuffer_test,
    "conformance/canvas/drawingbuffer-test.html");
CONFORMANCE_TEST(conformance_canvas_framebuffer_bindings_unaffected_on_resize,
    "conformance/canvas/framebuffer-bindings-unaffected-on-resize.html");
CONFORMANCE_TEST(conformance_canvas_texture_bindings_unaffected_on_resize,
    "conformance/canvas/texture-bindings-unaffected-on-resize.html");
CONFORMANCE_TEST(conformance_canvas_viewport_unchanged_upon_resize,
    "conformance/canvas/viewport-unchanged-upon-resize.html");

// conformance/context: context creation, attributes, and loss/restore.
CONFORMANCE_TEST(conformance_context_constants,
    "conformance/context/constants.html");
CONFORMANCE_TEST(conformance_context_context_attribute_preserve_drawing_buffer,
    "conformance/context/context-attribute-preserve-drawing-buffer.html");
CONFORMANCE_TEST(conformance_context_context_attributes_alpha_depth_stencil_antialias,
    "conformance/context/context-attributes-alpha-depth-stencil-antialias.html");
CONFORMANCE_TEST(conformance_context_context_creation_and_destruction,
    "conformance/context/context-creation-and-destruction.html");
CONFORMANCE_TEST(conformance_context_context_lost_restored,
    "conformance/context/context-lost-restored.html");
CONFORMANCE_TEST(conformance_context_context_lost,
    "conformance/context/context-lost.html");
CONFORMANCE_TEST(conformance_context_context_type_test,
    "conformance/context/context-type-test.html");
CONFORMANCE_TEST(conformance_context_incorrect_context_object_behaviour,
    "conformance/context/incorrect-context-object-behaviour.html");
CONFORMANCE_TEST(conformance_context_methods,
    "conformance/context/methods.html");
CONFORMANCE_TEST(conformance_context_premultiplyalpha_test,
    "conformance/context/premultiplyalpha-test.html");
CONFORMANCE_TEST(conformance_context_resource_sharing_test,
    "conformance/context/resource-sharing-test.html");

// conformance/extensions: WebGL extension discovery and behavior.
CONFORMANCE_TEST(conformance_extensions_get_extension,
    "conformance/extensions/get-extension.html");
CONFORMANCE_TEST(conformance_extensions_oes_standard_derivatives,
    "conformance/extensions/oes-standard-derivatives.html");
CONFORMANCE_TEST(conformance_extensions_oes_texture_float_with_canvas,
    "conformance/extensions/oes-texture-float-with-canvas.html");
CONFORMANCE_TEST(conformance_extensions_oes_texture_float_with_image_data,
    "conformance/extensions/oes-texture-float-with-image-data.html");
CONFORMANCE_TEST(conformance_extensions_oes_texture_float_with_image,
    "conformance/extensions/oes-texture-float-with-image.html");
CONFORMANCE_TEST(conformance_extensions_oes_texture_float_with_video,
    "conformance/extensions/oes-texture-float-with-video.html");
CONFORMANCE_TEST(conformance_extensions_oes_texture_float,
    "conformance/extensions/oes-texture-float.html");
CONFORMANCE_TEST(conformance_extensions_oes_vertex_array_object,
    "conformance/extensions/oes-vertex-array-object.html");
CONFORMANCE_TEST(conformance_extensions_webgl_debug_renderer_info,
    "conformance/extensions/webgl-debug-renderer-info.html");
CONFORMANCE_TEST(conformance_extensions_webgl_debug_shaders,
    "conformance/extensions/webgl-debug-shaders.html");
CONFORMANCE_TEST(conformance_extensions_webgl_compressed_texture_s3tc,
    "conformance/extensions/webgl-compressed-texture-s3tc.html");
CONFORMANCE_TEST(conformance_extensions_ext_texture_filter_anisotropic,
    "conformance/extensions/ext-texture-filter-anisotropic.html");

// conformance/limits: implementation-defined minimums/maximums.
CONFORMANCE_TEST(conformance_limits_gl_min_attribs,
    "conformance/limits/gl-min-attribs.html");
CONFORMANCE_TEST(conformance_limits_gl_max_texture_dimensions,
    "conformance/limits/gl-max-texture-dimensions.html");
CONFORMANCE_TEST(conformance_limits_gl_min_textures,
    "conformance/limits/gl-min-textures.html");
CONFORMANCE_TEST(conformance_limits_gl_min_uniforms,
    "conformance/limits/gl-min-uniforms.html");

// conformance/misc: API-surface and error-handling checks.
CONFORMANCE_TEST(conformance_misc_bad_arguments_test,
    "conformance/misc/bad-arguments-test.html");
CONFORMANCE_TEST(conformance_misc_delayed_drawing,
    "conformance/misc/delayed-drawing.html");
CONFORMANCE_TEST(conformance_misc_error_reporting,
    "conformance/misc/error-reporting.html");
CONFORMANCE_TEST(conformance_misc_instanceof_test,
    "conformance/misc/instanceof-test.html");
CONFORMANCE_TEST(conformance_misc_invalid_passed_params,
    "conformance/misc/invalid-passed-params.html");
CONFORMANCE_TEST(conformance_misc_is_object,
    "conformance/misc/is-object.html");
CONFORMANCE_TEST(conformance_misc_null_object_behaviour,
    "conformance/misc/null-object-behaviour.html");
CONFORMANCE_TEST(conformance_misc_functions_returning_strings,
    "conformance/misc/functions-returning-strings.html");
CONFORMANCE_TEST(conformance_misc_object_deletion_behaviour,
    "conformance/misc/object-deletion-behaviour.html");
CONFORMANCE_TEST(conformance_misc_shader_precision_format,
    "conformance/misc/shader-precision-format.html");
CONFORMANCE_TEST(conformance_misc_type_conversion_test,
    "conformance/misc/type-conversion-test.html");
CONFORMANCE_TEST(conformance_misc_uninitialized_test,
    "conformance/misc/uninitialized-test.html");
CONFORMANCE_TEST(conformance_misc_webgl_specific,
    "conformance/misc/webgl-specific.html");

// conformance/programs: program/shader objects, attrib binding, introspection.
CONFORMANCE_TEST(conformance_programs_get_active_test,
    "conformance/programs/get-active-test.html");
CONFORMANCE_TEST(conformance_programs_gl_bind_attrib_location_test,
    "conformance/programs/gl-bind-attrib-location-test.html");
CONFORMANCE_TEST(conformance_programs_gl_bind_attrib_location_long_names_test,
    "conformance/programs/gl-bind-attrib-location-long-names-test.html");
CONFORMANCE_TEST(conformance_programs_gl_get_active_attribute,
    "conformance/programs/gl-get-active-attribute.html");
CONFORMANCE_TEST(conformance_programs_gl_get_active_uniform,
    "conformance/programs/gl-get-active-uniform.html");
CONFORMANCE_TEST(conformance_programs_gl_getshadersource,
    "conformance/programs/gl-getshadersource.html");
CONFORMANCE_TEST(conformance_programs_gl_shader_test,
    "conformance/programs/gl-shader-test.html");
CONFORMANCE_TEST(conformance_programs_invalid_UTF_16,
    "conformance/programs/invalid-UTF-16.html");
CONFORMANCE_TEST(conformance_programs_program_test,
    "conformance/programs/program-test.html");
CONFORMANCE_TEST(conformance_programs_use_program_crash_with_discard_in_fragment_shader,
    "conformance/programs/use-program-crash-with-discard-in-fragment-shader.html");

// conformance/reading: readPixels behavior.
CONFORMANCE_TEST(conformance_reading_read_pixels_pack_alignment,
    "conformance/reading/read-pixels-pack-alignment.html");
CONFORMANCE_TEST(conformance_reading_read_pixels_test,
    "conformance/reading/read-pixels-test.html");

// conformance/renderbuffers: framebuffer/renderbuffer objects.
CONFORMANCE_TEST(conformance_renderbuffers_framebuffer_object_attachment,
    "conformance/renderbuffers/framebuffer-object-attachment.html");
CONFORMANCE_TEST(conformance_renderbuffers_framebuffer_state_restoration,
    "conformance/renderbuffers/framebuffer-state-restoration.html");
CONFORMANCE_TEST(conformance_renderbuffers_framebuffer_test,
    "conformance/renderbuffers/framebuffer-test.html");
CONFORMANCE_TEST(conformance_renderbuffers_renderbuffer_initialization,
    "conformance/renderbuffers/renderbuffer-initialization.html");

// conformance/rendering: draw-call correctness and bounds checking.
CONFORMANCE_TEST(conformance_rendering_culling,
    "conformance/rendering/culling.html");
CONFORMANCE_TEST(conformance_rendering_draw_arrays_out_of_bounds,
    "conformance/rendering/draw-arrays-out-of-bounds.html");
CONFORMANCE_TEST(conformance_rendering_draw_elements_out_of_bounds,
    "conformance/rendering/draw-elements-out-of-bounds.html");
CONFORMANCE_TEST(conformance_rendering_gl_clear,
    "conformance/rendering/gl-clear.html");
CONFORMANCE_TEST(conformance_rendering_gl_drawelements,
    "conformance/rendering/gl-drawelements.html");
CONFORMANCE_TEST(conformance_rendering_gl_scissor_test,
    "conformance/rendering/gl-scissor-test.html");
CONFORMANCE_TEST(conformance_rendering_more_than_65536_indices,
    "conformance/rendering/more-than-65536-indices.html");
CONFORMANCE_TEST(conformance_rendering_point_size,
    "conformance/rendering/point-size.html");
CONFORMANCE_TEST(conformance_rendering_triangle,
    "conformance/rendering/triangle.html");
CONFORMANCE_TEST(conformance_rendering_line_loop_tri_fan,
    "conformance/rendering/line-loop-tri-fan.html");

// conformance/state: GL state queries and enable/disable enums.
CONFORMANCE_TEST(conformance_state_gl_enable_enum_test,
    "conformance/state/gl-enable-enum-test.html");
CONFORMANCE_TEST(conformance_state_gl_enum_tests,
    "conformance/state/gl-enum-tests.html");
CONFORMANCE_TEST(conformance_state_gl_get_calls,
    "conformance/state/gl-get-calls.html");
CONFORMANCE_TEST(conformance_state_gl_geterror,
    "conformance/state/gl-geterror.html");
CONFORMANCE_TEST(conformance_state_gl_getstring,
    "conformance/state/gl-getstring.html");
CONFORMANCE_TEST(conformance_state_gl_object_get_calls,
    "conformance/state/gl-object-get-calls.html");

// conformance/textures: texture upload from typed arrays, canvas, image,
// image-data, and video sources (incl. 565/4444/5551 packed formats),
// plus texture state/completeness checks.
CONFORMANCE_TEST(conformance_textures_compressed_tex_image,
    "conformance/textures/compressed-tex-image.html");
CONFORMANCE_TEST(conformance_textures_copy_tex_image_and_sub_image_2d,
    "conformance/textures/copy-tex-image-and-sub-image-2d.html");
CONFORMANCE_TEST(conformance_textures_gl_pixelstorei,
    "conformance/textures/gl-pixelstorei.html");
CONFORMANCE_TEST(conformance_textures_gl_teximage,
    "conformance/textures/gl-teximage.html");
CONFORMANCE_TEST(conformance_textures_origin_clean_conformance,
    "conformance/textures/origin-clean-conformance.html");
CONFORMANCE_TEST(conformance_textures_tex_image_and_sub_image_2d_with_array_buffer_view,
    "conformance/textures/tex-image-and-sub-image-2d-with-array-buffer-view.html");
CONFORMANCE_TEST(conformance_textures_tex_image_and_sub_image_2d_with_canvas,
    "conformance/textures/tex-image-and-sub-image-2d-with-canvas.html");
CONFORMANCE_TEST(conformance_textures_tex_image_and_sub_image_2d_with_canvas_rgb565,
    "conformance/textures/tex-image-and-sub-image-2d-with-canvas-rgb565.html");
CONFORMANCE_TEST(conformance_textures_tex_image_and_sub_image_2d_with_canvas_rgba4444,
    "conformance/textures/tex-image-and-sub-image-2d-with-canvas-rgba4444.html");
CONFORMANCE_TEST(conformance_textures_tex_image_and_sub_image_2d_with_canvas_rgba5551,
    "conformance/textures/tex-image-and-sub-image-2d-with-canvas-rgba5551.html");
CONFORMANCE_TEST(conformance_textures_tex_image_and_sub_image_2d_with_image_data,
    "conformance/textures/tex-image-and-sub-image-2d-with-image-data.html");
CONFORMANCE_TEST(conformance_textures_tex_image_and_sub_image_2d_with_image_data_rgb565,
    "conformance/textures/tex-image-and-sub-image-2d-with-image-data-rgb565.html");
CONFORMANCE_TEST(conformance_textures_tex_image_and_sub_image_2d_with_image_data_rgba4444,
    "conformance/textures/tex-image-and-sub-image-2d-with-image-data-rgba4444.html");
CONFORMANCE_TEST(conformance_textures_tex_image_and_sub_image_2d_with_image_data_rgba5551,
    "conformance/textures/tex-image-and-sub-image-2d-with-image-data-rgba5551.html");
CONFORMANCE_TEST(conformance_textures_tex_image_and_sub_image_2d_with_image,
    "conformance/textures/tex-image-and-sub-image-2d-with-image.html");
CONFORMANCE_TEST(conformance_textures_tex_image_and_sub_image_2d_with_image_rgb565,
    "conformance/textures/tex-image-and-sub-image-2d-with-image-rgb565.html");
CONFORMANCE_TEST(conformance_textures_tex_image_and_sub_image_2d_with_image_rgba4444,
    "conformance/textures/tex-image-and-sub-image-2d-with-image-rgba4444.html");
CONFORMANCE_TEST(conformance_textures_tex_image_and_sub_image_2d_with_image_rgba5551,
    "conformance/textures/tex-image-and-sub-image-2d-with-image-rgba5551.html");
CONFORMANCE_TEST(conformance_textures_tex_image_and_sub_image_2d_with_video,
    "conformance/textures/tex-image-and-sub-image-2d-with-video.html");
CONFORMANCE_TEST(conformance_textures_tex_image_and_sub_image_2d_with_video_rgb565,
    "conformance/textures/tex-image-and-sub-image-2d-with-video-rgb565.html");
CONFORMANCE_TEST(conformance_textures_tex_image_and_sub_image_2d_with_video_rgba4444,
    "conformance/textures/tex-image-and-sub-image-2d-with-video-rgba4444.html");
CONFORMANCE_TEST(conformance_textures_tex_image_and_sub_image_2d_with_video_rgba5551,
    "conformance/textures/tex-image-and-sub-image-2d-with-video-rgba5551.html");
CONFORMANCE_TEST(conformance_textures_tex_image_and_uniform_binding_bugs,
    "conformance/textures/tex-image-and-uniform-binding-bugs.html");
CONFORMANCE_TEST(conformance_textures_tex_image_with_format_and_type,
    "conformance/textures/tex-image-with-format-and-type.html");
CONFORMANCE_TEST(conformance_textures_tex_image_with_invalid_data,
    "conformance/textures/tex-image-with-invalid-data.html");
CONFORMANCE_TEST(conformance_textures_tex_input_validation,
    "conformance/textures/tex-input-validation.html");
CONFORMANCE_TEST(conformance_textures_tex_sub_image_2d_bad_args,
    "conformance/textures/tex-sub-image-2d-bad-args.html");
CONFORMANCE_TEST(conformance_textures_tex_sub_image_2d,
    "conformance/textures/tex-sub-image-2d.html");
CONFORMANCE_TEST(conformance_textures_texparameter_test,
    "conformance/textures/texparameter-test.html");
CONFORMANCE_TEST(conformance_textures_texture_active_bind_2,
    "conformance/textures/texture-active-bind-2.html");
CONFORMANCE_TEST(conformance_textures_texture_active_bind,
    "conformance/textures/texture-active-bind.html");
CONFORMANCE_TEST(conformance_textures_texture_attachment_formats,
    "conformance/textures/texture-attachment-formats.html");
CONFORMANCE_TEST(conformance_textures_texture_clear,
    "conformance/textures/texture-clear.html");
CONFORMANCE_TEST(conformance_textures_texture_complete,
    "conformance/textures/texture-complete.html");
CONFORMANCE_TEST(conformance_textures_texture_formats_test,
    "conformance/textures/texture-formats-test.html");
CONFORMANCE_TEST(conformance_textures_texture_mips,
    "conformance/textures/texture-mips.html");
CONFORMANCE_TEST(conformance_textures_texture_npot_video,
    "conformance/textures/texture-npot-video.html");
CONFORMANCE_TEST(conformance_textures_texture_npot,
    "conformance/textures/texture-npot.html");
CONFORMANCE_TEST(conformance_textures_texture_size,
    "conformance/textures/texture-size.html");
CONFORMANCE_TEST(conformance_textures_texture_size_cube_maps,
    "conformance/textures/texture-size-cube-maps.html");
CONFORMANCE_TEST(conformance_textures_texture_transparent_pixels_initialized,
    "conformance/textures/texture-transparent-pixels-initialized.html");
CONFORMANCE_TEST(conformance_textures_texture_upload_cube_maps,
    "conformance/textures/texture-upload-cube-maps.html");

// conformance/typedarrays: typed-array robustness/crash regression pages.
CONFORMANCE_TEST(conformance_typedarrays_array_buffer_crash,
    "conformance/typedarrays/array-buffer-crash.html");
CONFORMANCE_TEST(conformance_typedarrays_array_buffer_view_crash,
    "conformance/typedarrays/array-buffer-view-crash.html");
CONFORMANCE_TEST(conformance_typedarrays_array_unit_tests,
    "conformance/typedarrays/array-unit-tests.html");
CONFORMANCE_TEST(conformance_typedarrays_data_view_crash,
    "conformance/typedarrays/data-view-crash.html");
CONFORMANCE_TEST(conformance_typedarrays_data_view_test,
    "conformance/typedarrays/data-view-test.html");

// conformance/uniforms: uniform setters, locations, and defaults.
CONFORMANCE_TEST(conformance_uniforms_gl_uniform_arrays,
    "conformance/uniforms/gl-uniform-arrays.html");
CONFORMANCE_TEST(conformance_uniforms_gl_uniform_bool,
    "conformance/uniforms/gl-uniform-bool.html");
CONFORMANCE_TEST(conformance_uniforms_gl_uniformmatrix4fv,
    "conformance/uniforms/gl-uniformmatrix4fv.html");
CONFORMANCE_TEST(conformance_uniforms_gl_unknown_uniform,
    "conformance/uniforms/gl-unknown-uniform.html");
CONFORMANCE_TEST(conformance_uniforms_null_uniform_location,
    "conformance/uniforms/null-uniform-location.html");
CONFORMANCE_TEST(conformance_uniforms_uniform_default_values,
    "conformance/uniforms/uniform-default-values.html");
CONFORMANCE_TEST(conformance_uniforms_uniform_location,
    "conformance/uniforms/uniform-location.html");
CONFORMANCE_TEST(conformance_uniforms_uniform_samplers_test,
    "conformance/uniforms/uniform-samplers-test.html");

// conformance/glsl/functions: GLSL built-in function results.
CONFORMANCE_TEST(conformance_glsl_functions_glsl_function,
    "conformance/glsl/functions/glsl-function.html");
CONFORMANCE_TEST(conformance_glsl_functions_glsl_function_abs,
    "conformance/glsl/functions/glsl-function-abs.html");
CONFORMANCE_TEST(conformance_glsl_functions_glsl_function_acos,
    "conformance/glsl/functions/glsl-function-acos.html");
CONFORMANCE_TEST(conformance_glsl_functions_glsl_function_asin,
    "conformance/glsl/functions/glsl-function-asin.html");
CONFORMANCE_TEST(conformance_glsl_functions_glsl_function_atan,
    "conformance/glsl/functions/glsl-function-atan.html");
CONFORMANCE_TEST(conformance_glsl_functions_glsl_function_atan_xy,
    "conformance/glsl/functions/glsl-function-atan-xy.html");
CONFORMANCE_TEST(conformance_glsl_functions_glsl_function_ceil,
    "conformance/glsl/functions/glsl-function-ceil.html");
CONFORMANCE_TEST(conformance_glsl_functions_glsl_function_clamp_float,
    "conformance/glsl/functions/glsl-function-clamp-float.html");
CONFORMANCE_TEST(conformance_glsl_functions_glsl_function_clamp_gentype,
    "conformance/glsl/functions/glsl-function-clamp-gentype.html");
CONFORMANCE_TEST(conformance_glsl_functions_glsl_function_cos,
    "conformance/glsl/functions/glsl-function-cos.html");
CONFORMANCE_TEST(conformance_glsl_functions_glsl_function_cross,
    "conformance/glsl/functions/glsl-function-cross.html");
CONFORMANCE_TEST(conformance_glsl_functions_glsl_function_distance,
    "conformance/glsl/functions/glsl-function-distance.html");
CONFORMANCE_TEST(conformance_glsl_functions_glsl_function_dot,
    "conformance/glsl/functions/glsl-function-dot.html");
CONFORMANCE_TEST(conformance_glsl_functions_glsl_function_faceforward,
    "conformance/glsl/functions/glsl-function-faceforward.html");
CONFORMANCE_TEST(conformance_glsl_functions_glsl_function_floor,
    "conformance/glsl/functions/glsl-function-floor.html");
CONFORMANCE_TEST(conformance_glsl_functions_glsl_function_fract,
    "conformance/glsl/functions/glsl-function-fract.html");
CONFORMANCE_TEST(conformance_glsl_functions_glsl_function_length,
    "conformance/glsl/functions/glsl-function-length.html");
CONFORMANCE_TEST(conformance_glsl_functions_glsl_function_max_float,
    "conformance/glsl/functions/glsl-function-max-float.html");
CONFORMANCE_TEST(conformance_glsl_functions_glsl_function_max_gentype,
    "conformance/glsl/functions/glsl-function-max-gentype.html");
CONFORMANCE_TEST(conformance_glsl_functions_glsl_function_min_float,
    "conformance/glsl/functions/glsl-function-min-float.html");
CONFORMANCE_TEST(conformance_glsl_functions_glsl_function_min_gentype,
    "conformance/glsl/functions/glsl-function-min-gentype.html");
CONFORMANCE_TEST(conformance_glsl_functions_glsl_function_mix_float,
    "conformance/glsl/functions/glsl-function-mix-float.html");
CONFORMANCE_TEST(conformance_glsl_functions_glsl_function_mix_gentype,
    "conformance/glsl/functions/glsl-function-mix-gentype.html");
CONFORMANCE_TEST(conformance_glsl_functions_glsl_function_mod_float,
    "conformance/glsl/functions/glsl-function-mod-float.html");
CONFORMANCE_TEST(conformance_glsl_functions_glsl_function_mod_gentype,
    "conformance/glsl/functions/glsl-function-mod-gentype.html");
CONFORMANCE_TEST(conformance_glsl_functions_glsl_function_normalize,
    "conformance/glsl/functions/glsl-function-normalize.html");
CONFORMANCE_TEST(conformance_glsl_functions_glsl_function_reflect,
    "conformance/glsl/functions/glsl-function-reflect.html");
CONFORMANCE_TEST(conformance_glsl_functions_glsl_function_sign,
    "conformance/glsl/functions/glsl-function-sign.html");
CONFORMANCE_TEST(conformance_glsl_functions_glsl_function_sin,
    "conformance/glsl/functions/glsl-function-sin.html");
CONFORMANCE_TEST(conformance_glsl_functions_glsl_function_step_float,
    "conformance/glsl/functions/glsl-function-step-float.html");
CONFORMANCE_TEST(conformance_glsl_functions_glsl_function_step_gentype,
    "conformance/glsl/functions/glsl-function-step-gentype.html");
CONFORMANCE_TEST(conformance_glsl_functions_glsl_function_smoothstep_float,
    "conformance/glsl/functions/glsl-function-smoothstep-float.html");
CONFORMANCE_TEST(conformance_glsl_functions_glsl_function_smoothstep_gentype,
    "conformance/glsl/functions/glsl-function-smoothstep-gentype.html");

// conformance/glsl/implicit: implicit int->float conversions must be
// rejected by the GLSL ES compiler (one page per operator/type pairing;
// these paths keep underscores and use the .vert.html suffix).
CONFORMANCE_TEST(conformance_glsl_implicit_add_int_float_vert,
    "conformance/glsl/implicit/add_int_float.vert.html");
CONFORMANCE_TEST(conformance_glsl_implicit_add_int_mat2_vert,
    "conformance/glsl/implicit/add_int_mat2.vert.html");
CONFORMANCE_TEST(conformance_glsl_implicit_add_int_mat3_vert,
    "conformance/glsl/implicit/add_int_mat3.vert.html");
CONFORMANCE_TEST(conformance_glsl_implicit_add_int_mat4_vert,
    "conformance/glsl/implicit/add_int_mat4.vert.html");
CONFORMANCE_TEST(conformance_glsl_implicit_add_int_vec2_vert,
    "conformance/glsl/implicit/add_int_vec2.vert.html");
CONFORMANCE_TEST(conformance_glsl_implicit_add_int_vec3_vert,
    "conformance/glsl/implicit/add_int_vec3.vert.html");
CONFORMANCE_TEST(conformance_glsl_implicit_add_int_vec4_vert,
    "conformance/glsl/implicit/add_int_vec4.vert.html");
CONFORMANCE_TEST(conformance_glsl_implicit_add_ivec2_vec2_vert,
    "conformance/glsl/implicit/add_ivec2_vec2.vert.html");
CONFORMANCE_TEST(conformance_glsl_implicit_add_ivec3_vec3_vert,
    "conformance/glsl/implicit/add_ivec3_vec3.vert.html");
CONFORMANCE_TEST(conformance_glsl_implicit_add_ivec4_vec4_vert,
    "conformance/glsl/implicit/add_ivec4_vec4.vert.html");
CONFORMANCE_TEST(conformance_glsl_implicit_assign_int_to_float_vert,
    "conformance/glsl/implicit/assign_int_to_float.vert.html");
CONFORMANCE_TEST(conformance_glsl_implicit_assign_ivec2_to_vec2_vert,
    "conformance/glsl/implicit/assign_ivec2_to_vec2.vert.html");
CONFORMANCE_TEST(conformance_glsl_implicit_assign_ivec3_to_vec3_vert,
    "conformance/glsl/implicit/assign_ivec3_to_vec3.vert.html");
CONFORMANCE_TEST(conformance_glsl_implicit_assign_ivec4_to_vec4_vert,
    "conformance/glsl/implicit/assign_ivec4_to_vec4.vert.html");
CONFORMANCE_TEST(conformance_glsl_implicit_construct_struct_vert,
    "conformance/glsl/implicit/construct_struct.vert.html");
CONFORMANCE_TEST(conformance_glsl_implicit_divide_int_float_vert,
    "conformance/glsl/implicit/divide_int_float.vert.html");
CONFORMANCE_TEST(conformance_glsl_implicit_divide_int_mat2_vert,
    "conformance/glsl/implicit/divide_int_mat2.vert.html");
CONFORMANCE_TEST(conformance_glsl_implicit_divide_int_mat3_vert,
    "conformance/glsl/implicit/divide_int_mat3.vert.html");
CONFORMANCE_TEST(conformance_glsl_implicit_divide_int_mat4_vert,
    "conformance/glsl/implicit/divide_int_mat4.vert.html");
CONFORMANCE_TEST(conformance_glsl_implicit_divide_int_vec2_vert,
    "conformance/glsl/implicit/divide_int_vec2.vert.html");
CONFORMANCE_TEST(conformance_glsl_implicit_divide_int_vec3_vert,
    "conformance/glsl/implicit/divide_int_vec3.vert.html");
CONFORMANCE_TEST(conformance_glsl_implicit_divide_int_vec4_vert,
    "conformance/glsl/implicit/divide_int_vec4.vert.html");
CONFORMANCE_TEST(conformance_glsl_implicit_divide_ivec2_vec2_vert,
    "conformance/glsl/implicit/divide_ivec2_vec2.vert.html");
CONFORMANCE_TEST(conformance_glsl_implicit_divide_ivec3_vec3_vert,
    "conformance/glsl/implicit/divide_ivec3_vec3.vert.html");
CONFORMANCE_TEST(conformance_glsl_implicit_divide_ivec4_vec4_vert,
    "conformance/glsl/implicit/divide_ivec4_vec4.vert.html");
CONFORMANCE_TEST(conformance_glsl_implicit_equal_int_float_vert,
    "conformance/glsl/implicit/equal_int_float.vert.html");
CONFORMANCE_TEST(conformance_glsl_implicit_equal_ivec2_vec2_vert,
    "conformance/glsl/implicit/equal_ivec2_vec2.vert.html");
CONFORMANCE_TEST(conformance_glsl_implicit_equal_ivec3_vec3_vert,
    "conformance/glsl/implicit/equal_ivec3_vec3.vert.html");
CONFORMANCE_TEST(conformance_glsl_implicit_equal_ivec4_vec4_vert,
    "conformance/glsl/implicit/equal_ivec4_vec4.vert.html");
CONFORMANCE_TEST(conformance_glsl_implicit_function_int_float_vert,
    "conformance/glsl/implicit/function_int_float.vert.html");
CONFORMANCE_TEST(conformance_glsl_implicit_function_ivec2_vec2_vert,
    "conformance/glsl/implicit/function_ivec2_vec2.vert.html");
CONFORMANCE_TEST(conformance_glsl_implicit_function_ivec3_vec3_vert,
    "conformance/glsl/implicit/function_ivec3_vec3.vert.html");
CONFORMANCE_TEST(conformance_glsl_implicit_function_ivec4_vec4_vert,
    "conformance/glsl/implicit/function_ivec4_vec4.vert.html");
CONFORMANCE_TEST(conformance_glsl_implicit_greater_than_vert,
    "conformance/glsl/implicit/greater_than.vert.html");
CONFORMANCE_TEST(conformance_glsl_implicit_greater_than_equal_vert,
    "conformance/glsl/implicit/greater_than_equal.vert.html");
CONFORMANCE_TEST(conformance_glsl_implicit_less_than_vert,
    "conformance/glsl/implicit/less_than.vert.html");
CONFORMANCE_TEST(conformance_glsl_implicit_less_than_equal_vert,
    "conformance/glsl/implicit/less_than_equal.vert.html");
CONFORMANCE_TEST(conformance_glsl_implicit_multiply_int_float_vert,
    "conformance/glsl/implicit/multiply_int_float.vert.html");
CONFORMANCE_TEST(conformance_glsl_implicit_multiply_int_mat2_vert,
    "conformance/glsl/implicit/multiply_int_mat2.vert.html");
CONFORMANCE_TEST(conformance_glsl_implicit_multiply_int_mat3_vert,
    "conformance/glsl/implicit/multiply_int_mat3.vert.html");
CONFORMANCE_TEST(conformance_glsl_implicit_multiply_int_mat4_vert,
    "conformance/glsl/implicit/multiply_int_mat4.vert.html");
CONFORMANCE_TEST(conformance_glsl_implicit_multiply_int_vec2_vert,
    "conformance/glsl/implicit/multiply_int_vec2.vert.html");
CONFORMANCE_TEST(conformance_glsl_implicit_multiply_int_vec3_vert,
    "conformance/glsl/implicit/multiply_int_vec3.vert.html");
CONFORMANCE_TEST(conformance_glsl_implicit_multiply_int_vec4_vert,
    "conformance/glsl/implicit/multiply_int_vec4.vert.html");
CONFORMANCE_TEST(conformance_glsl_implicit_multiply_ivec2_vec2_vert,
    "conformance/glsl/implicit/multiply_ivec2_vec2.vert.html");
CONFORMANCE_TEST(conformance_glsl_implicit_multiply_ivec3_vec3_vert,
    "conformance/glsl/implicit/multiply_ivec3_vec3.vert.html");
CONFORMANCE_TEST(conformance_glsl_implicit_multiply_ivec4_vec4_vert,
    "conformance/glsl/implicit/multiply_ivec4_vec4.vert.html");
CONFORMANCE_TEST(conformance_glsl_implicit_not_equal_int_float_vert,
    "conformance/glsl/implicit/not_equal_int_float.vert.html");
CONFORMANCE_TEST(conformance_glsl_implicit_not_equal_ivec2_vec2_vert,
    "conformance/glsl/implicit/not_equal_ivec2_vec2.vert.html");
CONFORMANCE_TEST(conformance_glsl_implicit_not_equal_ivec3_vec3_vert,
    "conformance/glsl/implicit/not_equal_ivec3_vec3.vert.html");
CONFORMANCE_TEST(conformance_glsl_implicit_not_equal_ivec4_vec4_vert,
    "conformance/glsl/implicit/not_equal_ivec4_vec4.vert.html");
CONFORMANCE_TEST(conformance_glsl_implicit_subtract_int_float_vert,
    "conformance/glsl/implicit/subtract_int_float.vert.html");
CONFORMANCE_TEST(conformance_glsl_implicit_subtract_int_mat2_vert,
    "conformance/glsl/implicit/subtract_int_mat2.vert.html");
CONFORMANCE_TEST(conformance_glsl_implicit_subtract_int_mat3_vert,
    "conformance/glsl/implicit/subtract_int_mat3.vert.html");
CONFORMANCE_TEST(conformance_glsl_implicit_subtract_int_mat4_vert,
    "conformance/glsl/implicit/subtract_int_mat4.vert.html");
CONFORMANCE_TEST(conformance_glsl_implicit_subtract_int_vec2_vert,
    "conformance/glsl/implicit/subtract_int_vec2.vert.html");
CONFORMANCE_TEST(conformance_glsl_implicit_subtract_int_vec3_vert,
    "conformance/glsl/implicit/subtract_int_vec3.vert.html");
CONFORMANCE_TEST(conformance_glsl_implicit_subtract_int_vec4_vert,
    "conformance/glsl/implicit/subtract_int_vec4.vert.html");
CONFORMANCE_TEST(conformance_glsl_implicit_subtract_ivec2_vec2_vert,
    "conformance/glsl/implicit/subtract_ivec2_vec2.vert.html");
CONFORMANCE_TEST(conformance_glsl_implicit_subtract_ivec3_vec3_vert,
    "conformance/glsl/implicit/subtract_ivec3_vec3.vert.html");
CONFORMANCE_TEST(conformance_glsl_implicit_subtract_ivec4_vec4_vert,
    "conformance/glsl/implicit/subtract_ivec4_vec4.vert.html");
CONFORMANCE_TEST(conformance_glsl_implicit_ternary_int_float_vert,
    "conformance/glsl/implicit/ternary_int_float.vert.html");
CONFORMANCE_TEST(conformance_glsl_implicit_ternary_ivec2_vec2_vert,
    "conformance/glsl/implicit/ternary_ivec2_vec2.vert.html");
CONFORMANCE_TEST(conformance_glsl_implicit_ternary_ivec3_vec3_vert,
    "conformance/glsl/implicit/ternary_ivec3_vec3.vert.html");
CONFORMANCE_TEST(conformance_glsl_implicit_ternary_ivec4_vec4_vert,
    "conformance/glsl/implicit/ternary_ivec4_vec4.vert.html");

// conformance/glsl/matrices: matrix conversions.
CONFORMANCE_TEST(conformance_glsl_matrices_glsl_mat4_to_mat3,
    "conformance/glsl/matrices/glsl-mat4-to-mat3.html");

// conformance/glsl/misc: shader compiler acceptance/rejection cases
// (list continues past the end of this chunk).
CONFORMANCE_TEST(conformance_glsl_misc_attrib_location_length_limits,
    "conformance/glsl/misc/attrib-location-length-limits.html");
CONFORMANCE_TEST(conformance_glsl_misc_embedded_struct_definitions_forbidden,
    "conformance/glsl/misc/embedded-struct-definitions-forbidden.html");
CONFORMANCE_TEST(conformance_glsl_misc_glsl_function_nodes,
    "conformance/glsl/misc/glsl-function-nodes.html");
CONFORMANCE_TEST(conformance_glsl_misc_glsl_vertex_branch,
    "conformance/glsl/misc/glsl-vertex-branch.html");
CONFORMANCE_TEST(conformance_glsl_misc_glsl_long_variable_names,
    "conformance/glsl/misc/glsl-long-variable-names.html");
CONFORMANCE_TEST(conformance_glsl_misc_non_ascii_comments_vert,
    "conformance/glsl/misc/non-ascii-comments.vert.html");
CONFORMANCE_TEST(conformance_glsl_misc_non_ascii_vert,
    "conformance/glsl/misc/non-ascii.vert.html");
CONFORMANCE_TEST(conformance_glsl_misc_re_compile_re_link,
    "conformance/glsl/misc/re-compile-re-link.html");
CONFORMANCE_TEST(conformance_glsl_misc_shader_with_256_character_define,
    "conformance/glsl/misc/shader-with-256-character-define.html");
CONFORMANCE_TEST(conformance_glsl_misc_shader_with_256_character_identifier_frag,
    "conformance/glsl/misc/shader-with-256-character-identifier.frag.html");
CONFORMANCE_TEST(conformance_glsl_misc_shader_with_257_character_define,
    "conformance/glsl/misc/shader-with-257-character-define.html");
CONFORMANCE_TEST(conformance_glsl_misc_shader_with_257_character_identifier_frag,
    "conformance/glsl/misc/shader-with-257-character-identifier.frag.html");
CONFORMANCE_TEST(conformance_glsl_misc_shader_with__webgl_identifier_vert,
    "conformance/glsl/misc/shader-with-_webgl-identifier.vert.html");
CONFORMANCE_TEST(conformance_glsl_misc_shader_with_arbitrary_indexing_frag,
    "conformance/glsl/misc/shader-with-arbitrary-indexing.frag.html");
CONFORMANCE_TEST(conformance_glsl_misc_shader_with_arbitrary_indexing_vert,
    "conformance/glsl/misc/shader-with-arbitrary-indexing.vert.html");
CONFORMANCE_TEST(conformance_glsl_misc_shader_with_attrib_array_vert,
    "conformance/glsl/misc/shader-with-attrib-array.vert.html");
CONFORMANCE_TEST(conformance_glsl_misc_shader_with_attrib_struct_vert,
    "conformance/glsl/misc/shader-with-attrib-struct.vert.html");
CONFORMANCE_TEST(conformance_glsl_misc_shader_with_clipvertex_vert,
    "conformance/glsl/misc/shader-with-clipvertex.vert.html");
CONFORMANCE_TEST(conformance_glsl_misc_shader_with_conditional_scoping,
    "conformance/glsl/misc/shader-with-conditional-scoping.html");
CONFORMANCE_TEST(conformance_glsl_misc_shader_with_conditional_scoping_negative,
    "conformance/glsl/misc/shader-with-conditional-scoping-negative.html");
CONFORMANCE_TEST(conformance_glsl_misc_shader_with_default_precision_frag,
    "conformance/glsl/misc/shader-with-default-precision.frag.html");
+CONFORMANCE_TEST(conformance_glsl_misc_shader_with_default_precision_vert, + "conformance/glsl/misc/shader-with-default-precision.vert.html"); +CONFORMANCE_TEST(conformance_glsl_misc_shader_with_define_line_continuation_frag, + "conformance/glsl/misc/shader-with-define-line-continuation.frag.html"); +CONFORMANCE_TEST(conformance_glsl_misc_shader_with_dfdx_no_ext_frag, + "conformance/glsl/misc/shader-with-dfdx-no-ext.frag.html"); +CONFORMANCE_TEST(conformance_glsl_misc_shader_with_dfdx_frag, + "conformance/glsl/misc/shader-with-dfdx.frag.html"); +CONFORMANCE_TEST(conformance_glsl_misc_shader_with_do_loop, + "conformance/glsl/misc/shader-with-do-loop.html"); +CONFORMANCE_TEST(conformance_glsl_misc_shader_with_error_directive, + "conformance/glsl/misc/shader-with-error-directive.html"); +CONFORMANCE_TEST(conformance_glsl_misc_shader_with_explicit_int_cast_vert, + "conformance/glsl/misc/shader-with-explicit-int-cast.vert.html"); +CONFORMANCE_TEST(conformance_glsl_misc_shader_with_float_return_value_frag, + "conformance/glsl/misc/shader-with-float-return-value.frag.html"); +CONFORMANCE_TEST(conformance_glsl_misc_shader_with_for_scoping, + "conformance/glsl/misc/shader-with-for-scoping.html"); +CONFORMANCE_TEST(conformance_glsl_misc_shader_with_for_loop, + "conformance/glsl/misc/shader-with-for-loop.html"); +CONFORMANCE_TEST(conformance_glsl_misc_shader_with_frag_depth_frag, + "conformance/glsl/misc/shader-with-frag-depth.frag.html"); +CONFORMANCE_TEST(conformance_glsl_misc_shader_with_function_recursion_frag, + "conformance/glsl/misc/shader-with-function-recursion.frag.html"); +CONFORMANCE_TEST(conformance_glsl_misc_shader_with_function_scoped_struct, + "conformance/glsl/misc/shader-with-function-scoped-struct.html"); +CONFORMANCE_TEST(conformance_glsl_misc_shader_with_functional_scoping, + "conformance/glsl/misc/shader-with-functional-scoping.html"); +CONFORMANCE_TEST(conformance_glsl_misc_shader_with_comma_assignment, + 
"conformance/glsl/misc/shader-with-comma-assignment.html"); +CONFORMANCE_TEST(conformance_glsl_misc_shader_with_comma_conditional_assignment, + "conformance/glsl/misc/shader-with-comma-conditional-assignment.html"); +CONFORMANCE_TEST(conformance_glsl_misc_shader_with_glcolor_vert, + "conformance/glsl/misc/shader-with-glcolor.vert.html"); +CONFORMANCE_TEST(conformance_glsl_misc_shader_with_gles_1_frag, + "conformance/glsl/misc/shader-with-gles-1.frag.html"); +CONFORMANCE_TEST(conformance_glsl_misc_shader_with_gles_symbol_frag, + "conformance/glsl/misc/shader-with-gles-symbol.frag.html"); +CONFORMANCE_TEST(conformance_glsl_misc_shader_with_glprojectionmatrix_vert, + "conformance/glsl/misc/shader-with-glprojectionmatrix.vert.html"); +CONFORMANCE_TEST(conformance_glsl_misc_shader_with_implicit_vec3_to_vec4_cast_vert, + "conformance/glsl/misc/shader-with-implicit-vec3-to-vec4-cast.vert.html"); +CONFORMANCE_TEST(conformance_glsl_misc_shader_with_include_vert, + "conformance/glsl/misc/shader-with-include.vert.html"); +CONFORMANCE_TEST(conformance_glsl_misc_shader_with_int_return_value_frag, + "conformance/glsl/misc/shader-with-int-return-value.frag.html"); +CONFORMANCE_TEST(conformance_glsl_misc_shader_with_invalid_identifier_frag, + "conformance/glsl/misc/shader-with-invalid-identifier.frag.html"); +CONFORMANCE_TEST(conformance_glsl_misc_shader_with_ivec2_return_value_frag, + "conformance/glsl/misc/shader-with-ivec2-return-value.frag.html"); +CONFORMANCE_TEST(conformance_glsl_misc_shader_with_ivec3_return_value_frag, + "conformance/glsl/misc/shader-with-ivec3-return-value.frag.html"); +CONFORMANCE_TEST(conformance_glsl_misc_shader_with_ivec4_return_value_frag, + "conformance/glsl/misc/shader-with-ivec4-return-value.frag.html"); +CONFORMANCE_TEST(conformance_glsl_misc_shader_with_limited_indexing_frag, + "conformance/glsl/misc/shader-with-limited-indexing.frag.html"); +CONFORMANCE_TEST(conformance_glsl_misc_shader_with_hex_int_constant_macro, + 
"conformance/glsl/misc/shader-with-hex-int-constant-macro.html"); +CONFORMANCE_TEST(conformance_glsl_misc_shader_with_long_line, + "conformance/glsl/misc/shader-with-long-line.html"); +CONFORMANCE_TEST(conformance_glsl_misc_shader_with_non_ascii_error_frag, + "conformance/glsl/misc/shader-with-non-ascii-error.frag.html"); +CONFORMANCE_TEST(conformance_glsl_misc_shader_with_non_reserved_words, + "conformance/glsl/misc/shader-with-non-reserved-words.html"); +CONFORMANCE_TEST(conformance_glsl_misc_shader_with_precision_frag, + "conformance/glsl/misc/shader-with-precision.frag.html"); +CONFORMANCE_TEST(conformance_glsl_misc_shader_with_quoted_error_frag, + "conformance/glsl/misc/shader-with-quoted-error.frag.html"); +CONFORMANCE_TEST(conformance_glsl_misc_shader_with_reserved_words, + "conformance/glsl/misc/shader-with-reserved-words.html"); +CONFORMANCE_TEST(conformance_glsl_misc_shader_with_undefined_preprocessor_symbol_frag, + "conformance/glsl/misc/shader-with-undefined-preprocessor-symbol.frag.html"); +CONFORMANCE_TEST(conformance_glsl_misc_shader_with_uniform_in_loop_condition_vert, + "conformance/glsl/misc/shader-with-uniform-in-loop-condition.vert.html"); +CONFORMANCE_TEST(conformance_glsl_misc_shader_with_vec2_return_value_frag, + "conformance/glsl/misc/shader-with-vec2-return-value.frag.html"); +CONFORMANCE_TEST(conformance_glsl_misc_shader_with_vec3_return_value_frag, + "conformance/glsl/misc/shader-with-vec3-return-value.frag.html"); +CONFORMANCE_TEST(conformance_glsl_misc_shader_with_vec4_return_value_frag, + "conformance/glsl/misc/shader-with-vec4-return-value.frag.html"); +CONFORMANCE_TEST(conformance_glsl_misc_shader_with_vec4_vec3_vec4_conditional, + "conformance/glsl/misc/shader-with-vec4-vec3-vec4-conditional.html"); +CONFORMANCE_TEST(conformance_glsl_misc_shader_with_version_100_frag, + "conformance/glsl/misc/shader-with-version-100.frag.html"); +CONFORMANCE_TEST(conformance_glsl_misc_shader_with_version_100_vert, + 
"conformance/glsl/misc/shader-with-version-100.vert.html"); +CONFORMANCE_TEST(conformance_glsl_misc_shader_with_version_120_vert, + "conformance/glsl/misc/shader-with-version-120.vert.html"); +CONFORMANCE_TEST(conformance_glsl_misc_shader_with_version_130_vert, + "conformance/glsl/misc/shader-with-version-130.vert.html"); +CONFORMANCE_TEST(conformance_glsl_misc_shader_with_webgl_identifier_vert, + "conformance/glsl/misc/shader-with-webgl-identifier.vert.html"); +CONFORMANCE_TEST(conformance_glsl_misc_shader_with_while_loop, + "conformance/glsl/misc/shader-with-while-loop.html"); +CONFORMANCE_TEST(conformance_glsl_misc_shader_without_precision_frag, + "conformance/glsl/misc/shader-without-precision.frag.html"); +CONFORMANCE_TEST(conformance_glsl_misc_shared, + "conformance/glsl/misc/shared.html"); +CONFORMANCE_TEST(conformance_glsl_misc_struct_nesting_exceeds_maximum, + "conformance/glsl/misc/struct-nesting-exceeds-maximum.html"); +CONFORMANCE_TEST(conformance_glsl_misc_struct_nesting_under_maximum, + "conformance/glsl/misc/struct-nesting-under-maximum.html"); +CONFORMANCE_TEST(conformance_glsl_misc_uniform_location_length_limits, + "conformance/glsl/misc/uniform-location-length-limits.html"); +CONFORMANCE_TEST(conformance_glsl_misc_shader_with_short_circuiting_operators, + "conformance/glsl/misc/shader-with-short-circuiting-operators.html"); +CONFORMANCE_TEST(conformance_glsl_reserved__webgl_field_vert, + "conformance/glsl/reserved/_webgl_field.vert.html"); +CONFORMANCE_TEST(conformance_glsl_reserved__webgl_function_vert, + "conformance/glsl/reserved/_webgl_function.vert.html"); +CONFORMANCE_TEST(conformance_glsl_reserved__webgl_struct_vert, + "conformance/glsl/reserved/_webgl_struct.vert.html"); +CONFORMANCE_TEST(conformance_glsl_reserved__webgl_variable_vert, + "conformance/glsl/reserved/_webgl_variable.vert.html"); +CONFORMANCE_TEST(conformance_glsl_reserved_webgl_field_vert, + "conformance/glsl/reserved/webgl_field.vert.html"); 
+CONFORMANCE_TEST(conformance_glsl_reserved_webgl_function_vert, + "conformance/glsl/reserved/webgl_function.vert.html"); +CONFORMANCE_TEST(conformance_glsl_reserved_webgl_struct_vert, + "conformance/glsl/reserved/webgl_struct.vert.html"); +CONFORMANCE_TEST(conformance_glsl_reserved_webgl_variable_vert, + "conformance/glsl/reserved/webgl_variable.vert.html"); +CONFORMANCE_TEST(conformance_glsl_samplers_glsl_function_texture2d_bias, + "conformance/glsl/samplers/glsl-function-texture2d-bias.html"); +CONFORMANCE_TEST(conformance_glsl_samplers_glsl_function_texture2dlod, + "conformance/glsl/samplers/glsl-function-texture2dlod.html"); +CONFORMANCE_TEST(conformance_glsl_samplers_glsl_function_texture2dproj, + "conformance/glsl/samplers/glsl-function-texture2dproj.html"); +CONFORMANCE_TEST(conformance_glsl_variables_gl_fragcoord, + "conformance/glsl/variables/gl-fragcoord.html"); +CONFORMANCE_TEST(conformance_glsl_variables_gl_frontfacing, + "conformance/glsl/variables/gl-frontfacing.html"); +CONFORMANCE_TEST(conformance_glsl_variables_gl_pointcoord, + "conformance/glsl/variables/gl-pointcoord.html"); +CONFORMANCE_TEST(conformance_ogles_GL_abs_abs_001_to_006, + "conformance/ogles/GL/abs/abs_001_to_006.html"); +CONFORMANCE_TEST(conformance_ogles_GL_acos_acos_001_to_006, + "conformance/ogles/GL/acos/acos_001_to_006.html"); +CONFORMANCE_TEST(conformance_ogles_GL_all_all_001_to_004, + "conformance/ogles/GL/all/all_001_to_004.html"); +CONFORMANCE_TEST(conformance_ogles_GL_any_any_001_to_004, + "conformance/ogles/GL/any/any_001_to_004.html"); +CONFORMANCE_TEST(conformance_ogles_GL_array_array_001_to_006, + "conformance/ogles/GL/array/array_001_to_006.html"); +CONFORMANCE_TEST(conformance_ogles_GL_asin_asin_001_to_006, + "conformance/ogles/GL/asin/asin_001_to_006.html"); +CONFORMANCE_TEST(conformance_ogles_GL_atan_atan_001_to_008, + "conformance/ogles/GL/atan/atan_001_to_008.html"); +CONFORMANCE_TEST(conformance_ogles_GL_atan_atan_009_to_012, + 
"conformance/ogles/GL/atan/atan_009_to_012.html"); +CONFORMANCE_TEST(conformance_ogles_GL_biConstants_biConstants_001_to_008, + "conformance/ogles/GL/biConstants/biConstants_001_to_008.html"); +CONFORMANCE_TEST(conformance_ogles_GL_biConstants_biConstants_009_to_016, + "conformance/ogles/GL/biConstants/biConstants_009_to_016.html"); +CONFORMANCE_TEST(conformance_ogles_GL_biuDepthRange_biuDepthRange_001_to_002, + "conformance/ogles/GL/biuDepthRange/biuDepthRange_001_to_002.html"); +CONFORMANCE_TEST(conformance_ogles_GL_build_build_001_to_008, + "conformance/ogles/GL/build/build_001_to_008.html"); +CONFORMANCE_TEST(conformance_ogles_GL_build_build_009_to_016, + "conformance/ogles/GL/build/build_009_to_016.html"); +CONFORMANCE_TEST(conformance_ogles_GL_build_build_017_to_024, + "conformance/ogles/GL/build/build_017_to_024.html"); +CONFORMANCE_TEST(conformance_ogles_GL_build_build_025_to_032, + "conformance/ogles/GL/build/build_025_to_032.html"); +CONFORMANCE_TEST(conformance_ogles_GL_build_build_033_to_040, + "conformance/ogles/GL/build/build_033_to_040.html"); +CONFORMANCE_TEST(conformance_ogles_GL_build_build_041_to_048, + "conformance/ogles/GL/build/build_041_to_048.html"); +CONFORMANCE_TEST(conformance_ogles_GL_build_build_049_to_056, + "conformance/ogles/GL/build/build_049_to_056.html"); +CONFORMANCE_TEST(conformance_ogles_GL_build_build_057_to_064, + "conformance/ogles/GL/build/build_057_to_064.html"); +CONFORMANCE_TEST(conformance_ogles_GL_build_build_065_to_072, + "conformance/ogles/GL/build/build_065_to_072.html"); +CONFORMANCE_TEST(conformance_ogles_GL_build_build_073_to_080, + "conformance/ogles/GL/build/build_073_to_080.html"); +CONFORMANCE_TEST(conformance_ogles_GL_build_build_081_to_088, + "conformance/ogles/GL/build/build_081_to_088.html"); +CONFORMANCE_TEST(conformance_ogles_GL_build_build_089_to_096, + "conformance/ogles/GL/build/build_089_to_096.html"); +CONFORMANCE_TEST(conformance_ogles_GL_build_build_097_to_104, + 
"conformance/ogles/GL/build/build_097_to_104.html"); +CONFORMANCE_TEST(conformance_ogles_GL_build_build_105_to_112, + "conformance/ogles/GL/build/build_105_to_112.html"); +CONFORMANCE_TEST(conformance_ogles_GL_build_build_113_to_120, + "conformance/ogles/GL/build/build_113_to_120.html"); +CONFORMANCE_TEST(conformance_ogles_GL_build_build_121_to_128, + "conformance/ogles/GL/build/build_121_to_128.html"); +CONFORMANCE_TEST(conformance_ogles_GL_build_build_129_to_136, + "conformance/ogles/GL/build/build_129_to_136.html"); +CONFORMANCE_TEST(conformance_ogles_GL_build_build_137_to_144, + "conformance/ogles/GL/build/build_137_to_144.html"); +CONFORMANCE_TEST(conformance_ogles_GL_build_build_145_to_152, + "conformance/ogles/GL/build/build_145_to_152.html"); +CONFORMANCE_TEST(conformance_ogles_GL_build_build_153_to_160, + "conformance/ogles/GL/build/build_153_to_160.html"); +CONFORMANCE_TEST(conformance_ogles_GL_build_build_161_to_168, + "conformance/ogles/GL/build/build_161_to_168.html"); +CONFORMANCE_TEST(conformance_ogles_GL_build_build_169_to_176, + "conformance/ogles/GL/build/build_169_to_176.html"); +CONFORMANCE_TEST(conformance_ogles_GL_build_build_177_to_178, + "conformance/ogles/GL/build/build_177_to_178.html"); +CONFORMANCE_TEST(conformance_ogles_GL_built_in_varying_array_out_of_bounds_built_in_varying_array_out_of_bounds_001_to_001, + "conformance/ogles/GL/built_in_varying_array_out_of_bounds/built_in_varying_array_out_of_bounds_001_to_001.html"); +CONFORMANCE_TEST(conformance_ogles_GL_ceil_ceil_001_to_006, + "conformance/ogles/GL/ceil/ceil_001_to_006.html"); +CONFORMANCE_TEST(conformance_ogles_GL_clamp_clamp_001_to_006, + "conformance/ogles/GL/clamp/clamp_001_to_006.html"); +CONFORMANCE_TEST(conformance_ogles_GL_control_flow_control_flow_001_to_008, + "conformance/ogles/GL/control_flow/control_flow_001_to_008.html"); +CONFORMANCE_TEST(conformance_ogles_GL_control_flow_control_flow_009_to_010, + "conformance/ogles/GL/control_flow/control_flow_009_to_010.html"); 
+CONFORMANCE_TEST(conformance_ogles_GL_cos_cos_001_to_006, + "conformance/ogles/GL/cos/cos_001_to_006.html"); +CONFORMANCE_TEST(conformance_ogles_GL_cross_cross_001_to_002, + "conformance/ogles/GL/cross/cross_001_to_002.html"); +CONFORMANCE_TEST(conformance_ogles_GL_default_default_001_to_001, + "conformance/ogles/GL/default/default_001_to_001.html"); +CONFORMANCE_TEST(conformance_ogles_GL_degrees_degrees_001_to_006, + "conformance/ogles/GL/degrees/degrees_001_to_006.html"); +CONFORMANCE_TEST(conformance_ogles_GL_discard_discard_001_to_002, + "conformance/ogles/GL/discard/discard_001_to_002.html"); +CONFORMANCE_TEST(conformance_ogles_GL_distance_distance_001_to_006, + "conformance/ogles/GL/distance/distance_001_to_006.html"); +CONFORMANCE_TEST(conformance_ogles_GL_dot_dot_001_to_006, + "conformance/ogles/GL/dot/dot_001_to_006.html"); +CONFORMANCE_TEST(conformance_ogles_GL_equal_equal_001_to_008, + "conformance/ogles/GL/equal/equal_001_to_008.html"); +CONFORMANCE_TEST(conformance_ogles_GL_equal_equal_009_to_012, + "conformance/ogles/GL/equal/equal_009_to_012.html"); +CONFORMANCE_TEST(conformance_ogles_GL_exp_exp_001_to_008, + "conformance/ogles/GL/exp/exp_001_to_008.html"); +CONFORMANCE_TEST(conformance_ogles_GL_exp_exp_009_to_012, + "conformance/ogles/GL/exp/exp_009_to_012.html"); +CONFORMANCE_TEST(conformance_ogles_GL_exp2_exp2_001_to_008, + "conformance/ogles/GL/exp2/exp2_001_to_008.html"); +CONFORMANCE_TEST(conformance_ogles_GL_exp2_exp2_009_to_012, + "conformance/ogles/GL/exp2/exp2_009_to_012.html"); +CONFORMANCE_TEST(conformance_ogles_GL_faceforward_faceforward_001_to_006, + "conformance/ogles/GL/faceforward/faceforward_001_to_006.html"); +CONFORMANCE_TEST(conformance_ogles_GL_floor_floor_001_to_006, + "conformance/ogles/GL/floor/floor_001_to_006.html"); +CONFORMANCE_TEST(conformance_ogles_GL_fract_fract_001_to_006, + "conformance/ogles/GL/fract/fract_001_to_006.html"); +CONFORMANCE_TEST(conformance_ogles_GL_functions_functions_001_to_008, + 
"conformance/ogles/GL/functions/functions_001_to_008.html"); +CONFORMANCE_TEST(conformance_ogles_GL_functions_functions_009_to_016, + "conformance/ogles/GL/functions/functions_009_to_016.html"); +CONFORMANCE_TEST(conformance_ogles_GL_functions_functions_017_to_024, + "conformance/ogles/GL/functions/functions_017_to_024.html"); +CONFORMANCE_TEST(conformance_ogles_GL_functions_functions_025_to_032, + "conformance/ogles/GL/functions/functions_025_to_032.html"); +CONFORMANCE_TEST(conformance_ogles_GL_functions_functions_033_to_040, + "conformance/ogles/GL/functions/functions_033_to_040.html"); +CONFORMANCE_TEST(conformance_ogles_GL_functions_functions_041_to_048, + "conformance/ogles/GL/functions/functions_041_to_048.html"); +CONFORMANCE_TEST(conformance_ogles_GL_functions_functions_049_to_056, + "conformance/ogles/GL/functions/functions_049_to_056.html"); +CONFORMANCE_TEST(conformance_ogles_GL_functions_functions_057_to_064, + "conformance/ogles/GL/functions/functions_057_to_064.html"); +CONFORMANCE_TEST(conformance_ogles_GL_functions_functions_065_to_072, + "conformance/ogles/GL/functions/functions_065_to_072.html"); +CONFORMANCE_TEST(conformance_ogles_GL_functions_functions_073_to_080, + "conformance/ogles/GL/functions/functions_073_to_080.html"); +CONFORMANCE_TEST(conformance_ogles_GL_functions_functions_081_to_088, + "conformance/ogles/GL/functions/functions_081_to_088.html"); +CONFORMANCE_TEST(conformance_ogles_GL_functions_functions_089_to_096, + "conformance/ogles/GL/functions/functions_089_to_096.html"); +CONFORMANCE_TEST(conformance_ogles_GL_functions_functions_097_to_104, + "conformance/ogles/GL/functions/functions_097_to_104.html"); +CONFORMANCE_TEST(conformance_ogles_GL_functions_functions_105_to_112, + "conformance/ogles/GL/functions/functions_105_to_112.html"); +CONFORMANCE_TEST(conformance_ogles_GL_functions_functions_113_to_120, + "conformance/ogles/GL/functions/functions_113_to_120.html"); 
+CONFORMANCE_TEST(conformance_ogles_GL_functions_functions_121_to_126, + "conformance/ogles/GL/functions/functions_121_to_126.html"); +CONFORMANCE_TEST(conformance_ogles_GL_gl_FragCoord_gl_FragCoord_001_to_003, + "conformance/ogles/GL/gl_FragCoord/gl_FragCoord_001_to_003.html"); +CONFORMANCE_TEST(conformance_ogles_GL_gl_FrontFacing_gl_FrontFacing_001_to_001, + "conformance/ogles/GL/gl_FrontFacing/gl_FrontFacing_001_to_001.html"); +CONFORMANCE_TEST(conformance_ogles_GL_greaterThan_greaterThan_001_to_008, + "conformance/ogles/GL/greaterThan/greaterThan_001_to_008.html"); +CONFORMANCE_TEST(conformance_ogles_GL_greaterThanEqual_greaterThanEqual_001_to_008, + "conformance/ogles/GL/greaterThanEqual/greaterThanEqual_001_to_008.html"); +CONFORMANCE_TEST(conformance_ogles_GL_inversesqrt_inversesqrt_001_to_006, + "conformance/ogles/GL/inversesqrt/inversesqrt_001_to_006.html"); +CONFORMANCE_TEST(conformance_ogles_GL_length_length_001_to_006, + "conformance/ogles/GL/length/length_001_to_006.html"); +CONFORMANCE_TEST(conformance_ogles_GL_lessThan_lessThan_001_to_008, + "conformance/ogles/GL/lessThan/lessThan_001_to_008.html"); +CONFORMANCE_TEST(conformance_ogles_GL_lessThanEqual_lessThanEqual_001_to_008, + "conformance/ogles/GL/lessThanEqual/lessThanEqual_001_to_008.html"); +CONFORMANCE_TEST(conformance_ogles_GL_log_log_001_to_008, + "conformance/ogles/GL/log/log_001_to_008.html"); +CONFORMANCE_TEST(conformance_ogles_GL_log_log_009_to_012, + "conformance/ogles/GL/log/log_009_to_012.html"); +CONFORMANCE_TEST(conformance_ogles_GL_log2_log2_001_to_008, + "conformance/ogles/GL/log2/log2_001_to_008.html"); +CONFORMANCE_TEST(conformance_ogles_GL_log2_log2_009_to_012, + "conformance/ogles/GL/log2/log2_009_to_012.html"); +CONFORMANCE_TEST(conformance_ogles_GL_mat_mat_001_to_008, + "conformance/ogles/GL/mat/mat_001_to_008.html"); +CONFORMANCE_TEST(conformance_ogles_GL_mat_mat_009_to_016, + "conformance/ogles/GL/mat/mat_009_to_016.html"); 
+CONFORMANCE_TEST(conformance_ogles_GL_mat_mat_017_to_024, + "conformance/ogles/GL/mat/mat_017_to_024.html"); +CONFORMANCE_TEST(conformance_ogles_GL_mat_mat_025_to_032, + "conformance/ogles/GL/mat/mat_025_to_032.html"); +CONFORMANCE_TEST(conformance_ogles_GL_mat_mat_033_to_040, + "conformance/ogles/GL/mat/mat_033_to_040.html"); +CONFORMANCE_TEST(conformance_ogles_GL_mat_mat_041_to_046, + "conformance/ogles/GL/mat/mat_041_to_046.html"); +CONFORMANCE_TEST(conformance_ogles_GL_mat3_mat3_001_to_006, + "conformance/ogles/GL/mat3/mat3_001_to_006.html"); +CONFORMANCE_TEST(conformance_ogles_GL_matrixCompMult_matrixCompMult_001_to_004, + "conformance/ogles/GL/matrixCompMult/matrixCompMult_001_to_004.html"); +CONFORMANCE_TEST(conformance_ogles_GL_max_max_001_to_006, + "conformance/ogles/GL/max/max_001_to_006.html"); +CONFORMANCE_TEST(conformance_ogles_GL_min_min_001_to_006, + "conformance/ogles/GL/min/min_001_to_006.html"); +CONFORMANCE_TEST(conformance_ogles_GL_mix_mix_001_to_006, + "conformance/ogles/GL/mix/mix_001_to_006.html"); +CONFORMANCE_TEST(conformance_ogles_GL_mod_mod_001_to_008, + "conformance/ogles/GL/mod/mod_001_to_008.html"); +CONFORMANCE_TEST(conformance_ogles_GL_normalize_normalize_001_to_006, + "conformance/ogles/GL/normalize/normalize_001_to_006.html"); +CONFORMANCE_TEST(conformance_ogles_GL_not_not_001_to_004, + "conformance/ogles/GL/not/not_001_to_004.html"); +CONFORMANCE_TEST(conformance_ogles_GL_notEqual_notEqual_001_to_008, + "conformance/ogles/GL/notEqual/notEqual_001_to_008.html"); +CONFORMANCE_TEST(conformance_ogles_GL_notEqual_notEqual_009_to_012, + "conformance/ogles/GL/notEqual/notEqual_009_to_012.html"); +CONFORMANCE_TEST(conformance_ogles_GL_operators_operators_001_to_008, + "conformance/ogles/GL/operators/operators_001_to_008.html"); +CONFORMANCE_TEST(conformance_ogles_GL_operators_operators_009_to_016, + "conformance/ogles/GL/operators/operators_009_to_016.html"); +CONFORMANCE_TEST(conformance_ogles_GL_operators_operators_017_to_024, + 
"conformance/ogles/GL/operators/operators_017_to_024.html"); +CONFORMANCE_TEST(conformance_ogles_GL_operators_operators_025_to_026, + "conformance/ogles/GL/operators/operators_025_to_026.html"); +CONFORMANCE_TEST(conformance_ogles_GL_pow_pow_001_to_008, + "conformance/ogles/GL/pow/pow_001_to_008.html"); +CONFORMANCE_TEST(conformance_ogles_GL_pow_pow_009_to_016, + "conformance/ogles/GL/pow/pow_009_to_016.html"); +CONFORMANCE_TEST(conformance_ogles_GL_pow_pow_017_to_024, + "conformance/ogles/GL/pow/pow_017_to_024.html"); +CONFORMANCE_TEST(conformance_ogles_GL_radians_radians_001_to_006, + "conformance/ogles/GL/radians/radians_001_to_006.html"); +CONFORMANCE_TEST(conformance_ogles_GL_reflect_reflect_001_to_006, + "conformance/ogles/GL/reflect/reflect_001_to_006.html"); +CONFORMANCE_TEST(conformance_ogles_GL_refract_refract_001_to_006, + "conformance/ogles/GL/refract/refract_001_to_006.html"); +CONFORMANCE_TEST(conformance_ogles_GL_sign_sign_001_to_006, + "conformance/ogles/GL/sign/sign_001_to_006.html"); +CONFORMANCE_TEST(conformance_ogles_GL_sin_sin_001_to_006, + "conformance/ogles/GL/sin/sin_001_to_006.html"); +CONFORMANCE_TEST(conformance_ogles_GL_smoothstep_smoothstep_001_to_006, + "conformance/ogles/GL/smoothstep/smoothstep_001_to_006.html"); +CONFORMANCE_TEST(conformance_ogles_GL_sqrt_sqrt_001_to_006, + "conformance/ogles/GL/sqrt/sqrt_001_to_006.html"); +CONFORMANCE_TEST(conformance_ogles_GL_step_step_001_to_006, + "conformance/ogles/GL/step/step_001_to_006.html"); +CONFORMANCE_TEST(conformance_ogles_GL_struct_struct_001_to_008, + "conformance/ogles/GL/struct/struct_001_to_008.html"); +CONFORMANCE_TEST(conformance_ogles_GL_struct_struct_009_to_016, + "conformance/ogles/GL/struct/struct_009_to_016.html"); +CONFORMANCE_TEST(conformance_ogles_GL_struct_struct_017_to_024, + "conformance/ogles/GL/struct/struct_017_to_024.html"); +CONFORMANCE_TEST(conformance_ogles_GL_struct_struct_025_to_032, + "conformance/ogles/GL/struct/struct_025_to_032.html"); 
+CONFORMANCE_TEST(conformance_ogles_GL_struct_struct_033_to_040, + "conformance/ogles/GL/struct/struct_033_to_040.html"); +CONFORMANCE_TEST(conformance_ogles_GL_struct_struct_041_to_048, + "conformance/ogles/GL/struct/struct_041_to_048.html"); +CONFORMANCE_TEST(conformance_ogles_GL_struct_struct_049_to_056, + "conformance/ogles/GL/struct/struct_049_to_056.html"); +CONFORMANCE_TEST(conformance_ogles_GL_swizzlers_swizzlers_001_to_008, + "conformance/ogles/GL/swizzlers/swizzlers_001_to_008.html"); +CONFORMANCE_TEST(conformance_ogles_GL_swizzlers_swizzlers_009_to_016, + "conformance/ogles/GL/swizzlers/swizzlers_009_to_016.html"); +CONFORMANCE_TEST(conformance_ogles_GL_swizzlers_swizzlers_017_to_024, + "conformance/ogles/GL/swizzlers/swizzlers_017_to_024.html"); +CONFORMANCE_TEST(conformance_ogles_GL_swizzlers_swizzlers_025_to_032, + "conformance/ogles/GL/swizzlers/swizzlers_025_to_032.html"); +CONFORMANCE_TEST(conformance_ogles_GL_swizzlers_swizzlers_033_to_040, + "conformance/ogles/GL/swizzlers/swizzlers_033_to_040.html"); +CONFORMANCE_TEST(conformance_ogles_GL_swizzlers_swizzlers_041_to_048, + "conformance/ogles/GL/swizzlers/swizzlers_041_to_048.html"); +CONFORMANCE_TEST(conformance_ogles_GL_swizzlers_swizzlers_049_to_056, + "conformance/ogles/GL/swizzlers/swizzlers_049_to_056.html"); +CONFORMANCE_TEST(conformance_ogles_GL_swizzlers_swizzlers_057_to_064, + "conformance/ogles/GL/swizzlers/swizzlers_057_to_064.html"); +CONFORMANCE_TEST(conformance_ogles_GL_swizzlers_swizzlers_065_to_072, + "conformance/ogles/GL/swizzlers/swizzlers_065_to_072.html"); +CONFORMANCE_TEST(conformance_ogles_GL_swizzlers_swizzlers_073_to_080, + "conformance/ogles/GL/swizzlers/swizzlers_073_to_080.html"); +CONFORMANCE_TEST(conformance_ogles_GL_swizzlers_swizzlers_081_to_088, + "conformance/ogles/GL/swizzlers/swizzlers_081_to_088.html"); +CONFORMANCE_TEST(conformance_ogles_GL_swizzlers_swizzlers_089_to_096, + "conformance/ogles/GL/swizzlers/swizzlers_089_to_096.html"); 
+CONFORMANCE_TEST(conformance_ogles_GL_swizzlers_swizzlers_097_to_104, + "conformance/ogles/GL/swizzlers/swizzlers_097_to_104.html"); +CONFORMANCE_TEST(conformance_ogles_GL_swizzlers_swizzlers_105_to_112, + "conformance/ogles/GL/swizzlers/swizzlers_105_to_112.html"); +CONFORMANCE_TEST(conformance_ogles_GL_swizzlers_swizzlers_113_to_120, + "conformance/ogles/GL/swizzlers/swizzlers_113_to_120.html"); +CONFORMANCE_TEST(conformance_ogles_GL_tan_tan_001_to_006, + "conformance/ogles/GL/tan/tan_001_to_006.html"); +CONFORMANCE_TEST(conformance_ogles_GL_vec_vec_001_to_008, + "conformance/ogles/GL/vec/vec_001_to_008.html"); +CONFORMANCE_TEST(conformance_ogles_GL_vec_vec_009_to_016, + "conformance/ogles/GL/vec/vec_009_to_016.html"); +CONFORMANCE_TEST(conformance_ogles_GL_vec_vec_017_to_018, + "conformance/ogles/GL/vec/vec_017_to_018.html"); +CONFORMANCE_TEST(conformance_ogles_GL_vec3_vec3_001_to_008, + "conformance/ogles/GL/vec3/vec3_001_to_008.html"); + +#endif // CONTENT_TEST_GPU_WEBGL_CONFORMANCE_TEST_LIST_AUTOGEN_H_ + |