diff options
author | Ken Rockot <rockot@google.com> | 2020-08-17 20:09:19 +0000 |
---|---|---|
committer | Michael Brüning <michael.bruning@qt.io> | 2020-09-22 11:55:37 +0000 |
commit | 3ddb1e9639a5f7c9ba0eaf5d1407fc2bcba627d3 (patch) | |
tree | 380192646c41ee76316f5ce0d291e760c9061ff8 | |
parent | 0785cd837827ff88f13fca33ec03bbf330951c00 (diff) | |
download | qtwebengine-chromium-3ddb1e9639a5f7c9ba0eaf5d1407fc2bcba627d3.tar.gz |
[Backport] CVE-2020-6575: Race in Mojo
Manual backport of patch originally reviewed on
https://chromium-review.googlesource.com/c/chromium/src/+/2357451:
[mojo] Fix data race on NodeChannel destruction
See bug for details. It's possible for the IO thread to unsafely access
a NodeChannel after it's been destroyed on the UI thread.
This fixes the issue by ensuring the NodeChannel is always destroyed on
the IO thread.
Fixed: 1081874
Change-Id: I1fc4eaa0cef20f036c85fee3ed7976f518fbf60e
Reviewed-by: Michal Klocek <michal.klocek@qt.io>
-rw-r--r-- | chromium/mojo/core/BUILD.gn | 2 | ||||
-rw-r--r-- | chromium/mojo/core/embedder/embedder.cc | 2 | ||||
-rw-r--r-- | chromium/mojo/core/embedder/embedder.h | 7 | ||||
-rw-r--r-- | chromium/mojo/core/node_channel.cc | 19 | ||||
-rw-r--r-- | chromium/mojo/core/node_channel.h | 18 |
5 files changed, 22 insertions, 26 deletions
diff --git a/chromium/mojo/core/BUILD.gn b/chromium/mojo/core/BUILD.gn index e6fcb96256f..4746c6d2ac2 100644 --- a/chromium/mojo/core/BUILD.gn +++ b/chromium/mojo/core/BUILD.gn @@ -57,6 +57,7 @@ template("core_impl_source_set") { "handle_table.h", "invitation_dispatcher.h", "message_pipe_dispatcher.h", + "node_channel.h", "node_controller.h", "options_validation.h", "platform_handle_dispatcher.h", @@ -86,7 +87,6 @@ template("core_impl_source_set") { "invitation_dispatcher.cc", "message_pipe_dispatcher.cc", "node_channel.cc", - "node_channel.h", "node_controller.cc", "platform_handle_dispatcher.cc", "platform_handle_in_transit.cc", diff --git a/chromium/mojo/core/embedder/embedder.cc b/chromium/mojo/core/embedder/embedder.cc index ec68e37d098..f70c73be4df 100644 --- a/chromium/mojo/core/embedder/embedder.cc +++ b/chromium/mojo/core/embedder/embedder.cc @@ -35,7 +35,7 @@ void SetDefaultProcessErrorCallback(ProcessErrorCallback callback) { Core::Get()->SetDefaultProcessErrorCallback(std::move(callback)); } -scoped_refptr<base::TaskRunner> GetIOTaskRunner() { +scoped_refptr<base::SingleThreadTaskRunner> GetIOTaskRunner() { return Core::Get()->GetNodeController()->io_task_runner(); } diff --git a/chromium/mojo/core/embedder/embedder.h b/chromium/mojo/core/embedder/embedder.h index 5d654009877..96dc44c0a78 100644 --- a/chromium/mojo/core/embedder/embedder.h +++ b/chromium/mojo/core/embedder/embedder.h @@ -13,7 +13,7 @@ #include "base/component_export.h" #include "base/memory/ref_counted.h" #include "base/process/process_handle.h" -#include "base/task_runner.h" +#include "base/single_thread_task_runner.h" #include "build/build_config.h" #include "mojo/core/embedder/configuration.h" @@ -42,9 +42,10 @@ void SetDefaultProcessErrorCallback(ProcessErrorCallback callback); // Initialialization/shutdown for interprocess communication (IPC) ------------- -// Retrieves the TaskRunner used for IPC I/O, as set by ScopedIPCSupport. 
+// Retrieves the SequencedTaskRunner used for IPC I/O, as set by +// ScopedIPCSupport. COMPONENT_EXPORT(MOJO_CORE_EMBEDDER) -scoped_refptr<base::TaskRunner> GetIOTaskRunner(); +scoped_refptr<base::SingleThreadTaskRunner> GetIOTaskRunner(); } // namespace core } // namespace mojo diff --git a/chromium/mojo/core/node_channel.cc b/chromium/mojo/core/node_channel.cc index e898b044286..061ea1026e9 100644 --- a/chromium/mojo/core/node_channel.cc +++ b/chromium/mojo/core/node_channel.cc @@ -228,7 +228,7 @@ void NodeChannel::NotifyBadMessage(const std::string& error) { } void NodeChannel::SetRemoteProcessHandle(ScopedProcessHandle process_handle) { - DCHECK(io_task_runner_->RunsTasksInCurrentSequence()); + DCHECK(owning_task_runner()->RunsTasksInCurrentSequence()); { base::AutoLock lock(channel_lock_); if (channel_) @@ -253,7 +253,7 @@ ScopedProcessHandle NodeChannel::CloneRemoteProcessHandle() { } void NodeChannel::SetRemoteNodeName(const ports::NodeName& name) { - DCHECK(io_task_runner_->RunsTasksInCurrentSequence()); + DCHECK(owning_task_runner()->RunsTasksInCurrentSequence()); remote_node_name_ = name; } @@ -468,15 +468,15 @@ NodeChannel::NodeChannel( Channel::HandlePolicy channel_handle_policy, scoped_refptr<base::SingleThreadTaskRunner> io_task_runner, const ProcessErrorCallback& process_error_callback) - : delegate_(delegate), - io_task_runner_(io_task_runner), + : base::RefCountedDeleteOnSequence<NodeChannel>(io_task_runner), + delegate_(delegate), process_error_callback_(process_error_callback) #if !defined(OS_NACL_SFI) , channel_(Channel::Create(this, std::move(connection_params), channel_handle_policy, - io_task_runner_)) + std::move(io_task_runner))) #endif { } @@ -499,15 +499,10 @@ void NodeChannel::CreateAndBindLocalBrokerHost( void NodeChannel::OnChannelMessage(const void* payload, size_t payload_size, std::vector<PlatformHandle> handles) { - DCHECK(io_task_runner_->RunsTasksInCurrentSequence()); + DCHECK(owning_task_runner()->RunsTasksInCurrentSequence()); 
RequestContext request_context(RequestContext::Source::SYSTEM); - // Ensure this NodeChannel stays alive through the extent of this method. The - // delegate may have the only other reference to this object and it may choose - // to drop it here in response to, e.g., a malformed message. - scoped_refptr<NodeChannel> keepalive = this; - if (payload_size <= sizeof(Header)) { delegate_->OnChannelError(remote_node_name_, this); return; @@ -739,7 +734,7 @@ void NodeChannel::OnChannelMessage(const void* payload, } void NodeChannel::OnChannelError(Channel::Error error) { - DCHECK(io_task_runner_->RunsTasksInCurrentSequence()); + DCHECK(owning_task_runner()->RunsTasksInCurrentSequence()); RequestContext request_context(RequestContext::Source::SYSTEM); diff --git a/chromium/mojo/core/node_channel.h b/chromium/mojo/core/node_channel.h index ea91f927049..04501da0fb6 100644 --- a/chromium/mojo/core/node_channel.h +++ b/chromium/mojo/core/node_channel.h @@ -11,7 +11,7 @@ #include "base/callback.h" #include "base/containers/queue.h" #include "base/macros.h" -#include "base/memory/ref_counted.h" +#include "base/memory/ref_counted_delete_on_sequence.h" #include "base/process/process_handle.h" #include "base/single_thread_task_runner.h" #include "base/synchronization/lock.h" @@ -21,13 +21,15 @@ #include "mojo/core/embedder/process_error_callback.h" #include "mojo/core/ports/name.h" #include "mojo/core/scoped_process_handle.h" +#include "mojo/core/system_impl_export.h" namespace mojo { namespace core { // Wraps a Channel to send and receive Node control messages. 
-class NodeChannel : public base::RefCountedThreadSafe<NodeChannel>, - public Channel::Delegate { +class MOJO_SYSTEM_IMPL_EXPORT NodeChannel + : public base::RefCountedDeleteOnSequence<NodeChannel>, + public Channel::Delegate { public: class Delegate { public: @@ -92,8 +94,6 @@ class NodeChannel : public base::RefCountedThreadSafe<NodeChannel>, void** data, size_t* num_data_bytes); - Channel* channel() const { return channel_.get(); } - // Start receiving messages. void Start(); @@ -155,7 +155,8 @@ class NodeChannel : public base::RefCountedThreadSafe<NodeChannel>, #endif private: - friend class base::RefCountedThreadSafe<NodeChannel>; + friend class base::RefCountedDeleteOnSequence<NodeChannel>; + friend class base::DeleteHelper<NodeChannel>; using PendingMessageQueue = base::queue<Channel::MessagePtr>; using PendingRelayMessageQueue = @@ -181,13 +182,12 @@ class NodeChannel : public base::RefCountedThreadSafe<NodeChannel>, void WriteChannelMessage(Channel::MessagePtr message); Delegate* const delegate_; - const scoped_refptr<base::SingleThreadTaskRunner> io_task_runner_; const ProcessErrorCallback process_error_callback_; base::Lock channel_lock_; - scoped_refptr<Channel> channel_; + scoped_refptr<Channel> channel_ GUARDED_BY(channel_lock_); - // Must only be accessed from |io_task_runner_|'s thread. + // Must only be accessed from the owning task runner's thread. ports::NodeName remote_node_name_; base::Lock remote_process_handle_lock_; |