// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/wasm/module-compiler.h"

#include <algorithm>
#include <queue>

#include "src/api/api.h"
#include "src/asmjs/asm-js.h"
#include "src/base/enum-set.h"
#include "src/base/optional.h"
#include "src/base/platform/mutex.h"
#include "src/base/platform/semaphore.h"
#include "src/base/platform/time.h"
#include "src/base/utils/random-number-generator.h"
#include "src/compiler/wasm-compiler.h"
#include "src/heap/heap-inl.h"  // For CodeSpaceMemoryModificationScope.
#include "src/logging/counters.h"
#include "src/logging/metrics.h"
#include "src/objects/property-descriptor.h"
#include "src/tasks/task-utils.h"
#include "src/tracing/trace-event.h"
#include "src/trap-handler/trap-handler.h"
#include "src/utils/identity-map.h"
#include "src/wasm/module-decoder.h"
#include "src/wasm/streaming-decoder.h"
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-import-wrapper-cache.h"
#include "src/wasm/wasm-js.h"
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-objects-inl.h"
#include "src/wasm/wasm-opcodes.h"
#include "src/wasm/wasm-result.h"
#include "src/wasm/wasm-serialization.h"

#define TRACE_COMPILE(...)                             \
  do {                                                 \
    if (FLAG_trace_wasm_compiler) PrintF(__VA_ARGS__); \
  } while (false)

#define TRACE_STREAMING(...)                            \
  do {                                                  \
    if (FLAG_trace_wasm_streaming) PrintF(__VA_ARGS__); \
  } while (false)

#define TRACE_LAZY(...)                                         \
  do {                                                          \
    if (FLAG_trace_wasm_lazy_compilation) PrintF(__VA_ARGS__);  \
  } while (false)

namespace v8 {
namespace internal {
namespace wasm {

namespace {

enum class CompileMode : uint8_t { kRegular, kTiering };

enum class CompileStrategy : uint8_t {
  // Compiles functions on first use. In this case, execution will block until
  // the function's baseline is reached and top tier compilation starts in
  // background (if applicable).
  // Lazy compilation can help to reduce startup time and code size at the risk
  // of blocking execution.
  kLazy,
  // Compiles baseline ahead of execution and starts top tier compilation in
  // background (if applicable).
  kEager,
  // Triggers baseline compilation on first use (just like {kLazy}) with the
  // difference that top tier compilation is started eagerly.
  // This strategy can help to reduce startup time at the risk of blocking
  // execution, but only in its early phase (until top tier compilation
  // finishes).
  kLazyBaselineEagerTopTier,
  // Marker for default strategy.
  kDefault = kEager,
};

// Background compile jobs hold a shared pointer to this token. The token is
// used to notify them that they should stop. As soon as they see this (after
// finishing their current compilation unit), they will stop.
// This allows the NativeModule to be removed right away, without having to
// synchronize with background compile jobs.
class BackgroundCompileToken {
 public:
  explicit BackgroundCompileToken(
      const std::shared_ptr<NativeModule>& native_module)
      : native_module_(native_module) {}

  void Cancel() {
    base::SharedMutexGuard<base::kExclusive> mutex_guard(
        &compilation_scope_mutex_);
    native_module_.reset();
  }

 private:
  friend class BackgroundCompileScope;

  std::shared_ptr<NativeModule> StartScope() {
    compilation_scope_mutex_.LockShared();
    return native_module_.lock();
  }

  // This private method can only be called via {BackgroundCompileScope}.
  void SchedulePublishCode(NativeModule* native_module,
                           std::vector<std::unique_ptr<WasmCode>> codes) {
    {
      base::MutexGuard guard(&publish_mutex_);
      if (publisher_running_) {
        // Add new code to the queue and return.
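        // The thread that set {publisher_running_} is still inside the
        // publish loop below and will pick these units up from there.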
        publish_queue_.reserve(publish_queue_.size() + codes.size());
        for (auto& c : codes) publish_queue_.emplace_back(std::move(c));
        return;
      }
      publisher_running_ = true;
    }
    while (true) {
      PublishCode(native_module, VectorOf(codes));
      codes.clear();

      // Keep publishing new code that came in.
      base::MutexGuard guard(&publish_mutex_);
      DCHECK(publisher_running_);
      if (publish_queue_.empty()) {
        publisher_running_ = false;
        return;
      }
      codes.swap(publish_queue_);
    }
  }

  void PublishCode(NativeModule*, Vector<std::unique_ptr<WasmCode>>);

  void ExitScope() { compilation_scope_mutex_.UnlockShared(); }

  // {compilation_scope_mutex_} protects {native_module_}.
  base::SharedMutex compilation_scope_mutex_;
  std::weak_ptr<NativeModule> native_module_;

  // {publish_mutex_} protects {publish_queue_} and {publisher_running_}.
  base::Mutex publish_mutex_;
  std::vector<std::unique_ptr<WasmCode>> publish_queue_;
  bool publisher_running_ = false;
};

class CompilationStateImpl;

// Keep these scopes short, as they hold the mutex of the token, which
// serializes all these scopes. The mutex is also acquired from foreground
// tasks, which should not be blocked for a long time.
class BackgroundCompileScope {
 public:
  explicit BackgroundCompileScope(
      const std::shared_ptr<BackgroundCompileToken>& token)
      : token_(token.get()), native_module_(token->StartScope()) {}

  ~BackgroundCompileScope() { token_->ExitScope(); }

  bool cancelled() const { return native_module_ == nullptr; }

  NativeModule* native_module() {
    DCHECK(!cancelled());
    return native_module_.get();
  }

  inline CompilationStateImpl* compilation_state();

  // Call {SchedulePublishCode} via the {BackgroundCompileScope} to guarantee
  // that the {NativeModule} stays alive.
  void SchedulePublishCode(std::vector<std::unique_ptr<WasmCode>> codes) {
    token_->SchedulePublishCode(native_module_.get(), std::move(codes));
  }

 private:
  BackgroundCompileToken* const token_;
  // Keep the native module alive while in this scope.
  std::shared_ptr<NativeModule> const native_module_;
};

enum CompileBaselineOnly : bool {
  kBaselineOnly = true,
  kBaselineOrTopTier = false
};

// A set of work-stealing queues (vectors of units). Each background compile
// task owns one of the queues and steals from all others once its own queue
// runs empty.
class CompilationUnitQueues {
 public:
  explicit CompilationUnitQueues(int max_tasks) : queues_(max_tasks) {
    DCHECK_LT(0, max_tasks);
    for (int task_id = 0; task_id < max_tasks; ++task_id) {
      queues_[task_id].next_steal_task_id = next_task_id(task_id);
    }
    for (auto& atomic_counter : num_units_) {
      std::atomic_init(&atomic_counter, size_t{0});
    }
  }

  base::Optional<WasmCompilationUnit> GetNextUnit(
      int task_id, CompileBaselineOnly baseline_only) {
    DCHECK_LE(0, task_id);
    DCHECK_GT(queues_.size(), task_id);

    // As long as any lower-tier units are outstanding we need to steal them
    // before executing our own higher-tier units.
    int max_tier = baseline_only ? kBaseline : kTopTier;
    for (int tier = GetLowestTierWithUnits(); tier <= max_tier; ++tier) {
      if (auto unit = GetNextUnitOfTier(task_id, tier)) {
        size_t old_units_count =
            num_units_[tier].fetch_sub(1, std::memory_order_relaxed);
        DCHECK_LE(1, old_units_count);
        USE(old_units_count);
        return unit;
      }
    }
    return {};
  }

  void AddUnits(Vector<WasmCompilationUnit> baseline_units,
                Vector<WasmCompilationUnit> top_tier_units,
                const WasmModule* module) {
    DCHECK_LT(0, baseline_units.size() + top_tier_units.size());
    // Add to the individual queues in a round-robin fashion. No special care
    // is taken to balance them; they will be balanced by work stealing.
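    // Atomically advance {next_queue_to_add}: on contention,
    // compare_exchange_weak reloads the current value into {queue_to_add} and
    // the loop retries, so concurrent callers end up on different queues.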
    int queue_to_add = next_queue_to_add.load(std::memory_order_relaxed);
    while (!next_queue_to_add.compare_exchange_weak(
        queue_to_add, next_task_id(queue_to_add), std::memory_order_relaxed)) {
      // Retry with updated {queue_to_add}.
    }

    Queue* queue = &queues_[queue_to_add];
    base::MutexGuard guard(&queue->mutex);
    base::Optional<base::MutexGuard> big_units_guard;
    for (auto pair : {std::make_pair(int{kBaseline}, baseline_units),
                      std::make_pair(int{kTopTier}, top_tier_units)}) {
      int tier = pair.first;
      Vector<WasmCompilationUnit> units = pair.second;
      if (units.empty()) continue;
      num_units_[tier].fetch_add(units.size(), std::memory_order_relaxed);
      for (WasmCompilationUnit unit : units) {
        size_t func_size = module->functions[unit.func_index()].code.length();
        if (func_size <= kBigUnitsLimit) {
          queue->units[tier].push_back(unit);
        } else {
          if (!big_units_guard) {
            big_units_guard.emplace(&big_units_queue_.mutex);
          }
          big_units_queue_.has_units[tier].store(true,
                                                 std::memory_order_relaxed);
          big_units_queue_.units[tier].emplace(func_size, unit);
        }
      }
    }
  }

  // Get the current total number of units in all queues. This is only a
  // momentary snapshot; it's not guaranteed that {GetNextUnit} returns a unit
  // if this method returns non-zero.
  size_t GetTotalSize() const {
    size_t total = 0;
    for (auto& atomic_counter : num_units_) {
      total += atomic_counter.load(std::memory_order_relaxed);
    }
    return total;
  }

 private:
  // Store tier in int so we can easily loop over it:
  static constexpr int kBaseline = 0;
  static constexpr int kTopTier = 1;
  static constexpr int kNumTiers = kTopTier + 1;

  // Functions bigger than {kBigUnitsLimit} will be compiled first, in
  // descending order of their function body size.
  static constexpr size_t kBigUnitsLimit = 4096;

  struct Queue {
    base::Mutex mutex;

    // Protected by {mutex}:
    std::vector<WasmCompilationUnit> units[kNumTiers];
    int next_steal_task_id;
    // End of fields protected by {mutex}.
  };

  struct BigUnit {
    BigUnit(size_t func_size, WasmCompilationUnit unit)
        : func_size{func_size}, unit(unit) {}

    size_t func_size;
    WasmCompilationUnit unit;

    bool operator<(const BigUnit& other) const {
      return func_size < other.func_size;
    }
  };

  struct BigUnitsQueue {
    BigUnitsQueue() {
      for (auto& atomic : has_units) std::atomic_init(&atomic, false);
    }

    base::Mutex mutex;

    // Can be read concurrently to check whether any elements are in the queue.
    std::atomic<bool> has_units[kNumTiers];

    // Protected by {mutex}:
    std::priority_queue<BigUnit> units[kNumTiers];
  };

  std::vector<Queue> queues_;
  BigUnitsQueue big_units_queue_;

  std::atomic<size_t> num_units_[kNumTiers];
  std::atomic<int> next_queue_to_add{0};

  int next_task_id(int task_id) const {
    int next = task_id + 1;
    return next == static_cast<int>(queues_.size()) ? 0 : next;
  }

  int GetLowestTierWithUnits() const {
    for (int tier = 0; tier < kNumTiers; ++tier) {
      if (num_units_[tier].load(std::memory_order_relaxed) > 0) return tier;
    }
    return kNumTiers;
  }

  base::Optional<WasmCompilationUnit> GetNextUnitOfTier(int task_id, int tier) {
    Queue* queue = &queues_[task_id];

    // First check whether there is a big unit of that tier. Execute that
    // first.
    if (auto unit = GetBigUnitOfTier(tier)) return unit;

    // Then check whether our own queue has a unit of the wanted tier. If
    // so, return it, otherwise get the task id to steal from.
    int steal_task_id;
    {
      base::MutexGuard mutex_guard(&queue->mutex);
      if (!queue->units[tier].empty()) {
        auto unit = queue->units[tier].back();
        queue->units[tier].pop_back();
        return unit;
      }
      steal_task_id = queue->next_steal_task_id;
    }

    // Try to steal from all other queues. If this succeeds, return one of the
    // stolen units.
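    // Visit the other queues in round-robin order, beginning at
    // {steal_task_id} and skipping our own queue; at most {queues_.size()}
    // queues are tried.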
    size_t steal_trials = queues_.size();
    for (; steal_trials > 0;
         --steal_trials, steal_task_id = next_task_id(steal_task_id)) {
      if (steal_task_id == task_id) continue;
      if (auto unit = StealUnitsAndGetFirst(task_id, steal_task_id, tier)) {
        return unit;
      }
    }

    // If we reach here, we didn't find any unit of the requested tier.
    return {};
  }

  base::Optional<WasmCompilationUnit> GetBigUnitOfTier(int tier) {
    // Fast path without locking.
    if (!big_units_queue_.has_units[tier].load(std::memory_order_relaxed)) {
      return {};
    }
    base::MutexGuard guard(&big_units_queue_.mutex);
    if (big_units_queue_.units[tier].empty()) return {};
    WasmCompilationUnit unit = big_units_queue_.units[tier].top().unit;
    big_units_queue_.units[tier].pop();
    if (big_units_queue_.units[tier].empty()) {
      big_units_queue_.has_units[tier].store(false, std::memory_order_relaxed);
    }
    return unit;
  }

  // Steal units of {wanted_tier} from {steal_from_task_id} to {task_id}.
  // Return the first stolen unit (the rest is put in the queue of {task_id}),
  // or {nullopt} if {steal_from_task_id} had no units of {wanted_tier}.
  base::Optional<WasmCompilationUnit> StealUnitsAndGetFirst(
      int task_id, int steal_from_task_id, int wanted_tier) {
    DCHECK_NE(task_id, steal_from_task_id);
    std::vector<WasmCompilationUnit> stolen;
    base::Optional<WasmCompilationUnit> returned_unit;
    {
      Queue* steal_queue = &queues_[steal_from_task_id];
      base::MutexGuard guard(&steal_queue->mutex);
      auto* steal_from_vector = &steal_queue->units[wanted_tier];
      if (steal_from_vector->empty()) return {};
      size_t remaining = steal_from_vector->size() / 2;
      auto steal_begin = steal_from_vector->begin() + remaining;
      returned_unit = *steal_begin;
      stolen.assign(steal_begin + 1, steal_from_vector->end());
      steal_from_vector->erase(steal_begin, steal_from_vector->end());
    }
    Queue* queue = &queues_[task_id];
    base::MutexGuard guard(&queue->mutex);
    auto* target_queue = &queue->units[wanted_tier];
    target_queue->insert(target_queue->end(), stolen.begin(), stolen.end());
    queue->next_steal_task_id = next_task_id(steal_from_task_id);
    return returned_unit;
  }
};

// {JobHandle} is not thread safe in general (at least both the
// {DefaultJobHandle} and chromium's {base::JobHandle} are not). Hence, protect
// concurrent accesses via a mutex.
class ThreadSafeJobHandle {
 public:
  explicit ThreadSafeJobHandle(std::shared_ptr<JobHandle> job_handle)
      : job_handle_(std::move(job_handle)) {}

  void NotifyConcurrencyIncrease() {
    base::MutexGuard guard(&mutex_);
    job_handle_->NotifyConcurrencyIncrease();
  }

  void Join() {
    base::MutexGuard guard(&mutex_);
    job_handle_->Join();
  }

  void Cancel() {
    base::MutexGuard guard(&mutex_);
    job_handle_->Cancel();
  }

  bool IsRunning() const {
    base::MutexGuard guard(&mutex_);
    return job_handle_->IsRunning();
  }

 private:
  mutable base::Mutex mutex_;
  std::shared_ptr<JobHandle> job_handle_;
};

// The {CompilationStateImpl} keeps track of the compilation state of the
// owning NativeModule, i.e. which functions are left to be compiled.
// It contains a task manager to allow parallel and asynchronous background
// compilation of functions.
// Its public interface {CompilationState} lives in compilation-environment.h.
class CompilationStateImpl {
 public:
  CompilationStateImpl(const std::shared_ptr<NativeModule>& native_module,
                       std::shared_ptr<Counters> async_counters);

  // Cancel all background compilation, without waiting for compile tasks to
  // finish.
  void CancelCompilation();

  // Initialize compilation progress. Set compilation tiers to expect for
  // baseline and top tier compilation. Must be called before
  // {AddCompilationUnits} is invoked, which triggers background compilation.
  void InitializeCompilationProgress(bool lazy_module, int num_wrappers);

  // Initialize the compilation progress after deserialization. This is needed
  // for recompilation (e.g. for tier down) to work later.
  void InitializeCompilationProgressAfterDeserialization();

  // Initialize recompilation of the whole module: Setup compilation progress
  // for recompilation and add the respective compilation units. The callback
  // is called immediately if no recompilation is needed, or called later
  // otherwise.
  void InitializeRecompilation(
      TieringState new_tiering_state,
      CompilationState::callback_t recompilation_finished_callback);

  // Add the callback function to be called on compilation events. Needs to be
  // set before {AddCompilationUnits} is run to ensure that it receives all
  // events. The callback object must support being deleted from any thread.
  void AddCallback(CompilationState::callback_t);

  // Inserts new functions to compile and kicks off compilation.
  void AddCompilationUnits(
      Vector<WasmCompilationUnit> baseline_units,
      Vector<WasmCompilationUnit> top_tier_units,
      Vector<std::shared_ptr<JSToWasmWrapperCompilationUnit>>
          js_to_wasm_wrapper_units);
  void AddTopTierCompilationUnit(WasmCompilationUnit);

  base::Optional<WasmCompilationUnit> GetNextCompilationUnit(
      int task_id, CompileBaselineOnly baseline_only);

  std::shared_ptr<JSToWasmWrapperCompilationUnit>
  GetNextJSToWasmWrapperCompilationUnit();
  void FinalizeJSToWasmWrappers(Isolate* isolate, const WasmModule* module,
                                Handle<FixedArray>* export_wrappers_out);

  void OnFinishedUnits(Vector<WasmCode*>);
  void OnFinishedJSToWasmWrapperUnits(int num);

  int GetFreeCompileTaskId();
  void OnCompilationStopped(int task_id, const WasmFeatures& detected);
  void PublishDetectedFeatures(Isolate*);
  // Ensure that a compilation job is running, and increase its concurrency if
  // needed.
  void ScheduleCompileJobForNewUnits(int new_units);

  size_t NumOutstandingCompilations() const;

  void SetError();

  void WaitForBaselineFinished();

  bool failed() const {
    return compile_failed_.load(std::memory_order_relaxed);
  }

  bool baseline_compilation_finished() const {
    base::MutexGuard guard(&callbacks_mutex_);
    return outstanding_baseline_units_ == 0;
  }

  bool top_tier_compilation_finished() const {
    base::MutexGuard guard(&callbacks_mutex_);
    return outstanding_top_tier_functions_ == 0;
  }

  bool recompilation_finished() const {
    base::MutexGuard guard(&callbacks_mutex_);
    return outstanding_recompilation_functions_ == 0;
  }

  CompileMode compile_mode() const { return compile_mode_; }
  Counters* counters() const { return async_counters_.get(); }
  WasmFeatures* detected_features() { return &detected_features_; }

  void SetWireBytesStorage(
      std::shared_ptr<WireBytesStorage> wire_bytes_storage) {
    base::MutexGuard guard(&mutex_);
    wire_bytes_storage_ = wire_bytes_storage;
  }

  std::shared_ptr<WireBytesStorage> GetWireBytesStorage() const {
    base::MutexGuard guard(&mutex_);
    DCHECK_NOT_NULL(wire_bytes_storage_);
    return wire_bytes_storage_;
  }

 private:
  // Trigger callbacks according to the internal counters below
  // (outstanding_...), plus the given events.
  // Hold the {callbacks_mutex_} when calling this method.
  void TriggerCallbacks(
      base::EnumSet<CompilationEvent> additional_events = {});

  NativeModule* const native_module_;
  const std::shared_ptr<BackgroundCompileToken> background_compile_token_;
  const CompileMode compile_mode_;
  const std::shared_ptr<Counters> async_counters_;

  // Compilation error, atomically updated. This flag can be updated and read
  // using relaxed semantics.
  std::atomic<bool> compile_failed_{false};

  // The atomic counter is shared with the compilation job. It's increased if
  // more units are added, and decreased when the queue drops to zero.
std::shared_ptr> current_compile_concurrency_ = std::make_shared>(0); const int max_compile_concurrency_ = 0; CompilationUnitQueues compilation_unit_queues_; // Index of the next wrapper to compile in {js_to_wasm_wrapper_units_}. std::atomic js_to_wasm_wrapper_id_{0}; // Wrapper compilation units are stored in shared_ptrs so that they are kept // alive by the tasks even if the NativeModule dies. std::vector> js_to_wasm_wrapper_units_; // This mutex protects all information of this {CompilationStateImpl} which is // being accessed concurrently. mutable base::Mutex mutex_; ////////////////////////////////////////////////////////////////////////////// // Protected by {mutex_}: // Set of unused task ids; <= {max_compile_concurrency_} many. std::vector available_task_ids_; std::shared_ptr current_compile_job_; // Features detected to be used in this module. Features can be detected // as a module is being compiled. WasmFeatures detected_features_ = WasmFeatures::None(); // Abstraction over the storage of the wire bytes. Held in a shared_ptr so // that background compilation jobs can keep the storage alive while // compiling. std::shared_ptr wire_bytes_storage_; // End of fields protected by {mutex_}. ////////////////////////////////////////////////////////////////////////////// // This mutex protects the callbacks vector, and the counters used to // determine which callbacks to call. The counters plus the callbacks // themselves need to be synchronized to ensure correct order of events. mutable base::Mutex callbacks_mutex_; ////////////////////////////////////////////////////////////////////////////// // Protected by {callbacks_mutex_}: // Callback functions to be called on compilation events. std::vector callbacks_; // Events that already happened. base::EnumSet finished_events_; int outstanding_baseline_units_ = 0; int outstanding_top_tier_functions_ = 0; std::vector compilation_progress_; int outstanding_recompilation_functions_ = 0; TieringState tiering_state_ = kTieredUp; // End of fields protected by {callbacks_mutex_}. ////////////////////////////////////////////////////////////////////////////// // Encoding of fields in the {compilation_progress_} vector. using RequiredBaselineTierField = base::BitField8; using RequiredTopTierField = base::BitField8; using ReachedTierField = base::BitField8; using MissingRecompilationField = base::BitField8; }; CompilationStateImpl* Impl(CompilationState* compilation_state) { return reinterpret_cast(compilation_state); } const CompilationStateImpl* Impl(const CompilationState* compilation_state) { return reinterpret_cast(compilation_state); } CompilationStateImpl* BackgroundCompileScope::compilation_state() { return Impl(native_module()->compilation_state()); } void BackgroundCompileToken::PublishCode( NativeModule* native_module, Vector> code) { WasmCodeRefScope code_ref_scope; std::vector published_code = native_module->PublishCode(code); // Defer logging code in case wire bytes were not fully received yet. 
if (native_module->HasWireBytes()) { native_module->engine()->LogCode(VectorOf(published_code)); } Impl(native_module->compilation_state()) ->OnFinishedUnits(VectorOf(published_code)); } void UpdateFeatureUseCounts(Isolate* isolate, const WasmFeatures& detected) { using Feature = v8::Isolate::UseCounterFeature; constexpr static std::pair kUseCounters[] = { {kFeature_reftypes, Feature::kWasmRefTypes}, {kFeature_bulk_memory, Feature::kWasmBulkMemory}, {kFeature_mv, Feature::kWasmMultiValue}, {kFeature_simd, Feature::kWasmSimdOpcodes}, {kFeature_threads, Feature::kWasmThreadOpcodes}}; for (auto& feature : kUseCounters) { if (detected.contains(feature.first)) isolate->CountUsage(feature.second); } } } // namespace ////////////////////////////////////////////////////// // PIMPL implementation of {CompilationState}. CompilationState::~CompilationState() { Impl(this)->~CompilationStateImpl(); } void CompilationState::CancelCompilation() { Impl(this)->CancelCompilation(); } void CompilationState::SetError() { Impl(this)->SetError(); } void CompilationState::SetWireBytesStorage( std::shared_ptr wire_bytes_storage) { Impl(this)->SetWireBytesStorage(std::move(wire_bytes_storage)); } std::shared_ptr CompilationState::GetWireBytesStorage() const { return Impl(this)->GetWireBytesStorage(); } void CompilationState::AddCallback(CompilationState::callback_t callback) { return Impl(this)->AddCallback(std::move(callback)); } void CompilationState::WaitForBaselineFinished() { Impl(this)->WaitForBaselineFinished(); } void CompilationState::WaitForTopTierFinished() { // TODO(clemensb): Contribute to compilation while waiting. auto top_tier_finished_semaphore = std::make_shared(0); AddCallback([top_tier_finished_semaphore](CompilationEvent event) { if (event == CompilationEvent::kFailedCompilation || event == CompilationEvent::kFinishedTopTierCompilation) { top_tier_finished_semaphore->Signal(); } }); top_tier_finished_semaphore->Wait(); } void CompilationState::InitializeAfterDeserialization() { Impl(this)->InitializeCompilationProgressAfterDeserialization(); } bool CompilationState::failed() const { return Impl(this)->failed(); } bool CompilationState::baseline_compilation_finished() const { return Impl(this)->baseline_compilation_finished(); } bool CompilationState::top_tier_compilation_finished() const { return Impl(this)->top_tier_compilation_finished(); } bool CompilationState::recompilation_finished() const { return Impl(this)->recompilation_finished(); } // static std::unique_ptr CompilationState::New( const std::shared_ptr& native_module, std::shared_ptr async_counters) { return std::unique_ptr(reinterpret_cast( new CompilationStateImpl(native_module, std::move(async_counters)))); } // End of PIMPL implementation of {CompilationState}. 
////////////////////////////////////////////////////// namespace { ExecutionTier ApplyHintToExecutionTier(WasmCompilationHintTier hint, ExecutionTier default_tier) { switch (hint) { case WasmCompilationHintTier::kDefault: return default_tier; case WasmCompilationHintTier::kBaseline: return ExecutionTier::kLiftoff; case WasmCompilationHintTier::kOptimized: return ExecutionTier::kTurbofan; } UNREACHABLE(); } const WasmCompilationHint* GetCompilationHint(const WasmModule* module, uint32_t func_index) { DCHECK_LE(module->num_imported_functions, func_index); uint32_t hint_index = declared_function_index(module, func_index); const std::vector& compilation_hints = module->compilation_hints; if (hint_index < compilation_hints.size()) { return &compilation_hints[hint_index]; } return nullptr; } CompileStrategy GetCompileStrategy(const WasmModule* module, const WasmFeatures& enabled_features, uint32_t func_index, bool lazy_module) { if (lazy_module) return CompileStrategy::kLazy; if (!enabled_features.has_compilation_hints()) { return CompileStrategy::kDefault; } auto* hint = GetCompilationHint(module, func_index); if (hint == nullptr) return CompileStrategy::kDefault; switch (hint->strategy) { case WasmCompilationHintStrategy::kLazy: return CompileStrategy::kLazy; case WasmCompilationHintStrategy::kEager: return CompileStrategy::kEager; case WasmCompilationHintStrategy::kLazyBaselineEagerTopTier: return CompileStrategy::kLazyBaselineEagerTopTier; case WasmCompilationHintStrategy::kDefault: return CompileStrategy::kDefault; } } struct ExecutionTierPair { ExecutionTier baseline_tier; ExecutionTier top_tier; }; ExecutionTierPair GetRequestedExecutionTiers( const WasmModule* module, CompileMode compile_mode, const WasmFeatures& enabled_features, uint32_t func_index) { ExecutionTierPair result; result.baseline_tier = WasmCompilationUnit::GetBaselineExecutionTier(module); switch (compile_mode) { case CompileMode::kRegular: result.top_tier = result.baseline_tier; return result; case CompileMode::kTiering: // Default tiering behaviour. result.top_tier = ExecutionTier::kTurbofan; // Check if compilation hints override default tiering behaviour. if (enabled_features.has_compilation_hints()) { const WasmCompilationHint* hint = GetCompilationHint(module, func_index); if (hint != nullptr) { result.baseline_tier = ApplyHintToExecutionTier(hint->baseline_tier, result.baseline_tier); result.top_tier = ApplyHintToExecutionTier(hint->top_tier, result.top_tier); } } // Correct top tier if necessary. static_assert(ExecutionTier::kLiftoff < ExecutionTier::kTurbofan, "Assume an order on execution tiers"); if (result.baseline_tier > result.top_tier) { result.top_tier = result.baseline_tier; } return result; } UNREACHABLE(); } // The {CompilationUnitBuilder} builds compilation units and stores them in an // internal buffer. The buffer is moved into the working queue of the // {CompilationStateImpl} when {Commit} is called. class CompilationUnitBuilder { public: explicit CompilationUnitBuilder(NativeModule* native_module) : native_module_(native_module) {} void AddUnits(uint32_t func_index) { if (func_index < native_module_->module()->num_imported_functions) { baseline_units_.emplace_back(func_index, ExecutionTier::kNone, kNoDebugging); return; } ExecutionTierPair tiers = GetRequestedExecutionTiers( native_module_->module(), compilation_state()->compile_mode(), native_module_->enabled_features(), func_index); // Compile everything for non-debugging initially. 
If needed, we will tier // down when the module is fully compiled. Synchronization would be pretty // difficult otherwise. baseline_units_.emplace_back(func_index, tiers.baseline_tier, kNoDebugging); if (tiers.baseline_tier != tiers.top_tier) { tiering_units_.emplace_back(func_index, tiers.top_tier, kNoDebugging); } } void AddJSToWasmWrapperUnit( std::shared_ptr unit) { js_to_wasm_wrapper_units_.emplace_back(std::move(unit)); } void AddTopTierUnit(int func_index) { ExecutionTierPair tiers = GetRequestedExecutionTiers( native_module_->module(), compilation_state()->compile_mode(), native_module_->enabled_features(), func_index); // In this case, the baseline is lazily compiled, if at all. The compilation // unit is added even if the baseline tier is the same. #ifdef DEBUG auto* module = native_module_->module(); DCHECK_EQ(kWasmOrigin, module->origin); const bool lazy_module = false; DCHECK_EQ(CompileStrategy::kLazyBaselineEagerTopTier, GetCompileStrategy(module, native_module_->enabled_features(), func_index, lazy_module)); #endif tiering_units_.emplace_back(func_index, tiers.top_tier, kNoDebugging); } void AddRecompilationUnit(int func_index, ExecutionTier tier) { // For recompilation, just treat all units like baseline units. baseline_units_.emplace_back( func_index, tier, tier == ExecutionTier::kLiftoff ? kForDebugging : kNoDebugging); } bool Commit() { if (baseline_units_.empty() && tiering_units_.empty() && js_to_wasm_wrapper_units_.empty()) { return false; } compilation_state()->AddCompilationUnits( VectorOf(baseline_units_), VectorOf(tiering_units_), VectorOf(js_to_wasm_wrapper_units_)); Clear(); return true; } void Clear() { baseline_units_.clear(); tiering_units_.clear(); js_to_wasm_wrapper_units_.clear(); } private: CompilationStateImpl* compilation_state() const { return Impl(native_module_->compilation_state()); } NativeModule* const native_module_; std::vector baseline_units_; std::vector tiering_units_; std::vector> js_to_wasm_wrapper_units_; }; void SetCompileError(ErrorThrower* thrower, ModuleWireBytes wire_bytes, const WasmFunction* func, const WasmModule* module, WasmError error) { WasmName name = wire_bytes.GetNameOrNull(func, module); if (name.begin() == nullptr) { thrower->CompileError("Compiling function #%d failed: %s @+%u", func->func_index, error.message().c_str(), error.offset()); } else { TruncatedUserString<> truncated_name(name); thrower->CompileError("Compiling function #%d:\"%.*s\" failed: %s @+%u", func->func_index, truncated_name.length(), truncated_name.start(), error.message().c_str(), error.offset()); } } DecodeResult ValidateSingleFunction(const WasmModule* module, int func_index, Vector code, Counters* counters, AccountingAllocator* allocator, WasmFeatures enabled_features) { const WasmFunction* func = &module->functions[func_index]; FunctionBody body{func->sig, func->code.offset(), code.begin(), code.end()}; DecodeResult result; WasmFeatures detected; return VerifyWasmCode(allocator, enabled_features, module, &detected, body); } enum OnlyLazyFunctions : bool { kAllFunctions = false, kOnlyLazyFunctions = true, }; void ValidateSequentially( const WasmModule* module, NativeModule* native_module, Counters* counters, AccountingAllocator* allocator, ErrorThrower* thrower, bool lazy_module, OnlyLazyFunctions only_lazy_functions = kAllFunctions) { DCHECK(!thrower->error()); uint32_t start = module->num_imported_functions; uint32_t end = start + module->num_declared_functions; auto enabled_features = native_module->enabled_features(); for (uint32_t func_index = 
start; func_index < end; func_index++) { // Skip non-lazy functions if requested. if (only_lazy_functions) { CompileStrategy strategy = GetCompileStrategy(module, enabled_features, func_index, lazy_module); if (strategy != CompileStrategy::kLazy && strategy != CompileStrategy::kLazyBaselineEagerTopTier) { continue; } } ModuleWireBytes wire_bytes{native_module->wire_bytes()}; const WasmFunction* func = &module->functions[func_index]; Vector code = wire_bytes.GetFunctionBytes(func); DecodeResult result = ValidateSingleFunction( module, func_index, code, counters, allocator, enabled_features); if (result.failed()) { SetCompileError(thrower, wire_bytes, func, module, result.error()); } } } bool IsLazyModule(const WasmModule* module) { return FLAG_wasm_lazy_compilation || (FLAG_asm_wasm_lazy_compilation && is_asmjs_module(module)); } } // namespace bool CompileLazy(Isolate* isolate, NativeModule* native_module, int func_index) { const WasmModule* module = native_module->module(); auto enabled_features = native_module->enabled_features(); Counters* counters = isolate->counters(); DCHECK(!native_module->lazy_compile_frozen()); NativeModuleModificationScope native_module_modification_scope(native_module); TRACE_LAZY("Compiling wasm-function#%d.\n", func_index); CompilationStateImpl* compilation_state = Impl(native_module->compilation_state()); ExecutionTierPair tiers = GetRequestedExecutionTiers( module, compilation_state->compile_mode(), enabled_features, func_index); DCHECK_LE(native_module->num_imported_functions(), func_index); DCHECK_LT(func_index, native_module->num_functions()); WasmCompilationUnit baseline_unit{func_index, tiers.baseline_tier, kNoDebugging}; CompilationEnv env = native_module->CreateCompilationEnv(); WasmCompilationResult result = baseline_unit.ExecuteCompilation( isolate->wasm_engine(), &env, compilation_state->GetWireBytesStorage(), counters, compilation_state->detected_features()); // During lazy compilation, we can only get compilation errors when // {--wasm-lazy-validation} is enabled. Otherwise, the module was fully // verified before starting its execution. 
CHECK_IMPLIES(result.failed(), FLAG_wasm_lazy_validation); const WasmFunction* func = &module->functions[func_index]; if (result.failed()) { ErrorThrower thrower(isolate, nullptr); Vector code = compilation_state->GetWireBytesStorage()->GetCode(func->code); DecodeResult decode_result = ValidateSingleFunction( module, func_index, code, counters, isolate->wasm_engine()->allocator(), enabled_features); CHECK(decode_result.failed()); SetCompileError(&thrower, ModuleWireBytes(native_module->wire_bytes()), func, module, decode_result.error()); return false; } WasmCodeRefScope code_ref_scope; WasmCode* code = native_module->PublishCode( native_module->AddCompiledCode(std::move(result))); DCHECK_EQ(func_index, code->index()); if (WasmCode::ShouldBeLogged(isolate)) code->LogCode(isolate); counters->wasm_lazily_compiled_functions()->Increment(); const bool lazy_module = IsLazyModule(module); if (GetCompileStrategy(module, enabled_features, func_index, lazy_module) == CompileStrategy::kLazy && tiers.baseline_tier < tiers.top_tier) { WasmCompilationUnit tiering_unit{func_index, tiers.top_tier, kNoDebugging}; compilation_state->AddTopTierCompilationUnit(tiering_unit); } return true; } void TriggerTierUp(Isolate* isolate, NativeModule* native_module, int func_index) { CompilationStateImpl* compilation_state = Impl(native_module->compilation_state()); WasmCompilationUnit tiering_unit{func_index, ExecutionTier::kTurbofan, kNoDebugging}; compilation_state->AddTopTierCompilationUnit(tiering_unit); } namespace { void RecordStats(const Code code, Counters* counters) { counters->wasm_generated_code_size()->Increment(code.body_size()); counters->wasm_reloc_size()->Increment(code.relocation_info().length()); } enum CompilationExecutionResult : int8_t { kNoMoreUnits, kYield }; CompilationExecutionResult ExecuteJSToWasmWrapperCompilationUnits( const std::shared_ptr& token, JobDelegate* delegate) { std::shared_ptr wrapper_unit = nullptr; int num_processed_wrappers = 0; { BackgroundCompileScope compile_scope(token); if (compile_scope.cancelled()) return kNoMoreUnits; wrapper_unit = compile_scope.compilation_state() ->GetNextJSToWasmWrapperCompilationUnit(); if (!wrapper_unit) return kNoMoreUnits; } while (true) { wrapper_unit->Execute(); ++num_processed_wrappers; bool yield = delegate && delegate->ShouldYield(); BackgroundCompileScope compile_scope(token); if (compile_scope.cancelled()) return kNoMoreUnits; if (yield || !(wrapper_unit = compile_scope.compilation_state() ->GetNextJSToWasmWrapperCompilationUnit())) { compile_scope.compilation_state()->OnFinishedJSToWasmWrapperUnits( num_processed_wrappers); return yield ? kYield : kNoMoreUnits; } } } // Run by the {BackgroundCompileJob} (on any thread). CompilationExecutionResult ExecuteCompilationUnits( const std::shared_ptr& token, Counters* counters, JobDelegate* delegate, CompileBaselineOnly baseline_only) { TRACE_EVENT0("v8.wasm", "wasm.ExecuteCompilationUnits"); // Execute JS to Wasm wrapper units first, so that they are ready to be // finalized by the main thread when the kFinishedBaselineCompilation event is // triggered. if (ExecuteJSToWasmWrapperCompilationUnits(token, delegate) == kYield) { return kYield; } // These fields are initialized in a {BackgroundCompileScope} before // starting compilation. 
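  // They are copied out of the scope so that the actual compilation below can
  // run without holding the scope (and thus the token's shared mutex).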
base::Optional env; std::shared_ptr wire_bytes; std::shared_ptr module; WasmEngine* wasm_engine; int task_id; base::Optional unit; WasmFeatures detected_features = WasmFeatures::None(); auto stop = [&detected_features, &task_id](BackgroundCompileScope& compile_scope) { compile_scope.compilation_state()->OnCompilationStopped(task_id, detected_features); }; // Preparation (synchronized): Initialize the fields above and get the first // compilation unit. { BackgroundCompileScope compile_scope(token); if (compile_scope.cancelled()) return kNoMoreUnits; auto* compilation_state = compile_scope.compilation_state(); env.emplace(compile_scope.native_module()->CreateCompilationEnv()); wire_bytes = compilation_state->GetWireBytesStorage(); module = compile_scope.native_module()->shared_module(); wasm_engine = compile_scope.native_module()->engine(); task_id = compilation_state->GetFreeCompileTaskId(); unit = compilation_state->GetNextCompilationUnit(task_id, baseline_only); if (!unit) { stop(compile_scope); return kNoMoreUnits; } } TRACE_COMPILE("ExecuteCompilationUnits (task id %d)\n", task_id); std::vector results_to_publish; auto publish_results = [&results_to_publish]( BackgroundCompileScope* compile_scope) { TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"), "wasm.PublishCompilationResults", "num_results", results_to_publish.size()); if (results_to_publish.empty()) return; std::vector> unpublished_code = compile_scope->native_module()->AddCompiledCode( VectorOf(results_to_publish)); results_to_publish.clear(); // For import wrapper compilation units, add result to the cache. const NativeModule* native_module = compile_scope->native_module(); int num_imported_functions = native_module->num_imported_functions(); WasmImportWrapperCache* cache = native_module->import_wrapper_cache(); for (const auto& code : unpublished_code) { int func_index = code->index(); DCHECK_LE(0, func_index); DCHECK_LT(func_index, native_module->num_functions()); if (func_index < num_imported_functions) { const FunctionSig* sig = native_module->module()->functions[func_index].sig; WasmImportWrapperCache::CacheKey key( compiler::kDefaultImportCallKind, sig, static_cast(sig->parameter_count())); // If two imported functions have the same key, only one of them should // have been added as a compilation unit. So it is always the first time // we compile a wrapper for this key here. DCHECK_NULL((*cache)[key]); (*cache)[key] = code.get(); code->IncRef(); } } compile_scope->SchedulePublishCode(std::move(unpublished_code)); }; bool compilation_failed = false; while (true) { // (asynchronous): Execute the compilation. WasmCompilationResult result = unit->ExecuteCompilation( wasm_engine, &env.value(), wire_bytes, counters, &detected_features); results_to_publish.emplace_back(std::move(result)); bool yield = delegate && delegate->ShouldYield(); // (synchronized): Publish the compilation result and get the next unit. { BackgroundCompileScope compile_scope(token); if (compile_scope.cancelled()) return kNoMoreUnits; if (!results_to_publish.back().succeeded()) { // Compile error. compile_scope.compilation_state()->SetError(); stop(compile_scope); compilation_failed = true; break; } // Get next unit. if (yield || !(unit = compile_scope.compilation_state()->GetNextCompilationUnit( task_id, baseline_only))) { publish_results(&compile_scope); stop(compile_scope); return yield ? kYield : kNoMoreUnits; } // Before executing a TurboFan unit, ensure to publish all previous // units. 
If we compiled Liftoff before, we need to publish them anyway // to ensure fast completion of baseline compilation, if we compiled // TurboFan before, we publish to reduce peak memory consumption. if (unit->tier() == ExecutionTier::kTurbofan) { publish_results(&compile_scope); } } } // We only get here if compilation failed. Other exits return directly. DCHECK(compilation_failed); USE(compilation_failed); token->Cancel(); return kNoMoreUnits; } using JSToWasmWrapperKey = std::pair; // Returns the number of units added. int AddExportWrapperUnits(Isolate* isolate, WasmEngine* wasm_engine, NativeModule* native_module, CompilationUnitBuilder* builder, const WasmFeatures& enabled_features) { std::unordered_set> keys; for (auto exp : native_module->module()->export_table) { if (exp.kind != kExternalFunction) continue; auto& function = native_module->module()->functions[exp.index]; JSToWasmWrapperKey key(function.imported, *function.sig); if (keys.insert(key).second) { auto unit = std::make_shared( isolate, wasm_engine, function.sig, function.imported, enabled_features); builder->AddJSToWasmWrapperUnit(std::move(unit)); } } return static_cast(keys.size()); } // Returns the number of units added. int AddImportWrapperUnits(NativeModule* native_module, CompilationUnitBuilder* builder) { std::unordered_set keys; int num_imported_functions = native_module->num_imported_functions(); for (int func_index = 0; func_index < num_imported_functions; func_index++) { const FunctionSig* sig = native_module->module()->functions[func_index].sig; if (!IsJSCompatibleSignature(sig, native_module->enabled_features())) { continue; } WasmImportWrapperCache::CacheKey key( compiler::kDefaultImportCallKind, sig, static_cast(sig->parameter_count())); auto it = keys.insert(key); if (it.second) { // Ensure that all keys exist in the cache, so that we can populate the // cache later without locking. 
(*native_module->import_wrapper_cache())[key] = nullptr; builder->AddUnits(func_index); } } return static_cast(keys.size()); } void InitializeCompilationUnits(Isolate* isolate, NativeModule* native_module) { CompilationStateImpl* compilation_state = Impl(native_module->compilation_state()); const bool lazy_module = IsLazyModule(native_module->module()); ModuleWireBytes wire_bytes(native_module->wire_bytes()); CompilationUnitBuilder builder(native_module); auto* module = native_module->module(); const bool prefer_liftoff = native_module->IsTieredDown(); uint32_t start = module->num_imported_functions; uint32_t end = start + module->num_declared_functions; for (uint32_t func_index = start; func_index < end; func_index++) { if (prefer_liftoff) { builder.AddRecompilationUnit(func_index, ExecutionTier::kLiftoff); continue; } CompileStrategy strategy = GetCompileStrategy( module, native_module->enabled_features(), func_index, lazy_module); if (strategy == CompileStrategy::kLazy) { native_module->UseLazyStub(func_index); } else if (strategy == CompileStrategy::kLazyBaselineEagerTopTier) { builder.AddTopTierUnit(func_index); native_module->UseLazyStub(func_index); } else { DCHECK_EQ(strategy, CompileStrategy::kEager); builder.AddUnits(func_index); } } int num_import_wrappers = AddImportWrapperUnits(native_module, &builder); int num_export_wrappers = AddExportWrapperUnits(isolate, isolate->wasm_engine(), native_module, &builder, WasmFeatures::FromIsolate(isolate)); compilation_state->InitializeCompilationProgress( lazy_module, num_import_wrappers + num_export_wrappers); builder.Commit(); } bool MayCompriseLazyFunctions(const WasmModule* module, const WasmFeatures& enabled_features, bool lazy_module) { if (lazy_module || enabled_features.has_compilation_hints()) return true; #ifdef ENABLE_SLOW_DCHECKS int start = module->num_imported_functions; int end = start + module->num_declared_functions; for (int func_index = start; func_index < end; func_index++) { SLOW_DCHECK(GetCompileStrategy(module, enabled_features, func_index, lazy_module) != CompileStrategy::kLazy); } #endif return false; } class CompilationTimeCallback { public: enum CompileMode { kSynchronous, kAsync, kStreaming }; explicit CompilationTimeCallback( std::shared_ptr async_counters, std::shared_ptr metrics_recorder, v8::metrics::Recorder::ContextId context_id, std::weak_ptr native_module, CompileMode compile_mode) : start_time_(base::TimeTicks::Now()), async_counters_(std::move(async_counters)), metrics_recorder_(std::move(metrics_recorder)), context_id_(context_id), native_module_(std::move(native_module)), compile_mode_(compile_mode) {} void operator()(CompilationEvent event) { DCHECK(base::TimeTicks::IsHighResolution()); std::shared_ptr native_module = native_module_.lock(); if (!native_module) return; auto now = base::TimeTicks::Now(); auto duration = now - start_time_; if (event == CompilationEvent::kFinishedBaselineCompilation) { // Reset {start_time_} to measure tier-up time. start_time_ = now; if (compile_mode_ != kSynchronous) { TimedHistogram* histogram = compile_mode_ == kAsync ? 
async_counters_->wasm_async_compile_wasm_module_time() : async_counters_->wasm_streaming_compile_wasm_module_time(); histogram->AddSample(static_cast(duration.InMicroseconds())); } v8::metrics::WasmModuleCompiled event{ (compile_mode_ != kSynchronous), // async (compile_mode_ == kStreaming), // streamed false, // cached false, // deserialized FLAG_wasm_lazy_compilation, // lazy true, // success native_module->generated_code_size(), // code_size_in_bytes native_module->liftoff_bailout_count(), // liftoff_bailout_count duration.InMicroseconds() // wall_clock_time_in_us }; metrics_recorder_->DelayMainThreadEvent(event, context_id_); } if (event == CompilationEvent::kFinishedTopTierCompilation) { TimedHistogram* histogram = async_counters_->wasm_tier_up_module_time(); histogram->AddSample(static_cast(duration.InMicroseconds())); v8::metrics::WasmModuleTieredUp event{ FLAG_wasm_lazy_compilation, // lazy native_module->generated_code_size(), // code_size_in_bytes duration.InMicroseconds() // wall_clock_time_in_us }; metrics_recorder_->DelayMainThreadEvent(event, context_id_); } if (event == CompilationEvent::kFailedCompilation) { v8::metrics::WasmModuleCompiled event{ (compile_mode_ != kSynchronous), // async (compile_mode_ == kStreaming), // streamed false, // cached false, // deserialized FLAG_wasm_lazy_compilation, // lazy false, // success native_module->generated_code_size(), // code_size_in_bytes native_module->liftoff_bailout_count(), // liftoff_bailout_count duration.InMicroseconds() // wall_clock_time_in_us }; metrics_recorder_->DelayMainThreadEvent(event, context_id_); } } private: base::TimeTicks start_time_; const std::shared_ptr async_counters_; std::shared_ptr metrics_recorder_; v8::metrics::Recorder::ContextId context_id_; std::weak_ptr native_module_; const CompileMode compile_mode_; }; void CompileNativeModule(Isolate* isolate, v8::metrics::Recorder::ContextId context_id, ErrorThrower* thrower, const WasmModule* wasm_module, std::shared_ptr native_module) { CHECK(!FLAG_jitless); ModuleWireBytes wire_bytes(native_module->wire_bytes()); const bool lazy_module = IsLazyModule(wasm_module); if (!FLAG_wasm_lazy_validation && wasm_module->origin == kWasmOrigin && MayCompriseLazyFunctions(wasm_module, native_module->enabled_features(), lazy_module)) { // Validate wasm modules for lazy compilation if requested. Never validate // asm.js modules as these are valid by construction (additionally a CHECK // will catch this during lazy compilation). ValidateSequentially(wasm_module, native_module.get(), isolate->counters(), isolate->allocator(), thrower, lazy_module, kOnlyLazyFunctions); // On error: Return and leave the module in an unexecutable state. if (thrower->error()) return; } DCHECK_GE(kMaxInt, native_module->module()->num_declared_functions); // The callback captures a shared ptr to the semaphore. auto* compilation_state = Impl(native_module->compilation_state()); if (base::TimeTicks::IsHighResolution()) { compilation_state->AddCallback(CompilationTimeCallback{ isolate->async_counters(), isolate->metrics_recorder(), context_id, native_module, CompilationTimeCallback::kSynchronous}); } // Initialize the compilation units and kick off background compile tasks. 
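  // The current thread then blocks in {WaitForBaselineFinished} below until
  // baseline compilation has finished (see the failure check that follows).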
InitializeCompilationUnits(isolate, native_module.get()); compilation_state->WaitForBaselineFinished(); compilation_state->PublishDetectedFeatures(isolate); if (compilation_state->failed()) { DCHECK_IMPLIES(lazy_module, !FLAG_wasm_lazy_validation); ValidateSequentially(wasm_module, native_module.get(), isolate->counters(), isolate->allocator(), thrower, lazy_module); CHECK(thrower->error()); } } // The runnable task that performs compilations in the background. class BackgroundCompileJob : public JobTask { public: explicit BackgroundCompileJob( std::shared_ptr token, std::shared_ptr async_counters, std::shared_ptr> current_concurrency, int max_concurrency) : token_(std::move(token)), async_counters_(std::move(async_counters)), current_concurrency_(std::move(current_concurrency)), max_concurrency_(max_concurrency) {} void Run(JobDelegate* delegate) override { if (ExecuteCompilationUnits(token_, async_counters_.get(), delegate, kBaselineOrTopTier) == kYield) { return; } // Otherwise we didn't find any more units to execute. Reduce the available // concurrency to zero, but then check whether any more units were added in // the meantime, and increase back if necessary. current_concurrency_->store(0); { BackgroundCompileScope scope(token_); if (scope.cancelled()) return; size_t outstanding_units = scope.compilation_state()->NumOutstandingCompilations(); if (outstanding_units == 0) return; // On a race between this thread and the thread which scheduled the units, // this might increase concurrency more than needed, which is fine. It // will be reduced again when the first task finds no more work to do. scope.compilation_state()->ScheduleCompileJobForNewUnits( static_cast(outstanding_units)); } } size_t GetMaxConcurrency() const override { return std::min(max_concurrency_, current_concurrency_->load()); } private: const std::shared_ptr token_; const std::shared_ptr async_counters_; const std::shared_ptr> current_concurrency_; const int max_concurrency_; }; } // namespace std::shared_ptr CompileToNativeModule( Isolate* isolate, const WasmFeatures& enabled, ErrorThrower* thrower, std::shared_ptr module, const ModuleWireBytes& wire_bytes, Handle* export_wrappers_out) { const WasmModule* wasm_module = module.get(); OwnedVector wire_bytes_copy = OwnedVector::Of(wire_bytes.module_bytes()); // Prefer {wire_bytes_copy} to {wire_bytes.module_bytes()} for the temporary // cache key. When we eventually install the module in the cache, the wire // bytes of the temporary key and the new key have the same base pointer and // we can skip the full bytes comparison. std::shared_ptr native_module = isolate->wasm_engine()->MaybeGetNativeModule( wasm_module->origin, wire_bytes_copy.as_vector(), isolate); if (native_module) { // TODO(thibaudm): Look into sharing export wrappers. CompileJsToWasmWrappers(isolate, wasm_module, export_wrappers_out); return native_module; } TimedHistogramScope wasm_compile_module_time_scope(SELECT_WASM_COUNTER( isolate->counters(), wasm_module->origin, wasm_compile, module_time)); // Embedder usage count for declared shared memories. if (wasm_module->has_shared_memory) { isolate->CountUsage(v8::Isolate::UseCounterFeature::kWasmSharedMemory); } // Create a new {NativeModule} first. 
const bool uses_liftoff = module->origin == kWasmOrigin && FLAG_liftoff; size_t code_size_estimate = wasm::WasmCodeManager::EstimateNativeModuleCodeSize(module.get(), uses_liftoff); native_module = isolate->wasm_engine()->NewNativeModule( isolate, enabled, module, code_size_estimate); native_module->SetWireBytes(std::move(wire_bytes_copy)); v8::metrics::Recorder::ContextId context_id = isolate->GetOrRegisterRecorderContextId(isolate->native_context()); CompileNativeModule(isolate, context_id, thrower, wasm_module, native_module); bool cache_hit = !isolate->wasm_engine()->UpdateNativeModuleCache( thrower->error(), &native_module, isolate); if (thrower->error()) return {}; if (cache_hit) { CompileJsToWasmWrappers(isolate, wasm_module, export_wrappers_out); return native_module; } Impl(native_module->compilation_state()) ->FinalizeJSToWasmWrappers(isolate, native_module->module(), export_wrappers_out); // Ensure that the code objects are logged before returning. isolate->wasm_engine()->LogOutstandingCodesForIsolate(isolate); return native_module; } void RecompileNativeModule(NativeModule* native_module, TieringState tiering_state) { // Install a callback to notify us once background recompilation finished. auto recompilation_finished_semaphore = std::make_shared(0); auto* compilation_state = Impl(native_module->compilation_state()); // The callback captures a shared ptr to the semaphore. // Initialize the compilation units and kick off background compile tasks. compilation_state->InitializeRecompilation( tiering_state, [recompilation_finished_semaphore](CompilationEvent event) { if (event == CompilationEvent::kFinishedRecompilation) { recompilation_finished_semaphore->Signal(); } }); // Now wait until all compilation units finished. // TODO(clemensb): Contribute to compilation while waiting. recompilation_finished_semaphore->Wait(); DCHECK(!compilation_state->failed()); } AsyncCompileJob::AsyncCompileJob( Isolate* isolate, const WasmFeatures& enabled, std::unique_ptr bytes_copy, size_t length, Handle context, const char* api_method_name, std::shared_ptr resolver) : isolate_(isolate), api_method_name_(api_method_name), enabled_features_(enabled), wasm_lazy_compilation_(FLAG_wasm_lazy_compilation), start_time_(base::TimeTicks::Now()), bytes_copy_(std::move(bytes_copy)), wire_bytes_(bytes_copy_.get(), bytes_copy_.get() + length), resolver_(std::move(resolver)) { TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"), "wasm.AsyncCompileJob"); CHECK(FLAG_wasm_async_compilation); CHECK(!FLAG_jitless); v8::Isolate* v8_isolate = reinterpret_cast(isolate); v8::Platform* platform = V8::GetCurrentPlatform(); foreground_task_runner_ = platform->GetForegroundTaskRunner(v8_isolate); native_context_ = isolate->global_handles()->Create(context->native_context()); DCHECK(native_context_->IsNativeContext()); context_id_ = isolate->GetOrRegisterRecorderContextId(native_context_); } void AsyncCompileJob::Start() { DoAsync(isolate_->counters(), isolate_->metrics_recorder()); // -- } void AsyncCompileJob::Abort() { // Removing this job will trigger the destructor, which will cancel all // compilation. 
isolate_->wasm_engine()->RemoveCompileJob(this); } class AsyncStreamingProcessor final : public StreamingProcessor { public: explicit AsyncStreamingProcessor(AsyncCompileJob* job, std::shared_ptr counters, AccountingAllocator* allocator); ~AsyncStreamingProcessor() override; bool ProcessModuleHeader(Vector bytes, uint32_t offset) override; bool ProcessSection(SectionCode section_code, Vector bytes, uint32_t offset) override; bool ProcessCodeSectionHeader(int num_functions, uint32_t offset, std::shared_ptr, int code_section_length) override; bool ProcessFunctionBody(Vector bytes, uint32_t offset) override; void OnFinishedChunk() override; void OnFinishedStream(OwnedVector bytes) override; void OnError(const WasmError&) override; void OnAbort() override; bool Deserialize(Vector wire_bytes, Vector module_bytes) override; private: // Finishes the AsyncCompileJob with an error. void FinishAsyncCompileJobWithError(const WasmError&); void CommitCompilationUnits(); ModuleDecoder decoder_; AsyncCompileJob* job_; WasmEngine* wasm_engine_; std::unique_ptr compilation_unit_builder_; int num_functions_ = 0; bool prefix_cache_hit_ = false; bool before_code_section_ = true; std::shared_ptr async_counters_; AccountingAllocator* allocator_; // Running hash of the wire bytes up to code section size, but excluding the // code section itself. Used by the {NativeModuleCache} to detect potential // duplicate modules. size_t prefix_hash_; }; std::shared_ptr AsyncCompileJob::CreateStreamingDecoder() { DCHECK_NULL(stream_); stream_ = StreamingDecoder::CreateAsyncStreamingDecoder( std::make_unique( this, isolate_->async_counters(), isolate_->allocator())); return stream_; } AsyncCompileJob::~AsyncCompileJob() { // Note: This destructor always runs on the foreground thread of the isolate. background_task_manager_.CancelAndWait(); // If the runtime objects were not created yet, then initial compilation did // not finish yet. In this case we can abort compilation. if (native_module_ && module_object_.is_null()) { Impl(native_module_->compilation_state())->CancelCompilation(); } // Tell the streaming decoder that the AsyncCompileJob is not available // anymore. // TODO(ahaas): Is this notification really necessary? Check // https://crbug.com/888170. if (stream_) stream_->NotifyCompilationEnded(); CancelPendingForegroundTask(); isolate_->global_handles()->Destroy(native_context_.location()); if (!module_object_.is_null()) { isolate_->global_handles()->Destroy(module_object_.location()); } } void AsyncCompileJob::CreateNativeModule( std::shared_ptr module, size_t code_size_estimate) { // Embedder usage count for declared shared memories. if (module->has_shared_memory) { isolate_->CountUsage(v8::Isolate::UseCounterFeature::kWasmSharedMemory); } // TODO(wasm): Improve efficiency of storing module wire bytes. Only store // relevant sections, not function bodies // Create the module object and populate with compiled functions and // information needed at instantiation time. 
native_module_ = isolate_->wasm_engine()->NewNativeModule( isolate_, enabled_features_, std::move(module), code_size_estimate); native_module_->SetWireBytes({std::move(bytes_copy_), wire_bytes_.length()}); } bool AsyncCompileJob::GetOrCreateNativeModule( std::shared_ptr module, size_t code_size_estimate) { native_module_ = isolate_->wasm_engine()->MaybeGetNativeModule( module->origin, wire_bytes_.module_bytes(), isolate_); if (native_module_ == nullptr) { CreateNativeModule(std::move(module), code_size_estimate); return false; } return true; } void AsyncCompileJob::PrepareRuntimeObjects() { // Create heap objects for script and module bytes to be stored in the // module object. Asm.js is not compiled asynchronously. DCHECK(module_object_.is_null()); auto source_url = stream_ ? stream_->url() : Vector(); auto script = isolate_->wasm_engine()->GetOrCreateScript( isolate_, native_module_, source_url); Handle module_object = WasmModuleObject::New(isolate_, native_module_, script); module_object_ = isolate_->global_handles()->Create(*module_object); } // This function assumes that it is executed in a HandleScope, and that a // context is set on the isolate. void AsyncCompileJob::FinishCompile(bool is_after_cache_hit) { TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"), "wasm.FinishAsyncCompile"); bool is_after_deserialization = !module_object_.is_null(); auto compilation_state = Impl(native_module_->compilation_state()); if (!is_after_deserialization) { if (stream_) { stream_->NotifyNativeModuleCreated(native_module_); } PrepareRuntimeObjects(); } // Measure duration of baseline compilation or deserialization from cache. if (base::TimeTicks::IsHighResolution()) { base::TimeDelta duration = base::TimeTicks::Now() - start_time_; int duration_usecs = static_cast(duration.InMicroseconds()); isolate_->counters()->wasm_streaming_finish_wasm_module_time()->AddSample( duration_usecs); if (is_after_cache_hit || is_after_deserialization) { v8::metrics::WasmModuleCompiled event{ true, // async true, // streamed is_after_cache_hit, // cached is_after_deserialization, // deserialized wasm_lazy_compilation_, // lazy !compilation_state->failed(), // success native_module_->generated_code_size(), // code_size_in_bytes native_module_->liftoff_bailout_count(), // liftoff_bailout_count duration.InMicroseconds() // wall_clock_time_in_us }; isolate_->metrics_recorder()->DelayMainThreadEvent(event, context_id_); } } DCHECK(!isolate_->context().is_null()); // Finish the wasm script now and make it public to the debugger. Handle