// Copyright 2017 the V8 project authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "src/wasm/module-compiler.h" #include "src/api.h" #include "src/asmjs/asm-js.h" #include "src/assembler-inl.h" #include "src/base/optional.h" #include "src/base/template-utils.h" #include "src/base/utils/random-number-generator.h" #include "src/code-stubs.h" #include "src/compiler/wasm-compiler.h" #include "src/counters.h" #include "src/identity-map.h" #include "src/property-descriptor.h" #include "src/trap-handler/trap-handler.h" #include "src/wasm/compilation-manager.h" #include "src/wasm/module-decoder.h" #include "src/wasm/wasm-code-manager.h" #include "src/wasm/wasm-code-specialization.h" #include "src/wasm/wasm-engine.h" #include "src/wasm/wasm-js.h" #include "src/wasm/wasm-memory.h" #include "src/wasm/wasm-objects-inl.h" #include "src/wasm/wasm-result.h" #define TRACE(...) \ do { \ if (FLAG_trace_wasm_instances) PrintF(__VA_ARGS__); \ } while (false) #define TRACE_CHAIN(instance) \ do { \ instance->PrintInstancesChain(); \ } while (false) #define TRACE_COMPILE(...) \ do { \ if (FLAG_trace_wasm_compiler) PrintF(__VA_ARGS__); \ } while (false) #define TRACE_STREAMING(...) \ do { \ if (FLAG_trace_wasm_streaming) PrintF(__VA_ARGS__); \ } while (false) #define TRACE_LAZY(...) \ do { \ if (FLAG_trace_wasm_lazy_compilation) PrintF(__VA_ARGS__); \ } while (false) namespace v8 { namespace internal { namespace wasm { enum class CompilationEvent : uint8_t { kFinishedBaselineCompilation, kFailedCompilation }; enum class NotifyCompilationCallback : uint8_t { kNotify, kNoNotify }; // The CompilationState keeps track of the compilation state of the // owning NativeModule, i.e. which functions are left to be compiled. // It contains a task manager to allow parallel and asynchronous background // compilation of functions. 
class CompilationState {
 public:
  // A bounded, randomized queue of finished compilation units waiting to be
  // "finished" (turned into code) on the foreground thread. Randomization
  // comes from {random_number_generator}; {max_memory} bounds the memory of
  // queued units (0 = unbounded — TODO confirm, the implementation is not
  // visible here).
  // NOTE(review): template arguments were stripped by extraction (e.g.
  // "std::unique_ptr item") — restore before compiling.
  class CodeGenerationSchedule {
   public:
    explicit CodeGenerationSchedule(
        base::RandomNumberGenerator* random_number_generator,
        size_t max_memory = 0);

    void Schedule(std::unique_ptr item);

    bool IsEmpty() const { return schedule_.empty(); }
    std::unique_ptr GetNext();
    bool CanAcceptWork() const;
    bool ShouldIncreaseWorkload() const;

   private:
    size_t GetRandomIndexInSchedule();

    base::RandomNumberGenerator* random_number_generator_ = nullptr;
    std::vector> schedule_;
    const size_t max_memory_;      // memory budget for queued units
    size_t allocated_memory_ = 0;  // memory currently held by queued units
  };

  explicit CompilationState(internal::Isolate* isolate);
  ~CompilationState();

  // Needs to be set before {AddCompilationUnits} is run, which triggers
  // background compilation.
  void SetNumberOfFunctionsToCompile(size_t num_functions);
  void AddCallback(
      std::function)> callback);

  // Inserts new functions to compile and kicks off compilation.
  void AddCompilationUnits(
      std::vector>& units);
  std::unique_ptr GetNextCompilationUnit();
  std::unique_ptr GetNextExecutedUnit();
  bool HasCompilationUnitToFinish();

  // Record a failure (optionally notifying callbacks) / a finished unit.
  void OnError(Handle error, NotifyCompilationCallback notify);
  void OnFinishedUnit(NotifyCompilationCallback notify);
  void ScheduleUnitForFinishing(
      std::unique_ptr& unit);

  void CancelAndWait();
  void OnBackgroundTaskStopped();
  void RestartBackgroundTasks(size_t max = std::numeric_limits::max());
  // Only one foreground thread (finisher) is allowed to run at a time.
  // {SetFinisherIsRunning} returns whether the flag changed its state.
  bool SetFinisherIsRunning(bool value);
  void ScheduleFinisherTask();

  bool StopBackgroundCompilationTaskForThrottling();

  void Abort();

  Isolate* isolate() const { return isolate_; }

  bool failed() const {
    // {failed_} is written concurrently; read under the lock.
    base::LockGuard guard(&mutex_);
    return failed_;
  }

 private:
  void NotifyOnEvent(CompilationEvent event, Handle error);

  Isolate* isolate_;

  // This mutex protects all information of this CompilationState which is
  // being accessed concurrently.
  mutable base::Mutex mutex_;

  //////////////////////////////////////////////////////////////////////////////
  // Protected by {mutex_}:

  std::vector> compilation_units_;
  CodeGenerationSchedule executed_units_;
  bool finisher_is_running_ = false;
  bool failed_ = false;
  size_t num_background_tasks_ = 0;

  // End of fields protected by {mutex_}.
  //////////////////////////////////////////////////////////////////////////////

  std::vector)>> callbacks_;

  // When canceling the background_task_manager_, use {CancelAndWait} on
  // the CompilationState in order to cleanly clean up.
  CancelableTaskManager background_task_manager_;
  CancelableTaskManager foreground_task_manager_;

  std::shared_ptr background_task_runner_;
  std::shared_ptr foreground_task_runner_;

  const size_t max_background_tasks_ = 0;

  // Functions still awaiting baseline compilation.
  size_t outstanding_units_ = 0;
};

namespace {

// Caches compiled JS-to-wasm wrappers keyed by function signature: a wrapper
// compiled once for a signature is cloned (and its embedded wasm call target
// patched) for further functions with the same signature.
class JSToWasmWrapperCache {
 public:
  Handle CloneOrCompileJSToWasmWrapper(Isolate* isolate,
                                       wasm::WasmModule* module,
                                       wasm::WasmCode* wasm_code,
                                       uint32_t index, bool use_trap_handler) {
    const wasm::WasmFunction* func = &module->functions[index];
    int cached_idx = sig_map_.Find(func->sig);
    if (cached_idx >= 0) {
      // Cache hit: copy the cached wrapper, then redirect its wasm call.
      Handle code = isolate->factory()->CopyCode(code_cache_[cached_idx]);
      // Now patch the call to wasm code.
      RelocIterator it(*code, RelocInfo::ModeMask(RelocInfo::JS_TO_WASM_CALL));
      DCHECK(!it.done());
      it.rinfo()->set_js_to_wasm_address(
          wasm_code == nullptr ? nullptr : wasm_code->instructions().start());
      return code;
    }

    // Cache miss: compile a fresh wrapper and remember it for this signature.
    Handle code = compiler::CompileJSToWasmWrapper(
        isolate, module, weak_instance_, wasm_code, index, use_trap_handler);
    uint32_t new_cache_idx = sig_map_.FindOrInsert(func->sig);
    DCHECK_EQ(code_cache_.size(), new_cache_idx);
    USE(new_cache_idx);
    code_cache_.push_back(code);
    return code;
  }

  void SetWeakInstance(Handle weak_instance) {
    weak_instance_ = weak_instance;
  }

 private:
  // sig_map_ maps signatures to an index in code_cache_.
  wasm::SignatureMap sig_map_;
  std::vector> code_cache_;
  Handle weak_instance_;
};

// A helper class to simplify instantiating a module from a compiled module.
// It closes over the {Isolate}, the {ErrorThrower}, the {WasmCompiledModule},
// etc.
class InstanceBuilder {
 public:
  InstanceBuilder(Isolate* isolate, ErrorThrower* thrower,
                  Handle module_object, MaybeHandle ffi,
                  MaybeHandle memory);

  // Build an instance, in all of its glory.
  MaybeHandle Build();
  // Run the start function, if any.
  bool ExecuteStartFunction();

 private:
  // Represents the initialized state of a table.
  struct TableInstance {
    Handle table_object;  // WebAssembly.Table instance
    Handle js_wrappers;   // JSFunctions exported
    size_t table_size;
  };

  // A pre-evaluated value to use in import binding.
  struct SanitizedImport {
    Handle module_name;
    Handle import_name;
    Handle value;
  };

  Isolate* isolate_;
  WasmModule* const module_;
  const std::shared_ptr async_counters_;
  ErrorThrower* thrower_;
  Handle module_object_;
  MaybeHandle ffi_;     // the imports object, if any
  MaybeHandle memory_;  // the pre-supplied memory, if any
  Handle globals_;      // backing store for globals
  Handle compiled_module_;
  std::vector table_instances_;
  std::vector> js_wrappers_;
  Handle start_function_;
  JSToWasmWrapperCache js_to_wasm_cache_;
  std::vector sanitized_imports_;

  const std::shared_ptr& async_counters() const {
    return async_counters_;
  }
  Counters* counters() const { return async_counters().get(); }

  bool use_trap_handler() const { return compiled_module_->use_trap_handler(); }

// Helper routines to print out errors with imports.
// The macro expands to two overloads per thrower type (LinkError/TypeError):
// one for a failed "module.name" lookup and one for a failed module lookup.
#define ERROR_THROWER_WITH_MESSAGE(TYPE)                                  \
  void Report##TYPE(const char* error, uint32_t index,                    \
                    Handle module_name, Handle import_name) {             \
    thrower_->TYPE("Import #%d module=\"%s\" function=\"%s\" error: %s",  \
                   index, module_name->ToCString().get(),                 \
                   import_name->ToCString().get(), error);                \
  }                                                                      \
                                                                         \
  MaybeHandle Report##TYPE(const char* error, uint32_t index,            \
                           Handle module_name) {                         \
    thrower_->TYPE("Import #%d module=\"%s\" error: %s", index,          \
                   module_name->ToCString().get(), error);               \
    return MaybeHandle();                                                \
  }

  ERROR_THROWER_WITH_MESSAGE(LinkError)
  ERROR_THROWER_WITH_MESSAGE(TypeError)
#undef ERROR_THROWER_WITH_MESSAGE

  // Look up an import value in the {ffi_} object.
  MaybeHandle LookupImport(uint32_t index, Handle module_name,
                           Handle import_name);

  // Look up an import value in the {ffi_} object specifically for linking an
  // asm.js module. This only performs non-observable lookups, which allows
  // falling back to JavaScript proper (and hence re-executing all lookups) if
  // module instantiation fails.
  MaybeHandle LookupImportAsm(uint32_t index, Handle import_name);

  uint32_t EvalUint32InitExpr(const WasmInitExpr& expr);

  // Load data segments into the memory.
  void LoadDataSegments(Handle instance);

  void WriteGlobalValue(WasmGlobal& global, Handle value);

  void SanitizeImports();

  // Process the imports, including functions, tables, globals, and memory, in
  // order, loading them from the {ffi_} object. Returns the number of imported
  // functions.
  int ProcessImports(Handle instance);

  // NOTE(review): the template parameter list was stripped by extraction.
  template T* GetRawGlobalPtr(WasmGlobal& global);

  // Process initialization of globals.
  void InitGlobals();

  // Allocate memory for a module instance as a new JSArrayBuffer.
  Handle AllocateMemory(uint32_t num_pages);

  bool NeedsWrappers() const;

  // Process the exports, creating wrappers for functions, tables, memories,
  // and globals.
  void ProcessExports(Handle instance,
                      Handle compiled_module);

  void InitializeTables(Handle instance,
                        CodeSpecialization* code_specialization);

  void LoadTableSegments(Handle instance);
};

}  // namespace

// Public entry point: builds an instance and runs its start function.
// Returns an empty MaybeHandle if either step fails (errors are reported
// through {thrower}).
MaybeHandle InstantiateToInstanceObject(
    Isolate* isolate, ErrorThrower* thrower, Handle module_object,
    MaybeHandle imports, MaybeHandle memory) {
  InstanceBuilder builder(isolate, thrower, module_object, imports, memory);
  auto instance = builder.Build();
  if (!instance.is_null() && builder.ExecuteStartFunction()) {
    return instance;
  }
  return {};
}

// A helper class to prevent pathological patching behavior for indirect
// references to code which must be updated after lazy compiles.
// Utilizes a reverse mapping to prevent O(n^2) behavior.
class IndirectPatcher {
 public:
  // Redirects all import/indirect-table entries of {caller_instance} that
  // still point at {old_target} (a lazy-compile stub) for {func_index} to
  // {new_code}. The mapping is lazily (re)built after {kMaxMisses}
  // unproductive calls.
  void Patch(WasmInstanceObject* caller_instance,
             WasmInstanceObject* target_instance, int func_index,
             Address old_target, const WasmCode* new_code) {
    DisallowHeapAllocation no_gc;
    TRACE_LAZY(
        "IndirectPatcher::Patch(caller=%p, target=%p, func_index=%i, "
        "old_target=%p, "
        "new_code=%p)\n",
        caller_instance, target_instance, func_index, old_target, new_code);
    if (mapping_.size() == 0 || misses_ >= kMaxMisses) {
      BuildMapping(caller_instance);
    }
    // Patch entries for the given function index.
    WasmCodeManager* code_manager =
        caller_instance->GetIsolate()->wasm_engine()->code_manager();
    USE(code_manager);  // only used in DCHECKs below
    auto& entries = mapping_[func_index];
    int patched = 0;
    for (auto index : entries) {
      if (index < 0) {
        // Imported function entry. Negative values encode import index
        // {i} as {-1 - i} (see BuildMapping).
        int i = -1 - index;
        ImportedFunctionEntry entry(caller_instance, i);
        if (entry.target() == old_target) {
          DCHECK_EQ(
              func_index,
              code_manager->GetCodeFromStartAddress(entry.target())->index());
          entry.set(target_instance, new_code);
          patched++;
        }
      } else {
        // Indirect function table entry.
        int i = index;
        IndirectFunctionTableEntry entry(caller_instance, i);
        if (entry.target() == old_target) {
          DCHECK_EQ(
              func_index,
              code_manager->GetCodeFromStartAddress(entry.target())->index());
          entry.set(entry.sig_id(), target_instance, new_code);
          patched++;
        }
      }
    }
    // A call that patched nothing counts as a miss; too many misses mean
    // the mapping is stale and gets rebuilt next time.
    if (patched == 0) misses_++;
  }

 private:
  // Scans all import and indirect-table entries of {caller_instance} that
  // still point to lazy-compile stubs, patching those whose target is
  // already compiled and recording the rest in {mapping_} keyed by function
  // index (imports encoded as {-1 - i}, table entries as {i}).
  void BuildMapping(WasmInstanceObject* caller_instance) {
    mapping_.clear();
    misses_ = 0;
    TRACE_LAZY("BuildMapping for (caller=%p)...\n", caller_instance);
    Isolate* isolate = caller_instance->GetIsolate();
    WasmCodeManager* code_manager = isolate->wasm_engine()->code_manager();
    uint32_t num_imported_functions =
        caller_instance->module()->num_imported_functions;
    // Process the imported function entries.
    for (unsigned i = 0; i < num_imported_functions; i++) {
      ImportedFunctionEntry entry(caller_instance, i);
      WasmCode* code = code_manager->GetCodeFromStartAddress(entry.target());
      if (code->kind() != WasmCode::kLazyStub) continue;
      TRACE_LAZY(" +import[%u] -> #%d (%p)\n", i, code->index(),
                 code->instructions().start());
      DCHECK(!entry.is_js_receiver_entry());
      WasmInstanceObject* target_instance = entry.instance();
      WasmCode* new_code =
          target_instance->compiled_module()->GetNativeModule()->GetCode(
              code->index());
      if (new_code->kind() != WasmCode::kLazyStub) {
        // Patch an imported function entry which is already compiled.
        entry.set(target_instance, new_code);
      } else {
        int key = code->index();
        int index = -1 - i;
        mapping_[key].push_back(index);
      }
    }
    // Process the indirect function table entries.
    size_t ift_size = caller_instance->indirect_function_table_size();
    for (unsigned i = 0; i < ift_size; i++) {
      IndirectFunctionTableEntry entry(caller_instance, i);
      if (entry.target() == nullptr) continue;  // null IFT entry
      WasmCode* code = code_manager->GetCodeFromStartAddress(entry.target());
      if (code->kind() != WasmCode::kLazyStub) continue;
      TRACE_LAZY(" +indirect[%u] -> #%d (lazy:%p)\n", i, code->index(),
                 code->instructions().start());
      WasmInstanceObject* target_instance = entry.instance();
      WasmCode* new_code =
          target_instance->compiled_module()->GetNativeModule()->GetCode(
              code->index());
      if (new_code->kind() != WasmCode::kLazyStub) {
        // Patch an indirect function table entry which is already compiled.
        entry.set(entry.sig_id(), target_instance, new_code);
      } else {
        int key = code->index();
        int index = i;
        mapping_[key].push_back(index);
      }
    }
  }

  static constexpr int kMaxMisses = 5;  // maximum misses before rebuilding
  // func_index -> entries that may still reference its lazy stub.
  // NOTE(review): value type's template arguments stripped by extraction.
  std::unordered_map> mapping_;
  int misses_ = 0;
};

// Builds a ModuleEnv describing {compiled_module} (module + trap-handler
// mode) for the compiler.
compiler::ModuleEnv CreateModuleEnvFromCompiledModule(
    Isolate* isolate, Handle compiled_module) {
  DisallowHeapAllocation no_gc;
  WasmModule* module = compiled_module->shared()->module();
  compiler::ModuleEnv result(module, compiled_module->use_trap_handler());
  return result;
}

// Compiles function {func_index} of {compiled_module} synchronously, unless
// it is already compiled. Records lazy-compilation counters and flushes the
// instruction cache for the new code.
const wasm::WasmCode* LazyCompileFunction(
    Isolate* isolate, Handle compiled_module, int func_index) {
  base::ElapsedTimer compilation_timer;
  wasm::WasmCode* existing_code = compiled_module->GetNativeModule()->GetCode(
      static_cast(func_index));
  if (existing_code != nullptr &&
      existing_code->kind() == wasm::WasmCode::kFunction) {
    TRACE_LAZY("Function %d already compiled.\n", func_index);
    return existing_code;
  }

  compilation_timer.Start();
  // TODO(wasm): Refactor this to only get the name if it is really needed for
  // tracing / debugging.
  std::string func_name;
  {
    WasmName name = Vector::cast(
        compiled_module->shared()->GetRawFunctionName(func_index));
    // Copy to std::string, because the underlying string object might move on
    // the heap.
    func_name.assign(name.start(), static_cast(name.length()));
  }
  TRACE_LAZY("Compiling function %s, %d.\n", func_name.c_str(), func_index);

  compiler::ModuleEnv module_env =
      CreateModuleEnvFromCompiledModule(isolate, compiled_module);

  const uint8_t* module_start =
      compiled_module->shared()->module_bytes()->GetChars();

  const WasmFunction* func = &module_env.module->functions[func_index];
  FunctionBody body{func->sig, func->code.offset(),
                    module_start + func->code.offset(),
                    module_start + func->code.end_offset()};

  ErrorThrower thrower(isolate, "WasmLazyCompile");
  compiler::WasmCompilationUnit unit(isolate, &module_env,
                                     compiled_module->GetNativeModule(), body,
                                     CStrVector(func_name.c_str()), func_index,
                                     CEntryStub(isolate, 1).GetCode());
  unit.ExecuteCompilation();
  wasm::WasmCode* wasm_code = unit.FinishCompilation(&thrower);

  if (wasm::WasmCode::ShouldBeLogged(isolate)) wasm_code->LogCode(isolate);

  // If there is a pending error, something really went wrong. The module was
  // verified before starting execution with lazy compilation.
  // This might be OOM, but then we cannot continue execution anyway.
  // TODO(clemensh): According to the spec, we can actually skip validation at
  // module creation time, and return a function that always traps here.
  CHECK(!thrower.error());

  // Now specialize the generated code for this instance.
  CodeSpecialization code_specialization;
  code_specialization.RelocateDirectCalls(compiled_module->GetNativeModule());
  code_specialization.ApplyToWasmCode(wasm_code, SKIP_ICACHE_FLUSH);
  int64_t func_size =
      static_cast(func->code.end_offset() - func->code.offset());
  int64_t compilation_time = compilation_timer.Elapsed().InMicroseconds();

  auto counters = isolate->counters();
  counters->wasm_lazily_compiled_functions()->Increment();

  // The icache flush was skipped above (SKIP_ICACHE_FLUSH); do it once here.
  Assembler::FlushICache(wasm_code->instructions().start(),
                         wasm_code->instructions().size());
  counters->wasm_generated_code_size()->Increment(
      static_cast(wasm_code->instructions().size()));
  counters->wasm_reloc_size()->Increment(
      static_cast(wasm_code->reloc_info().size()));

  // Guard against division by zero for very fast compiles.
  counters->wasm_lazy_compilation_throughput()->AddSample(
      compilation_time != 0 ? static_cast(func_size / compilation_time)
                            : 0);

  return wasm_code;
}

namespace {

// Advances {iterator} to the first entry behind {offset} and returns the
// wire-bytes (script) offset of the last entry at or before it.
int AdvanceSourcePositionTableIterator(SourcePositionTableIterator& iterator,
                                       int offset) {
  DCHECK(!iterator.done());
  int byte_pos;
  do {
    byte_pos = iterator.source_position().ScriptOffset();
    iterator.Advance();
  } while (!iterator.done() && iterator.code_offset() <= offset);
  return byte_pos;
}

// Lazy compilation triggered from a JS-to-wasm wrapper: compiles the callee
// and patches the single JS_TO_WASM_CALL site in {js_to_wasm_caller}.
const wasm::WasmCode* LazyCompileFromJsToWasm(
    Isolate* isolate, Handle instance, Handle js_to_wasm_caller,
    uint32_t callee_func_index) {
  Decoder decoder(nullptr, nullptr);
  Handle compiled_module(instance->compiled_module(), isolate);

  TRACE_LAZY(
      "Starting lazy compilation (func %u, js_to_wasm: true, patch caller: "
      "true). \n",
      callee_func_index);
  LazyCompileFunction(isolate, compiled_module, callee_func_index);
  {
    DisallowHeapAllocation no_gc;
    CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
    RelocIterator it(*js_to_wasm_caller,
                     RelocInfo::ModeMask(RelocInfo::JS_TO_WASM_CALL));
    DCHECK(!it.done());
    const wasm::WasmCode* callee_compiled =
        compiled_module->GetNativeModule()->GetCode(callee_func_index);
    DCHECK_NOT_NULL(callee_compiled);
    // The wrapper must currently point at a lazy-compile stub.
    DCHECK_EQ(WasmCode::kLazyStub,
              isolate->wasm_engine()
                  ->code_manager()
                  ->GetCodeFromStartAddress(it.rinfo()->js_to_wasm_address())
                  ->kind());
    it.rinfo()->set_js_to_wasm_address(callee_compiled->instructions().start());
    TRACE_LAZY("Patched 1 location in js-to-wasm %p.\n", *js_to_wasm_caller);

#ifdef DEBUG
    // A js-to-wasm wrapper has exactly one wasm call site.
    it.next();
    DCHECK(it.done());
#endif
  }

  wasm::WasmCode* ret =
      compiled_module->GetNativeModule()->GetCode(callee_func_index);
  DCHECK_NOT_NULL(ret);
  DCHECK_EQ(wasm::WasmCode::kFunction, ret->kind());
  return ret;
}

// Lazy compilation triggered through an indirect call: only compiles the
// callee; table entries are fixed up by the IndirectPatcher in CompileLazy.
const wasm::WasmCode* LazyCompileIndirectCall(
    Isolate* isolate, Handle instance, uint32_t func_index) {
  TRACE_LAZY(
      "Starting lazy compilation (func %u, js_to_wasm: false, patch caller: "
      "false). \n",
      func_index);
  Handle compiled_module(instance->compiled_module(), isolate);
  return LazyCompileFunction(isolate, compiled_module, func_index);
}

// Lazy compilation triggered by a direct wasm-to-wasm call: identifies the
// callee from the caller's code at the return address, compiles it, and
// patches all already-compiled direct call targets in the caller.
const wasm::WasmCode* LazyCompileDirectCall(Isolate* isolate,
                                            Handle instance,
                                            const wasm::WasmCode* wasm_caller,
                                            int32_t caller_ret_offset) {
  DCHECK_LE(0, caller_ret_offset);

  Decoder decoder(nullptr, nullptr);

  // Gather all the targets of direct calls inside the code of {wasm_caller}
  // and place their function indexes in {direct_callees}.
  std::vector direct_callees;
  // The last one before {caller_ret_offset} must be the call that triggered
  // this lazy compilation.
  int callee_pos = -1;
  uint32_t num_non_compiled_callees = 0;  // For stats.
  {
    DisallowHeapAllocation no_gc;
    Handle caller_module(
        wasm_caller->native_module()->compiled_module(), isolate);
    SeqOneByteString* module_bytes = caller_module->shared()->module_bytes();
    uint32_t caller_func_index = wasm_caller->index();
    SourcePositionTableIterator source_pos_iterator(
        wasm_caller->source_positions());

    // Start of the caller function's body inside the wire bytes.
    const byte* func_bytes =
        module_bytes->GetChars() + caller_module->shared()
                                       ->module()
                                       ->functions[caller_func_index]
                                       .code.offset();
    for (RelocIterator it(wasm_caller->instructions(),
                          wasm_caller->reloc_info(),
                          wasm_caller->constant_pool(),
                          RelocInfo::ModeMask(RelocInfo::WASM_CALL));
         !it.done(); it.next()) {
      // TODO(clemensh): Introduce safe_cast which (D)CHECKS
      // (depending on the bool) against limits of T and then static_casts.
      size_t offset_l = it.rinfo()->pc() - wasm_caller->instructions().start();
      DCHECK_GE(kMaxInt, offset_l);
      int offset = static_cast(offset_l);
      // Map the code offset of this call site back to its wire-bytes
      // position so the callee index can be decoded from the call opcode.
      int byte_pos =
          AdvanceSourcePositionTableIterator(source_pos_iterator, offset);

      WasmCode* callee = isolate->wasm_engine()->code_manager()->LookupCode(
          it.rinfo()->target_address());
      if (callee->kind() == WasmCode::kLazyStub) {
        // The callee has not been compiled.
        ++num_non_compiled_callees;
        int32_t callee_func_index =
            ExtractDirectCallIndex(decoder, func_bytes + byte_pos);
        DCHECK_LT(callee_func_index,
                  caller_module->GetNativeModule()->FunctionCount());
        // {caller_ret_offset} points to one instruction after the call.
        // Remember the last called function before that offset.
        if (offset < caller_ret_offset) {
          callee_pos = static_cast(direct_callees.size());
        }
        direct_callees.push_back(callee_func_index);
      } else {
        // If the callee is not the lazy compile stub, assume this callee
        // has already been compiled. -1 marks it as not-to-patch below.
        direct_callees.push_back(-1);
        continue;
      }
    }

    TRACE_LAZY("Found %d non-compiled callees in function=%p.\n",
               num_non_compiled_callees, wasm_caller);
    USE(num_non_compiled_callees);
  }
  CHECK_LE(0, callee_pos);

  // TODO(wasm): compile all functions in non_compiled_callees in
  // background, wait for direct_callees[callee_pos].
  auto callee_func_index = direct_callees[callee_pos];
  TRACE_LAZY(
      "Starting lazy compilation (function=%p retaddr=+%d direct_callees[%d] "
      "-> %d).\n",
      wasm_caller, caller_ret_offset, callee_pos, callee_func_index);

  Handle compiled_module(instance->compiled_module(), isolate);
  const WasmCode* ret =
      LazyCompileFunction(isolate, compiled_module, callee_func_index);
  DCHECK_NOT_NULL(ret);

  int patched = 0;
  {
    DisallowHeapAllocation no_gc;
    // Now patch the code in {wasm_caller} with all functions which are now
    // compiled. This will pick up any other compiled functions, not only
    // {ret}.
    size_t pos = 0;
    for (RelocIterator it(wasm_caller->instructions(),
                          wasm_caller->reloc_info(),
                          wasm_caller->constant_pool(),
                          RelocInfo::ModeMask(RelocInfo::WASM_CALL));
         !it.done(); it.next(), ++pos) {
      auto callee_index = direct_callees[pos];
      if (callee_index < 0) continue;  // callee already compiled.
      const WasmCode* callee_compiled =
          compiled_module->GetNativeModule()->GetCode(callee_index);
      if (callee_compiled->kind() != WasmCode::kFunction) continue;
      // Only sites still pointing at a lazy stub are rewritten.
      DCHECK_EQ(WasmCode::kLazyStub,
                isolate->wasm_engine()
                    ->code_manager()
                    ->GetCodeFromStartAddress(it.rinfo()->wasm_call_address())
                    ->kind());
      it.rinfo()->set_wasm_call_address(
          callee_compiled->instructions().start());
      ++patched;
    }
    DCHECK_EQ(direct_callees.size(), pos);
  }

  DCHECK_LT(0, patched);
  TRACE_LAZY("Patched %d calls(s) in %p.\n", patched, wasm_caller);
  USE(patched);

  return ret;
}

}  // namespace

// Entry point for the lazy-compile stub: walks the stack to find out who
// triggered compilation (js-to-wasm wrapper, direct call, or indirect call),
// compiles the target function, patches the caller, and returns the address
// of the newly compiled code to resume into.
Address CompileLazy(Isolate* isolate, Handle target_instance) {
  HistogramTimerScope lazy_time_scope(
      isolate->counters()->wasm_lazy_compilation_time());

  //==========================================================================
  // Begin stack walk.
  //==========================================================================
  StackFrameIterator it(isolate);

  //==========================================================================
  // First frame: C entry stub.
  //==========================================================================
  DCHECK(!it.done());
  DCHECK_EQ(StackFrame::EXIT, it.frame()->type());
  it.Advance();

  //==========================================================================
  // Second frame: WasmCompileLazy builtin.
  //==========================================================================
  DCHECK(!it.done());
  int target_func_index = -1;
  bool indirectly_called = false;
  const wasm::WasmCode* lazy_stub =
      isolate->wasm_engine()->code_manager()->LookupCode(it.frame()->pc());
  CHECK_EQ(wasm::WasmCode::kLazyStub, lazy_stub->kind());
  if (!lazy_stub->IsAnonymous()) {
    // If the lazy stub is not "anonymous", then its copy encodes the target
    // function index. Used for import and indirect calls.
    target_func_index = lazy_stub->index();
    indirectly_called = true;
  }
  it.Advance();

  //==========================================================================
  // Third frame: The calling wasm code (direct or indirect), or js-to-wasm
  // wrapper.
  //==========================================================================
  DCHECK(!it.done());
  DCHECK(it.frame()->is_js_to_wasm() || it.frame()->is_wasm_compiled());
  Handle js_to_wasm_caller_code;
  const WasmCode* wasm_caller_code = nullptr;
  int32_t caller_ret_offset = -1;
  if (it.frame()->is_js_to_wasm()) {
    js_to_wasm_caller_code = handle(it.frame()->LookupCode(), isolate);
    // This wasn't actually an indirect call, but a JS->wasm call.
    indirectly_called = false;
  } else {
    wasm_caller_code =
        isolate->wasm_engine()->code_manager()->LookupCode(it.frame()->pc());
    // Return address relative to the caller's code start; checked to fit
    // into int32_t.
    auto offset = it.frame()->pc() - wasm_caller_code->instructions().start();
    caller_ret_offset = static_cast(offset);
    DCHECK_EQ(offset, caller_ret_offset);
  }

  //==========================================================================
  // Begin compilation.
  //==========================================================================
  Handle compiled_module(target_instance->compiled_module());

  NativeModule* native_module = compiled_module->GetNativeModule();
  DCHECK(!native_module->lazy_compile_frozen());

  NativeModuleModificationScope native_module_modification_scope(native_module);

  const wasm::WasmCode* result = nullptr;

  // Dispatch on the caller kind established above.
  if (!js_to_wasm_caller_code.is_null()) {
    result = LazyCompileFromJsToWasm(isolate, target_instance,
                                     js_to_wasm_caller_code,
                                     target_func_index);
    DCHECK_NOT_NULL(result);
    DCHECK_EQ(target_func_index, result->index());
  } else {
    DCHECK_NOT_NULL(wasm_caller_code);
    if (target_func_index < 0) {
      // Anonymous stub: the callee index must be decoded from the caller's
      // code at the return address (direct call).
      result = LazyCompileDirectCall(isolate, target_instance,
                                     wasm_caller_code, caller_ret_offset);
      DCHECK_NOT_NULL(result);
    } else {
      result = LazyCompileIndirectCall(isolate, target_instance,
                                       target_func_index);
      DCHECK_NOT_NULL(result);
    }
  }

  //==========================================================================
  // Update import and indirect function tables in the caller.
  //==========================================================================
  if (indirectly_called) {
    DCHECK_NOT_NULL(wasm_caller_code);
    Handle caller_instance(
        WasmInstanceObject::GetOwningInstance(wasm_caller_code), isolate);
    // Lazily allocate the per-instance IndirectPatcher.
    if (!caller_instance->has_managed_indirect_patcher()) {
      auto patcher = Managed::Allocate(isolate);
      caller_instance->set_managed_indirect_patcher(*patcher);
    }
    IndirectPatcher* patcher =
        Managed::cast(caller_instance->managed_indirect_patcher())->get();
    Address old_target = lazy_stub->instructions().start();
    patcher->Patch(*caller_instance, *target_instance, target_func_index,
                   old_target, result);
  }

  return result->instructions().start();
}

namespace {

// Whether functions of {module} should be compiled lazily (flag-controlled;
// asm.js modules have their own flag).
bool compile_lazy(const WasmModule* module) {
  return FLAG_wasm_lazy_compilation ||
         (FLAG_asm_wasm_lazy_compilation && module->is_asm_js());
}

// Flushes the instruction cache for all code objects of {native_module}.
void FlushICache(const wasm::NativeModule* native_module) {
  for (uint32_t i = 0, e = native_module->FunctionCount(); i < e; ++i) {
    const wasm::WasmCode* code = native_module->GetCode(i);
    if (code == nullptr) continue;
    Assembler::FlushICache(code->instructions().start(),
                           code->instructions().size());
  }
}

// Flushes the instruction cache for every Code object in {functions}
// (non-Code slots are skipped).
void FlushICache(Handle functions) {
  for (int i = 0, e = functions->length(); i < e; ++i) {
    if (!functions->get(i)->IsCode()) continue;
    Code* code = Code::cast(functions->get(i));
    Assembler::FlushICache(code->raw_instruction_start(),
                           code->raw_instruction_size());
  }
}

// Returns a raw pointer {offset} bytes into {buffer}'s backing store.
byte* raw_buffer_ptr(MaybeHandle buffer, int offset) {
  return static_cast(buffer.ToHandleChecked()->backing_store()) + offset;
}

// Counter updates for generated (non-wasm) Code objects.
void RecordStats(const Code* code, Counters* counters) {
  counters->wasm_generated_code_size()->Increment(code->body_size());
  counters->wasm_reloc_size()->Increment(code->relocation_info()->length());
}

// Counter updates for wasm code.
void RecordStats(const wasm::WasmCode* code, Counters* counters) {
  counters->wasm_generated_code_size()->Increment(
      static_cast(code->instructions().size()));
  counters->wasm_reloc_size()->Increment(
      static_cast(code->reloc_info().size()));
}

// Counter updates for every Code object in a fixed array.
void RecordStats(Handle
    functions, Counters* counters) {
  DisallowHeapAllocation no_gc;
  for (int i = 0; i < functions->length(); ++i) {
    Object* val = functions->get(i);
    if (val->IsCode()) RecordStats(Code::cast(val), counters);
  }
}

// Counter updates for all code objects of a native module.
void RecordStats(const wasm::NativeModule* native_module, Counters* counters) {
  for (uint32_t i = 0, e = native_module->FunctionCount(); i < e; ++i) {
    const wasm::WasmCode* code = native_module->GetCode(i);
    if (code != nullptr) RecordStats(code, counters);
  }
}

// Overflow-safe bounds check: [offset, offset+size) must lie within
// [0, upper). The second conjunct guards against wrap-around.
bool in_bounds(uint32_t offset, size_t size, size_t upper) {
  return offset + size <= upper && offset + size >= offset;
}

// NOTE(review): template arguments stripped by extraction on this alias.
using WasmInstanceMap = IdentityMap, FreeStoreAllocationPolicy>;

double MonotonicallyIncreasingTimeInMs() {
  return V8::GetCurrentPlatform()->MonotonicallyIncreasingTime() *
         base::Time::kMillisecondsPerSecond;
}

// Creates a fresh ModuleEnv for {module} using the process-wide trap-handler
// setting.
std::unique_ptr CreateDefaultModuleEnv(
    Isolate* isolate, WasmModule* module) {
  // TODO(kschimpf): Add module-specific policy handling here (see v8:7143)?
  bool use_trap_handler = trap_handler::IsTrapHandlerEnabled();
  return base::make_unique(module, use_trap_handler);
}

Handle NewCompiledModule(Isolate* isolate, WasmModule* module,
                         Handle export_wrappers,
                         compiler::ModuleEnv* env) {
  Handle compiled_module = WasmCompiledModule::New(
      isolate, module, export_wrappers, env->use_trap_handler);
  return compiled_module;
}

// Upper bound on code memory usable for compilation: the code range if one
// is reserved, otherwise the code space capacity.
size_t GetMaxUsableMemorySize(Isolate* isolate) {
  return isolate->heap()->memory_allocator()->code_range()->valid()
             ? isolate->heap()->memory_allocator()->code_range()->size()
             : isolate->heap()->code_space()->Capacity();
}

// The CompilationUnitBuilder builds compilation units and stores them in an
// internal buffer. The buffer is moved into the working queue of the
// CompilationState when {Commit} is called.
class CompilationUnitBuilder {
 public:
  explicit CompilationUnitBuilder(NativeModule* native_module,
                                  compiler::ModuleEnv* module_env,
                                  Handle centry_stub)
      : native_module_(native_module),
        compilation_state_(native_module->compilation_state()),
        module_env_(module_env),
        centry_stub_(centry_stub) {}

  // Queues one function (its body given by {bytes} at {buffer_offset} in the
  // wire bytes) for compilation. Does not start compilation by itself.
  void AddUnit(const WasmFunction* function, uint32_t buffer_offset,
               Vector bytes, WasmName name) {
    units_.emplace_back(new compiler::WasmCompilationUnit(
        compilation_state_->isolate(), module_env_, native_module_,
        wasm::FunctionBody{function->sig, buffer_offset, bytes.begin(),
                           bytes.end()},
        name, function->func_index, centry_stub_,
        compiler::WasmCompilationUnit::GetDefaultCompilationMode(),
        compilation_state_->isolate()->async_counters().get()));
  }

  // Hands the buffered units to the CompilationState (which kicks off
  // background compilation). Returns false if there was nothing to commit.
  bool Commit() {
    if (units_.empty()) return false;
    compilation_state_->AddCompilationUnits(units_);
    units_.clear();
    return true;
  }

  void Clear() { units_.clear(); }

 private:
  NativeModule* native_module_;
  CompilationState* compilation_state_;
  compiler::ModuleEnv* module_env_;
  Handle centry_stub_;
  std::vector> units_;
};

// Run by each compilation task and by the main thread (i.e. in both
// foreground and background threads). The no_finisher_callback is called
// within the result_mutex_ lock when no finishing task is running, i.e. when
// the finisher_is_running_ flag is not set.
bool FetchAndExecuteCompilationUnit(CompilationState* compilation_state) {
  DisallowHeapAllocation no_allocation;
  DisallowHandleAllocation no_handles;
  DisallowHandleDereference no_deref;
  DisallowCodeDependencyChange no_dependency_change;

  std::unique_ptr unit = compilation_state->GetNextCompilationUnit();
  if (unit == nullptr) return false;

  unit->ExecuteCompilation();
  compilation_state->ScheduleUnitForFinishing(unit);

  return true;
}

// Number of functions that actually need compiling (imports and the
// flag-skipped prefix are excluded).
size_t GetNumFunctionsToCompile(const std::vector& functions,
                                compiler::ModuleEnv* module_env) {
  // TODO(kimanh): Remove, FLAG_skip_compiling_wasm_funcs: previously used for
  // debugging, and now not necessarily working anymore.
uint32_t start = module_env->module->num_imported_functions + FLAG_skip_compiling_wasm_funcs; uint32_t num_funcs = static_cast(functions.size()); uint32_t funcs_to_compile = start > num_funcs ? 0 : num_funcs - start; return funcs_to_compile; } void InitializeCompilationUnits(const std::vector& functions, const ModuleWireBytes& wire_bytes, compiler::ModuleEnv* module_env, Handle centry_stub, NativeModule* native_module) { uint32_t start = module_env->module->num_imported_functions + FLAG_skip_compiling_wasm_funcs; uint32_t num_funcs = static_cast(functions.size()); CompilationUnitBuilder builder(native_module, module_env, centry_stub); for (uint32_t i = start; i < num_funcs; ++i) { const WasmFunction* func = &functions[i]; uint32_t buffer_offset = func->code.offset(); Vector bytes(wire_bytes.start() + func->code.offset(), func->code.end_offset() - func->code.offset()); WasmName name = wire_bytes.GetName(func, module_env->module); DCHECK_NOT_NULL(native_module); builder.AddUnit(func, buffer_offset, bytes, name); } builder.Commit(); } void FinishCompilationUnits(CompilationState* compilation_state, ErrorThrower* thrower) { while (true) { if (compilation_state->failed()) break; std::unique_ptr unit = compilation_state->GetNextExecutedUnit(); if (unit == nullptr) break; wasm::WasmCode* result = unit->FinishCompilation(thrower); // Update the compilation state. compilation_state->OnFinishedUnit(NotifyCompilationCallback::kNoNotify); DCHECK_IMPLIES(result == nullptr, thrower->error()); if (result == nullptr) break; } if (!compilation_state->failed()) { compilation_state->RestartBackgroundTasks(); } } void CompileInParallel(Isolate* isolate, NativeModule* native_module, const ModuleWireBytes& wire_bytes, compiler::ModuleEnv* module_env, Handle centry_stub, ErrorThrower* thrower) { const WasmModule* module = module_env->module; // Data structures for the parallel compilation. 
//----------------------------------------------------------------------- // For parallel compilation: // 1) The main thread allocates a compilation unit for each wasm function // and stores them in the vector {compilation_units} within the // {compilation_state}. By adding units to the {compilation_state}, new // {BackgroundCompileTasks} instances are spawned which run on // the background threads. // 2.a) The background threads and the main thread pick one compilation // unit at a time and execute the parallel phase of the compilation // unit. After finishing the execution of the parallel phase, the // result is enqueued in {executed_units}. // 2.b) If {executed_units} contains a compilation unit, the main thread // dequeues it and finishes the compilation. // 3) After the parallel phase of all compilation units has started, the // main thread waits for all {BackgroundCompileTasks} instances to finish. // 4) The main thread finishes the compilation. // Turn on the {CanonicalHandleScope} so that the background threads can // use the node cache. CanonicalHandleScope canonical(isolate); CompilationState* compilation_state = native_module->compilation_state(); // Make sure that no foreground task is spawned for finishing // the compilation units. This foreground thread will be // responsible for finishing compilation. compilation_state->SetFinisherIsRunning(true); size_t functions_count = GetNumFunctionsToCompile(module->functions, module_env); compilation_state->SetNumberOfFunctionsToCompile(functions_count); // 1) The main thread allocates a compilation unit for each wasm function // and stores them in the vector {compilation_units} within the // {compilation_state}. By adding units to the {compilation_state}, new // {BackgroundCompileTask} instances are spawned which run on // background threads. 
InitializeCompilationUnits(module->functions, wire_bytes, module_env,
                             centry_stub, native_module);

  // 2.a) The background threads and the main thread pick one compilation
  //      unit at a time and execute the parallel phase of the compilation
  //      unit. After finishing the execution of the parallel phase, the
  //      result is enqueued in {executed_units}.
  //      The foreground task bypasses waiting on memory threshold, because
  //      its results will immediately be converted to code (below).
  while (FetchAndExecuteCompilationUnit(compilation_state)) {
    // 2.b) If {executed_units} contains a compilation unit, the main thread
    //      dequeues it and finishes the compilation unit. Compilation units
    //      are finished concurrently to the background threads to save
    //      memory.
    FinishCompilationUnits(compilation_state, thrower);
    if (compilation_state->failed()) break;
  }

  // 3) After the parallel phase of all compilation units has started, the
  //    main thread waits for all {BackgroundCompileTasks} instances to
  //    finish - which happens once they all realize there's no next work
  //    item to process. If compilation already failed, all background tasks
  //    have already been canceled in {FinishCompilationUnits}, and there are
  //    no units to finish.
  if (!compilation_state->failed()) {
    compilation_state->CancelAndWait();

    // 4) Finish all compilation units which have been executed while we
    //    waited.
    FinishCompilationUnits(compilation_state, thrower);
  }
}

// Compiles every declared function body one after the other on this thread.
// On the first failure a CompileError is reported via {thrower} and
// compilation stops.
void CompileSequentially(Isolate* isolate, NativeModule* native_module,
                         const ModuleWireBytes& wire_bytes,
                         compiler::ModuleEnv* module_env,
                         ErrorThrower* thrower) {
  DCHECK(!thrower->error());

  const WasmModule* module = module_env->module;
  for (uint32_t i = FLAG_skip_compiling_wasm_funcs;
       i < module->functions.size(); ++i) {
    const WasmFunction& func = module->functions[i];
    if (func.imported) continue;  // Imports are compiled at instantiation time.

    // Compile the function.
wasm::WasmCode* code = compiler::WasmCompilationUnit::CompileWasmFunction( native_module, thrower, isolate, wire_bytes, module_env, &func); if (code == nullptr) { TruncatedUserString<> name(wire_bytes.GetName(&func, module)); thrower->CompileError("Compilation of #%d:%.*s failed.", i, name.length(), name.start()); break; } } } void ValidateSequentially(Isolate* isolate, const ModuleWireBytes& wire_bytes, compiler::ModuleEnv* module_env, ErrorThrower* thrower) { DCHECK(!thrower->error()); const WasmModule* module = module_env->module; for (uint32_t i = 0; i < module->functions.size(); ++i) { const WasmFunction& func = module->functions[i]; if (func.imported) continue; const byte* base = wire_bytes.start(); FunctionBody body{func.sig, func.code.offset(), base + func.code.offset(), base + func.code.end_offset()}; DecodeResult result = VerifyWasmCodeWithStats( isolate->allocator(), module, body, module->is_wasm(), isolate->async_counters().get()); if (result.failed()) { TruncatedUserString<> name(wire_bytes.GetName(&func, module)); thrower->CompileError("Compiling function #%d:%.*s failed: %s @+%u", i, name.length(), name.start(), result.error_msg().c_str(), result.error_offset()); break; } } } MaybeHandle CompileToModuleObjectInternal( Isolate* isolate, ErrorThrower* thrower, std::unique_ptr module, const ModuleWireBytes& wire_bytes, Handle