summaryrefslogtreecommitdiff
path: root/deps/v8/src/wasm
diff options
context:
space:
mode:
authorAli Ijaz Sheikh <ofrobots@google.com>2016-03-01 08:58:05 -0800
committerAli Sheikh <ofrobots@lemonhope.roam.corp.google.com>2016-03-03 20:35:20 -0800
commit069e02ab47656b3efd1b6829c65856b2e1c2d1db (patch)
treeeb643e0a2e88fd64bb9fc927423458d2ae96c2db /deps/v8/src/wasm
parent8938355398c79f583a468284b768652d12ba9bc9 (diff)
downloadnode-new-069e02ab47656b3efd1b6829c65856b2e1c2d1db.tar.gz
deps: upgrade to V8 4.9.385.18
Pick up the current branch head for V8 4.9 https://github.com/v8/v8/commit/1ecba0f PR-URL: https://github.com/nodejs/node/pull/4722 Reviewed-By: Ben Noordhuis <info@bnoordhuis.nl> Reviewed-By: Michaël Zasso <mic.besace@gmail.com>
Diffstat (limited to 'deps/v8/src/wasm')
-rw-r--r--deps/v8/src/wasm/OWNERS5
-rw-r--r--deps/v8/src/wasm/asm-wasm-builder.cc1045
-rw-r--r--deps/v8/src/wasm/asm-wasm-builder.h33
-rw-r--r--deps/v8/src/wasm/ast-decoder.cc1583
-rw-r--r--deps/v8/src/wasm/ast-decoder.h116
-rw-r--r--deps/v8/src/wasm/decoder.h233
-rw-r--r--deps/v8/src/wasm/encoder.cc592
-rw-r--r--deps/v8/src/wasm/encoder.h157
-rw-r--r--deps/v8/src/wasm/module-decoder.cc547
-rw-r--r--deps/v8/src/wasm/module-decoder.h33
-rw-r--r--deps/v8/src/wasm/wasm-js.cc345
-rw-r--r--deps/v8/src/wasm/wasm-js.h27
-rw-r--r--deps/v8/src/wasm/wasm-macro-gen.h265
-rw-r--r--deps/v8/src/wasm/wasm-module.cc511
-rw-r--r--deps/v8/src/wasm/wasm-module.h192
-rw-r--r--deps/v8/src/wasm/wasm-opcodes.cc133
-rw-r--r--deps/v8/src/wasm/wasm-opcodes.h476
-rw-r--r--deps/v8/src/wasm/wasm-result.cc53
-rw-r--r--deps/v8/src/wasm/wasm-result.h116
19 files changed, 6462 insertions, 0 deletions
diff --git a/deps/v8/src/wasm/OWNERS b/deps/v8/src/wasm/OWNERS
new file mode 100644
index 0000000000..a9d24ade28
--- /dev/null
+++ b/deps/v8/src/wasm/OWNERS
@@ -0,0 +1,5 @@
+set noparent
+
+titzer@chromium.org
+bradnelson@chromium.org
+ahaas@chromium.org
diff --git a/deps/v8/src/wasm/asm-wasm-builder.cc b/deps/v8/src/wasm/asm-wasm-builder.cc
new file mode 100644
index 0000000000..30f84642f8
--- /dev/null
+++ b/deps/v8/src/wasm/asm-wasm-builder.cc
@@ -0,0 +1,1045 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/wasm/asm-wasm-builder.h"
+#include "src/wasm/wasm-macro-gen.h"
+#include "src/wasm/wasm-opcodes.h"
+
+#include "src/ast/ast.h"
+#include "src/ast/scopes.h"
+#include "src/codegen.h"
+#include "src/type-cache.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
// Visits a sub-node while guarding against parser-stack exhaustion: the AST
// visitor tracks overflow via HasStackOverflow(), and this macro bails out of
// the current visit method as soon as any recursive call has overflowed.
// Must only be used inside void member functions of the visitor.
#define RECURSE(call) \
  do { \
    DCHECK(!HasStackOverflow()); \
    call; \
    if (HasStackOverflow()) return; \
  } while (false)
+
+
+class AsmWasmBuilderImpl : public AstVisitor {
+ public:
+ AsmWasmBuilderImpl(Isolate* isolate, Zone* zone, FunctionLiteral* literal)
+ : local_variables_(HashMap::PointersMatch,
+ ZoneHashMap::kDefaultHashMapCapacity,
+ ZoneAllocationPolicy(zone)),
+ functions_(HashMap::PointersMatch, ZoneHashMap::kDefaultHashMapCapacity,
+ ZoneAllocationPolicy(zone)),
+ global_variables_(HashMap::PointersMatch,
+ ZoneHashMap::kDefaultHashMapCapacity,
+ ZoneAllocationPolicy(zone)),
+ in_function_(false),
+ is_set_op_(false),
+ marking_exported(false),
+ builder_(new (zone) WasmModuleBuilder(zone)),
+ current_function_builder_(nullptr),
+ literal_(literal),
+ isolate_(isolate),
+ zone_(zone),
+ cache_(TypeCache::Get()),
+ breakable_blocks_(zone),
+ block_size_(0),
+ init_function_index(0) {
+ InitializeAstVisitor(isolate);
+ }
+
+ void InitializeInitFunction() {
+ unsigned char init[] = "__init__";
+ init_function_index = builder_->AddFunction();
+ current_function_builder_ = builder_->FunctionAt(init_function_index);
+ current_function_builder_->SetName(init, 8);
+ current_function_builder_->ReturnType(kAstStmt);
+ current_function_builder_->Exported(1);
+ current_function_builder_ = nullptr;
+ }
+
+ void Compile() {
+ InitializeInitFunction();
+ RECURSE(VisitFunctionLiteral(literal_));
+ }
+
+ void VisitVariableDeclaration(VariableDeclaration* decl) {}
+
+ void VisitFunctionDeclaration(FunctionDeclaration* decl) {
+ DCHECK(!in_function_);
+ DCHECK(current_function_builder_ == nullptr);
+ uint16_t index = LookupOrInsertFunction(decl->proxy()->var());
+ current_function_builder_ = builder_->FunctionAt(index);
+ in_function_ = true;
+ RECURSE(Visit(decl->fun()));
+ in_function_ = false;
+ current_function_builder_ = nullptr;
+ local_variables_.Clear();
+ }
+
+ void VisitImportDeclaration(ImportDeclaration* decl) {}
+
+ void VisitExportDeclaration(ExportDeclaration* decl) {}
+
+ void VisitStatements(ZoneList<Statement*>* stmts) {
+ for (int i = 0; i < stmts->length(); ++i) {
+ Statement* stmt = stmts->at(i);
+ RECURSE(Visit(stmt));
+ if (stmt->IsJump()) break;
+ }
+ }
+
+ void VisitBlock(Block* stmt) {
+ if (stmt->statements()->length() == 1) {
+ ExpressionStatement* expr =
+ stmt->statements()->at(0)->AsExpressionStatement();
+ if (expr != nullptr) {
+ if (expr->expression()->IsAssignment()) {
+ RECURSE(VisitExpressionStatement(expr));
+ return;
+ }
+ }
+ }
+ DCHECK(in_function_);
+ BlockVisitor visitor(this, stmt->AsBreakableStatement(), kExprBlock, false,
+ static_cast<byte>(stmt->statements()->length()));
+ RECURSE(VisitStatements(stmt->statements()));
+ DCHECK(block_size_ >= 0);
+ }
+
  // RAII helper for emitting a wasm block/loop construct.  On construction it
  // pushes the statement onto the breakable-block stack, emits the opcode
  // followed by an editable placeholder immediate for the statement count,
  // and swaps in a fresh block_size_ counter.  On destruction it patches the
  // placeholder with the final statement count and restores the outer state.
  class BlockVisitor {
   private:
    int prev_block_size_;
    uint32_t index_;  // byte offset of the editable count immediate
    AsmWasmBuilderImpl* builder_;

   public:
    // |stmt| may be nullptr for anonymous blocks (e.g. switch-case bodies);
    // |is_loop| records whether a break/continue through this entry must
    // account for the loop's implicit extra block (see VisitBreakStatement).
    BlockVisitor(AsmWasmBuilderImpl* builder, BreakableStatement* stmt,
                 WasmOpcode opcode, bool is_loop, int initial_block_size)
        : builder_(builder) {
      builder_->breakable_blocks_.push_back(std::make_pair(stmt, is_loop));
      builder_->current_function_builder_->Emit(opcode);
      index_ = builder_->current_function_builder_->EmitEditableImmediate(0);
      prev_block_size_ = builder_->block_size_;
      builder_->block_size_ = initial_block_size;
    }
    ~BlockVisitor() {
      // Back-patch the real statement count now that the body was visited.
      builder_->current_function_builder_->EditImmediate(index_,
                                                         builder_->block_size_);
      builder_->block_size_ = prev_block_size_;
      builder_->breakable_blocks_.pop_back();
    }
  };
+
+ void VisitExpressionStatement(ExpressionStatement* stmt) {
+ RECURSE(Visit(stmt->expression()));
+ }
+
+ void VisitEmptyStatement(EmptyStatement* stmt) {}
+
+ void VisitEmptyParentheses(EmptyParentheses* paren) { UNREACHABLE(); }
+
+ void VisitIfStatement(IfStatement* stmt) {
+ DCHECK(in_function_);
+ if (stmt->HasElseStatement()) {
+ current_function_builder_->Emit(kExprIfElse);
+ } else {
+ current_function_builder_->Emit(kExprIf);
+ }
+ RECURSE(Visit(stmt->condition()));
+ if (stmt->HasThenStatement()) {
+ RECURSE(Visit(stmt->then_statement()));
+ } else {
+ current_function_builder_->Emit(kExprNop);
+ }
+ if (stmt->HasElseStatement()) {
+ RECURSE(Visit(stmt->else_statement()));
+ }
+ }
+
  // Lowers `continue` to a wasm br targeting the enclosing loop.  The branch
  // depth is computed by walking the breakable-block stack from the innermost
  // entry outwards: a loop entry contributes 2 to the depth (a wasm loop is
  // encoded as two nested blocks) and a plain block contributes 1.
  void VisitContinueStatement(ContinueStatement* stmt) {
    DCHECK(in_function_);
    DCHECK(stmt->target() != NULL);
    int i = static_cast<int>(breakable_blocks_.size()) - 1;
    int block_distance = 0;
    for (; i >= 0; i--) {
      auto elem = breakable_blocks_.at(i);
      if (elem.first == stmt->target()) {
        // continue must target a loop, never a plain block.
        DCHECK(elem.second);
        break;
      } else if (elem.second) {
        block_distance += 2;
      } else {
        block_distance += 1;
      }
    }
    DCHECK(i >= 0);
    current_function_builder_->EmitWithU8(kExprBr, block_distance);
    // br consumes a value expression; provide an explicit nop.
    current_function_builder_->Emit(kExprNop);
  }
+
  // Lowers `break` to a wasm br.  Depth accounting mirrors
  // VisitContinueStatement, except that when the target itself is a loop the
  // depth is bumped by one so the branch exits the loop's outer block rather
  // than re-entering the loop header.
  void VisitBreakStatement(BreakStatement* stmt) {
    DCHECK(in_function_);
    DCHECK(stmt->target() != NULL);
    int i = static_cast<int>(breakable_blocks_.size()) - 1;
    int block_distance = 0;
    for (; i >= 0; i--) {
      auto elem = breakable_blocks_.at(i);
      if (elem.first == stmt->target()) {
        if (elem.second) {
          // Breaking out of a loop: skip past the loop's extra block.
          block_distance++;
        }
        break;
      } else if (elem.second) {
        block_distance += 2;
      } else {
        block_distance += 1;
      }
    }
    DCHECK(i >= 0);
    current_function_builder_->EmitWithU8(kExprBr, block_distance);
    // br consumes a value expression; provide an explicit nop.
    current_function_builder_->Emit(kExprNop);
  }
+
+ void VisitReturnStatement(ReturnStatement* stmt) {
+ if (in_function_) {
+ current_function_builder_->Emit(kExprReturn);
+ } else {
+ marking_exported = true;
+ }
+ RECURSE(Visit(stmt->expression()));
+ if (!in_function_) {
+ marking_exported = false;
+ }
+ }
+
+ void VisitWithStatement(WithStatement* stmt) { UNREACHABLE(); }
+
+ void SetLocalTo(uint16_t index, int value) {
+ current_function_builder_->Emit(kExprSetLocal);
+ AddLeb128(index, true);
+ byte code[] = {WASM_I32(value)};
+ current_function_builder_->EmitCode(code, sizeof(code));
+ block_size_++;
+ }
+
+ void CompileCase(CaseClause* clause, uint16_t fall_through,
+ VariableProxy* tag) {
+ Literal* label = clause->label()->AsLiteral();
+ DCHECK(label != nullptr);
+ block_size_++;
+ current_function_builder_->Emit(kExprIf);
+ current_function_builder_->Emit(kExprI32Ior);
+ current_function_builder_->Emit(kExprI32Eq);
+ VisitVariableProxy(tag);
+ VisitLiteral(label);
+ current_function_builder_->Emit(kExprGetLocal);
+ AddLeb128(fall_through, true);
+ BlockVisitor visitor(this, nullptr, kExprBlock, false, 0);
+ SetLocalTo(fall_through, 1);
+ ZoneList<Statement*>* stmts = clause->statements();
+ block_size_ += stmts->length();
+ RECURSE(VisitStatements(stmts));
+ }
+
+ void VisitSwitchStatement(SwitchStatement* stmt) {
+ VariableProxy* tag = stmt->tag()->AsVariableProxy();
+ DCHECK(tag != NULL);
+ BlockVisitor visitor(this, stmt->AsBreakableStatement(), kExprBlock, false,
+ 0);
+ uint16_t fall_through = current_function_builder_->AddLocal(kAstI32);
+ SetLocalTo(fall_through, 0);
+
+ ZoneList<CaseClause*>* clauses = stmt->cases();
+ for (int i = 0; i < clauses->length(); ++i) {
+ CaseClause* clause = clauses->at(i);
+ if (!clause->is_default()) {
+ CompileCase(clause, fall_through, tag);
+ } else {
+ ZoneList<Statement*>* stmts = clause->statements();
+ block_size_ += stmts->length();
+ RECURSE(VisitStatements(stmts));
+ }
+ }
+ }
+
+ void VisitCaseClause(CaseClause* clause) { UNREACHABLE(); }
+
+ void VisitDoWhileStatement(DoWhileStatement* stmt) {
+ DCHECK(in_function_);
+ BlockVisitor visitor(this, stmt->AsBreakableStatement(), kExprLoop, true,
+ 2);
+ RECURSE(Visit(stmt->body()));
+ current_function_builder_->Emit(kExprIf);
+ RECURSE(Visit(stmt->cond()));
+ current_function_builder_->EmitWithU8(kExprBr, 0);
+ current_function_builder_->Emit(kExprNop);
+ }
+
+ void VisitWhileStatement(WhileStatement* stmt) {
+ DCHECK(in_function_);
+ BlockVisitor visitor(this, stmt->AsBreakableStatement(), kExprLoop, true,
+ 1);
+ current_function_builder_->Emit(kExprIf);
+ RECURSE(Visit(stmt->cond()));
+ current_function_builder_->EmitWithU8(kExprBr, 0);
+ RECURSE(Visit(stmt->body()));
+ }
+
+ void VisitForStatement(ForStatement* stmt) {
+ DCHECK(in_function_);
+ if (stmt->init() != nullptr) {
+ block_size_++;
+ RECURSE(Visit(stmt->init()));
+ }
+ BlockVisitor visitor(this, stmt->AsBreakableStatement(), kExprLoop, true,
+ 0);
+ if (stmt->cond() != nullptr) {
+ block_size_++;
+ current_function_builder_->Emit(kExprIf);
+ current_function_builder_->Emit(kExprBoolNot);
+ RECURSE(Visit(stmt->cond()));
+ current_function_builder_->EmitWithU8(kExprBr, 1);
+ current_function_builder_->Emit(kExprNop);
+ }
+ if (stmt->body() != nullptr) {
+ block_size_++;
+ RECURSE(Visit(stmt->body()));
+ }
+ if (stmt->next() != nullptr) {
+ block_size_++;
+ RECURSE(Visit(stmt->next()));
+ }
+ block_size_++;
+ current_function_builder_->EmitWithU8(kExprBr, 0);
+ current_function_builder_->Emit(kExprNop);
+ }
+
+ void VisitForInStatement(ForInStatement* stmt) { UNREACHABLE(); }
+
+ void VisitForOfStatement(ForOfStatement* stmt) { UNREACHABLE(); }
+
+ void VisitTryCatchStatement(TryCatchStatement* stmt) { UNREACHABLE(); }
+
+ void VisitTryFinallyStatement(TryFinallyStatement* stmt) { UNREACHABLE(); }
+
+ void VisitDebuggerStatement(DebuggerStatement* stmt) { UNREACHABLE(); }
+
+ void VisitFunctionLiteral(FunctionLiteral* expr) {
+ Scope* scope = expr->scope();
+ if (in_function_) {
+ if (expr->bounds().lower->IsFunction()) {
+ Type::FunctionType* func_type = expr->bounds().lower->AsFunction();
+ LocalType return_type = TypeFrom(func_type->Result());
+ current_function_builder_->ReturnType(return_type);
+ for (int i = 0; i < expr->parameter_count(); i++) {
+ LocalType type = TypeFrom(func_type->Parameter(i));
+ DCHECK(type != kAstStmt);
+ LookupOrInsertLocal(scope->parameter(i), type);
+ }
+ } else {
+ UNREACHABLE();
+ }
+ }
+ RECURSE(VisitDeclarations(scope->declarations()));
+ RECURSE(VisitStatements(expr->body()));
+ }
+
+ void VisitNativeFunctionLiteral(NativeFunctionLiteral* expr) {
+ UNREACHABLE();
+ }
+
+ void VisitConditional(Conditional* expr) {
+ DCHECK(in_function_);
+ current_function_builder_->Emit(kExprIfElse);
+ RECURSE(Visit(expr->condition()));
+ RECURSE(Visit(expr->then_expression()));
+ RECURSE(Visit(expr->else_expression()));
+ }
+
+ void VisitVariableProxy(VariableProxy* expr) {
+ if (in_function_) {
+ Variable* var = expr->var();
+ if (var->is_function()) {
+ DCHECK(!is_set_op_);
+ std::vector<uint8_t> index =
+ UnsignedLEB128From(LookupOrInsertFunction(var));
+ current_function_builder_->EmitCode(
+ &index[0], static_cast<uint32_t>(index.size()));
+ } else {
+ if (is_set_op_) {
+ if (var->IsContextSlot()) {
+ current_function_builder_->Emit(kExprStoreGlobal);
+ } else {
+ current_function_builder_->Emit(kExprSetLocal);
+ }
+ is_set_op_ = false;
+ } else {
+ if (var->IsContextSlot()) {
+ current_function_builder_->Emit(kExprLoadGlobal);
+ } else {
+ current_function_builder_->Emit(kExprGetLocal);
+ }
+ }
+ LocalType var_type = TypeOf(expr);
+ DCHECK(var_type != kAstStmt);
+ if (var->IsContextSlot()) {
+ AddLeb128(LookupOrInsertGlobal(var, var_type), false);
+ } else {
+ AddLeb128(LookupOrInsertLocal(var, var_type), true);
+ }
+ }
+ }
+ }
+
+ void VisitLiteral(Literal* expr) {
+ if (in_function_) {
+ if (expr->raw_value()->IsNumber()) {
+ LocalType type = TypeOf(expr);
+ switch (type) {
+ case kAstI32: {
+ int val = static_cast<int>(expr->raw_value()->AsNumber());
+ byte code[] = {WASM_I32(val)};
+ current_function_builder_->EmitCode(code, sizeof(code));
+ break;
+ }
+ case kAstF32: {
+ float val = static_cast<float>(expr->raw_value()->AsNumber());
+ byte code[] = {WASM_F32(val)};
+ current_function_builder_->EmitCode(code, sizeof(code));
+ break;
+ }
+ case kAstF64: {
+ double val = static_cast<double>(expr->raw_value()->AsNumber());
+ byte code[] = {WASM_F64(val)};
+ current_function_builder_->EmitCode(code, sizeof(code));
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ }
+ }
+ }
+
+ void VisitRegExpLiteral(RegExpLiteral* expr) { UNREACHABLE(); }
+
+ void VisitObjectLiteral(ObjectLiteral* expr) {
+ ZoneList<ObjectLiteralProperty*>* props = expr->properties();
+ for (int i = 0; i < props->length(); ++i) {
+ ObjectLiteralProperty* prop = props->at(i);
+ DCHECK(marking_exported);
+ VariableProxy* expr = prop->value()->AsVariableProxy();
+ DCHECK(expr != nullptr);
+ Variable* var = expr->var();
+ Literal* name = prop->key()->AsLiteral();
+ DCHECK(name != nullptr);
+ DCHECK(name->IsPropertyName());
+ const AstRawString* raw_name = name->AsRawPropertyName();
+ if (var->is_function()) {
+ uint16_t index = LookupOrInsertFunction(var);
+ builder_->FunctionAt(index)->Exported(1);
+ builder_->FunctionAt(index)
+ ->SetName(raw_name->raw_data(), raw_name->length());
+ }
+ }
+ }
+
+ void VisitArrayLiteral(ArrayLiteral* expr) { UNREACHABLE(); }
+
+ void LoadInitFunction() {
+ current_function_builder_ = builder_->FunctionAt(init_function_index);
+ in_function_ = true;
+ }
+
+ void UnLoadInitFunction() {
+ in_function_ = false;
+ current_function_builder_ = nullptr;
+ }
+
+ void VisitAssignment(Assignment* expr) {
+ bool in_init = false;
+ if (!in_function_) {
+ // TODO(bradnelson): Get rid of this.
+ if (TypeOf(expr->value()) == kAstStmt) {
+ return;
+ }
+ in_init = true;
+ LoadInitFunction();
+ }
+ BinaryOperation* value_op = expr->value()->AsBinaryOperation();
+ if (value_op != nullptr && MatchBinaryOperation(value_op) == kAsIs) {
+ VariableProxy* target_var = expr->target()->AsVariableProxy();
+ VariableProxy* effective_value_var = GetLeft(value_op)->AsVariableProxy();
+ if (target_var != nullptr && effective_value_var != nullptr &&
+ target_var->var() == effective_value_var->var()) {
+ block_size_--;
+ return;
+ }
+ }
+ is_set_op_ = true;
+ RECURSE(Visit(expr->target()));
+ DCHECK(!is_set_op_);
+ RECURSE(Visit(expr->value()));
+ if (in_init) {
+ UnLoadInitFunction();
+ }
+ }
+
+ void VisitYield(Yield* expr) { UNREACHABLE(); }
+
+ void VisitThrow(Throw* expr) { UNREACHABLE(); }
+
+ void VisitProperty(Property* expr) {
+ Expression* obj = expr->obj();
+ DCHECK(obj->bounds().lower == obj->bounds().upper);
+ TypeImpl<ZoneTypeConfig>* type = obj->bounds().lower;
+ MachineType mtype;
+ int size;
+ if (type->Is(cache_.kUint8Array)) {
+ mtype = MachineType::Uint8();
+ size = 1;
+ } else if (type->Is(cache_.kInt8Array)) {
+ mtype = MachineType::Int8();
+ size = 1;
+ } else if (type->Is(cache_.kUint16Array)) {
+ mtype = MachineType::Uint16();
+ size = 2;
+ } else if (type->Is(cache_.kInt16Array)) {
+ mtype = MachineType::Int16();
+ size = 2;
+ } else if (type->Is(cache_.kUint32Array)) {
+ mtype = MachineType::Uint32();
+ size = 4;
+ } else if (type->Is(cache_.kInt32Array)) {
+ mtype = MachineType::Int32();
+ size = 4;
+ } else if (type->Is(cache_.kUint32Array)) {
+ mtype = MachineType::Uint32();
+ size = 4;
+ } else if (type->Is(cache_.kFloat32Array)) {
+ mtype = MachineType::Float32();
+ size = 4;
+ } else if (type->Is(cache_.kFloat64Array)) {
+ mtype = MachineType::Float64();
+ size = 8;
+ } else {
+ UNREACHABLE();
+ }
+ current_function_builder_->EmitWithU8(
+ WasmOpcodes::LoadStoreOpcodeOf(mtype, is_set_op_),
+ WasmOpcodes::LoadStoreAccessOf(false));
+ is_set_op_ = false;
+ Literal* value = expr->key()->AsLiteral();
+ if (value) {
+ DCHECK(value->raw_value()->IsNumber());
+ DCHECK(kAstI32 == TypeOf(value));
+ int val = static_cast<int>(value->raw_value()->AsNumber());
+ byte code[] = {WASM_I32(val * size)};
+ current_function_builder_->EmitCode(code, sizeof(code));
+ return;
+ }
+ BinaryOperation* binop = expr->key()->AsBinaryOperation();
+ if (binop) {
+ DCHECK(Token::SAR == binop->op());
+ DCHECK(binop->right()->AsLiteral()->raw_value()->IsNumber());
+ DCHECK(kAstI32 == TypeOf(binop->right()->AsLiteral()));
+ DCHECK(size ==
+ 1 << static_cast<int>(
+ binop->right()->AsLiteral()->raw_value()->AsNumber()));
+ // Mask bottom bits to match asm.js behavior.
+ current_function_builder_->Emit(kExprI32And);
+ byte code[] = {WASM_I8(~(size - 1))};
+ current_function_builder_->EmitCode(code, sizeof(code));
+ RECURSE(Visit(binop->left()));
+ return;
+ }
+ UNREACHABLE();
+ }
+
+ void VisitCall(Call* expr) {
+ Call::CallType call_type = expr->GetCallType(isolate_);
+ switch (call_type) {
+ case Call::OTHER_CALL: {
+ DCHECK(in_function_);
+ current_function_builder_->Emit(kExprCallFunction);
+ RECURSE(Visit(expr->expression()));
+ ZoneList<Expression*>* args = expr->arguments();
+ for (int i = 0; i < args->length(); ++i) {
+ Expression* arg = args->at(i);
+ RECURSE(Visit(arg));
+ }
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ void VisitCallNew(CallNew* expr) { UNREACHABLE(); }
+
+ void VisitCallRuntime(CallRuntime* expr) { UNREACHABLE(); }
+
+ void VisitUnaryOperation(UnaryOperation* expr) {
+ switch (expr->op()) {
+ case Token::NOT: {
+ DCHECK(TypeOf(expr->expression()) == kAstI32);
+ current_function_builder_->Emit(kExprBoolNot);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ RECURSE(Visit(expr->expression()));
+ }
+
+ void VisitCountOperation(CountOperation* expr) { UNREACHABLE(); }
+
  // Returns true iff |expr| is an i32-typed binary operation of the form
  // `<anything> op <literal>` where the literal's numeric value equals |val|.
  // Used to recognize asm.js coercion idioms like `x | 0` and `x >>> 0`.
  bool MatchIntBinaryOperation(BinaryOperation* expr, Token::Value op,
                               int32_t val) {
    DCHECK(expr->right() != nullptr);
    if (expr->op() == op && expr->right()->IsLiteral() &&
        TypeOf(expr) == kAstI32) {
      Literal* right = expr->right()->AsLiteral();
      DCHECK(right->raw_value()->IsNumber());
      if (static_cast<int32_t>(right->raw_value()->AsNumber()) == val) {
        return true;
      }
    }
    return false;
  }
+
+ bool MatchDoubleBinaryOperation(BinaryOperation* expr, Token::Value op,
+ double val) {
+ DCHECK(expr->right() != nullptr);
+ if (expr->op() == op && expr->right()->IsLiteral() &&
+ TypeOf(expr) == kAstF64) {
+ Literal* right = expr->right()->AsLiteral();
+ DCHECK(right->raw_value()->IsNumber());
+ if (right->raw_value()->AsNumber() == val) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ enum ConvertOperation { kNone, kAsIs, kToInt, kToDouble };
+
+ ConvertOperation MatchOr(BinaryOperation* expr) {
+ if (MatchIntBinaryOperation(expr, Token::BIT_OR, 0)) {
+ return (TypeOf(expr->left()) == kAstI32) ? kAsIs : kToInt;
+ } else {
+ return kNone;
+ }
+ }
+
+ ConvertOperation MatchShr(BinaryOperation* expr) {
+ if (MatchIntBinaryOperation(expr, Token::SHR, 0)) {
+ // TODO(titzer): this probably needs to be kToUint
+ return (TypeOf(expr->left()) == kAstI32) ? kAsIs : kToInt;
+ } else {
+ return kNone;
+ }
+ }
+
  // Recognizes the asm.js double-negation coercion `(x ^ -1) ^ -1`
  // (0xffffffff converts to int32_t -1 in MatchIntBinaryOperation).  Returns
  // kToInt when the innermost operand still needs an int conversion, kAsIs
  // when it is already i32, and kNone when the pattern does not match.
  ConvertOperation MatchXor(BinaryOperation* expr) {
    if (MatchIntBinaryOperation(expr, Token::BIT_XOR, 0xffffffff)) {
      DCHECK(TypeOf(expr->left()) == kAstI32);
      DCHECK(TypeOf(expr->right()) == kAstI32);
      BinaryOperation* op = expr->left()->AsBinaryOperation();
      if (op != nullptr) {
        if (MatchIntBinaryOperation(op, Token::BIT_XOR, 0xffffffff)) {
          DCHECK(TypeOf(op->right()) == kAstI32);
          if (TypeOf(op->left()) != kAstI32) {
            return kToInt;
          } else {
            return kAsIs;
          }
        }
      }
    }
    return kNone;
  }
+
+ ConvertOperation MatchMul(BinaryOperation* expr) {
+ if (MatchDoubleBinaryOperation(expr, Token::MUL, 1.0)) {
+ DCHECK(TypeOf(expr->right()) == kAstF64);
+ if (TypeOf(expr->left()) != kAstF64) {
+ return kToDouble;
+ } else {
+ return kAsIs;
+ }
+ } else {
+ return kNone;
+ }
+ }
+
+ ConvertOperation MatchBinaryOperation(BinaryOperation* expr) {
+ switch (expr->op()) {
+ case Token::BIT_OR:
+ return MatchOr(expr);
+ case Token::SHR:
+ return MatchShr(expr);
+ case Token::BIT_XOR:
+ return MatchXor(expr);
+ case Token::MUL:
+ return MatchMul(expr);
+ default:
+ return kNone;
+ }
+ }
+
+// Work around Mul + Div being defined in PPC assembler.
+#ifdef Mul
+#undef Mul
+#endif
+#ifdef Div
+#undef Div
+#endif
+
+#define NON_SIGNED_BINOP(op) \
+ static WasmOpcode opcodes[] = { \
+ kExprI32##op, \
+ kExprI32##op, \
+ kExprF32##op, \
+ kExprF64##op \
+ }
+
+#define SIGNED_BINOP(op) \
+ static WasmOpcode opcodes[] = { \
+ kExprI32##op##S, \
+ kExprI32##op##U, \
+ kExprF32##op, \
+ kExprF64##op \
+ }
+
+#define NON_SIGNED_INT_BINOP(op) \
+ static WasmOpcode opcodes[] = { kExprI32##op, kExprI32##op }
+
+#define BINOP_CASE(token, op, V, ignore_sign) \
+ case token: { \
+ V(op); \
+ int type = TypeIndexOf(expr->left(), expr->right(), ignore_sign); \
+ current_function_builder_->Emit(opcodes[type]); \
+ break; \
+ }
+
+ Expression* GetLeft(BinaryOperation* expr) {
+ if (expr->op() == Token::BIT_XOR) {
+ return expr->left()->AsBinaryOperation()->left();
+ } else {
+ return expr->left();
+ }
+ }
+
+ void VisitBinaryOperation(BinaryOperation* expr) {
+ ConvertOperation convertOperation = MatchBinaryOperation(expr);
+ if (convertOperation == kToDouble) {
+ TypeIndex type = TypeIndexOf(expr->left());
+ if (type == kInt32 || type == kFixnum) {
+ current_function_builder_->Emit(kExprF64SConvertI32);
+ } else if (type == kUint32) {
+ current_function_builder_->Emit(kExprF64UConvertI32);
+ } else if (type == kFloat32) {
+ current_function_builder_->Emit(kExprF64ConvertF32);
+ } else {
+ UNREACHABLE();
+ }
+ RECURSE(Visit(expr->left()));
+ } else if (convertOperation == kToInt) {
+ TypeIndex type = TypeIndexOf(GetLeft(expr));
+ if (type == kFloat32) {
+ current_function_builder_->Emit(kExprI32SConvertF32);
+ } else if (type == kFloat64) {
+ current_function_builder_->Emit(kExprI32SConvertF64);
+ } else {
+ UNREACHABLE();
+ }
+ RECURSE(Visit(GetLeft(expr)));
+ } else if (convertOperation == kAsIs) {
+ RECURSE(Visit(GetLeft(expr)));
+ } else {
+ switch (expr->op()) {
+ BINOP_CASE(Token::ADD, Add, NON_SIGNED_BINOP, true);
+ BINOP_CASE(Token::SUB, Sub, NON_SIGNED_BINOP, true);
+ BINOP_CASE(Token::MUL, Mul, NON_SIGNED_BINOP, true);
+ BINOP_CASE(Token::DIV, Div, SIGNED_BINOP, false);
+ BINOP_CASE(Token::BIT_OR, Ior, NON_SIGNED_INT_BINOP, true);
+ BINOP_CASE(Token::BIT_XOR, Xor, NON_SIGNED_INT_BINOP, true);
+ BINOP_CASE(Token::SHL, Shl, NON_SIGNED_INT_BINOP, true);
+ BINOP_CASE(Token::SAR, ShrS, NON_SIGNED_INT_BINOP, true);
+ BINOP_CASE(Token::SHR, ShrU, NON_SIGNED_INT_BINOP, true);
+ case Token::MOD: {
+ TypeIndex type = TypeIndexOf(expr->left(), expr->right(), false);
+ if (type == kInt32) {
+ current_function_builder_->Emit(kExprI32RemS);
+ } else if (type == kUint32) {
+ current_function_builder_->Emit(kExprI32RemU);
+ } else if (type == kFloat64) {
+ ModF64(expr);
+ return;
+ } else {
+ UNREACHABLE();
+ }
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ RECURSE(Visit(expr->left()));
+ RECURSE(Visit(expr->right()));
+ }
+ }
+
+ void ModF64(BinaryOperation* expr) {
+ current_function_builder_->EmitWithU8(kExprBlock, 3);
+ uint16_t index_0 = current_function_builder_->AddLocal(kAstF64);
+ uint16_t index_1 = current_function_builder_->AddLocal(kAstF64);
+ current_function_builder_->Emit(kExprSetLocal);
+ AddLeb128(index_0, true);
+ RECURSE(Visit(expr->left()));
+ current_function_builder_->Emit(kExprSetLocal);
+ AddLeb128(index_1, true);
+ RECURSE(Visit(expr->right()));
+ current_function_builder_->Emit(kExprF64Sub);
+ current_function_builder_->Emit(kExprGetLocal);
+ AddLeb128(index_0, true);
+ current_function_builder_->Emit(kExprF64Mul);
+ current_function_builder_->Emit(kExprGetLocal);
+ AddLeb128(index_1, true);
+ // Use trunc instead of two casts
+ current_function_builder_->Emit(kExprF64SConvertI32);
+ current_function_builder_->Emit(kExprI32SConvertF64);
+ current_function_builder_->Emit(kExprF64Div);
+ current_function_builder_->Emit(kExprGetLocal);
+ AddLeb128(index_0, true);
+ current_function_builder_->Emit(kExprGetLocal);
+ AddLeb128(index_1, true);
+ }
+
  // Emits |index| as an unsigned LEB128 immediate into the current function.
  // For locals, the byte position of the immediate is also recorded via
  // pos_of_index — presumably so the function builder can relocate local
  // indices later (e.g. when parameters are renumbered); TODO confirm
  // against WasmFunctionBuilder::EmitCode in encoder.h.
  void AddLeb128(uint32_t index, bool is_local) {
    std::vector<uint8_t> index_vec = UnsignedLEB128From(index);
    if (is_local) {
      uint32_t pos_of_index[1] = {0};
      current_function_builder_->EmitCode(
          &index_vec[0], static_cast<uint32_t>(index_vec.size()), pos_of_index,
          1);
    } else {
      current_function_builder_->EmitCode(
          &index_vec[0], static_cast<uint32_t>(index_vec.size()));
    }
  }
+
+ void VisitCompareOperation(CompareOperation* expr) {
+ switch (expr->op()) {
+ BINOP_CASE(Token::EQ, Eq, NON_SIGNED_BINOP, false);
+ BINOP_CASE(Token::LT, Lt, SIGNED_BINOP, false);
+ BINOP_CASE(Token::LTE, Le, SIGNED_BINOP, false);
+ BINOP_CASE(Token::GT, Gt, SIGNED_BINOP, false);
+ BINOP_CASE(Token::GTE, Ge, SIGNED_BINOP, false);
+ default:
+ UNREACHABLE();
+ }
+ RECURSE(Visit(expr->left()));
+ RECURSE(Visit(expr->right()));
+ }
+
+#undef BINOP_CASE
+#undef NON_SIGNED_INT_BINOP
+#undef SIGNED_BINOP
+#undef NON_SIGNED_BINOP
+
+ enum TypeIndex {
+ kInt32 = 0,
+ kUint32 = 1,
+ kFloat32 = 2,
+ kFloat64 = 3,
+ kFixnum = 4
+ };
+
  // Computes the common TypeIndex of a binary operation's operands, used to
  // select an opcode from the BINOP_CASE tables.  A kFixnum operand adopts
  // the other side's type; if both are fixnums they default to kInt32.  With
  // |ignore_sign| set, kInt32 and kUint32 (indices 0 and 1) are allowed to
  // mix, since the opcode tables map both to the same instruction.
  TypeIndex TypeIndexOf(Expression* left, Expression* right, bool ignore_sign) {
    TypeIndex left_index = TypeIndexOf(left);
    TypeIndex right_index = TypeIndexOf(right);
    if (left_index == kFixnum) {
      left_index = right_index;
    }
    if (right_index == kFixnum) {
      right_index = left_index;
    }
    // Reached only when both operands were fixnums (the swaps above left
    // both values unchanged at kFixnum).
    if (left_index == kFixnum && right_index == kFixnum) {
      left_index = kInt32;
      right_index = kInt32;
    }
    DCHECK((left_index == right_index) ||
           (ignore_sign && (left_index <= 1) && (right_index <= 1)));
    return left_index;
  }
+
+ TypeIndex TypeIndexOf(Expression* expr) {
+ DCHECK(expr->bounds().lower == expr->bounds().upper);
+ TypeImpl<ZoneTypeConfig>* type = expr->bounds().lower;
+ if (type->Is(cache_.kAsmFixnum)) {
+ return kFixnum;
+ } else if (type->Is(cache_.kAsmSigned)) {
+ return kInt32;
+ } else if (type->Is(cache_.kAsmUnsigned)) {
+ return kUint32;
+ } else if (type->Is(cache_.kAsmInt)) {
+ return kInt32;
+ } else if (type->Is(cache_.kAsmFloat)) {
+ return kFloat32;
+ } else if (type->Is(cache_.kAsmDouble)) {
+ return kFloat64;
+ } else {
+ UNREACHABLE();
+ return kInt32;
+ }
+ }
+
+#undef CASE
+#undef NON_SIGNED_INT
+#undef SIGNED
+#undef NON_SIGNED
+
+ void VisitThisFunction(ThisFunction* expr) { UNREACHABLE(); }
+
+ void VisitDeclarations(ZoneList<Declaration*>* decls) {
+ for (int i = 0; i < decls->length(); ++i) {
+ Declaration* decl = decls->at(i);
+ RECURSE(Visit(decl));
+ }
+ }
+
+ void VisitClassLiteral(ClassLiteral* expr) { UNREACHABLE(); }
+
+ void VisitSpread(Spread* expr) { UNREACHABLE(); }
+
+ void VisitSuperPropertyReference(SuperPropertyReference* expr) {
+ UNREACHABLE();
+ }
+
+ void VisitSuperCallReference(SuperCallReference* expr) { UNREACHABLE(); }
+
+ void VisitSloppyBlockFunctionStatement(SloppyBlockFunctionStatement* expr) {
+ UNREACHABLE();
+ }
+
+ void VisitDoExpression(DoExpression* expr) { UNREACHABLE(); }
+
+ void VisitRewritableAssignmentExpression(
+ RewritableAssignmentExpression* expr) {
+ UNREACHABLE();
+ }
+
+ struct IndexContainer : public ZoneObject {
+ uint16_t index;
+ };
+
+ uint16_t LookupOrInsertLocal(Variable* v, LocalType type) {
+ DCHECK(current_function_builder_ != nullptr);
+ ZoneHashMap::Entry* entry =
+ local_variables_.Lookup(v, ComputePointerHash(v));
+ if (entry == nullptr) {
+ uint16_t index;
+ if (v->IsParameter()) {
+ index = current_function_builder_->AddParam(type);
+ } else {
+ index = current_function_builder_->AddLocal(type);
+ }
+ IndexContainer* container = new (zone()) IndexContainer();
+ container->index = index;
+ entry = local_variables_.LookupOrInsert(v, ComputePointerHash(v),
+ ZoneAllocationPolicy(zone()));
+ entry->value = container;
+ }
+ return (reinterpret_cast<IndexContainer*>(entry->value))->index;
+ }
+
+ uint16_t LookupOrInsertGlobal(Variable* v, LocalType type) {
+ ZoneHashMap::Entry* entry =
+ global_variables_.Lookup(v, ComputePointerHash(v));
+ if (entry == nullptr) {
+ uint16_t index =
+ builder_->AddGlobal(WasmOpcodes::MachineTypeFor(type), 0);
+ IndexContainer* container = new (zone()) IndexContainer();
+ container->index = index;
+ entry = global_variables_.LookupOrInsert(v, ComputePointerHash(v),
+ ZoneAllocationPolicy(zone()));
+ entry->value = container;
+ }
+ return (reinterpret_cast<IndexContainer*>(entry->value))->index;
+ }
+
+ uint16_t LookupOrInsertFunction(Variable* v) {
+ DCHECK(builder_ != nullptr);
+ ZoneHashMap::Entry* entry = functions_.Lookup(v, ComputePointerHash(v));
+ if (entry == nullptr) {
+ uint16_t index = builder_->AddFunction();
+ IndexContainer* container = new (zone()) IndexContainer();
+ container->index = index;
+ entry = functions_.LookupOrInsert(v, ComputePointerHash(v),
+ ZoneAllocationPolicy(zone()));
+ entry->value = container;
+ }
+ return (reinterpret_cast<IndexContainer*>(entry->value))->index;
+ }
+
+ LocalType TypeOf(Expression* expr) {
+ DCHECK(expr->bounds().lower == expr->bounds().upper);
+ return TypeFrom(expr->bounds().lower);
+ }
+
  // Maps an asm.js static type to the corresponding wasm local type.
  // Anything that is not an asm int/float/double (e.g. function types or
  // void) maps to kAstStmt, which callers use as a "no value" marker.
  LocalType TypeFrom(TypeImpl<ZoneTypeConfig>* type) {
    if (type->Is(cache_.kAsmInt)) {
      return kAstI32;
    } else if (type->Is(cache_.kAsmFloat)) {
      return kAstF32;
    } else if (type->Is(cache_.kAsmDouble)) {
      return kAstF64;
    } else {
      return kAstStmt;
    }
  }
+
+ Zone* zone() { return zone_; }
+
+ ZoneHashMap local_variables_;
+ ZoneHashMap functions_;
+ ZoneHashMap global_variables_;
+ bool in_function_;
+ bool is_set_op_;
+ bool marking_exported;
+ WasmModuleBuilder* builder_;
+ WasmFunctionBuilder* current_function_builder_;
+ FunctionLiteral* literal_;
+ Isolate* isolate_;
+ Zone* zone_;
+ TypeCache const& cache_;
+ ZoneVector<std::pair<BreakableStatement*, bool>> breakable_blocks_;
+ int block_size_;
+ uint16_t init_function_index;
+
+ DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(AsmWasmBuilderImpl);
+};
+
+AsmWasmBuilder::AsmWasmBuilder(Isolate* isolate, Zone* zone,
+ FunctionLiteral* literal)
+ : isolate_(isolate), zone_(zone), literal_(literal) {}
+
+// TODO(aseemgarg): probably should take zone (to write wasm to) as input so
+// that zone in constructor may be thrown away once wasm module is written.
// Builds the wasm module from the asm.js function literal and serializes it.
// The returned index and all intermediate data are allocated in zone_.
WasmModuleIndex* AsmWasmBuilder::Run() {
  AsmWasmBuilderImpl impl(isolate_, zone_, literal_);
  impl.Compile();
  WasmModuleWriter* writer = impl.builder_->Build(zone_);
  return writer->WriteTo(zone_);
}
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/wasm/asm-wasm-builder.h b/deps/v8/src/wasm/asm-wasm-builder.h
new file mode 100644
index 0000000000..cb568db77c
--- /dev/null
+++ b/deps/v8/src/wasm/asm-wasm-builder.h
@@ -0,0 +1,33 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_ASM_WASM_BUILDER_H_
+#define V8_WASM_ASM_WASM_BUILDER_H_
+
+#include "src/allocation.h"
+#include "src/wasm/encoder.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+
+class FunctionLiteral;
+
+namespace wasm {
+
+class AsmWasmBuilder {
+ public:
+ explicit AsmWasmBuilder(Isolate* isolate, Zone* zone, FunctionLiteral* root);
+ WasmModuleIndex* Run();
+
+ private:
+ Isolate* isolate_;
+ Zone* zone_;
+ FunctionLiteral* literal_;
+};
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_ASM_WASM_BUILDER_H_
diff --git a/deps/v8/src/wasm/ast-decoder.cc b/deps/v8/src/wasm/ast-decoder.cc
new file mode 100644
index 0000000000..ffb815771a
--- /dev/null
+++ b/deps/v8/src/wasm/ast-decoder.cc
@@ -0,0 +1,1583 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/platform/elapsed-timer.h"
+#include "src/signature.h"
+
+#include "src/flags.h"
+#include "src/handles.h"
+#include "src/zone-containers.h"
+
+#include "src/wasm/ast-decoder.h"
+#include "src/wasm/decoder.h"
+#include "src/wasm/wasm-module.h"
+#include "src/wasm/wasm-opcodes.h"
+
+#include "src/compiler/wasm-compiler.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+#if DEBUG
+#define TRACE(...) \
+ do { \
+ if (FLAG_trace_wasm_decoder) PrintF(__VA_ARGS__); \
+ } while (false)
+#else
+#define TRACE(...)
+#endif
+
+// The root of a decoded tree.
+struct Tree {
+ LocalType type; // tree type.
+ uint32_t count; // number of children.
+ const byte* pc; // start of the syntax tree.
+ TFNode* node; // node in the TurboFan graph.
+ Tree* children[1]; // pointers to children.
+
+ WasmOpcode opcode() const { return static_cast<WasmOpcode>(*pc); }
+};
+
+
+// A production represents an incomplete decoded tree in the LR decoder.
+struct Production {
+ Tree* tree; // the root of the syntax tree.
+ int index; // the current index into the children of the tree.
+
+ WasmOpcode opcode() const { return static_cast<WasmOpcode>(*pc()); }
+ const byte* pc() const { return tree->pc; }
+ bool done() const { return index >= static_cast<int>(tree->count); }
+ Tree* last() const { return index > 0 ? tree->children[index - 1] : nullptr; }
+};
+
+
+// An SsaEnv environment carries the current local variable renaming
+// as well as the current effect and control dependency in the TF graph.
+// It maintains a control state that tracks whether the environment
+// is reachable, has reached a control end, or has been merged.
+struct SsaEnv {
+ enum State { kControlEnd, kUnreachable, kReached, kMerged };
+
+ State state;
+ TFNode* control;
+ TFNode* effect;
+ TFNode** locals;
+
+ bool go() { return state >= kReached; }
+ void Kill(State new_state = kControlEnd) {
+ state = new_state;
+ locals = nullptr;
+ control = nullptr;
+ effect = nullptr;
+ }
+};
+
+
+// An entry in the stack of blocks during decoding.
+struct Block {
+ SsaEnv* ssa_env; // SSA renaming environment.
+ int stack_depth; // production stack depth.
+};
+
+
+// An entry in the stack of ifs during decoding.
+struct IfEnv {
+ SsaEnv* false_env;
+ SsaEnv* merge_env;
+ SsaEnv** case_envs;
+};
+
+
+// Macros that build nodes only if there is a graph and the current SSA
+// environment is reachable from start. This avoids problems with malformed
+// TF graphs when decoding inputs that have unreachable code.
+#define BUILD(func, ...) (build() ? builder_->func(__VA_ARGS__) : nullptr)
+#define BUILD0(func) (build() ? builder_->func() : nullptr)
+
+
+// A shift-reduce-parser strategy for decoding Wasm code that uses an explicit
+// shift-reduce strategy with multiple internal stacks.
+class LR_WasmDecoder : public Decoder {
+ public:
+ LR_WasmDecoder(Zone* zone, TFBuilder* builder)
+ : Decoder(nullptr, nullptr),
+ zone_(zone),
+ builder_(builder),
+ trees_(zone),
+ stack_(zone),
+ blocks_(zone),
+ ifs_(zone) {}
+
+ TreeResult Decode(FunctionEnv* function_env, const byte* base, const byte* pc,
+ const byte* end) {
+ base::ElapsedTimer decode_timer;
+ if (FLAG_trace_wasm_decode_time) {
+ decode_timer.Start();
+ }
+ trees_.clear();
+ stack_.clear();
+ blocks_.clear();
+ ifs_.clear();
+
+ if (end < pc) {
+ error(pc, "function body end < start");
+ return result_;
+ }
+
+ base_ = base;
+ Reset(pc, end);
+ function_env_ = function_env;
+
+ InitSsaEnv();
+ DecodeFunctionBody();
+
+ Tree* tree = nullptr;
+ if (ok()) {
+ if (ssa_env_->go()) {
+ if (stack_.size() > 0) {
+ error(stack_.back().pc(), end, "fell off end of code");
+ }
+ AddImplicitReturnAtEnd();
+ }
+ if (trees_.size() == 0) {
+ if (function_env_->sig->return_count() > 0) {
+ error(start_, "no trees created");
+ }
+ } else {
+ tree = trees_[0];
+ }
+ }
+
+ if (ok()) {
+ if (FLAG_trace_wasm_decode_time) {
+ double ms = decode_timer.Elapsed().InMillisecondsF();
+ PrintF(" - decoding took %0.3f ms\n", ms);
+ }
+ TRACE("wasm-decode ok\n\n");
+ } else {
+ TRACE("wasm-error module+%-6d func+%d: %s\n\n", baserel(error_pc_),
+ startrel(error_pc_), error_msg_.get());
+ }
+ return toResult(tree);
+ }
+
+ private:
+ static const size_t kErrorMsgSize = 128;
+
+ Zone* zone_;
+ TFBuilder* builder_;
+ const byte* base_;
+ TreeResult result_;
+
+ SsaEnv* ssa_env_;
+ FunctionEnv* function_env_;
+
+ ZoneVector<Tree*> trees_;
+ ZoneVector<Production> stack_;
+ ZoneVector<Block> blocks_;
+ ZoneVector<IfEnv> ifs_;
+
+ inline bool build() { return builder_ && ssa_env_->go(); }
+
+ void InitSsaEnv() {
+ FunctionSig* sig = function_env_->sig;
+ int param_count = static_cast<int>(sig->parameter_count());
+ TFNode* start = nullptr;
+ SsaEnv* ssa_env = reinterpret_cast<SsaEnv*>(zone_->New(sizeof(SsaEnv)));
+ size_t size = sizeof(TFNode*) * EnvironmentCount();
+ ssa_env->state = SsaEnv::kReached;
+ ssa_env->locals =
+ size > 0 ? reinterpret_cast<TFNode**>(zone_->New(size)) : nullptr;
+
+ int pos = 0;
+ if (builder_) {
+ start = builder_->Start(param_count + 1);
+ // Initialize parameters.
+ for (int i = 0; i < param_count; i++) {
+ ssa_env->locals[pos++] = builder_->Param(i, sig->GetParam(i));
+ }
+ // Initialize int32 locals.
+ if (function_env_->local_int32_count > 0) {
+ TFNode* zero = builder_->Int32Constant(0);
+ for (uint32_t i = 0; i < function_env_->local_int32_count; i++) {
+ ssa_env->locals[pos++] = zero;
+ }
+ }
+ // Initialize int64 locals.
+ if (function_env_->local_int64_count > 0) {
+ TFNode* zero = builder_->Int64Constant(0);
+ for (uint32_t i = 0; i < function_env_->local_int64_count; i++) {
+ ssa_env->locals[pos++] = zero;
+ }
+ }
+ // Initialize float32 locals.
+ if (function_env_->local_float32_count > 0) {
+ TFNode* zero = builder_->Float32Constant(0);
+ for (uint32_t i = 0; i < function_env_->local_float32_count; i++) {
+ ssa_env->locals[pos++] = zero;
+ }
+ }
+ // Initialize float64 locals.
+ if (function_env_->local_float64_count > 0) {
+ TFNode* zero = builder_->Float64Constant(0);
+ for (uint32_t i = 0; i < function_env_->local_float64_count; i++) {
+ ssa_env->locals[pos++] = zero;
+ }
+ }
+ DCHECK_EQ(function_env_->total_locals, pos);
+ DCHECK_EQ(EnvironmentCount(), pos);
+ builder_->set_module(function_env_->module);
+ }
+ ssa_env->control = start;
+ ssa_env->effect = start;
+ SetEnv("initial", ssa_env);
+ }
+
+ void Leaf(LocalType type, TFNode* node = nullptr) {
+ size_t size = sizeof(Tree);
+ Tree* tree = reinterpret_cast<Tree*>(zone_->New(size));
+ tree->type = type;
+ tree->count = 0;
+ tree->pc = pc_;
+ tree->node = node;
+ tree->children[0] = nullptr;
+ Reduce(tree);
+ }
+
+ void Shift(LocalType type, uint32_t count) {
+ size_t size =
+ sizeof(Tree) + (count == 0 ? 0 : ((count - 1) * sizeof(Tree*)));
+ Tree* tree = reinterpret_cast<Tree*>(zone_->New(size));
+ tree->type = type;
+ tree->count = count;
+ tree->pc = pc_;
+ tree->node = nullptr;
+ for (uint32_t i = 0; i < count; i++) tree->children[i] = nullptr;
+ if (count == 0) {
+ Production p = {tree, 0};
+ Reduce(&p);
+ Reduce(tree);
+ } else {
+ stack_.push_back({tree, 0});
+ }
+ }
+
+ void Reduce(Tree* tree) {
+ while (true) {
+ if (stack_.size() == 0) {
+ trees_.push_back(tree);
+ break;
+ }
+ Production* p = &stack_.back();
+ p->tree->children[p->index++] = tree;
+ Reduce(p);
+ if (p->done()) {
+ tree = p->tree;
+ stack_.pop_back();
+ } else {
+ break;
+ }
+ }
+ }
+
+ char* indentation() {
+ static const int kMaxIndent = 64;
+ static char bytes[kMaxIndent + 1];
+ for (int i = 0; i < kMaxIndent; i++) bytes[i] = ' ';
+ bytes[kMaxIndent] = 0;
+ if (stack_.size() < kMaxIndent / 2) {
+ bytes[stack_.size() * 2] = 0;
+ }
+ return bytes;
+ }
+
+ // Decodes the body of a function, producing reduced trees into {result}.
+ void DecodeFunctionBody() {
+ TRACE("wasm-decode %p...%p (%d bytes) %s\n",
+ reinterpret_cast<const void*>(start_),
+ reinterpret_cast<const void*>(limit_),
+ static_cast<int>(limit_ - start_), builder_ ? "graph building" : "");
+
+ if (pc_ >= limit_) return; // Nothing to do.
+
+ while (true) { // decoding loop.
+ int len = 1;
+ WasmOpcode opcode = static_cast<WasmOpcode>(*pc_);
+ TRACE("wasm-decode module+%-6d %s func+%d: 0x%02x %s\n", baserel(pc_),
+ indentation(), startrel(pc_), opcode,
+ WasmOpcodes::OpcodeName(opcode));
+
+ FunctionSig* sig = WasmOpcodes::Signature(opcode);
+ if (sig) {
+ // A simple expression with a fixed signature.
+ Shift(sig->GetReturn(), static_cast<uint32_t>(sig->parameter_count()));
+ pc_ += len;
+ if (pc_ >= limit_) {
+ // End of code reached or exceeded.
+ if (pc_ > limit_ && ok()) {
+ error("Beyond end of code");
+ }
+ return;
+ }
+ continue; // back to decoding loop.
+ }
+
+ switch (opcode) {
+ case kExprNop:
+ Leaf(kAstStmt);
+ break;
+ case kExprBlock: {
+ int length = Operand<uint8_t>(pc_);
+ if (length < 1) {
+ Leaf(kAstStmt);
+ } else {
+ Shift(kAstEnd, length);
+ // The break environment is the outer environment.
+ SsaEnv* break_env = ssa_env_;
+ PushBlock(break_env);
+ SetEnv("block:start", Steal(break_env));
+ }
+ len = 2;
+ break;
+ }
+ case kExprLoop: {
+ int length = Operand<uint8_t>(pc_);
+ if (length < 1) {
+ Leaf(kAstStmt);
+ } else {
+ Shift(kAstEnd, length);
+ // The break environment is the outer environment.
+ SsaEnv* break_env = ssa_env_;
+ PushBlock(break_env);
+ SsaEnv* cont_env = Steal(break_env);
+ // The continue environment is the inner environment.
+ PrepareForLoop(cont_env);
+ SetEnv("loop:start", Split(cont_env));
+ if (ssa_env_->go()) ssa_env_->state = SsaEnv::kReached;
+ PushBlock(cont_env);
+ blocks_.back().stack_depth = -1; // no production for inner block.
+ }
+ len = 2;
+ break;
+ }
+ case kExprIf:
+ Shift(kAstStmt, 2);
+ break;
+ case kExprIfElse:
+ Shift(kAstEnd, 3); // Result type is typeof(x) in {c ? x : y}.
+ break;
+ case kExprSelect:
+ Shift(kAstStmt, 3); // Result type is typeof(x) in {c ? x : y}.
+ break;
+ case kExprBr: {
+ uint32_t depth = Operand<uint8_t>(pc_);
+ Shift(kAstEnd, 1);
+ if (depth >= blocks_.size()) {
+ error("improperly nested branch");
+ }
+ len = 2;
+ break;
+ }
+ case kExprBrIf: {
+ uint32_t depth = Operand<uint8_t>(pc_);
+ Shift(kAstStmt, 2);
+ if (depth >= blocks_.size()) {
+ error("improperly nested conditional branch");
+ }
+ len = 2;
+ break;
+ }
+ case kExprTableSwitch: {
+ if (!checkAvailable(5)) {
+ error("expected #tableswitch <cases> <table>, fell off end");
+ break;
+ }
+ uint16_t case_count = *reinterpret_cast<const uint16_t*>(pc_ + 1);
+ uint16_t table_count = *reinterpret_cast<const uint16_t*>(pc_ + 3);
+ len = 5 + table_count * 2;
+
+ if (table_count == 0) {
+ error("tableswitch with 0 entries");
+ break;
+ }
+
+ if (!checkAvailable(len)) {
+ error("expected #tableswitch <cases> <table>, fell off end");
+ break;
+ }
+
+ Shift(kAstEnd, 1 + case_count);
+
+ // Verify table.
+ for (int i = 0; i < table_count; i++) {
+ uint16_t target =
+ *reinterpret_cast<const uint16_t*>(pc_ + 5 + i * 2);
+ if (target >= 0x8000) {
+ size_t depth = target - 0x8000;
+ if (depth > blocks_.size()) {
+ error(pc_ + 5 + i * 2, "improper branch in tableswitch");
+ }
+ } else {
+ if (target >= case_count) {
+ error(pc_ + 5 + i * 2, "invalid case target in tableswitch");
+ }
+ }
+ }
+ break;
+ }
+ case kExprReturn: {
+ int count = static_cast<int>(function_env_->sig->return_count());
+ if (count == 0) {
+ BUILD(Return, 0, builder_->Buffer(0));
+ ssa_env_->Kill();
+ Leaf(kAstEnd);
+ } else {
+ Shift(kAstEnd, count);
+ }
+ break;
+ }
+ case kExprUnreachable: {
+ BUILD0(Unreachable);
+ ssa_env_->Kill(SsaEnv::kControlEnd);
+ Leaf(kAstEnd, nullptr);
+ break;
+ }
+ case kExprI8Const: {
+ int32_t value = Operand<int8_t>(pc_);
+ Leaf(kAstI32, BUILD(Int32Constant, value));
+ len = 2;
+ break;
+ }
+ case kExprI32Const: {
+ int32_t value = Operand<int32_t>(pc_);
+ Leaf(kAstI32, BUILD(Int32Constant, value));
+ len = 5;
+ break;
+ }
+ case kExprI64Const: {
+ int64_t value = Operand<int64_t>(pc_);
+ Leaf(kAstI64, BUILD(Int64Constant, value));
+ len = 9;
+ break;
+ }
+ case kExprF32Const: {
+ float value = Operand<float>(pc_);
+ Leaf(kAstF32, BUILD(Float32Constant, value));
+ len = 5;
+ break;
+ }
+ case kExprF64Const: {
+ double value = Operand<double>(pc_);
+ Leaf(kAstF64, BUILD(Float64Constant, value));
+ len = 9;
+ break;
+ }
+ case kExprGetLocal: {
+ uint32_t index;
+ LocalType type = LocalOperand(pc_, &index, &len);
+ TFNode* val =
+ build() && type != kAstStmt ? ssa_env_->locals[index] : nullptr;
+ Leaf(type, val);
+ break;
+ }
+ case kExprSetLocal: {
+ uint32_t index;
+ LocalType type = LocalOperand(pc_, &index, &len);
+ Shift(type, 1);
+ break;
+ }
+ case kExprLoadGlobal: {
+ uint32_t index;
+ LocalType type = GlobalOperand(pc_, &index, &len);
+ Leaf(type, BUILD(LoadGlobal, index));
+ break;
+ }
+ case kExprStoreGlobal: {
+ uint32_t index;
+ LocalType type = GlobalOperand(pc_, &index, &len);
+ Shift(type, 1);
+ break;
+ }
+ case kExprI32LoadMem8S:
+ case kExprI32LoadMem8U:
+ case kExprI32LoadMem16S:
+ case kExprI32LoadMem16U:
+ case kExprI32LoadMem:
+ len = DecodeLoadMem(pc_, kAstI32);
+ break;
+ case kExprI64LoadMem8S:
+ case kExprI64LoadMem8U:
+ case kExprI64LoadMem16S:
+ case kExprI64LoadMem16U:
+ case kExprI64LoadMem32S:
+ case kExprI64LoadMem32U:
+ case kExprI64LoadMem:
+ len = DecodeLoadMem(pc_, kAstI64);
+ break;
+ case kExprF32LoadMem:
+ len = DecodeLoadMem(pc_, kAstF32);
+ break;
+ case kExprF64LoadMem:
+ len = DecodeLoadMem(pc_, kAstF64);
+ break;
+ case kExprI32StoreMem8:
+ case kExprI32StoreMem16:
+ case kExprI32StoreMem:
+ len = DecodeStoreMem(pc_, kAstI32);
+ break;
+ case kExprI64StoreMem8:
+ case kExprI64StoreMem16:
+ case kExprI64StoreMem32:
+ case kExprI64StoreMem:
+ len = DecodeStoreMem(pc_, kAstI64);
+ break;
+ case kExprF32StoreMem:
+ len = DecodeStoreMem(pc_, kAstF32);
+ break;
+ case kExprF64StoreMem:
+ len = DecodeStoreMem(pc_, kAstF64);
+ break;
+ case kExprMemorySize:
+ Leaf(kAstI32, BUILD(MemSize, 0));
+ break;
+ case kExprGrowMemory:
+ Shift(kAstI32, 1);
+ break;
+ case kExprCallFunction: {
+ uint32_t unused;
+ FunctionSig* sig = FunctionSigOperand(pc_, &unused, &len);
+ if (sig) {
+ LocalType type =
+ sig->return_count() == 0 ? kAstStmt : sig->GetReturn();
+ Shift(type, static_cast<int>(sig->parameter_count()));
+ } else {
+ Leaf(kAstI32); // error
+ }
+ break;
+ }
+ case kExprCallIndirect: {
+ uint32_t unused;
+ FunctionSig* sig = SigOperand(pc_, &unused, &len);
+ if (sig) {
+ LocalType type =
+ sig->return_count() == 0 ? kAstStmt : sig->GetReturn();
+ Shift(type, static_cast<int>(1 + sig->parameter_count()));
+ } else {
+ Leaf(kAstI32); // error
+ }
+ break;
+ }
+ default:
+ error("Invalid opcode");
+ return;
+ }
+ pc_ += len;
+ if (pc_ >= limit_) {
+ // End of code reached or exceeded.
+ if (pc_ > limit_ && ok()) {
+ error("Beyond end of code");
+ }
+ return;
+ }
+ }
+ }
+
+ void PushBlock(SsaEnv* ssa_env) {
+ blocks_.push_back({ssa_env, static_cast<int>(stack_.size() - 1)});
+ }
+
+ int DecodeLoadMem(const byte* pc, LocalType type) {
+ int length = 2;
+ uint32_t offset;
+ MemoryAccessOperand(pc, &length, &offset);
+ Shift(type, 1);
+ return length;
+ }
+
+ int DecodeStoreMem(const byte* pc, LocalType type) {
+ int length = 2;
+ uint32_t offset;
+ MemoryAccessOperand(pc, &length, &offset);
+ Shift(type, 2);
+ return length;
+ }
+
+ void AddImplicitReturnAtEnd() {
+ int retcount = static_cast<int>(function_env_->sig->return_count());
+ if (retcount == 0) {
+ BUILD0(ReturnVoid);
+ return;
+ }
+
+ if (static_cast<int>(trees_.size()) < retcount) {
+ error(limit_, nullptr,
+ "ImplicitReturn expects %d arguments, only %d remain", retcount,
+ static_cast<int>(trees_.size()));
+ return;
+ }
+
+ TRACE("wasm-decode implicit return of %d args\n", retcount);
+
+ TFNode** buffer = BUILD(Buffer, retcount);
+ for (int index = 0; index < retcount; index++) {
+ Tree* tree = trees_[trees_.size() - 1 - index];
+ if (buffer) buffer[index] = tree->node;
+ LocalType expected = function_env_->sig->GetReturn(index);
+ if (tree->type != expected) {
+ error(limit_, tree->pc,
+ "ImplicitReturn[%d] expected type %s, found %s of type %s", index,
+ WasmOpcodes::TypeName(expected),
+ WasmOpcodes::OpcodeName(tree->opcode()),
+ WasmOpcodes::TypeName(tree->type));
+ return;
+ }
+ }
+
+ BUILD(Return, retcount, buffer);
+ }
+
+ int baserel(const byte* ptr) {
+ return base_ ? static_cast<int>(ptr - base_) : 0;
+ }
+
+ int startrel(const byte* ptr) { return static_cast<int>(ptr - start_); }
+
+ void Reduce(Production* p) {
+ WasmOpcode opcode = p->opcode();
+ TRACE("-----reduce module+%-6d %s func+%d: 0x%02x %s\n", baserel(p->pc()),
+ indentation(), startrel(p->pc()), opcode,
+ WasmOpcodes::OpcodeName(opcode));
+ FunctionSig* sig = WasmOpcodes::Signature(opcode);
+ if (sig) {
+ // A simple expression with a fixed signature.
+ TypeCheckLast(p, sig->GetParam(p->index - 1));
+ if (p->done() && build()) {
+ if (sig->parameter_count() == 2) {
+ p->tree->node = builder_->Binop(opcode, p->tree->children[0]->node,
+ p->tree->children[1]->node);
+ } else if (sig->parameter_count() == 1) {
+ p->tree->node = builder_->Unop(opcode, p->tree->children[0]->node);
+ } else {
+ UNREACHABLE();
+ }
+ }
+ return;
+ }
+
+ switch (opcode) {
+ case kExprBlock: {
+ if (p->done()) {
+ Block* last = &blocks_.back();
+ DCHECK_EQ(stack_.size() - 1, last->stack_depth);
+ // fallthrough with the last expression.
+ ReduceBreakToExprBlock(p, last);
+ SetEnv("block:end", last->ssa_env);
+ blocks_.pop_back();
+ }
+ break;
+ }
+ case kExprLoop: {
+ if (p->done()) {
+ // Pop the continue environment.
+ blocks_.pop_back();
+ // Get the break environment.
+ Block* last = &blocks_.back();
+ DCHECK_EQ(stack_.size() - 1, last->stack_depth);
+ // fallthrough with the last expression.
+ ReduceBreakToExprBlock(p, last);
+ SetEnv("loop:end", last->ssa_env);
+ blocks_.pop_back();
+ }
+ break;
+ }
+ case kExprIf: {
+ if (p->index == 1) {
+ // Condition done. Split environment for true branch.
+ TypeCheckLast(p, kAstI32);
+ SsaEnv* false_env = ssa_env_;
+ SsaEnv* true_env = Split(ssa_env_);
+ ifs_.push_back({nullptr, false_env, nullptr});
+ BUILD(Branch, p->last()->node, &true_env->control,
+ &false_env->control);
+ SetEnv("if:true", true_env);
+ } else if (p->index == 2) {
+ // True block done. Merge true and false environments.
+ IfEnv* env = &ifs_.back();
+ SsaEnv* merge = env->merge_env;
+ if (merge->go()) {
+ merge->state = SsaEnv::kReached;
+ Goto(ssa_env_, merge);
+ }
+ SetEnv("if:merge", merge);
+ ifs_.pop_back();
+ }
+ break;
+ }
+ case kExprIfElse: {
+ if (p->index == 1) {
+ // Condition done. Split environment for true and false branches.
+ TypeCheckLast(p, kAstI32);
+ SsaEnv* merge_env = ssa_env_;
+ TFNode* if_true = nullptr;
+ TFNode* if_false = nullptr;
+ BUILD(Branch, p->last()->node, &if_true, &if_false);
+ SsaEnv* false_env = Split(ssa_env_);
+ SsaEnv* true_env = Steal(ssa_env_);
+ false_env->control = if_false;
+ true_env->control = if_true;
+ ifs_.push_back({false_env, merge_env, nullptr});
+ SetEnv("if_else:true", true_env);
+ } else if (p->index == 2) {
+ // True expr done.
+ IfEnv* env = &ifs_.back();
+ MergeIntoProduction(p, env->merge_env, p->last());
+ // Switch to environment for false branch.
+ SsaEnv* false_env = ifs_.back().false_env;
+ SetEnv("if_else:false", false_env);
+ } else if (p->index == 3) {
+ // False expr done.
+ IfEnv* env = &ifs_.back();
+ MergeIntoProduction(p, env->merge_env, p->last());
+ SetEnv("if_else:merge", env->merge_env);
+ ifs_.pop_back();
+ }
+ break;
+ }
+ case kExprSelect: {
+ if (p->index == 1) {
+ // Condition done.
+ TypeCheckLast(p, kAstI32);
+ } else if (p->index == 2) {
+ // True expression done.
+ p->tree->type = p->last()->type;
+ if (p->tree->type == kAstStmt) {
+ error(p->pc(), p->tree->children[1]->pc,
+ "select operand should be expression");
+ }
+ } else {
+ // False expression done.
+ DCHECK(p->done());
+ TypeCheckLast(p, p->tree->type);
+ if (build()) {
+ TFNode* controls[2];
+ builder_->Branch(p->tree->children[0]->node, &controls[0],
+ &controls[1]);
+ TFNode* merge = builder_->Merge(2, controls);
+ TFNode* vals[2] = {p->tree->children[1]->node,
+ p->tree->children[2]->node};
+ TFNode* phi = builder_->Phi(p->tree->type, 2, vals, merge);
+ p->tree->node = phi;
+ ssa_env_->control = merge;
+ }
+ }
+ break;
+ }
+ case kExprBr: {
+ uint32_t depth = Operand<uint8_t>(p->pc());
+ if (depth >= blocks_.size()) {
+ error("improperly nested branch");
+ break;
+ }
+ Block* block = &blocks_[blocks_.size() - depth - 1];
+ ReduceBreakToExprBlock(p, block);
+ break;
+ }
+ case kExprBrIf: {
+ if (p->index == 1) {
+ TypeCheckLast(p, kAstI32);
+ } else if (p->done()) {
+ uint32_t depth = Operand<uint8_t>(p->pc());
+ if (depth >= blocks_.size()) {
+ error("improperly nested branch");
+ break;
+ }
+ Block* block = &blocks_[blocks_.size() - depth - 1];
+ SsaEnv* fenv = ssa_env_;
+ SsaEnv* tenv = Split(fenv);
+ BUILD(Branch, p->tree->children[0]->node, &tenv->control,
+ &fenv->control);
+ ssa_env_ = tenv;
+ ReduceBreakToExprBlock(p, block);
+ ssa_env_ = fenv;
+ }
+ break;
+ }
+ case kExprTableSwitch: {
+ uint16_t table_count = *reinterpret_cast<const uint16_t*>(p->pc() + 3);
+ if (table_count == 1) {
+ // Degenerate switch with only a default target.
+ if (p->index == 1) {
+ SsaEnv* break_env = ssa_env_;
+ PushBlock(break_env);
+ SetEnv("switch:default", Steal(break_env));
+ }
+ if (p->done()) {
+ Block* block = &blocks_.back();
+ // fall through to the end.
+ ReduceBreakToExprBlock(p, block);
+ SetEnv("switch:end", block->ssa_env);
+ blocks_.pop_back();
+ }
+ break;
+ }
+
+ if (p->index == 1) {
+ // Switch key finished.
+ TypeCheckLast(p, kAstI32);
+
+ TFNode* sw = BUILD(Switch, table_count, p->last()->node);
+
+ // Allocate environments for each case.
+ uint16_t case_count = *reinterpret_cast<const uint16_t*>(p->pc() + 1);
+ SsaEnv** case_envs = zone_->NewArray<SsaEnv*>(case_count);
+ for (int i = 0; i < case_count; i++) {
+ case_envs[i] = UnreachableEnv();
+ }
+
+ ifs_.push_back({nullptr, nullptr, case_envs});
+ SsaEnv* break_env = ssa_env_;
+ PushBlock(break_env);
+ SsaEnv* copy = Steal(break_env);
+ ssa_env_ = copy;
+
+ // Build the environments for each case based on the table.
+ const uint16_t* table =
+ reinterpret_cast<const uint16_t*>(p->pc() + 5);
+ for (int i = 0; i < table_count; i++) {
+ uint16_t target = table[i];
+ SsaEnv* env = Split(copy);
+ env->control = (i == table_count - 1) ? BUILD(IfDefault, sw)
+ : BUILD(IfValue, i, sw);
+ if (target >= 0x8000) {
+ // Targets an outer block.
+ int depth = target - 0x8000;
+ SsaEnv* tenv = blocks_[blocks_.size() - depth - 1].ssa_env;
+ Goto(env, tenv);
+ } else {
+ // Targets a case.
+ Goto(env, case_envs[target]);
+ }
+ }
+
+ // Switch to the environment for the first case.
+ SetEnv("switch:case", case_envs[0]);
+ } else {
+ // Switch case finished.
+ if (p->done()) {
+ // Last case. Fall through to the end.
+ Block* block = &blocks_.back();
+ ReduceBreakToExprBlock(p, block);
+ SsaEnv* next = block->ssa_env;
+ blocks_.pop_back();
+ ifs_.pop_back();
+ SetEnv("switch:end", next);
+ } else {
+ // Interior case. Maybe fall through to the next case.
+ SsaEnv* next = ifs_.back().case_envs[p->index - 1];
+ if (ssa_env_->go()) Goto(ssa_env_, next);
+ SetEnv("switch:case", next);
+ }
+ }
+ break;
+ }
+ case kExprReturn: {
+ TypeCheckLast(p, function_env_->sig->GetReturn(p->index - 1));
+ if (p->done()) {
+ if (build()) {
+ int count = p->tree->count;
+ TFNode** buffer = builder_->Buffer(count);
+ for (int i = 0; i < count; i++) {
+ buffer[i] = p->tree->children[i]->node;
+ }
+ BUILD(Return, count, buffer);
+ }
+ ssa_env_->Kill(SsaEnv::kControlEnd);
+ }
+ break;
+ }
+ case kExprSetLocal: {
+ int unused = 0;
+ uint32_t index;
+ LocalType type = LocalOperand(p->pc(), &index, &unused);
+ Tree* val = p->last();
+ if (type == val->type) {
+ if (build()) ssa_env_->locals[index] = val->node;
+ p->tree->node = val->node;
+ } else {
+ error(p->pc(), val->pc, "Typecheck failed in SetLocal");
+ }
+ break;
+ }
+ case kExprStoreGlobal: {
+ int unused = 0;
+ uint32_t index;
+ LocalType type = GlobalOperand(p->pc(), &index, &unused);
+ Tree* val = p->last();
+ if (type == val->type) {
+ BUILD(StoreGlobal, index, val->node);
+ p->tree->node = val->node;
+ } else {
+ error(p->pc(), val->pc, "Typecheck failed in StoreGlobal");
+ }
+ break;
+ }
+
+ case kExprI32LoadMem8S:
+ return ReduceLoadMem(p, kAstI32, MachineType::Int8());
+ case kExprI32LoadMem8U:
+ return ReduceLoadMem(p, kAstI32, MachineType::Uint8());
+ case kExprI32LoadMem16S:
+ return ReduceLoadMem(p, kAstI32, MachineType::Int16());
+ case kExprI32LoadMem16U:
+ return ReduceLoadMem(p, kAstI32, MachineType::Uint16());
+ case kExprI32LoadMem:
+ return ReduceLoadMem(p, kAstI32, MachineType::Int32());
+
+ case kExprI64LoadMem8S:
+ return ReduceLoadMem(p, kAstI64, MachineType::Int8());
+ case kExprI64LoadMem8U:
+ return ReduceLoadMem(p, kAstI64, MachineType::Uint8());
+ case kExprI64LoadMem16S:
+ return ReduceLoadMem(p, kAstI64, MachineType::Int16());
+ case kExprI64LoadMem16U:
+ return ReduceLoadMem(p, kAstI64, MachineType::Uint16());
+ case kExprI64LoadMem32S:
+ return ReduceLoadMem(p, kAstI64, MachineType::Int32());
+ case kExprI64LoadMem32U:
+ return ReduceLoadMem(p, kAstI64, MachineType::Uint32());
+ case kExprI64LoadMem:
+ return ReduceLoadMem(p, kAstI64, MachineType::Int64());
+
+ case kExprF32LoadMem:
+ return ReduceLoadMem(p, kAstF32, MachineType::Float32());
+
+ case kExprF64LoadMem:
+ return ReduceLoadMem(p, kAstF64, MachineType::Float64());
+
+ case kExprI32StoreMem8:
+ return ReduceStoreMem(p, kAstI32, MachineType::Int8());
+ case kExprI32StoreMem16:
+ return ReduceStoreMem(p, kAstI32, MachineType::Int16());
+ case kExprI32StoreMem:
+ return ReduceStoreMem(p, kAstI32, MachineType::Int32());
+
+ case kExprI64StoreMem8:
+ return ReduceStoreMem(p, kAstI64, MachineType::Int8());
+ case kExprI64StoreMem16:
+ return ReduceStoreMem(p, kAstI64, MachineType::Int16());
+ case kExprI64StoreMem32:
+ return ReduceStoreMem(p, kAstI64, MachineType::Int32());
+ case kExprI64StoreMem:
+ return ReduceStoreMem(p, kAstI64, MachineType::Int64());
+
+ case kExprF32StoreMem:
+ return ReduceStoreMem(p, kAstF32, MachineType::Float32());
+
+ case kExprF64StoreMem:
+ return ReduceStoreMem(p, kAstF64, MachineType::Float64());
+
+ case kExprGrowMemory:
+ TypeCheckLast(p, kAstI32);
+ // TODO(titzer): build node for GrowMemory
+ p->tree->node = BUILD(Int32Constant, 0);
+ return;
+
+ case kExprCallFunction: {
+ int len;
+ uint32_t index;
+ FunctionSig* sig = FunctionSigOperand(p->pc(), &index, &len);
+ if (!sig) break;
+ if (p->index > 0) {
+ TypeCheckLast(p, sig->GetParam(p->index - 1));
+ }
+ if (p->done() && build()) {
+ uint32_t count = p->tree->count + 1;
+ TFNode** buffer = builder_->Buffer(count);
+ FunctionSig* sig = FunctionSigOperand(p->pc(), &index, &len);
+ USE(sig);
+ buffer[0] = nullptr; // reserved for code object.
+ for (uint32_t i = 1; i < count; i++) {
+ buffer[i] = p->tree->children[i - 1]->node;
+ }
+ p->tree->node = builder_->CallDirect(index, buffer);
+ }
+ break;
+ }
+ case kExprCallIndirect: {
+ int len;
+ uint32_t index;
+ FunctionSig* sig = SigOperand(p->pc(), &index, &len);
+ if (p->index == 1) {
+ TypeCheckLast(p, kAstI32);
+ } else {
+ TypeCheckLast(p, sig->GetParam(p->index - 2));
+ }
+ if (p->done() && build()) {
+ uint32_t count = p->tree->count;
+ TFNode** buffer = builder_->Buffer(count);
+ for (uint32_t i = 0; i < count; i++) {
+ buffer[i] = p->tree->children[i]->node;
+ }
+ p->tree->node = builder_->CallIndirect(index, buffer);
+ }
+ break;
+ }
+ default:
+ break;
+ }
+ }
+
+ void ReduceBreakToExprBlock(Production* p, Block* block) {
+ if (block->stack_depth < 0) {
+ // This is the inner loop block, which does not have a value.
+ Goto(ssa_env_, block->ssa_env);
+ } else {
+ // Merge the value into the production for the block.
+ Production* bp = &stack_[block->stack_depth];
+ MergeIntoProduction(bp, block->ssa_env, p->last());
+ }
+ }
+
+ void MergeIntoProduction(Production* p, SsaEnv* target, Tree* expr) {
+ if (!ssa_env_->go()) return;
+
+ bool first = target->state == SsaEnv::kUnreachable;
+ Goto(ssa_env_, target);
+ if (expr->type == kAstEnd) return;
+
+ if (first) {
+ // first merge to this environment; set the type and the node.
+ p->tree->type = expr->type;
+ p->tree->node = expr->node;
+ } else {
+ // merge with the existing value for this block.
+ LocalType type = p->tree->type;
+ if (expr->type != type) {
+ type = kAstStmt;
+ p->tree->type = kAstStmt;
+ p->tree->node = nullptr;
+ } else if (type != kAstStmt) {
+ p->tree->node = CreateOrMergeIntoPhi(type, target->control,
+ p->tree->node, expr->node);
+ }
+ }
+ }
+
+ void ReduceLoadMem(Production* p, LocalType type, MachineType mem_type) {
+ DCHECK_EQ(1, p->index);
+ TypeCheckLast(p, kAstI32); // index
+ if (build()) {
+ int length = 0;
+ uint32_t offset = 0;
+ MemoryAccessOperand(p->pc(), &length, &offset);
+ p->tree->node =
+ builder_->LoadMem(type, mem_type, p->last()->node, offset);
+ }
+ }
+
+ void ReduceStoreMem(Production* p, LocalType type, MachineType mem_type) {
+ if (p->index == 1) {
+ TypeCheckLast(p, kAstI32); // index
+ } else {
+ DCHECK_EQ(2, p->index);
+ TypeCheckLast(p, type);
+ if (build()) {
+ int length = 0;
+ uint32_t offset = 0;
+ MemoryAccessOperand(p->pc(), &length, &offset);
+ TFNode* val = p->tree->children[1]->node;
+ builder_->StoreMem(mem_type, p->tree->children[0]->node, offset, val);
+ p->tree->node = val;
+ }
+ }
+ }
+
+ void TypeCheckLast(Production* p, LocalType expected) {
+ LocalType result = p->last()->type;
+ if (result == expected) return;
+ if (result == kAstEnd) return;
+ if (expected != kAstStmt) {
+ error(p->pc(), p->last()->pc,
+ "%s[%d] expected type %s, found %s of type %s",
+ WasmOpcodes::OpcodeName(p->opcode()), p->index - 1,
+ WasmOpcodes::TypeName(expected),
+ WasmOpcodes::OpcodeName(p->last()->opcode()),
+ WasmOpcodes::TypeName(p->last()->type));
+ }
+ }
+
+ void SetEnv(const char* reason, SsaEnv* env) {
+ TRACE(" env = %p, block depth = %d, reason = %s", static_cast<void*>(env),
+ static_cast<int>(blocks_.size()), reason);
+ if (env->control != nullptr && FLAG_trace_wasm_decoder) {
+ TRACE(", control = ");
+ compiler::WasmGraphBuilder::PrintDebugName(env->control);
+ }
+ TRACE("\n");
+ ssa_env_ = env;
+ if (builder_) {
+ builder_->set_control_ptr(&env->control);
+ builder_->set_effect_ptr(&env->effect);
+ }
+ }
+
+ void Goto(SsaEnv* from, SsaEnv* to) {
+ DCHECK_NOT_NULL(to);
+ if (!from->go()) return;
+ switch (to->state) {
+ case SsaEnv::kUnreachable: { // Overwrite destination.
+ to->state = SsaEnv::kReached;
+ to->locals = from->locals;
+ to->control = from->control;
+ to->effect = from->effect;
+ break;
+ }
+ case SsaEnv::kReached: { // Create a new merge.
+ to->state = SsaEnv::kMerged;
+ if (!builder_) break;
+ // Merge control.
+ TFNode* controls[] = {to->control, from->control};
+ TFNode* merge = builder_->Merge(2, controls);
+ to->control = merge;
+ // Merge effects.
+ if (from->effect != to->effect) {
+ TFNode* effects[] = {to->effect, from->effect, merge};
+ to->effect = builder_->EffectPhi(2, effects, merge);
+ }
+ // Merge SSA values.
+ for (int i = EnvironmentCount() - 1; i >= 0; i--) {
+ TFNode* a = to->locals[i];
+ TFNode* b = from->locals[i];
+ if (a != b) {
+ TFNode* vals[] = {a, b};
+ to->locals[i] =
+ builder_->Phi(function_env_->GetLocalType(i), 2, vals, merge);
+ }
+ }
+ break;
+ }
+ case SsaEnv::kMerged: {
+ if (!builder_) break;
+ TFNode* merge = to->control;
+ // Extend the existing merge.
+ builder_->AppendToMerge(merge, from->control);
+ // Merge effects.
+ if (builder_->IsPhiWithMerge(to->effect, merge)) {
+ builder_->AppendToPhi(merge, to->effect, from->effect);
+ } else if (to->effect != from->effect) {
+ uint32_t count = builder_->InputCount(merge);
+ TFNode** effects = builder_->Buffer(count);
+ for (uint32_t j = 0; j < count - 1; j++) {
+ effects[j] = to->effect;
+ }
+ effects[count - 1] = from->effect;
+ to->effect = builder_->EffectPhi(count, effects, merge);
+ }
+ // Merge locals.
+ for (int i = EnvironmentCount() - 1; i >= 0; i--) {
+ TFNode* tnode = to->locals[i];
+ TFNode* fnode = from->locals[i];
+ if (builder_->IsPhiWithMerge(tnode, merge)) {
+ builder_->AppendToPhi(merge, tnode, fnode);
+ } else if (tnode != fnode) {
+ uint32_t count = builder_->InputCount(merge);
+ TFNode** vals = builder_->Buffer(count);
+ for (uint32_t j = 0; j < count - 1; j++) {
+ vals[j] = tnode;
+ }
+ vals[count - 1] = fnode;
+ to->locals[i] = builder_->Phi(function_env_->GetLocalType(i), count,
+ vals, merge);
+ }
+ }
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ return from->Kill();
+ }
+
  // Returns a value that merges {tnode} and {fnode} at {merge}: appends
  // {fnode} to an existing phi on {merge}, creates a fresh phi whose first
  // (count - 1) inputs are {tnode} if the two values differ, or returns
  // {tnode} unchanged when they are equal.
  TFNode* CreateOrMergeIntoPhi(LocalType type, TFNode* merge, TFNode* tnode,
                               TFNode* fnode) {
    if (builder_->IsPhiWithMerge(tnode, merge)) {
      builder_->AppendToPhi(merge, tnode, fnode);
    } else if (tnode != fnode) {
      uint32_t count = builder_->InputCount(merge);
      TFNode** vals = builder_->Buffer(count);
      for (uint32_t j = 0; j < count - 1; j++) vals[j] = tnode;
      vals[count - 1] = fnode;
      return builder_->Phi(type, count, vals, merge);
    }
    return tnode;
  }
+
  // Builds a loop with an unconditional back edge, leaving the current
  // environment unreachable afterwards.
  void BuildInfiniteLoop() {
    if (ssa_env_->go()) {
      PrepareForLoop(ssa_env_);
      SsaEnv* cont_env = ssa_env_;  // the loop header to jump back to
      ssa_env_ = Split(ssa_env_);
      ssa_env_->state = SsaEnv::kReached;
      Goto(ssa_env_, cont_env);  // the back edge
    }
  }
+
+ void PrepareForLoop(SsaEnv* env) {
+ if (env->go()) {
+ env->state = SsaEnv::kMerged;
+ if (builder_) {
+ env->control = builder_->Loop(env->control);
+ env->effect = builder_->EffectPhi(1, &env->effect, env->control);
+ builder_->Terminate(env->effect, env->control);
+ for (int i = EnvironmentCount() - 1; i >= 0; i--) {
+ env->locals[i] = builder_->Phi(function_env_->GetLocalType(i), 1,
+ &env->locals[i], env->control);
+ }
+ }
+ }
+ }
+
+ // Create a complete copy of the {from}.
+ SsaEnv* Split(SsaEnv* from) {
+ DCHECK_NOT_NULL(from);
+ SsaEnv* result = reinterpret_cast<SsaEnv*>(zone_->New(sizeof(SsaEnv)));
+ size_t size = sizeof(TFNode*) * EnvironmentCount();
+ result->control = from->control;
+ result->effect = from->effect;
+ result->state = from->state == SsaEnv::kUnreachable ? SsaEnv::kUnreachable
+ : SsaEnv::kReached;
+
+ if (from->go()) {
+ result->state = SsaEnv::kReached;
+ result->locals =
+ size > 0 ? reinterpret_cast<TFNode**>(zone_->New(size)) : nullptr;
+ memcpy(result->locals, from->locals, size);
+ } else {
+ result->state = SsaEnv::kUnreachable;
+ result->locals = nullptr;
+ }
+
+ return result;
+ }
+
+ // Create a copy of {from} that steals its state and leaves {from}
+ // unreachable.
+ SsaEnv* Steal(SsaEnv* from) {
+ DCHECK_NOT_NULL(from);
+ if (!from->go()) return UnreachableEnv();
+ SsaEnv* result = reinterpret_cast<SsaEnv*>(zone_->New(sizeof(SsaEnv)));
+ result->state = SsaEnv::kReached;
+ result->locals = from->locals;
+ result->control = from->control;
+ result->effect = from->effect;
+ from->Kill(SsaEnv::kUnreachable);
+ return result;
+ }
+
+ // Create an unreachable environment.
+ SsaEnv* UnreachableEnv() {
+ SsaEnv* result = reinterpret_cast<SsaEnv*>(zone_->New(sizeof(SsaEnv)));
+ result->state = SsaEnv::kUnreachable;
+ result->control = nullptr;
+ result->effect = nullptr;
+ result->locals = nullptr;
+ return result;
+ }
+
+ // Load an operand at [pc + 1].
+ template <typename V>
+ V Operand(const byte* pc) {
+ if ((limit_ - pc) < static_cast<int>(1 + sizeof(V))) {
+ const char* msg = "Expected operand following opcode";
+ switch (sizeof(V)) {
+ case 1:
+ msg = "Expected 1-byte operand following opcode";
+ break;
+ case 2:
+ msg = "Expected 2-byte operand following opcode";
+ break;
+ case 4:
+ msg = "Expected 4-byte operand following opcode";
+ break;
+ default:
+ break;
+ }
+ error(pc, msg);
+ return -1;
+ }
+ return *reinterpret_cast<const V*>(pc + 1);
+ }
+
+ int EnvironmentCount() {
+ if (builder_) return static_cast<int>(function_env_->GetLocalCount());
+ return 0; // if we aren't building a graph, don't bother with SSA renaming.
+ }
+
+ LocalType LocalOperand(const byte* pc, uint32_t* index, int* length) {
+ *index = UnsignedLEB128Operand(pc, length);
+ if (function_env_->IsValidLocal(*index)) {
+ return function_env_->GetLocalType(*index);
+ }
+ error(pc, "invalid local variable index");
+ return kAstStmt;
+ }
+
+ LocalType GlobalOperand(const byte* pc, uint32_t* index, int* length) {
+ *index = UnsignedLEB128Operand(pc, length);
+ if (function_env_->module->IsValidGlobal(*index)) {
+ return WasmOpcodes::LocalTypeFor(
+ function_env_->module->GetGlobalType(*index));
+ }
+ error(pc, "invalid global variable index");
+ return kAstStmt;
+ }
+
+ FunctionSig* FunctionSigOperand(const byte* pc, uint32_t* index,
+ int* length) {
+ *index = UnsignedLEB128Operand(pc, length);
+ if (function_env_->module->IsValidFunction(*index)) {
+ return function_env_->module->GetFunctionSignature(*index);
+ }
+ error(pc, "invalid function index");
+ return nullptr;
+ }
+
+ FunctionSig* SigOperand(const byte* pc, uint32_t* index, int* length) {
+ *index = UnsignedLEB128Operand(pc, length);
+ if (function_env_->module->IsValidSignature(*index)) {
+ return function_env_->module->GetSignature(*index);
+ }
+ error(pc, "invalid signature index");
+ return nullptr;
+ }
+
  // Reads an unsigned LEB128 varint operand at [pc + 1] (i.e. immediately
  // following the opcode byte). On return, *length covers both the varint
  // bytes and the opcode byte itself.
  uint32_t UnsignedLEB128Operand(const byte* pc, int* length) {
    uint32_t result = 0;
    ReadUnsignedLEB128ErrorCode error_code =
        ReadUnsignedLEB128Operand(pc + 1, limit_, length, &result);
    if (error_code == kInvalidLEB128) error(pc, "invalid LEB128 varint");
    if (error_code == kMissingLEB128) error(pc, "expected LEB128 varint");
    (*length)++;  // account for the opcode byte as well
    return result;
  }
+
  // Decodes the memory-access bitfield byte at [pc + 1] plus an optional
  // LEB128 offset. On return, *length is the operand length including the
  // access byte (2 when no offset is present: opcode + access byte).
  void MemoryAccessOperand(const byte* pc, int* length, uint32_t* offset) {
    byte bitfield = Operand<uint8_t>(pc);
    if (MemoryAccess::OffsetField::decode(bitfield)) {
      *offset = UnsignedLEB128Operand(pc + 1, length);
      (*length)++;  // to account for the memory access byte
    } else {
      *offset = 0;
      *length = 2;
    }
  }
+
  // Called once on the first decoding error: stops the decode loop by
  // clamping the limit and disables any further graph building.
  virtual void onFirstError() {
    limit_ = start_;     // Terminate decoding loop.
    builder_ = nullptr;  // Don't build any more nodes.
#if DEBUG
    PrintStackForDebugging();
#endif
  }
+
#if DEBUG
  // Dumps the current production stack (used when a decode error occurs).
  void PrintStackForDebugging() { PrintProduction(0); }

  // Recursively prints the production at {depth} and its completed children,
  // indenting one level per stack depth.
  void PrintProduction(size_t depth) {
    if (depth >= stack_.size()) return;
    Production* p = &stack_[depth];
    for (size_t d = 0; d < depth; d++) PrintF(" ");

    PrintF("@%d %s [%d]\n", static_cast<int>(p->tree->pc - start_),
           WasmOpcodes::OpcodeName(p->opcode()), p->tree->count);
    for (int i = 0; i < p->index; i++) {
      Tree* child = p->tree->children[i];
      for (size_t d = 0; d <= depth; d++) PrintF(" ");
      PrintF("@%d %s [%d]", static_cast<int>(child->pc - start_),
             WasmOpcodes::OpcodeName(child->opcode()), child->count);
      if (child->node) {
        PrintF(" => TF");
        compiler::WasmGraphBuilder::PrintDebugName(child->node);
      }
      PrintF("\n");
    }
    PrintProduction(depth + 1);
  }
#endif
+};
+
+
+TreeResult VerifyWasmCode(FunctionEnv* env, const byte* base, const byte* start,
+ const byte* end) {
+ Zone zone;
+ LR_WasmDecoder decoder(&zone, nullptr);
+ TreeResult result = decoder.Decode(env, base, start, end);
+ return result;
+}
+
+
+TreeResult BuildTFGraph(TFBuilder* builder, FunctionEnv* env, const byte* base,
+ const byte* start, const byte* end) {
+ Zone zone;
+ LR_WasmDecoder decoder(&zone, builder);
+ TreeResult result = decoder.Decode(env, base, start, end);
+ return result;
+}
+
+
+std::ostream& operator<<(std::ostream& os, const Tree& tree) {
+ if (tree.pc == nullptr) {
+ os << "null";
+ return os;
+ }
+ PrintF("%s", WasmOpcodes::OpcodeName(tree.opcode()));
+ if (tree.count > 0) os << "(";
+ for (uint32_t i = 0; i < tree.count; i++) {
+ if (i > 0) os << ", ";
+ os << *tree.children[i];
+ }
+ if (tree.count > 0) os << ")";
+ return os;
+}
+
+
+ReadUnsignedLEB128ErrorCode ReadUnsignedLEB128Operand(const byte* pc,
+ const byte* limit,
+ int* length,
+ uint32_t* result) {
+ *result = 0;
+ const byte* ptr = pc;
+ const byte* end = pc + 5; // maximum 5 bytes.
+ if (end > limit) end = limit;
+ int shift = 0;
+ byte b = 0;
+ while (ptr < end) {
+ b = *ptr++;
+ *result = *result | ((b & 0x7F) << shift);
+ if ((b & 0x80) == 0) break;
+ shift += 7;
+ }
+ DCHECK_LE(ptr - pc, 5);
+ *length = static_cast<int>(ptr - pc);
+ if (ptr == end && (b & 0x80)) {
+ return kInvalidLEB128;
+ } else if (*length == 0) {
+ return kMissingLEB128;
+ } else {
+ return kNoError;
+ }
+}
+
+
// Computes the length in bytes of the opcode at {pc}, including its
// immediate operands.
int OpcodeLength(const byte* pc) {
  switch (static_cast<WasmOpcode>(*pc)) {
// Every load/store opcode carries a single memory-access byte; these cases
// fall through to the 2-byte group below.
#define DECLARE_OPCODE_CASE(name, opcode, sig) case kExpr##name:
    FOREACH_LOAD_MEM_OPCODE(DECLARE_OPCODE_CASE)
    FOREACH_STORE_MEM_OPCODE(DECLARE_OPCODE_CASE)
#undef DECLARE_OPCODE_CASE

    case kExprI8Const:
    case kExprBlock:
    case kExprLoop:
    case kExprBr:
    case kExprBrIf:
      return 2;  // opcode + one immediate byte
    case kExprI32Const:
    case kExprF32Const:
      return 5;  // opcode + 4-byte immediate
    case kExprI64Const:
    case kExprF64Const:
      return 9;  // opcode + 8-byte immediate
    case kExprStoreGlobal:
    case kExprSetLocal:
    case kExprLoadGlobal:
    case kExprCallFunction:
    case kExprCallIndirect:
    case kExprGetLocal: {
      // Variable-length LEB128 index operand (at most 5 bytes).
      int length;
      uint32_t result = 0;
      ReadUnsignedLEB128Operand(pc + 1, pc + 6, &length, &result);
      return 1 + length;
    }
    case kExprTableSwitch: {
      // Layout: opcode, u16 case count, u16 table count, u16 table entries.
      // NOTE(review): unaligned u16 read -- confirm it is safe on
      // strict-alignment targets.
      uint16_t table_count = *reinterpret_cast<const uint16_t*>(pc + 3);
      return 5 + table_count * 2;
    }

    default:
      return 1;  // no immediates
  }
}
+
+
// Computes the arity (number of sub-expressions) of the opcode at {pc}.
// Some opcodes need {env} because their arity depends on a signature.
int OpcodeArity(FunctionEnv* env, const byte* pc) {
// For each opcode signature, build a static table of its types; the arity
// is the number of entries minus the return type.
#define DECLARE_ARITY(name, ...)                          \
  static const LocalType kTypes_##name[] = {__VA_ARGS__}; \
  static const int kArity_##name =                        \
      static_cast<int>(arraysize(kTypes_##name) - 1);

  FOREACH_SIGNATURE(DECLARE_ARITY);
#undef DECLARE_ARITY

  switch (static_cast<WasmOpcode>(*pc)) {
    // Leaf expressions take no children.
    case kExprI8Const:
    case kExprI32Const:
    case kExprI64Const:
    case kExprF64Const:
    case kExprF32Const:
    case kExprGetLocal:
    case kExprLoadGlobal:
    case kExprNop:
    case kExprUnreachable:
      return 0;

    case kExprBr:
    case kExprStoreGlobal:
    case kExprSetLocal:
      return 1;

    case kExprIf:
    case kExprBrIf:
      return 2;
    case kExprIfElse:
    case kExprSelect:
      return 3;
    // Blocks and loops carry their child count as a one-byte immediate.
    case kExprBlock:
    case kExprLoop:
      return *(pc + 1);

    case kExprCallFunction: {
      // One child per parameter of the callee's signature.
      int index = *(pc + 1);
      return static_cast<int>(
          env->module->GetFunctionSignature(index)->parameter_count());
    }
    case kExprCallIndirect: {
      // One extra child for the function index expression.
      int index = *(pc + 1);
      return 1 + static_cast<int>(
                     env->module->GetSignature(index)->parameter_count());
    }
    case kExprReturn:
      return static_cast<int>(env->sig->return_count());
    case kExprTableSwitch: {
      // The switch key plus one child per case.
      // NOTE(review): unaligned u16 read -- confirm it is safe on
      // strict-alignment targets.
      uint16_t case_count = *reinterpret_cast<const uint16_t*>(pc + 1);
      return 1 + case_count;
    }

// All remaining opcodes derive their arity from their signature tables.
#define DECLARE_OPCODE_CASE(name, opcode, sig) \
  case kExpr##name:                            \
    return kArity_##sig;

    FOREACH_LOAD_MEM_OPCODE(DECLARE_OPCODE_CASE)
    FOREACH_STORE_MEM_OPCODE(DECLARE_OPCODE_CASE)
    FOREACH_MISC_MEM_OPCODE(DECLARE_OPCODE_CASE)
    FOREACH_SIMPLE_OPCODE(DECLARE_OPCODE_CASE)
#undef DECLARE_OPCODE_CASE
  }
  UNREACHABLE();
  return 0;
}
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/wasm/ast-decoder.h b/deps/v8/src/wasm/ast-decoder.h
new file mode 100644
index 0000000000..5b95ad9f87
--- /dev/null
+++ b/deps/v8/src/wasm/ast-decoder.h
@@ -0,0 +1,116 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_AST_DECODER_H_
+#define V8_WASM_AST_DECODER_H_
+
+#include "src/signature.h"
+#include "src/wasm/wasm-opcodes.h"
+#include "src/wasm/wasm-result.h"
+
+namespace v8 {
+namespace internal {
+
+namespace compiler { // external declarations from compiler.
+class WasmGraphBuilder;
+}
+
+namespace wasm {
+
+typedef compiler::WasmGraphBuilder TFBuilder;
+struct ModuleEnv; // forward declaration of module interface.
+
+// Interface to the function environment during decoding, including the
+// signature and the number of locals.
struct FunctionEnv {
  // Locals are indexed with parameters first, then int32, int64, float32
  // and float64 locals, in that fixed order.
  ModuleEnv* module;             // module environment
  FunctionSig* sig;              // signature of this function
  uint32_t local_int32_count;    // number of int32 locals
  uint32_t local_int64_count;    // number of int64 locals
  uint32_t local_float32_count;  // number of float32 locals
  uint32_t local_float64_count;  // number of float64 locals
  uint32_t total_locals;         // sum of parameters and all locals

  // True if {index} refers to a parameter or declared local.
  bool IsValidLocal(uint32_t index) { return index < total_locals; }
  uint32_t GetLocalCount() { return total_locals; }
  // Maps a local index to its type by walking the fixed ordering above;
  // returns kAstStmt for an out-of-range index.
  LocalType GetLocalType(uint32_t index) {
    if (index < static_cast<uint32_t>(sig->parameter_count())) {
      return sig->GetParam(index);
    }
    index -= static_cast<uint32_t>(sig->parameter_count());
    if (index < local_int32_count) return kAstI32;
    index -= local_int32_count;
    if (index < local_int64_count) return kAstI64;
    index -= local_int64_count;
    if (index < local_float32_count) return kAstF32;
    index -= local_float32_count;
    if (index < local_float64_count) return kAstF64;
    return kAstStmt;
  }

  // Declares {count} additional locals of {type}, keeping {total_locals}
  // consistent with the per-type counts.
  void AddLocals(LocalType type, uint32_t count) {
    switch (type) {
      case kAstI32:
        local_int32_count += count;
        break;
      case kAstI64:
        local_int64_count += count;
        break;
      case kAstF32:
        local_float32_count += count;
        break;
      case kAstF64:
        local_float64_count += count;
        break;
      default:
        UNREACHABLE();
    }
    total_locals += count;
    DCHECK(total_locals ==
           (sig->parameter_count() + local_int32_count + local_int64_count +
            local_float32_count + local_float64_count));
  }

  // Recomputes {total_locals} from the parameter and per-type local counts.
  void SumLocals() {
    total_locals = static_cast<uint32_t>(sig->parameter_count()) +
                   local_int32_count + local_int64_count + local_float32_count +
                   local_float64_count;
  }
};
+
+struct Tree;
+typedef Result<Tree*> TreeResult;
+
+std::ostream& operator<<(std::ostream& os, const Tree& tree);
+
+TreeResult VerifyWasmCode(FunctionEnv* env, const byte* base, const byte* start,
+ const byte* end);
+TreeResult BuildTFGraph(TFBuilder* builder, FunctionEnv* env, const byte* base,
+ const byte* start, const byte* end);
+
// Convenience overload that passes a null module base pointer.
inline TreeResult VerifyWasmCode(FunctionEnv* env, const byte* start,
                                 const byte* end) {
  return VerifyWasmCode(env, nullptr, start, end);
}
+
// Convenience overload that passes a null module base pointer.
inline TreeResult BuildTFGraph(TFBuilder* builder, FunctionEnv* env,
                               const byte* start, const byte* end) {
  return BuildTFGraph(builder, env, nullptr, start, end);
}
+
+enum ReadUnsignedLEB128ErrorCode { kNoError, kInvalidLEB128, kMissingLEB128 };
+
+ReadUnsignedLEB128ErrorCode ReadUnsignedLEB128Operand(const byte*, const byte*,
+ int*, uint32_t*);
+
+// Computes the length of the opcode at the given address.
+int OpcodeLength(const byte* pc);
+
+// Computes the arity (number of sub-nodes) of the opcode at the given address.
+int OpcodeArity(FunctionEnv* env, const byte* pc);
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_AST_DECODER_H_
diff --git a/deps/v8/src/wasm/decoder.h b/deps/v8/src/wasm/decoder.h
new file mode 100644
index 0000000000..698919d6a0
--- /dev/null
+++ b/deps/v8/src/wasm/decoder.h
@@ -0,0 +1,233 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_DECODER_H_
+#define V8_WASM_DECODER_H_
+
+#include "src/base/smart-pointers.h"
+#include "src/flags.h"
+#include "src/signature.h"
+#include "src/wasm/wasm-result.h"
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+#if DEBUG
+#define TRACE(...) \
+ do { \
+ if (FLAG_trace_wasm_decoder) PrintF(__VA_ARGS__); \
+ } while (false)
+#else
+#define TRACE(...)
+#endif
+
+// A helper utility to decode bytes, integers, fields, varints, etc, from
+// a buffer of bytes.
+class Decoder {
+ public:
+ Decoder(const byte* start, const byte* end)
+ : start_(start),
+ pc_(start),
+ limit_(end),
+ error_pc_(nullptr),
+ error_pt_(nullptr) {}
+
+ virtual ~Decoder() {}
+
+ // Reads a 8-bit unsigned integer (byte) and advances {pc_}.
+ uint8_t u8(const char* name = nullptr) {
+ TRACE(" +%d %-20s: ", static_cast<int>(pc_ - start_),
+ name ? name : "uint8_t");
+ if (checkAvailable(1)) {
+ byte val = *(pc_++);
+ TRACE("%02x = %d\n", val, val);
+ return val;
+ } else {
+ error("expected 1 byte, but fell off end");
+ return traceOffEnd<uint8_t>();
+ }
+ }
+
+ // Reads a 16-bit unsigned integer (little endian) and advances {pc_}.
+ uint16_t u16(const char* name = nullptr) {
+ TRACE(" +%d %-20s: ", static_cast<int>(pc_ - start_),
+ name ? name : "uint16_t");
+ if (checkAvailable(2)) {
+#ifdef V8_TARGET_LITTLE_ENDIAN
+ byte b0 = pc_[0];
+ byte b1 = pc_[1];
+#else
+ byte b1 = pc_[0];
+ byte b0 = pc_[1];
+#endif
+ uint16_t val = static_cast<uint16_t>(b1 << 8) | b0;
+ TRACE("%02x %02x = %d\n", pc_[0], pc_[1], val);
+ pc_ += 2;
+ return val;
+ } else {
+ error("expected 2 bytes, but fell off end");
+ return traceOffEnd<uint16_t>();
+ }
+ }
+
+ // Reads a single 32-bit unsigned integer (little endian) and advances {pc_}.
+ uint32_t u32(const char* name = nullptr) {
+ TRACE(" +%d %-20s: ", static_cast<int>(pc_ - start_),
+ name ? name : "uint32_t");
+ if (checkAvailable(4)) {
+#ifdef V8_TARGET_LITTLE_ENDIAN
+ byte b0 = pc_[0];
+ byte b1 = pc_[1];
+ byte b2 = pc_[2];
+ byte b3 = pc_[3];
+#else
+ byte b3 = pc_[0];
+ byte b2 = pc_[1];
+ byte b1 = pc_[2];
+ byte b0 = pc_[3];
+#endif
+ uint32_t val = static_cast<uint32_t>(b3 << 24) |
+ static_cast<uint32_t>(b2 << 16) |
+ static_cast<uint32_t>(b1 << 8) | b0;
+ TRACE("%02x %02x %02x %02x = %u\n", pc_[0], pc_[1], pc_[2], pc_[3], val);
+ pc_ += 4;
+ return val;
+ } else {
+ error("expected 4 bytes, but fell off end");
+ return traceOffEnd<uint32_t>();
+ }
+ }
+
+ // Reads a LEB128 variable-length 32-bit integer and advances {pc_}.
+ uint32_t u32v(int* length, const char* name = nullptr) {
+ TRACE(" +%d %-20s: ", static_cast<int>(pc_ - start_),
+ name ? name : "varint");
+
+ if (!checkAvailable(1)) {
+ error("expected at least 1 byte, but fell off end");
+ return traceOffEnd<uint32_t>();
+ }
+
+ const byte* pos = pc_;
+ const byte* end = pc_ + 5;
+ if (end > limit_) end = limit_;
+
+ uint32_t result = 0;
+ int shift = 0;
+ byte b = 0;
+ while (pc_ < end) {
+ b = *pc_++;
+ TRACE("%02x ", b);
+ result = result | ((b & 0x7F) << shift);
+ if ((b & 0x80) == 0) break;
+ shift += 7;
+ }
+
+ *length = static_cast<int>(pc_ - pos);
+ if (pc_ == end && (b & 0x80)) {
+ error(pc_ - 1, "varint too large");
+ } else {
+ TRACE("= %u\n", result);
+ }
+ return result;
+ }
+
+ // Check that at least {size} bytes exist between {pc_} and {limit_}.
+ bool checkAvailable(int size) {
+ if (pc_ < start_ || (pc_ + size) > limit_) {
+ error(pc_, nullptr, "expected %d bytes, fell off end", size);
+ return false;
+ } else {
+ return true;
+ }
+ }
+
+ void error(const char* msg) { error(pc_, nullptr, msg); }
+
+ void error(const byte* pc, const char* msg) { error(pc, nullptr, msg); }
+
+ // Sets internal error state.
+ void error(const byte* pc, const byte* pt, const char* format, ...) {
+ if (ok()) {
+#if DEBUG
+ if (FLAG_wasm_break_on_decoder_error) {
+ base::OS::DebugBreak();
+ }
+#endif
+ const int kMaxErrorMsg = 256;
+ char* buffer = new char[kMaxErrorMsg];
+ va_list arguments;
+ va_start(arguments, format);
+ base::OS::VSNPrintF(buffer, kMaxErrorMsg - 1, format, arguments);
+ va_end(arguments);
+ error_msg_.Reset(buffer);
+ error_pc_ = pc;
+ error_pt_ = pt;
+ onFirstError();
+ }
+ }
+
+ // Behavior triggered on first error, overridden in subclasses.
+ virtual void onFirstError() {}
+
+ // Debugging helper to print bytes up to the end.
+ template <typename T>
+ T traceOffEnd() {
+ T t = 0;
+ for (const byte* ptr = pc_; ptr < limit_; ptr++) {
+ TRACE("%02x ", *ptr);
+ }
+ TRACE("<end>\n");
+ pc_ = limit_;
+ return t;
+ }
+
+ // Converts the given value to a {Result}, copying the error if necessary.
+ template <typename T>
+ Result<T> toResult(T val) {
+ Result<T> result;
+ if (error_pc_) {
+ result.error_code = kError;
+ result.start = start_;
+ result.error_pc = error_pc_;
+ result.error_pt = error_pt_;
+ result.error_msg = error_msg_;
+ error_msg_.Reset(nullptr);
+ } else {
+ result.error_code = kSuccess;
+ }
+ result.val = val;
+ return result;
+ }
+
+ // Resets the boundaries of this decoder.
+ void Reset(const byte* start, const byte* end) {
+ start_ = start;
+ pc_ = start;
+ limit_ = end;
+ error_pc_ = nullptr;
+ error_pt_ = nullptr;
+ error_msg_.Reset(nullptr);
+ }
+
+ bool ok() const { return error_pc_ == nullptr; }
+ bool failed() const { return error_pc_ != nullptr; }
+
+ protected:
+ const byte* start_;
+ const byte* pc_;
+ const byte* limit_;
+ const byte* error_pc_;
+ const byte* error_pt_;
+ base::SmartArrayPointer<char> error_msg_;
+};
+
+#undef TRACE
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_DECODER_H_
diff --git a/deps/v8/src/wasm/encoder.cc b/deps/v8/src/wasm/encoder.cc
new file mode 100644
index 0000000000..d8d36338b1
--- /dev/null
+++ b/deps/v8/src/wasm/encoder.cc
@@ -0,0 +1,592 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/signature.h"
+
+#include "src/handles.h"
+#include "src/v8.h"
+#include "src/zone-containers.h"
+
+#include "src/wasm/ast-decoder.h"
+#include "src/wasm/encoder.h"
+#include "src/wasm/wasm-module.h"
+#include "src/wasm/wasm-opcodes.h"
+
+#include "src/v8memory.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+/* TODO: add error cases for declaring too many locals, adding too many
+   functions, and for bad indices in the function body. */
+
+namespace {
+void EmitUint8(byte** b, uint8_t x) {
+ Memory::uint8_at(*b) = x;
+ *b += 1;
+}
+
+
+void EmitUint16(byte** b, uint16_t x) {
+ Memory::uint16_at(*b) = x;
+ *b += 2;
+}
+
+
+void EmitUint32(byte** b, uint32_t x) {
+ Memory::uint32_at(*b) = x;
+ *b += 4;
+}
+
+
+void EmitVarInt(byte** b, size_t val) {
+ while (true) {
+ size_t next = val >> 7;
+ byte out = static_cast<byte>(val & 0x7f);
+ if (next) {
+ *((*b)++) = 0x80 | out;
+ val = next;
+ } else {
+ *((*b)++) = out;
+ break;
+ }
+ }
+}
+} // namespace
+
+
// A variable declaration recorded in declaration order.
struct WasmFunctionBuilder::Type {
  bool param_;      // true for parameters, false for locals
  LocalType type_;  // wasm type of the variable
};
+
+
// Functions default to an i32 return type, neither exported nor external;
// all collections are zone-allocated.
WasmFunctionBuilder::WasmFunctionBuilder(Zone* zone)
    : return_type_(kAstI32),
      locals_(zone),
      exported_(0),
      external_(0),
      body_(zone),
      local_indices_(zone),
      name_(zone) {}
+
+
// Declares a new parameter and returns its declaration-order index.
uint16_t WasmFunctionBuilder::AddParam(LocalType type) {
  return AddVar(type, true);
}


// Declares a new (non-parameter) local and returns its declaration-order
// index.
uint16_t WasmFunctionBuilder::AddLocal(LocalType type) {
  return AddVar(type, false);
}


// Records a variable declaration. The returned index is in declaration
// order; variables are renumbered into wire order later by IndexVars().
uint16_t WasmFunctionBuilder::AddVar(LocalType type, bool param) {
  locals_.push_back({param, type});
  return static_cast<uint16_t>(locals_.size() - 1);
}
+
+
+void WasmFunctionBuilder::ReturnType(LocalType type) { return_type_ = type; }
+
+
// Appends raw code bytes that contain no embedded local indices.
void WasmFunctionBuilder::EmitCode(const byte* code, uint32_t code_size) {
  EmitCode(code, code_size, nullptr, 0);
}
+
+
+void WasmFunctionBuilder::EmitCode(const byte* code, uint32_t code_size,
+ const uint32_t* local_indices,
+ uint32_t indices_size) {
+ size_t size = body_.size();
+ for (size_t i = 0; i < code_size; i++) {
+ body_.push_back(code[i]);
+ }
+ for (size_t i = 0; i < indices_size; i++) {
+ local_indices_.push_back(local_indices[i] + static_cast<uint32_t>(size));
+ }
+}
+
+
// Appends a bare opcode to the body.
void WasmFunctionBuilder::Emit(WasmOpcode opcode) {
  body_.push_back(static_cast<byte>(opcode));
}


// Appends an opcode followed by a one-byte immediate.
void WasmFunctionBuilder::EmitWithU8(WasmOpcode opcode, const byte immediate) {
  body_.push_back(static_cast<byte>(opcode));
  body_.push_back(immediate);
}


// Appends an opcode and records its body offset in {local_indices_}.
// NOTE(review): Build() later re-reads a LEB128 local index at each recorded
// offset; confirm that recording the opcode byte's own offset (rather than
// the offset of a following index byte) is the intended protocol.
void WasmFunctionBuilder::EmitWithLocal(WasmOpcode opcode) {
  body_.push_back(static_cast<byte>(opcode));
  local_indices_.push_back(static_cast<uint32_t>(body_.size()) - 1);
}


// Appends a placeholder immediate byte and returns its offset so it can be
// patched later via EditImmediate().
uint32_t WasmFunctionBuilder::EmitEditableImmediate(const byte immediate) {
  body_.push_back(immediate);
  return static_cast<uint32_t>(body_.size()) - 1;
}


// Overwrites a previously emitted immediate byte at {offset}.
void WasmFunctionBuilder::EditImmediate(uint32_t offset, const byte immediate) {
  DCHECK(offset < body_.size());
  body_[offset] = immediate;
}
+
+
// Marks the function as exported (nonzero flag).
void WasmFunctionBuilder::Exported(uint8_t flag) { exported_ = flag; }


// Marks the function as external/imported (nonzero flag).
void WasmFunctionBuilder::External(uint8_t flag) { external_ = flag; }
+
+void WasmFunctionBuilder::SetName(const unsigned char* name, int name_length) {
+ name_.clear();
+ if (name_length > 0) {
+ for (int i = 0; i < name_length; i++) {
+ name_.push_back(*(name + i));
+ }
+ name_.push_back('\0');
+ }
+}
+
+
// Assembles a WasmFunctionEncoder: computes the renumbering of variables
// into wire order (params, i32, i64, f32, f64), re-encodes every recorded
// local-index LEB128 in the body with its new number, and interns the
// function signature in the module builder {mb}.
WasmFunctionEncoder* WasmFunctionBuilder::Build(Zone* zone,
                                                WasmModuleBuilder* mb) const {
  WasmFunctionEncoder* e =
      new (zone) WasmFunctionEncoder(zone, return_type_, exported_, external_);
  uint16_t* var_index = zone->NewArray<uint16_t>(locals_.size());
  IndexVars(e, var_index);  // old declaration index -> new wire index
  if (body_.size() > 0) {
    // TODO(titzer): iterate over local indexes, not the bytes.
    const byte* start = &body_[0];
    const byte* end = start + body_.size();
    size_t local_index = 0;
    for (size_t i = 0; i < body_.size();) {
      if (local_index < local_indices_.size() &&
          i == local_indices_[local_index]) {
        // This offset holds a local index: decode it, map it through
        // {var_index} and emit the re-encoded LEB128 instead.
        int length = 0;
        uint32_t index;
        ReadUnsignedLEB128Operand(start + i, end, &length, &index);
        uint16_t new_index = var_index[index];
        const std::vector<uint8_t>& index_vec = UnsignedLEB128From(new_index);
        for (size_t j = 0; j < index_vec.size(); j++) {
          e->body_.push_back(index_vec.at(j));
        }
        i += length;
        local_index++;
      } else {
        // Ordinary body byte: copy through unchanged.
        e->body_.push_back(*(start + i));
        i++;
      }
    }
  }
  // Build the signature (no return entry when the return type is kAstStmt).
  FunctionSig::Builder sig(zone, return_type_ == kAstStmt ? 0 : 1,
                           e->params_.size());
  if (return_type_ != kAstStmt) {
    sig.AddReturn(static_cast<LocalType>(return_type_));
  }
  for (size_t i = 0; i < e->params_.size(); i++) {
    sig.AddParam(static_cast<LocalType>(e->params_[i]));
  }
  e->signature_index_ = mb->AddSignature(sig.Build());
  e->name_.insert(e->name_.begin(), name_.begin(), name_.end());
  return e;
}
+
+
// Computes {var_index}, mapping each variable's declaration-order index to
// its wire-order index (params first, then i32, i64, f32, f64 locals), and
// fills in the per-type local counts and parameter list of {e}.
void WasmFunctionBuilder::IndexVars(WasmFunctionEncoder* e,
                                    uint16_t* var_index) const {
  // First pass: count variables per category.
  uint16_t param = 0;
  uint16_t int32 = 0;
  uint16_t int64 = 0;
  uint16_t float32 = 0;
  uint16_t float64 = 0;
  for (size_t i = 0; i < locals_.size(); i++) {
    if (locals_.at(i).param_) {
      param++;
    } else if (locals_.at(i).type_ == kAstI32) {
      int32++;
    } else if (locals_.at(i).type_ == kAstI64) {
      int64++;
    } else if (locals_.at(i).type_ == kAstF32) {
      float32++;
    } else if (locals_.at(i).type_ == kAstF64) {
      float64++;
    }
  }
  e->local_int32_count_ = int32;
  e->local_int64_count_ = int64;
  e->local_float32_count_ = float32;
  e->local_float64_count_ = float64;
  // Re-purpose the counters as the first wire index of each category:
  // [ params | int32s | int64s | float32s | float64s ].
  // (Order of these assignments matters: each uses the pre-overwrite
  // counts of the categories before it.)
  float64 = param + int32 + int64 + float32;
  float32 = param + int32 + int64;
  int64 = param + int32;
  int32 = param;
  param = 0;
  // Second pass: hand out wire indices in declaration order.
  for (size_t i = 0; i < locals_.size(); i++) {
    if (locals_.at(i).param_) {
      e->params_.push_back(locals_.at(i).type_);
      var_index[i] = param++;
    } else if (locals_.at(i).type_ == kAstI32) {
      var_index[i] = int32++;
    } else if (locals_.at(i).type_ == kAstI64) {
      var_index[i] = int64++;
    } else if (locals_.at(i).type_ == kAstF32) {
      var_index[i] = float32++;
    } else if (locals_.at(i).type_ == kAstF64) {
      var_index[i] = float64++;
    }
  }
}
+
+
// NOTE(review): {return_type} is accepted but not stored here; the return
// type is folded into the interned signature by WasmFunctionBuilder::Build().
// Confirm the parameter is intentionally unused.
WasmFunctionEncoder::WasmFunctionEncoder(Zone* zone, LocalType return_type,
                                         bool exported, bool external)
    : params_(zone),
      exported_(exported),
      external_(external),
      body_(zone),
      name_(zone) {}
+
+
// Header bytes contributed by this function: decl byte + u16 signature
// index, plus the optional pieces flagged below.
uint32_t WasmFunctionEncoder::HeaderSize() const {
  uint32_t size = 3;
  if (HasLocals()) size += 8;  // four u16 local counts
  if (!external_) size += 2;   // u16 body size
  if (HasName()) size += 4;    // u32 name offset
  return size;
}


// External (imported) functions carry no code.
uint32_t WasmFunctionEncoder::BodySize(void) const {
  return external_ ? 0 : static_cast<uint32_t>(body_.size());
}


// Only exported functions store a name.
uint32_t WasmFunctionEncoder::NameSize() const {
  return exported_ ? static_cast<uint32_t>(name_.size()) : 0;
}
+
+
// Writes the function declaration into the header area and its name into
// the body area, advancing both cursors. {buffer} is the start of the whole
// module buffer, used to compute the absolute name offset. Note the code
// itself is written inline into the header area (after the u16 size).
void WasmFunctionEncoder::Serialize(byte* buffer, byte** header,
                                    byte** body) const {
  // Declaration bits describing which optional pieces follow.
  uint8_t decl_bits = (exported_ ? kDeclFunctionExport : 0) |
                      (external_ ? kDeclFunctionImport : 0) |
                      (HasLocals() ? kDeclFunctionLocals : 0) |
                      (HasName() ? kDeclFunctionName : 0);

  EmitUint8(header, decl_bits);
  EmitUint16(header, signature_index_);

  if (HasName()) {
    // The name bytes live in the body area; the header stores their
    // absolute offset.
    uint32_t name_offset = static_cast<uint32_t>(*body - buffer);
    EmitUint32(header, name_offset);
    std::memcpy(*body, &name_[0], name_.size());
    (*body) += name_.size();
  }

  if (HasLocals()) {
    EmitUint16(header, local_int32_count_);
    EmitUint16(header, local_int64_count_);
    EmitUint16(header, local_float32_count_);
    EmitUint16(header, local_float64_count_);
  }

  if (!external_) {
    EmitUint16(header, static_cast<uint16_t>(body_.size()));
    if (body_.size() > 0) {
      std::memcpy(*header, &body_[0], body_.size());
      (*header) += body_.size();
    }
  }
}
+
+
+WasmDataSegmentEncoder::WasmDataSegmentEncoder(Zone* zone, const byte* data,
+ uint32_t size, uint32_t dest)
+ : data_(zone), dest_(dest) {
+ for (size_t i = 0; i < size; i++) {
+ data_.push_back(data[i]);
+ }
+}
+
+
// Fixed per-segment header: u32 dest, u32 body offset, u32 size, u8 init.
uint32_t WasmDataSegmentEncoder::HeaderSize() const {
  static const int kDataSegmentSize = 13;
  return kDataSegmentSize;
}


uint32_t WasmDataSegmentEncoder::BodySize() const {
  return static_cast<uint32_t>(data_.size());
}
+
+
// Writes the segment header (destination address, absolute payload offset,
// payload size, init flag) and copies the payload into the body area,
// advancing both cursors.
void WasmDataSegmentEncoder::Serialize(byte* buffer, byte** header,
                                       byte** body) const {
  uint32_t body_offset = static_cast<uint32_t>(*body - buffer);
  EmitUint32(header, dest_);
  EmitUint32(header, body_offset);
  EmitUint32(header, static_cast<uint32_t>(data_.size()));
  EmitUint8(header, 1);  // init

  std::memcpy(*body, &data_[0], data_.size());
  (*body) += data_.size();
}
+
+
// All collections are zone-allocated alongside the builder itself.
WasmModuleBuilder::WasmModuleBuilder(Zone* zone)
    : zone_(zone),
      signatures_(zone),
      functions_(zone),
      data_segments_(zone),
      indirect_functions_(zone),
      globals_(zone),
      signature_map_(zone) {}


// Creates an empty function builder and returns its index in the module.
uint16_t WasmModuleBuilder::AddFunction() {
  functions_.push_back(new (zone_) WasmFunctionBuilder(zone_));
  return static_cast<uint16_t>(functions_.size() - 1);
}
+
+
+WasmFunctionBuilder* WasmModuleBuilder::FunctionAt(size_t index) {
+ if (functions_.size() > index) {
+ return functions_.at(index);
+ } else {
+ return nullptr;
+ }
+}
+
+
// Registers an encoded data segment with the module.
void WasmModuleBuilder::AddDataSegment(WasmDataSegmentEncoder* data) {
  data_segments_.push_back(data);
}
+
+
+int WasmModuleBuilder::CompareFunctionSigs::operator()(FunctionSig* a,
+ FunctionSig* b) const {
+ if (a->return_count() < b->return_count()) return -1;
+ if (a->return_count() > b->return_count()) return 1;
+ if (a->parameter_count() < b->parameter_count()) return -1;
+ if (a->parameter_count() > b->parameter_count()) return 1;
+ for (size_t r = 0; r < a->return_count(); r++) {
+ if (a->GetReturn(r) < b->GetReturn(r)) return -1;
+ if (a->GetReturn(r) > b->GetReturn(r)) return 1;
+ }
+ for (size_t p = 0; p < a->parameter_count(); p++) {
+ if (a->GetParam(p) < b->GetParam(p)) return -1;
+ if (a->GetParam(p) > b->GetParam(p)) return 1;
+ }
+ return 0;
+}
+
+
+uint16_t WasmModuleBuilder::AddSignature(FunctionSig* sig) {
+ SignatureMap::iterator pos = signature_map_.find(sig);
+ if (pos != signature_map_.end()) {
+ return pos->second;
+ } else {
+ uint16_t index = static_cast<uint16_t>(signatures_.size());
+ signature_map_[sig] = index;
+ signatures_.push_back(sig);
+ return index;
+ }
+}
+
+
// Appends a function index to the indirect-call (function table) section.
void WasmModuleBuilder::AddIndirectFunction(uint16_t index) {
  indirect_functions_.push_back(index);
}
+
+
+WasmModuleWriter* WasmModuleBuilder::Build(Zone* zone) {
+ WasmModuleWriter* writer = new (zone) WasmModuleWriter(zone);
+ for (auto function : functions_) {
+ writer->functions_.push_back(function->Build(zone, this));
+ }
+ for (auto segment : data_segments_) {
+ writer->data_segments_.push_back(segment);
+ }
+ for (auto sig : signatures_) {
+ writer->signatures_.push_back(sig);
+ }
+ for (auto index : indirect_functions_) {
+ writer->indirect_functions_.push_back(index);
+ }
+ for (auto global : globals_) {
+ writer->globals_.push_back(global);
+ }
+ return writer;
+}
+
+
+uint32_t WasmModuleBuilder::AddGlobal(MachineType type, bool exported) {
+ globals_.push_back(std::make_pair(type, exported));
+ return static_cast<uint32_t>(globals_.size() - 1);
+}
+
+
// All writer tables are zone-backed; populated by WasmModuleBuilder::Build().
WasmModuleWriter::WasmModuleWriter(Zone* zone)
    : functions_(zone),
      data_segments_(zone),
      signatures_(zone),
      indirect_functions_(zone),
      globals_(zone) {}
+
+
// Accumulator for the sizing pass of WriteTo(): tracks how many bytes the
// header and body regions of the output buffer will need.
struct Sizes {
  size_t header_size;
  size_t body_size;

  size_t total() { return header_size + body_size; }

  void Add(size_t header, size_t body) {
    header_size += header;
    body_size += body;
  }

  // Accounts for a non-empty section: one byte for the section code plus one
  // byte per 7-bit group of the entry count (its varint encoding). Empty
  // sections are omitted and contribute nothing.
  void AddSection(size_t size) {
    if (size == 0) return;
    Add(1, 0);  // section code byte
    do {
      Add(1, 0);  // one varint byte
      size >>= 7;
    } while (size > 0);
  }
};
+
+
// Serializes the whole module into one zone-allocated buffer using a
// two-phase scheme: a sizing pass computes the header/body split, then each
// section is emitted in the same order the sizing pass assumed. The section
// order here must stay in sync with the Sizes accounting above.
WasmModuleIndex* WasmModuleWriter::WriteTo(Zone* zone) const {
  Sizes sizes = {0, 0};

  // Memory declaration: section code byte plus kDeclMemorySize payload bytes.
  sizes.Add(1, 0);
  sizes.Add(kDeclMemorySize, 0);

  // Signatures: per signature, 1 byte param count + 1 byte return type +
  // 1 byte per parameter.
  sizes.AddSection(signatures_.size());
  for (auto sig : signatures_) {
    sizes.Add(2 + sig->parameter_count(), 0);
  }

  sizes.AddSection(globals_.size());
  if (globals_.size() > 0) {
    sizes.Add(kDeclGlobalSize * globals_.size(), 0);
  }

  sizes.AddSection(functions_.size());
  for (auto function : functions_) {
    sizes.Add(function->HeaderSize() + function->BodySize(),
              function->NameSize());
  }

  sizes.AddSection(data_segments_.size());
  for (auto segment : data_segments_) {
    sizes.Add(segment->HeaderSize(), segment->BodySize());
  }

  // Function table entries are fixed-width uint16 indices.
  sizes.AddSection(indirect_functions_.size());
  sizes.Add(2 * static_cast<uint32_t>(indirect_functions_.size()), 0);

  // Reserve one byte for the kDeclEnd marker that separates header from body.
  if (sizes.body_size > 0) sizes.Add(1, 0);

  // NOTE(review): the buffer is a local ZoneVector; returning pointers into it
  // presumably relies on ZoneVector storage living as long as {zone} — confirm.
  ZoneVector<uint8_t> buffer_vector(sizes.total(), zone);
  byte* buffer = &buffer_vector[0];
  byte* header = buffer;
  byte* body = buffer + sizes.header_size;

  // -- emit memory declaration ------------------------------------------------
  EmitUint8(&header, kDeclMemory);
  EmitUint8(&header, 16);  // min memory size
  EmitUint8(&header, 16);  // max memory size
  EmitUint8(&header, 0);   // memory export

  // -- emit globals -----------------------------------------------------------
  if (globals_.size() > 0) {
    EmitUint8(&header, kDeclGlobals);
    EmitVarInt(&header, globals_.size());

    for (auto global : globals_) {
      EmitUint32(&header, 0);  // name offset (none)
      EmitUint8(&header, WasmOpcodes::MemTypeCodeFor(global.first));
      EmitUint8(&header, global.second);  // exported flag
    }
  }

  // -- emit signatures --------------------------------------------------------
  if (signatures_.size() > 0) {
    EmitUint8(&header, kDeclSignatures);
    EmitVarInt(&header, signatures_.size());

    for (FunctionSig* sig : signatures_) {
      EmitUint8(&header, static_cast<byte>(sig->parameter_count()));
      if (sig->return_count() > 0) {
        EmitUint8(&header, WasmOpcodes::LocalTypeCodeFor(sig->GetReturn()));
      } else {
        EmitUint8(&header, kLocalVoid);
      }
      for (size_t j = 0; j < sig->parameter_count(); j++) {
        EmitUint8(&header, WasmOpcodes::LocalTypeCodeFor(sig->GetParam(j)));
      }
    }
  }

  // -- emit functions ---------------------------------------------------------
  if (functions_.size() > 0) {
    EmitUint8(&header, kDeclFunctions);
    EmitVarInt(&header, functions_.size());

    for (auto func : functions_) {
      func->Serialize(buffer, &header, &body);
    }
  }

  // -- emit data segments -----------------------------------------------------
  if (data_segments_.size() > 0) {
    EmitUint8(&header, kDeclDataSegments);
    EmitVarInt(&header, data_segments_.size());

    for (auto segment : data_segments_) {
      segment->Serialize(buffer, &header, &body);
    }
  }

  // -- emit function table ----------------------------------------------------
  if (indirect_functions_.size() > 0) {
    EmitUint8(&header, kDeclFunctionTable);
    EmitVarInt(&header, indirect_functions_.size());

    for (auto index : indirect_functions_) {
      EmitUint16(&header, index);
    }
  }

  // End marker closes the header region when a body region follows.
  if (sizes.body_size > 0) EmitUint8(&header, kDeclEnd);

  return new (zone) WasmModuleIndex(buffer, buffer + sizes.total());
}
+
+
// Encodes {result} as an unsigned LEB128 byte sequence: little-endian 7-bit
// groups, the high bit set on every byte except the last. Zero encodes to a
// single 0x00 byte.
std::vector<uint8_t> UnsignedLEB128From(uint32_t result) {
  std::vector<uint8_t> output;
  while (true) {
    uint8_t group = result & 0x7F;
    result >>= 7;
    if (result != 0) group |= 0x80;  // more groups follow
    output.push_back(group);
    if (result == 0) break;
  }
  return output;
}
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/wasm/encoder.h b/deps/v8/src/wasm/encoder.h
new file mode 100644
index 0000000000..f0fabe998a
--- /dev/null
+++ b/deps/v8/src/wasm/encoder.h
@@ -0,0 +1,157 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_ENCODER_H_
+#define V8_WASM_ENCODER_H_
+
+#include "src/signature.h"
+#include "src/zone-containers.h"
+
+#include "src/base/smart-pointers.h"
+
+#include "src/wasm/wasm-module.h"
+#include "src/wasm/wasm-opcodes.h"
+#include "src/wasm/wasm-result.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+class WasmModuleBuilder;
+
// Immutable, serialization-ready form of one function, produced by
// WasmFunctionBuilder::Build(). Reports how many header/body/name bytes it
// needs and writes them via Serialize().
class WasmFunctionEncoder : public ZoneObject {
 public:
  uint32_t HeaderSize() const;
  uint32_t BodySize() const;
  uint32_t NameSize() const;
  // Writes the function's header and body into the respective regions of
  // {buffer}, advancing both cursors.
  void Serialize(byte* buffer, byte** header, byte** body) const;

 private:
  WasmFunctionEncoder(Zone* zone, LocalType return_type, bool exported,
                      bool external);
  friend class WasmFunctionBuilder;
  uint16_t signature_index_;       // index into the module's signature table
  ZoneVector<LocalType> params_;
  uint16_t local_int32_count_;     // local counts, grouped by type
  uint16_t local_int64_count_;
  uint16_t local_float32_count_;
  uint16_t local_float64_count_;
  bool exported_;
  bool external_;                  // imported functions carry no body
  ZoneVector<uint8_t> body_;
  ZoneVector<char> name_;

  bool HasLocals() const {
    return (local_int32_count_ + local_int64_count_ + local_float32_count_ +
            local_float64_count_) > 0;
  }

  // A name is only emitted for exported functions.
  bool HasName() const { return exported_ && name_.size() > 0; }
};
+
// Mutable builder for a single function: collects signature, locals, flags
// and raw body bytes, then finalizes into a WasmFunctionEncoder via Build().
class WasmFunctionBuilder : public ZoneObject {
 public:
  uint16_t AddParam(LocalType type);
  uint16_t AddLocal(LocalType type);
  void ReturnType(LocalType type);
  void EmitCode(const byte* code, uint32_t code_size);
  // Variant that records which byte offsets within {code} hold local-variable
  // indices, so they can be renumbered when locals are laid out.
  void EmitCode(const byte* code, uint32_t code_size,
                const uint32_t* local_indices, uint32_t indices_size);
  void Emit(WasmOpcode opcode);
  void EmitWithU8(WasmOpcode opcode, const byte immediate);
  void EmitWithLocal(WasmOpcode opcode);
  // Emits a placeholder immediate and returns its offset for later patching
  // via EditImmediate().
  uint32_t EmitEditableImmediate(const byte immediate);
  void EditImmediate(uint32_t offset, const byte immediate);
  void Exported(uint8_t flag);
  void External(uint8_t flag);
  void SetName(const unsigned char* name, int name_length);
  WasmFunctionEncoder* Build(Zone* zone, WasmModuleBuilder* mb) const;

 private:
  explicit WasmFunctionBuilder(Zone* zone);  // created via WasmModuleBuilder
  friend class WasmModuleBuilder;
  LocalType return_type_;
  struct Type;
  ZoneVector<Type> locals_;           // params and locals, in declaration order
  uint8_t exported_;
  uint8_t external_;
  ZoneVector<uint8_t> body_;          // raw opcode bytes
  ZoneVector<uint32_t> local_indices_;  // body offsets that refer to locals
  ZoneVector<char> name_;
  uint16_t AddVar(LocalType type, bool param);
  void IndexVars(WasmFunctionEncoder* e, uint16_t* var_index) const;
};
+
// A data segment to be copied into linear memory at {dest} when the module
// is instantiated; owns a zone-backed copy of the payload bytes.
class WasmDataSegmentEncoder : public ZoneObject {
 public:
  WasmDataSegmentEncoder(Zone* zone, const byte* data, uint32_t size,
                         uint32_t dest);
  uint32_t HeaderSize() const;
  uint32_t BodySize() const;
  void Serialize(byte* buffer, byte** header, byte** body) const;

 private:
  ZoneVector<byte> data_;  // payload copied into the module body
  uint32_t dest_;          // destination address in linear memory
};
+
// Non-owning [begin, end) view over a fully serialized module buffer, as
// returned by WasmModuleWriter::WriteTo(); the bytes live in the zone.
class WasmModuleIndex : public ZoneObject {
 public:
  const byte* Begin() const { return begin_; }
  const byte* End() const { return end_; }

 private:
  friend class WasmModuleWriter;
  WasmModuleIndex(const byte* begin, const byte* end)
      : begin_(begin), end_(end) {}
  const byte* begin_;
  const byte* end_;
};
+
// Frozen snapshot of a WasmModuleBuilder; WriteTo() serializes it into a
// single zone-allocated byte buffer.
class WasmModuleWriter : public ZoneObject {
 public:
  WasmModuleIndex* WriteTo(Zone* zone) const;

 private:
  friend class WasmModuleBuilder;  // the only way to construct/populate one
  explicit WasmModuleWriter(Zone* zone);
  ZoneVector<WasmFunctionEncoder*> functions_;
  ZoneVector<WasmDataSegmentEncoder*> data_segments_;
  ZoneVector<FunctionSig*> signatures_;
  ZoneVector<uint16_t> indirect_functions_;
  // (type, exported) per global, in declaration order.
  ZoneVector<std::pair<MachineType, bool>> globals_;
};
+
// Top-level builder for an encoded WASM module: accumulates functions,
// globals, signatures, data segments and the indirect function table, then
// Build() produces a WasmModuleWriter snapshot.
class WasmModuleBuilder : public ZoneObject {
 public:
  explicit WasmModuleBuilder(Zone* zone);
  uint16_t AddFunction();
  uint32_t AddGlobal(MachineType type, bool exported);
  WasmFunctionBuilder* FunctionAt(size_t index);
  void AddDataSegment(WasmDataSegmentEncoder* data);
  // Interns structurally-equal signatures; returns the table index.
  uint16_t AddSignature(FunctionSig* sig);
  void AddIndirectFunction(uint16_t index);
  WasmModuleWriter* Build(Zone* zone);

 private:
  // Total order on signatures so structurally-equal ones map to one index.
  struct CompareFunctionSigs {
    int operator()(FunctionSig* a, FunctionSig* b) const;
  };
  typedef ZoneMap<FunctionSig*, uint16_t, CompareFunctionSigs> SignatureMap;

  Zone* zone_;
  ZoneVector<FunctionSig*> signatures_;
  ZoneVector<WasmFunctionBuilder*> functions_;
  ZoneVector<WasmDataSegmentEncoder*> data_segments_;
  ZoneVector<uint16_t> indirect_functions_;
  ZoneVector<std::pair<MachineType, bool>> globals_;
  SignatureMap signature_map_;  // dedup index into signatures_
};
+
+std::vector<uint8_t> UnsignedLEB128From(uint32_t result);
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_ENCODER_H_
diff --git a/deps/v8/src/wasm/module-decoder.cc b/deps/v8/src/wasm/module-decoder.cc
new file mode 100644
index 0000000000..24f39822f9
--- /dev/null
+++ b/deps/v8/src/wasm/module-decoder.cc
@@ -0,0 +1,547 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/macro-assembler.h"
+#include "src/objects.h"
+#include "src/v8.h"
+
+#include "src/wasm/decoder.h"
+#include "src/wasm/module-decoder.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+#if DEBUG
+#define TRACE(...) \
+ do { \
+ if (FLAG_trace_wasm_decoder) PrintF(__VA_ARGS__); \
+ } while (false)
+#else
+#define TRACE(...)
+#endif
+
+
+// The main logic for decoding the bytes of a module.
+class ModuleDecoder : public Decoder {
+ public:
+ ModuleDecoder(Zone* zone, const byte* module_start, const byte* module_end,
+ bool asm_js)
+ : Decoder(module_start, module_end), module_zone(zone), asm_js_(asm_js) {
+ result_.start = start_;
+ if (limit_ < start_) {
+ error(start_, "end is less than start");
+ limit_ = start_;
+ }
+ }
+
+ virtual void onFirstError() {
+ pc_ = limit_; // On error, terminate section decoding loop.
+ }
+
+ // Decodes an entire module.
+ ModuleResult DecodeModule(WasmModule* module, bool verify_functions = true) {
+ pc_ = start_;
+ module->module_start = start_;
+ module->module_end = limit_;
+ module->min_mem_size_log2 = 0;
+ module->max_mem_size_log2 = 0;
+ module->mem_export = false;
+ module->mem_external = false;
+ module->globals = new std::vector<WasmGlobal>();
+ module->signatures = new std::vector<FunctionSig*>();
+ module->functions = new std::vector<WasmFunction>();
+ module->data_segments = new std::vector<WasmDataSegment>();
+ module->function_table = new std::vector<uint16_t>();
+
+ bool sections[kMaxModuleSectionCode];
+ memset(sections, 0, sizeof(sections));
+
+ // Decode the module sections.
+ while (pc_ < limit_) {
+ TRACE("DecodeSection\n");
+ WasmSectionDeclCode section =
+ static_cast<WasmSectionDeclCode>(u8("section"));
+ // Each section should appear at most once.
+ if (section < kMaxModuleSectionCode) {
+ CheckForPreviousSection(sections, section, false);
+ sections[section] = true;
+ }
+
+ switch (section) {
+ case kDeclEnd:
+ // Terminate section decoding.
+ limit_ = pc_;
+ break;
+ case kDeclMemory:
+ module->min_mem_size_log2 = u8("min memory");
+ module->max_mem_size_log2 = u8("max memory");
+ module->mem_export = u8("export memory") != 0;
+ break;
+ case kDeclSignatures: {
+ int length;
+ uint32_t signatures_count = u32v(&length, "signatures count");
+ module->signatures->reserve(SafeReserve(signatures_count));
+ // Decode signatures.
+ for (uint32_t i = 0; i < signatures_count; i++) {
+ if (failed()) break;
+ TRACE("DecodeSignature[%d] module+%d\n", i,
+ static_cast<int>(pc_ - start_));
+ FunctionSig* s = sig(); // read function sig.
+ module->signatures->push_back(s);
+ }
+ break;
+ }
+ case kDeclFunctions: {
+ // Functions require a signature table first.
+ CheckForPreviousSection(sections, kDeclSignatures, true);
+ int length;
+ uint32_t functions_count = u32v(&length, "functions count");
+ module->functions->reserve(SafeReserve(functions_count));
+ // Set up module environment for verification.
+ ModuleEnv menv;
+ menv.module = module;
+ menv.globals_area = 0;
+ menv.mem_start = 0;
+ menv.mem_end = 0;
+ menv.function_code = nullptr;
+ menv.asm_js = asm_js_;
+ // Decode functions.
+ for (uint32_t i = 0; i < functions_count; i++) {
+ if (failed()) break;
+ TRACE("DecodeFunction[%d] module+%d\n", i,
+ static_cast<int>(pc_ - start_));
+
+ module->functions->push_back(
+ {nullptr, 0, 0, 0, 0, 0, 0, false, false});
+ WasmFunction* function = &module->functions->back();
+ DecodeFunctionInModule(module, function, false);
+ }
+ if (ok() && verify_functions) {
+ for (uint32_t i = 0; i < functions_count; i++) {
+ if (failed()) break;
+ WasmFunction* function = &module->functions->at(i);
+ if (!function->external) {
+ VerifyFunctionBody(i, &menv, function);
+ if (result_.failed())
+ error(result_.error_pc, result_.error_msg.get());
+ }
+ }
+ }
+ break;
+ }
+ case kDeclGlobals: {
+ int length;
+ uint32_t globals_count = u32v(&length, "globals count");
+ module->globals->reserve(SafeReserve(globals_count));
+ // Decode globals.
+ for (uint32_t i = 0; i < globals_count; i++) {
+ if (failed()) break;
+ TRACE("DecodeGlobal[%d] module+%d\n", i,
+ static_cast<int>(pc_ - start_));
+ module->globals->push_back({0, MachineType::Int32(), 0, false});
+ WasmGlobal* global = &module->globals->back();
+ DecodeGlobalInModule(global);
+ }
+ break;
+ }
+ case kDeclDataSegments: {
+ int length;
+ uint32_t data_segments_count = u32v(&length, "data segments count");
+ module->data_segments->reserve(SafeReserve(data_segments_count));
+ // Decode data segments.
+ for (uint32_t i = 0; i < data_segments_count; i++) {
+ if (failed()) break;
+ TRACE("DecodeDataSegment[%d] module+%d\n", i,
+ static_cast<int>(pc_ - start_));
+ module->data_segments->push_back({0, 0, 0});
+ WasmDataSegment* segment = &module->data_segments->back();
+ DecodeDataSegmentInModule(segment);
+ }
+ break;
+ }
+ case kDeclFunctionTable: {
+ // An indirect function table requires functions first.
+ CheckForPreviousSection(sections, kDeclFunctions, true);
+ int length;
+ uint32_t function_table_count = u32v(&length, "function table count");
+ module->function_table->reserve(SafeReserve(function_table_count));
+ // Decode function table.
+ for (uint32_t i = 0; i < function_table_count; i++) {
+ if (failed()) break;
+ TRACE("DecodeFunctionTable[%d] module+%d\n", i,
+ static_cast<int>(pc_ - start_));
+ uint16_t index = u16();
+ if (index >= module->functions->size()) {
+ error(pc_ - 2, "invalid function index");
+ break;
+ }
+ module->function_table->push_back(index);
+ }
+ break;
+ }
+ case kDeclWLL: {
+ // Reserved for experimentation by the Web Low-level Language project
+ // which is augmenting the binary encoding with source code meta
+ // information. This section does not affect the semantics of the code
+ // and can be ignored by the runtime. https://github.com/JSStats/wll
+ int length = 0;
+ uint32_t section_size = u32v(&length, "section size");
+ if (pc_ + section_size > limit_ || pc_ + section_size < pc_) {
+ error(pc_ - length, "invalid section size");
+ break;
+ }
+ pc_ += section_size;
+ break;
+ }
+ default:
+ error(pc_ - 1, nullptr, "unrecognized section 0x%02x", section);
+ break;
+ }
+ }
+
+ return toResult(module);
+ }
+
+ uint32_t SafeReserve(uint32_t count) {
+ // Avoid OOM by only reserving up to a certain size.
+ const uint32_t kMaxReserve = 20000;
+ return count < kMaxReserve ? count : kMaxReserve;
+ }
+
+ void CheckForPreviousSection(bool* sections, WasmSectionDeclCode section,
+ bool present) {
+ if (section >= kMaxModuleSectionCode) return;
+ if (sections[section] == present) return;
+ const char* name = "";
+ switch (section) {
+ case kDeclMemory:
+ name = "memory";
+ break;
+ case kDeclSignatures:
+ name = "signatures";
+ break;
+ case kDeclFunctions:
+ name = "function declaration";
+ break;
+ case kDeclGlobals:
+ name = "global variable";
+ break;
+ case kDeclDataSegments:
+ name = "data segment";
+ break;
+ case kDeclFunctionTable:
+ name = "function table";
+ break;
+ default:
+ name = "";
+ break;
+ }
+ if (present) {
+ error(pc_ - 1, nullptr, "required %s section missing", name);
+ } else {
+ error(pc_ - 1, nullptr, "%s section already present", name);
+ }
+ }
+
+ // Decodes a single anonymous function starting at {start_}.
+ FunctionResult DecodeSingleFunction(ModuleEnv* module_env,
+ WasmFunction* function) {
+ pc_ = start_;
+ function->sig = sig(); // read signature
+ function->name_offset = 0; // ---- name
+ function->code_start_offset = off(pc_ + 8); // ---- code start
+ function->code_end_offset = off(limit_); // ---- code end
+ function->local_int32_count = u16(); // read u16
+ function->local_int64_count = u16(); // read u16
+ function->local_float32_count = u16(); // read u16
+ function->local_float64_count = u16(); // read u16
+ function->exported = false; // ---- exported
+ function->external = false; // ---- external
+
+ if (ok()) VerifyFunctionBody(0, module_env, function);
+
+ FunctionResult result;
+ result.CopyFrom(result_); // Copy error code and location.
+ result.val = function;
+ return result;
+ }
+
+ // Decodes a single function signature at {start}.
+ FunctionSig* DecodeFunctionSignature(const byte* start) {
+ pc_ = start;
+ FunctionSig* result = sig();
+ return ok() ? result : nullptr;
+ }
+
+ private:
+ Zone* module_zone;
+ ModuleResult result_;
+ bool asm_js_;
+
+ uint32_t off(const byte* ptr) { return static_cast<uint32_t>(ptr - start_); }
+
+ // Decodes a single global entry inside a module starting at {pc_}.
+ void DecodeGlobalInModule(WasmGlobal* global) {
+ global->name_offset = string("global name");
+ global->type = mem_type();
+ global->offset = 0;
+ global->exported = u8("exported") != 0;
+ }
+
+ // Decodes a single function entry inside a module starting at {pc_}.
+ void DecodeFunctionInModule(WasmModule* module, WasmFunction* function,
+ bool verify_body = true) {
+ byte decl_bits = u8("function decl");
+
+ const byte* sigpos = pc_;
+ function->sig_index = u16("signature index");
+
+ if (function->sig_index >= module->signatures->size()) {
+ return error(sigpos, "invalid signature index");
+ } else {
+ function->sig = module->signatures->at(function->sig_index);
+ }
+
+ TRACE(" +%d <function attributes:%s%s%s%s%s>\n",
+ static_cast<int>(pc_ - start_),
+ decl_bits & kDeclFunctionName ? " name" : "",
+ decl_bits & kDeclFunctionImport ? " imported" : "",
+ decl_bits & kDeclFunctionLocals ? " locals" : "",
+ decl_bits & kDeclFunctionExport ? " exported" : "",
+ (decl_bits & kDeclFunctionImport) == 0 ? " body" : "");
+
+ if (decl_bits & kDeclFunctionName) {
+ function->name_offset = string("function name");
+ }
+
+ function->exported = decl_bits & kDeclFunctionExport;
+
+ // Imported functions have no locals or body.
+ if (decl_bits & kDeclFunctionImport) {
+ function->external = true;
+ return;
+ }
+
+ if (decl_bits & kDeclFunctionLocals) {
+ function->local_int32_count = u16("int32 count");
+ function->local_int64_count = u16("int64 count");
+ function->local_float32_count = u16("float32 count");
+ function->local_float64_count = u16("float64 count");
+ }
+
+ uint16_t size = u16("body size");
+ if (ok()) {
+ if ((pc_ + size) > limit_) {
+ return error(pc_, limit_,
+ "expected %d bytes for function body, fell off end", size);
+ }
+ function->code_start_offset = static_cast<uint32_t>(pc_ - start_);
+ function->code_end_offset = function->code_start_offset + size;
+ TRACE(" +%d %-20s: (%d bytes)\n", static_cast<int>(pc_ - start_),
+ "function body", size);
+ pc_ += size;
+ }
+ }
+
+ // Decodes a single data segment entry inside a module starting at {pc_}.
+ void DecodeDataSegmentInModule(WasmDataSegment* segment) {
+ segment->dest_addr =
+ u32("destination"); // TODO(titzer): check it's within the memory size.
+ segment->source_offset = offset("source offset");
+ segment->source_size =
+ u32("source size"); // TODO(titzer): check the size is reasonable.
+ segment->init = u8("init");
+ }
+
+ // Verifies the body (code) of a given function.
+ void VerifyFunctionBody(uint32_t func_num, ModuleEnv* menv,
+ WasmFunction* function) {
+ if (FLAG_trace_wasm_decode_time) {
+ // TODO(titzer): clean me up a bit.
+ OFStream os(stdout);
+ os << "Verifying WASM function:";
+ if (function->name_offset > 0) {
+ os << menv->module->GetName(function->name_offset);
+ }
+ os << std::endl;
+ }
+ FunctionEnv fenv;
+ fenv.module = menv;
+ fenv.sig = function->sig;
+ fenv.local_int32_count = function->local_int32_count;
+ fenv.local_int64_count = function->local_int64_count;
+ fenv.local_float32_count = function->local_float32_count;
+ fenv.local_float64_count = function->local_float64_count;
+ fenv.SumLocals();
+
+ TreeResult result =
+ VerifyWasmCode(&fenv, start_, start_ + function->code_start_offset,
+ start_ + function->code_end_offset);
+ if (result.failed()) {
+ // Wrap the error message from the function decoder.
+ std::ostringstream str;
+ str << "in function #" << func_num << ": ";
+ // TODO(titzer): add function name for the user?
+ str << result;
+ std::string strval = str.str();
+ const char* raw = strval.c_str();
+ size_t len = strlen(raw);
+ char* buffer = new char[len];
+ strncpy(buffer, raw, len);
+ buffer[len - 1] = 0;
+
+ // Copy error code and location.
+ result_.CopyFrom(result);
+ result_.error_msg.Reset(buffer);
+ }
+ }
+
+ // Reads a single 32-bit unsigned integer interpreted as an offset, checking
+ // the offset is within bounds and advances.
+ uint32_t offset(const char* name = nullptr) {
+ uint32_t offset = u32(name ? name : "offset");
+ if (offset > static_cast<uint32_t>(limit_ - start_)) {
+ error(pc_ - sizeof(uint32_t), "offset out of bounds of module");
+ }
+ return offset;
+ }
+
+ // Reads a single 32-bit unsigned integer interpreted as an offset into the
+ // data and validating the string there and advances.
+ uint32_t string(const char* name = nullptr) {
+ return offset(name ? name : "string"); // TODO(titzer): validate string
+ }
+
+ // Reads a single 8-bit integer, interpreting it as a local type.
+ LocalType local_type() {
+ byte val = u8("local type");
+ LocalTypeCode t = static_cast<LocalTypeCode>(val);
+ switch (t) {
+ case kLocalVoid:
+ return kAstStmt;
+ case kLocalI32:
+ return kAstI32;
+ case kLocalI64:
+ return kAstI64;
+ case kLocalF32:
+ return kAstF32;
+ case kLocalF64:
+ return kAstF64;
+ default:
+ error(pc_ - 1, "invalid local type");
+ return kAstStmt;
+ }
+ }
+
+ // Reads a single 8-bit integer, interpreting it as a memory type.
+ MachineType mem_type() {
+ byte val = u8("memory type");
+ MemTypeCode t = static_cast<MemTypeCode>(val);
+ switch (t) {
+ case kMemI8:
+ return MachineType::Int8();
+ case kMemU8:
+ return MachineType::Uint8();
+ case kMemI16:
+ return MachineType::Int16();
+ case kMemU16:
+ return MachineType::Uint16();
+ case kMemI32:
+ return MachineType::Int32();
+ case kMemU32:
+ return MachineType::Uint32();
+ case kMemI64:
+ return MachineType::Int64();
+ case kMemU64:
+ return MachineType::Uint64();
+ case kMemF32:
+ return MachineType::Float32();
+ case kMemF64:
+ return MachineType::Float64();
+ default:
+ error(pc_ - 1, "invalid memory type");
+ return MachineType::None();
+ }
+ }
+
+ // Parses an inline function signature.
+ FunctionSig* sig() {
+ byte count = u8("param count");
+ LocalType ret = local_type();
+ FunctionSig::Builder builder(module_zone, ret == kAstStmt ? 0 : 1, count);
+ if (ret != kAstStmt) builder.AddReturn(ret);
+
+ for (int i = 0; i < count; i++) {
+ LocalType param = local_type();
+ if (param == kAstStmt) error(pc_ - 1, "invalid void parameter type");
+ builder.AddParam(param);
+ }
+ return builder.Build();
+ }
+};
+
+
+// Helpers for nice error messages.
+class ModuleError : public ModuleResult {
+ public:
+ explicit ModuleError(const char* msg) {
+ error_code = kError;
+ size_t len = strlen(msg) + 1;
+ char* result = new char[len];
+ strncpy(result, msg, len);
+ result[len - 1] = 0;
+ error_msg.Reset(result);
+ }
+};
+
+
+// Helpers for nice error messages.
+class FunctionError : public FunctionResult {
+ public:
+ explicit FunctionError(const char* msg) {
+ error_code = kError;
+ size_t len = strlen(msg) + 1;
+ char* result = new char[len];
+ strncpy(result, msg, len);
+ result[len - 1] = 0;
+ error_msg.Reset(result);
+ }
+};
+
+
+ModuleResult DecodeWasmModule(Isolate* isolate, Zone* zone,
+ const byte* module_start, const byte* module_end,
+ bool verify_functions, bool asm_js) {
+ size_t size = module_end - module_start;
+ if (module_start > module_end) return ModuleError("start > end");
+ if (size >= kMaxModuleSize) return ModuleError("size > maximum module size");
+ WasmModule* module = new WasmModule();
+ ModuleDecoder decoder(zone, module_start, module_end, asm_js);
+ return decoder.DecodeModule(module, verify_functions);
+}
+
+
// Test-only entry point: decodes one function signature from [start, end),
// allocating it in {zone}; returns nullptr on malformed input.
FunctionSig* DecodeWasmSignatureForTesting(Zone* zone, const byte* start,
                                           const byte* end) {
  ModuleDecoder decoder(zone, start, end, false);
  return decoder.DecodeFunctionSignature(start);
}
+
+
+FunctionResult DecodeWasmFunction(Isolate* isolate, Zone* zone,
+ ModuleEnv* module_env,
+ const byte* function_start,
+ const byte* function_end) {
+ size_t size = function_end - function_start;
+ if (function_start > function_end) return FunctionError("start > end");
+ if (size > kMaxFunctionSize)
+ return FunctionError("size > maximum function size");
+ WasmFunction* function = new WasmFunction();
+ ModuleDecoder decoder(zone, function_start, function_end, false);
+ return decoder.DecodeSingleFunction(module_env, function);
+}
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/wasm/module-decoder.h b/deps/v8/src/wasm/module-decoder.h
new file mode 100644
index 0000000000..3f469a500e
--- /dev/null
+++ b/deps/v8/src/wasm/module-decoder.h
@@ -0,0 +1,33 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_MODULE_DECODER_H_
+#define V8_WASM_MODULE_DECODER_H_
+
+#include "src/wasm/ast-decoder.h"
+#include "src/wasm/wasm-module.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
// Decodes the bytes of a WASM module between {module_start} and {module_end}.
// {verify_functions} additionally type-checks each function body; {asm_js}
// selects asm.js-compatible verification semantics.
ModuleResult DecodeWasmModule(Isolate* isolate, Zone* zone,
                              const byte* module_start, const byte* module_end,
                              bool verify_functions, bool asm_js);

// Exposed for testing. Decodes a single function signature, allocating it
// in the given zone. Returns {nullptr} upon failure.
FunctionSig* DecodeWasmSignatureForTesting(Zone* zone, const byte* start,
                                           const byte* end);

// Decodes the bytes of a WASM function between
// {function_start} and {function_end}.
FunctionResult DecodeWasmFunction(Isolate* isolate, Zone* zone, ModuleEnv* env,
                                  const byte* function_start,
                                  const byte* function_end);
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_MODULE_DECODER_H_
diff --git a/deps/v8/src/wasm/wasm-js.cc b/deps/v8/src/wasm/wasm-js.cc
new file mode 100644
index 0000000000..80d8bdb236
--- /dev/null
+++ b/deps/v8/src/wasm/wasm-js.cc
@@ -0,0 +1,345 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/api.h"
+#include "src/api-natives.h"
+#include "src/assert-scope.h"
+#include "src/ast/ast.h"
+#include "src/ast/scopes.h"
+#include "src/factory.h"
+#include "src/handles.h"
+#include "src/isolate.h"
+#include "src/objects.h"
+#include "src/parsing/parser.h"
+#include "src/typing-asm.h"
+
+#include "src/wasm/asm-wasm-builder.h"
+#include "src/wasm/encoder.h"
+#include "src/wasm/module-decoder.h"
+#include "src/wasm/wasm-js.h"
+#include "src/wasm/wasm-module.h"
+#include "src/wasm/wasm-result.h"
+
+typedef uint8_t byte;
+
+using v8::internal::wasm::ErrorThrower;
+
+namespace v8 {
+
+namespace {
+struct RawBuffer {
+ const byte* start;
+ const byte* end;
+ size_t size() { return static_cast<size_t>(end - start); }
+};
+
+
+RawBuffer GetRawBufferArgument(
+ ErrorThrower& thrower, const v8::FunctionCallbackInfo<v8::Value>& args) {
+ if (args.Length() < 1 || !args[0]->IsArrayBuffer()) {
+ thrower.Error("Argument 0 must be an array buffer");
+ return {nullptr, nullptr};
+ }
+ Local<ArrayBuffer> buffer = Local<ArrayBuffer>::Cast(args[0]);
+ ArrayBuffer::Contents contents = buffer->GetContents();
+
+ // TODO(titzer): allow offsets into buffers, views, etc.
+
+ const byte* start = reinterpret_cast<const byte*>(contents.Data());
+ const byte* end = start + contents.ByteLength();
+
+ if (start == nullptr) {
+ thrower.Error("ArrayBuffer argument is empty");
+ }
+ return {start, end};
+}
+
+
+void VerifyModule(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ HandleScope scope(args.GetIsolate());
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(args.GetIsolate());
+ ErrorThrower thrower(isolate, "WASM.verifyModule()");
+
+ RawBuffer buffer = GetRawBufferArgument(thrower, args);
+ if (thrower.error()) return;
+
+ i::Zone zone;
+ internal::wasm::ModuleResult result = internal::wasm::DecodeWasmModule(
+ isolate, &zone, buffer.start, buffer.end, true, false);
+
+ if (result.failed()) {
+ thrower.Failed("", result);
+ }
+
+ if (result.val) delete result.val;
+}
+
+
+void VerifyFunction(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ HandleScope scope(args.GetIsolate());
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(args.GetIsolate());
+ ErrorThrower thrower(isolate, "WASM.verifyFunction()");
+
+ RawBuffer buffer = GetRawBufferArgument(thrower, args);
+ if (thrower.error()) return;
+
+ internal::wasm::FunctionResult result;
+ {
+ // Verification of a single function shouldn't allocate.
+ i::DisallowHeapAllocation no_allocation;
+ i::Zone zone;
+ result = internal::wasm::DecodeWasmFunction(isolate, &zone, nullptr,
+ buffer.start, buffer.end);
+ }
+
+ if (result.failed()) {
+ thrower.Failed("", result);
+ }
+
+ if (result.val) delete result.val;
+}
+
+
+void CompileRun(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ HandleScope scope(args.GetIsolate());
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(args.GetIsolate());
+ ErrorThrower thrower(isolate, "WASM.compileRun()");
+
+ RawBuffer buffer = GetRawBufferArgument(thrower, args);
+ if (thrower.error()) return;
+
+ // Decode and pre-verify the functions before compiling and running.
+ i::Zone zone;
+ internal::wasm::ModuleResult result = internal::wasm::DecodeWasmModule(
+ isolate, &zone, buffer.start, buffer.end, true, false);
+
+ if (result.failed()) {
+ thrower.Failed("", result);
+ } else {
+ // Success. Compile and run!
+ int32_t retval = i::wasm::CompileAndRunWasmModule(isolate, result.val);
+ args.GetReturnValue().Set(retval);
+ }
+
+ if (result.val) delete result.val;
+}
+
+
+v8::internal::wasm::WasmModuleIndex* TranslateAsmModule(i::ParseInfo* info) {
+ info->set_global();
+ info->set_lazy(false);
+ info->set_allow_lazy_parsing(false);
+ info->set_toplevel(true);
+
+ if (!i::Compiler::ParseAndAnalyze(info)) {
+ return nullptr;
+ }
+
+ info->set_literal(
+ info->scope()->declarations()->at(0)->AsFunctionDeclaration()->fun());
+
+ v8::internal::AsmTyper typer(info->isolate(), info->zone(), *(info->script()),
+ info->literal());
+ if (!typer.Validate()) {
+ return nullptr;
+ }
+
+ auto module = v8::internal::wasm::AsmWasmBuilder(
+ info->isolate(), info->zone(), info->literal())
+ .Run();
+ return module;
+}
+
+
+void AsmCompileRun(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ HandleScope scope(args.GetIsolate());
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(args.GetIsolate());
+ ErrorThrower thrower(isolate, "WASM.asmCompileRun()");
+
+ if (args.Length() != 1) {
+ thrower.Error("Invalid argument count");
+ return;
+ }
+ if (!args[0]->IsString()) {
+ thrower.Error("Invalid argument count");
+ return;
+ }
+
+ i::Factory* factory = isolate->factory();
+ i::Zone zone;
+ Local<String> source = Local<String>::Cast(args[0]);
+ i::Handle<i::Script> script = factory->NewScript(Utils::OpenHandle(*source));
+ i::ParseInfo info(&zone, script);
+
+ auto module = TranslateAsmModule(&info);
+ if (module == nullptr) {
+ thrower.Error("Asm.js validation failed");
+ return;
+ }
+
+ int32_t result = v8::internal::wasm::CompileAndRunWasmModule(
+ isolate, module->Begin(), module->End(), true);
+ args.GetReturnValue().Set(result);
+}
+
+
+// TODO(aseemgarg): deal with arraybuffer and foreign functions
+void InstantiateModuleFromAsm(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ HandleScope scope(args.GetIsolate());
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(args.GetIsolate());
+ ErrorThrower thrower(isolate, "WASM.instantiateModuleFromAsm()");
+
+ if (args.Length() != 1) {
+ thrower.Error("Invalid argument count");
+ return;
+ }
+ if (!args[0]->IsString()) {
+ thrower.Error("Invalid argument count");
+ return;
+ }
+
+ i::Factory* factory = isolate->factory();
+ i::Zone zone;
+ Local<String> source = Local<String>::Cast(args[0]);
+ i::Handle<i::Script> script = factory->NewScript(Utils::OpenHandle(*source));
+ i::ParseInfo info(&zone, script);
+
+ auto module = TranslateAsmModule(&info);
+ if (module == nullptr) {
+ thrower.Error("Asm.js validation failed");
+ return;
+ }
+
+ i::Handle<i::JSArrayBuffer> memory = i::Handle<i::JSArrayBuffer>::null();
+ internal::wasm::ModuleResult result = internal::wasm::DecodeWasmModule(
+ isolate, &zone, module->Begin(), module->End(), false, false);
+
+ if (result.failed()) {
+ thrower.Failed("", result);
+ } else {
+ // Success. Instantiate the module and return the object.
+ i::Handle<i::JSObject> ffi = i::Handle<i::JSObject>::null();
+
+ i::MaybeHandle<i::JSObject> object =
+ result.val->Instantiate(isolate, ffi, memory);
+
+ if (!object.is_null()) {
+ args.GetReturnValue().Set(v8::Utils::ToLocal(object.ToHandleChecked()));
+ }
+ }
+
+ if (result.val) delete result.val;
+}
+
+
+void InstantiateModule(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ HandleScope scope(args.GetIsolate());
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(args.GetIsolate());
+ ErrorThrower thrower(isolate, "WASM.instantiateModule()");
+
+ RawBuffer buffer = GetRawBufferArgument(thrower, args);
+ if (buffer.start == nullptr) return;
+
+ i::Handle<i::JSArrayBuffer> memory = i::Handle<i::JSArrayBuffer>::null();
+ if (args.Length() > 2 && args[2]->IsArrayBuffer()) {
+ Local<Object> obj = Local<Object>::Cast(args[2]);
+ i::Handle<i::Object> mem_obj = v8::Utils::OpenHandle(*obj);
+ memory = i::Handle<i::JSArrayBuffer>(i::JSArrayBuffer::cast(*mem_obj));
+ }
+
+ // Decode but avoid a redundant pass over function bodies for verification.
+ // Verification will happen during compilation.
+ i::Zone zone;
+ internal::wasm::ModuleResult result = internal::wasm::DecodeWasmModule(
+ isolate, &zone, buffer.start, buffer.end, false, false);
+
+ if (result.failed()) {
+ thrower.Failed("", result);
+ } else {
+ // Success. Instantiate the module and return the object.
+ i::Handle<i::JSObject> ffi = i::Handle<i::JSObject>::null();
+ if (args.Length() > 1 && args[1]->IsObject()) {
+ Local<Object> obj = Local<Object>::Cast(args[1]);
+ ffi = i::Handle<i::JSObject>::cast(v8::Utils::OpenHandle(*obj));
+ }
+
+ i::MaybeHandle<i::JSObject> object =
+ result.val->Instantiate(isolate, ffi, memory);
+
+ if (!object.is_null()) {
+ args.GetReturnValue().Set(v8::Utils::ToLocal(object.ToHandleChecked()));
+ }
+ }
+
+ if (result.val) delete result.val;
+}
+} // namespace
+
+
+// TODO(titzer): we use the API to create the function template because the
+// internal guts are too ugly to replicate here.
+static i::Handle<i::FunctionTemplateInfo> NewTemplate(i::Isolate* i_isolate,
+ FunctionCallback func) {
+ Isolate* isolate = reinterpret_cast<Isolate*>(i_isolate);
+ Local<FunctionTemplate> local = FunctionTemplate::New(isolate, func);
+ return v8::Utils::OpenHandle(*local);
+}
+
+
+namespace internal {
+static Handle<String> v8_str(Isolate* isolate, const char* str) {
+ return isolate->factory()->NewStringFromAsciiChecked(str);
+}
+
+
+static void InstallFunc(Isolate* isolate, Handle<JSObject> object,
+ const char* str, FunctionCallback func) {
+ Handle<String> name = v8_str(isolate, str);
+ Handle<FunctionTemplateInfo> temp = NewTemplate(isolate, func);
+ Handle<JSFunction> function =
+ ApiNatives::InstantiateFunction(temp).ToHandleChecked();
+ PropertyAttributes attributes =
+ static_cast<PropertyAttributes>(DONT_DELETE | READ_ONLY);
+ JSObject::AddProperty(object, name, function, attributes);
+}
+
+
+void WasmJs::Install(Isolate* isolate, Handle<JSGlobalObject> global) {
+ // Setup wasm function map.
+ Handle<Context> context(global->native_context(), isolate);
+ InstallWasmFunctionMap(isolate, context);
+
+ // Bind the WASM object.
+ Factory* factory = isolate->factory();
+ Handle<String> name = v8_str(isolate, "_WASMEXP_");
+ Handle<JSFunction> cons = factory->NewFunction(name);
+ JSFunction::SetInstancePrototype(
+ cons, Handle<Object>(context->initial_object_prototype(), isolate));
+ cons->shared()->set_instance_class_name(*name);
+ Handle<JSObject> wasm_object = factory->NewJSObject(cons, TENURED);
+ PropertyAttributes attributes = static_cast<PropertyAttributes>(DONT_ENUM);
+ JSObject::AddProperty(global, name, wasm_object, attributes);
+
+ // Install functions on the WASM object.
+ InstallFunc(isolate, wasm_object, "instantiateModule", InstantiateModule);
+ InstallFunc(isolate, wasm_object, "verifyModule", VerifyModule);
+ InstallFunc(isolate, wasm_object, "verifyFunction", VerifyFunction);
+ InstallFunc(isolate, wasm_object, "compileRun", CompileRun);
+ InstallFunc(isolate, wasm_object, "asmCompileRun", AsmCompileRun);
+ InstallFunc(isolate, wasm_object, "instantiateModuleFromAsm",
+ InstantiateModuleFromAsm);
+}
+
+
+void WasmJs::InstallWasmFunctionMap(Isolate* isolate, Handle<Context> context) {
+ if (!context->get(Context::WASM_FUNCTION_MAP_INDEX)->IsMap()) {
+ Handle<Map> wasm_function_map = isolate->factory()->NewMap(
+ JS_FUNCTION_TYPE, JSFunction::kSize + kPointerSize);
+ wasm_function_map->set_is_callable();
+ context->set_wasm_function_map(*wasm_function_map);
+ }
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/wasm/wasm-js.h b/deps/v8/src/wasm/wasm-js.h
new file mode 100644
index 0000000000..e7305aa164
--- /dev/null
+++ b/deps/v8/src/wasm/wasm-js.h
@@ -0,0 +1,27 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_JS_H_
+#define V8_WASM_JS_H_
+
+#ifndef V8_SHARED
+#include "src/allocation.h"
+#include "src/hashmap.h"
+#else
+#include "include/v8.h"
+#include "src/base/compiler-specific.h"
+#endif // !V8_SHARED
+
+namespace v8 {
+namespace internal {
+// Exposes a WASM API to JavaScript through the V8 API.
+class WasmJs {
+ public:
+ static void Install(Isolate* isolate, Handle<JSGlobalObject> global_object);
+ static void InstallWasmFunctionMap(Isolate* isolate, Handle<Context> context);
+};
+
+} // namespace internal
+} // namespace v8
+#endif
diff --git a/deps/v8/src/wasm/wasm-macro-gen.h b/deps/v8/src/wasm/wasm-macro-gen.h
new file mode 100644
index 0000000000..470804a73d
--- /dev/null
+++ b/deps/v8/src/wasm/wasm-macro-gen.h
@@ -0,0 +1,265 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_MACRO_GEN_H_
+#define V8_WASM_MACRO_GEN_H_
+
+#include "src/wasm/wasm-opcodes.h"
+
+// Convenience macros for building Wasm bytecode directly into a byte array.
+
+//------------------------------------------------------------------------------
+// Control.
+//------------------------------------------------------------------------------
+#define WASM_NOP kExprNop
+
+#define WASM_BLOCK(count, ...) kExprBlock, static_cast<byte>(count), __VA_ARGS__
+#define WASM_INFINITE_LOOP kExprLoop, 1, kExprBr, 0, kExprNop
+#define WASM_LOOP(count, ...) kExprLoop, static_cast<byte>(count), __VA_ARGS__
+#define WASM_IF(cond, tstmt) kExprIf, cond, tstmt
+#define WASM_IF_ELSE(cond, tstmt, fstmt) kExprIfElse, cond, tstmt, fstmt
+#define WASM_SELECT(cond, tval, fval) kExprSelect, cond, tval, fval
+#define WASM_BR(depth) kExprBr, static_cast<byte>(depth), kExprNop
+#define WASM_BR_IF(depth, cond) \
+ kExprBrIf, static_cast<byte>(depth), cond, kExprNop
+#define WASM_BRV(depth, val) kExprBr, static_cast<byte>(depth), val
+#define WASM_BRV_IF(depth, cond, val) \
+ kExprBrIf, static_cast<byte>(depth), cond, val
+#define WASM_BREAK(depth) kExprBr, static_cast<byte>(depth + 1), kExprNop
+#define WASM_CONTINUE(depth) kExprBr, static_cast<byte>(depth), kExprNop
+#define WASM_BREAKV(depth, val) kExprBr, static_cast<byte>(depth + 1), val
+#define WASM_RETURN0 kExprReturn
+#define WASM_RETURN(...) kExprReturn, __VA_ARGS__
+#define WASM_UNREACHABLE kExprUnreachable
+
+#define WASM_TABLESWITCH_OP(case_count, table_count, ...) \
+ kExprTableSwitch, static_cast<byte>(case_count), \
+ static_cast<byte>(case_count >> 8), static_cast<byte>(table_count), \
+ static_cast<byte>(table_count >> 8), __VA_ARGS__
+
+#define WASM_TABLESWITCH_BODY0(key) key
+
+#define WASM_TABLESWITCH_BODY(key, ...) key, __VA_ARGS__
+
+#define WASM_CASE(x) static_cast<byte>(x), static_cast<byte>(x >> 8)
+#define WASM_CASE_BR(x) static_cast<byte>(x), static_cast<byte>(0x80 | (x) >> 8)
+
+//------------------------------------------------------------------------------
+// Misc expressions.
+//------------------------------------------------------------------------------
+#define WASM_ID(...) __VA_ARGS__
+#define WASM_ZERO kExprI8Const, 0
+#define WASM_ONE kExprI8Const, 1
+#define WASM_I8(val) kExprI8Const, static_cast<byte>(val)
+#define WASM_I32(val) \
+ kExprI32Const, static_cast<byte>(val), static_cast<byte>(val >> 8), \
+ static_cast<byte>(val >> 16), static_cast<byte>(val >> 24)
+#define WASM_I64(val) \
+ kExprI64Const, static_cast<byte>(static_cast<uint64_t>(val)), \
+ static_cast<byte>(static_cast<uint64_t>(val) >> 8), \
+ static_cast<byte>(static_cast<uint64_t>(val) >> 16), \
+ static_cast<byte>(static_cast<uint64_t>(val) >> 24), \
+ static_cast<byte>(static_cast<uint64_t>(val) >> 32), \
+ static_cast<byte>(static_cast<uint64_t>(val) >> 40), \
+ static_cast<byte>(static_cast<uint64_t>(val) >> 48), \
+ static_cast<byte>(static_cast<uint64_t>(val) >> 56)
+#define WASM_F32(val) \
+ kExprF32Const, \
+ static_cast<byte>(bit_cast<int32_t>(static_cast<float>(val))), \
+ static_cast<byte>(bit_cast<uint32_t>(static_cast<float>(val)) >> 8), \
+ static_cast<byte>(bit_cast<uint32_t>(static_cast<float>(val)) >> 16), \
+ static_cast<byte>(bit_cast<uint32_t>(static_cast<float>(val)) >> 24)
+#define WASM_F64(val) \
+ kExprF64Const, static_cast<byte>(bit_cast<uint64_t>(val)), \
+ static_cast<byte>(bit_cast<uint64_t>(val) >> 8), \
+ static_cast<byte>(bit_cast<uint64_t>(val) >> 16), \
+ static_cast<byte>(bit_cast<uint64_t>(val) >> 24), \
+ static_cast<byte>(bit_cast<uint64_t>(val) >> 32), \
+ static_cast<byte>(bit_cast<uint64_t>(val) >> 40), \
+ static_cast<byte>(bit_cast<uint64_t>(val) >> 48), \
+ static_cast<byte>(bit_cast<uint64_t>(val) >> 56)
+#define WASM_GET_LOCAL(index) kExprGetLocal, static_cast<byte>(index)
+#define WASM_SET_LOCAL(index, val) kExprSetLocal, static_cast<byte>(index), val
+#define WASM_LOAD_GLOBAL(index) kExprLoadGlobal, static_cast<byte>(index)
+#define WASM_STORE_GLOBAL(index, val) \
+ kExprStoreGlobal, static_cast<byte>(index), val
+#define WASM_LOAD_MEM(type, index) \
+ static_cast<byte>( \
+ v8::internal::wasm::WasmOpcodes::LoadStoreOpcodeOf(type, false)), \
+ v8::internal::wasm::WasmOpcodes::LoadStoreAccessOf(false), index
+#define WASM_STORE_MEM(type, index, val) \
+ static_cast<byte>( \
+ v8::internal::wasm::WasmOpcodes::LoadStoreOpcodeOf(type, true)), \
+ v8::internal::wasm::WasmOpcodes::LoadStoreAccessOf(false), index, val
+#define WASM_LOAD_MEM_OFFSET(type, offset, index) \
+ static_cast<byte>( \
+ v8::internal::wasm::WasmOpcodes::LoadStoreOpcodeOf(type, false)), \
+ v8::internal::wasm::WasmOpcodes::LoadStoreAccessOf(true), \
+ static_cast<byte>(offset), index
+#define WASM_STORE_MEM_OFFSET(type, offset, index, val) \
+ static_cast<byte>( \
+ v8::internal::wasm::WasmOpcodes::LoadStoreOpcodeOf(type, true)), \
+ v8::internal::wasm::WasmOpcodes::LoadStoreAccessOf(true), \
+ static_cast<byte>(offset), index, val
+#define WASM_CALL_FUNCTION(index, ...) \
+ kExprCallFunction, static_cast<byte>(index), __VA_ARGS__
+#define WASM_CALL_INDIRECT(index, func, ...) \
+ kExprCallIndirect, static_cast<byte>(index), func, __VA_ARGS__
+#define WASM_CALL_FUNCTION0(index) kExprCallFunction, static_cast<byte>(index)
+#define WASM_CALL_INDIRECT0(index, func) \
+ kExprCallIndirect, static_cast<byte>(index), func
+#define WASM_NOT(x) kExprBoolNot, x
+
+//------------------------------------------------------------------------------
+// Constructs that are composed of multiple bytecodes.
+//------------------------------------------------------------------------------
+#define WASM_WHILE(x, y) kExprLoop, 1, kExprIf, x, kExprBr, 0, y
+#define WASM_INC_LOCAL(index) \
+ kExprSetLocal, static_cast<byte>(index), kExprI32Add, kExprGetLocal, \
+ static_cast<byte>(index), kExprI8Const, 1
+#define WASM_INC_LOCAL_BY(index, count) \
+ kExprSetLocal, static_cast<byte>(index), kExprI32Add, kExprGetLocal, \
+ static_cast<byte>(index), kExprI8Const, static_cast<int8_t>(count)
+
+#define WASM_UNOP(opcode, x) static_cast<byte>(opcode), x
+#define WASM_BINOP(opcode, x, y) static_cast<byte>(opcode), x, y
+
+//------------------------------------------------------------------------------
+// Int32 operations
+//------------------------------------------------------------------------------
+#define WASM_I32_ADD(x, y) kExprI32Add, x, y
+#define WASM_I32_SUB(x, y) kExprI32Sub, x, y
+#define WASM_I32_MUL(x, y) kExprI32Mul, x, y
+#define WASM_I32_DIVS(x, y) kExprI32DivS, x, y
+#define WASM_I32_DIVU(x, y) kExprI32DivU, x, y
+#define WASM_I32_REMS(x, y) kExprI32RemS, x, y
+#define WASM_I32_REMU(x, y) kExprI32RemU, x, y
+#define WASM_I32_AND(x, y) kExprI32And, x, y
+#define WASM_I32_IOR(x, y) kExprI32Ior, x, y
+#define WASM_I32_XOR(x, y) kExprI32Xor, x, y
+#define WASM_I32_SHL(x, y) kExprI32Shl, x, y
+#define WASM_I32_SHR(x, y) kExprI32ShrU, x, y
+#define WASM_I32_SAR(x, y) kExprI32ShrS, x, y
+#define WASM_I32_EQ(x, y) kExprI32Eq, x, y
+#define WASM_I32_NE(x, y) kExprI32Ne, x, y
+#define WASM_I32_LTS(x, y) kExprI32LtS, x, y
+#define WASM_I32_LES(x, y) kExprI32LeS, x, y
+#define WASM_I32_LTU(x, y) kExprI32LtU, x, y
+#define WASM_I32_LEU(x, y) kExprI32LeU, x, y
+#define WASM_I32_GTS(x, y) kExprI32GtS, x, y
+#define WASM_I32_GES(x, y) kExprI32GeS, x, y
+#define WASM_I32_GTU(x, y) kExprI32GtU, x, y
+#define WASM_I32_GEU(x, y) kExprI32GeU, x, y
+#define WASM_I32_CLZ(x) kExprI32Clz, x
+#define WASM_I32_CTZ(x) kExprI32Ctz, x
+#define WASM_I32_POPCNT(x) kExprI32Popcnt, x
+
+//------------------------------------------------------------------------------
+// Int64 operations
+//------------------------------------------------------------------------------
+#define WASM_I64_ADD(x, y) kExprI64Add, x, y
+#define WASM_I64_SUB(x, y) kExprI64Sub, x, y
+#define WASM_I64_MUL(x, y) kExprI64Mul, x, y
+#define WASM_I64_DIVS(x, y) kExprI64DivS, x, y
+#define WASM_I64_DIVU(x, y) kExprI64DivU, x, y
+#define WASM_I64_REMS(x, y) kExprI64RemS, x, y
+#define WASM_I64_REMU(x, y) kExprI64RemU, x, y
+#define WASM_I64_AND(x, y) kExprI64And, x, y
+#define WASM_I64_IOR(x, y) kExprI64Ior, x, y
+#define WASM_I64_XOR(x, y) kExprI64Xor, x, y
+#define WASM_I64_SHL(x, y) kExprI64Shl, x, y
+#define WASM_I64_SHR(x, y) kExprI64ShrU, x, y
+#define WASM_I64_SAR(x, y) kExprI64ShrS, x, y
+#define WASM_I64_EQ(x, y) kExprI64Eq, x, y
+#define WASM_I64_NE(x, y) kExprI64Ne, x, y
+#define WASM_I64_LTS(x, y) kExprI64LtS, x, y
+#define WASM_I64_LES(x, y) kExprI64LeS, x, y
+#define WASM_I64_LTU(x, y) kExprI64LtU, x, y
+#define WASM_I64_LEU(x, y) kExprI64LeU, x, y
+#define WASM_I64_GTS(x, y) kExprI64GtS, x, y
+#define WASM_I64_GES(x, y) kExprI64GeS, x, y
+#define WASM_I64_GTU(x, y) kExprI64GtU, x, y
+#define WASM_I64_GEU(x, y) kExprI64GeU, x, y
+#define WASM_I64_CLZ(x) kExprI64Clz, x
+#define WASM_I64_CTZ(x) kExprI64Ctz, x
+#define WASM_I64_POPCNT(x) kExprI64Popcnt, x
+
+//------------------------------------------------------------------------------
+// Float32 operations
+//------------------------------------------------------------------------------
+#define WASM_F32_ADD(x, y) kExprF32Add, x, y
+#define WASM_F32_SUB(x, y) kExprF32Sub, x, y
+#define WASM_F32_MUL(x, y) kExprF32Mul, x, y
+#define WASM_F32_DIV(x, y) kExprF32Div, x, y
+#define WASM_F32_MIN(x, y) kExprF32Min, x, y
+#define WASM_F32_MAX(x, y) kExprF32Max, x, y
+#define WASM_F32_ABS(x) kExprF32Abs, x
+#define WASM_F32_NEG(x) kExprF32Neg, x
+#define WASM_F32_COPYSIGN(x, y) kExprF32CopySign, x, y
+#define WASM_F32_CEIL(x) kExprF32Ceil, x
+#define WASM_F32_FLOOR(x) kExprF32Floor, x
+#define WASM_F32_TRUNC(x) kExprF32Trunc, x
+#define WASM_F32_NEARESTINT(x) kExprF32NearestInt, x
+#define WASM_F32_SQRT(x) kExprF32Sqrt, x
+#define WASM_F32_EQ(x, y) kExprF32Eq, x, y
+#define WASM_F32_NE(x, y) kExprF32Ne, x, y
+#define WASM_F32_LT(x, y) kExprF32Lt, x, y
+#define WASM_F32_LE(x, y) kExprF32Le, x, y
+#define WASM_F32_GT(x, y) kExprF32Gt, x, y
+#define WASM_F32_GE(x, y) kExprF32Ge, x, y
+
+//------------------------------------------------------------------------------
+// Float64 operations
+//------------------------------------------------------------------------------
+#define WASM_F64_ADD(x, y) kExprF64Add, x, y
+#define WASM_F64_SUB(x, y) kExprF64Sub, x, y
+#define WASM_F64_MUL(x, y) kExprF64Mul, x, y
+#define WASM_F64_DIV(x, y) kExprF64Div, x, y
+#define WASM_F64_MIN(x, y) kExprF64Min, x, y
+#define WASM_F64_MAX(x, y) kExprF64Max, x, y
+#define WASM_F64_ABS(x) kExprF64Abs, x
+#define WASM_F64_NEG(x) kExprF64Neg, x
+#define WASM_F64_COPYSIGN(x, y) kExprF64CopySign, x, y
+#define WASM_F64_CEIL(x) kExprF64Ceil, x
+#define WASM_F64_FLOOR(x) kExprF64Floor, x
+#define WASM_F64_TRUNC(x) kExprF64Trunc, x
+#define WASM_F64_NEARESTINT(x) kExprF64NearestInt, x
+#define WASM_F64_SQRT(x) kExprF64Sqrt, x
+#define WASM_F64_EQ(x, y) kExprF64Eq, x, y
+#define WASM_F64_NE(x, y) kExprF64Ne, x, y
+#define WASM_F64_LT(x, y) kExprF64Lt, x, y
+#define WASM_F64_LE(x, y) kExprF64Le, x, y
+#define WASM_F64_GT(x, y) kExprF64Gt, x, y
+#define WASM_F64_GE(x, y) kExprF64Ge, x, y
+
+//------------------------------------------------------------------------------
+// Type conversions.
+//------------------------------------------------------------------------------
+#define WASM_I32_SCONVERT_F32(x) kExprI32SConvertF32, x
+#define WASM_I32_SCONVERT_F64(x) kExprI32SConvertF64, x
+#define WASM_I32_UCONVERT_F32(x) kExprI32UConvertF32, x
+#define WASM_I32_UCONVERT_F64(x) kExprI32UConvertF64, x
+#define WASM_I32_CONVERT_I64(x) kExprI32ConvertI64, x
+#define WASM_I64_SCONVERT_F32(x) kExprI64SConvertF32, x
+#define WASM_I64_SCONVERT_F64(x) kExprI64SConvertF64, x
+#define WASM_I64_UCONVERT_F32(x) kExprI64UConvertF32, x
+#define WASM_I64_UCONVERT_F64(x) kExprI64UConvertF64, x
+#define WASM_I64_SCONVERT_I32(x) kExprI64SConvertI32, x
+#define WASM_I64_UCONVERT_I32(x) kExprI64UConvertI32, x
+#define WASM_F32_SCONVERT_I32(x) kExprF32SConvertI32, x
+#define WASM_F32_UCONVERT_I32(x) kExprF32UConvertI32, x
+#define WASM_F32_SCONVERT_I64(x) kExprF32SConvertI64, x
+#define WASM_F32_UCONVERT_I64(x) kExprF32UConvertI64, x
+#define WASM_F32_CONVERT_F64(x) kExprF32ConvertF64, x
+#define WASM_F32_REINTERPRET_I32(x) kExprF32ReinterpretI32, x
+#define WASM_F64_SCONVERT_I32(x) kExprF64SConvertI32, x
+#define WASM_F64_UCONVERT_I32(x) kExprF64UConvertI32, x
+#define WASM_F64_SCONVERT_I64(x) kExprF64SConvertI64, x
+#define WASM_F64_UCONVERT_I64(x) kExprF64UConvertI64, x
+#define WASM_F64_CONVERT_F32(x) kExprF64ConvertF32, x
+#define WASM_F64_REINTERPRET_I64(x) kExprF64ReinterpretI64, x
+#define WASM_I32_REINTERPRET_F32(x) kExprI32ReinterpretF32, x
+#define WASM_I64_REINTERPRET_F64(x) kExprI64ReinterpretF64, x
+
+#endif // V8_WASM_MACRO_GEN_H_
diff --git a/deps/v8/src/wasm/wasm-module.cc b/deps/v8/src/wasm/wasm-module.cc
new file mode 100644
index 0000000000..fd2428080b
--- /dev/null
+++ b/deps/v8/src/wasm/wasm-module.cc
@@ -0,0 +1,511 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/macro-assembler.h"
+#include "src/objects.h"
+#include "src/v8.h"
+
+#include "src/simulator.h"
+
+#include "src/wasm/ast-decoder.h"
+#include "src/wasm/module-decoder.h"
+#include "src/wasm/wasm-module.h"
+#include "src/wasm/wasm-result.h"
+
+#include "src/compiler/wasm-compiler.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+std::ostream& operator<<(std::ostream& os, const WasmModule& module) {
+ os << "WASM module with ";
+ os << (1 << module.min_mem_size_log2) << " min mem";
+ os << (1 << module.max_mem_size_log2) << " max mem";
+ if (module.functions) os << module.functions->size() << " functions";
+ if (module.globals) os << module.globals->size() << " globals";
+ if (module.data_segments) os << module.data_segments->size() << " data segments";
+ return os;
+}
+
+
+std::ostream& operator<<(std::ostream& os, const WasmFunction& function) {
+ os << "WASM function with signature ";
+
+ // TODO(titzer): factor out rendering of signatures.
+ if (function.sig->return_count() == 0) os << "v";
+ for (size_t i = 0; i < function.sig->return_count(); i++) {
+ os << WasmOpcodes::ShortNameOf(function.sig->GetReturn(i));
+ }
+ os << "_";
+ if (function.sig->parameter_count() == 0) os << "v";
+ for (size_t i = 0; i < function.sig->parameter_count(); i++) {
+ os << WasmOpcodes::ShortNameOf(function.sig->GetParam(i));
+ }
+ os << " locals: ";
+ if (function.local_int32_count)
+ os << function.local_int32_count << " int32s ";
+ if (function.local_int64_count)
+ os << function.local_int64_count << " int64s ";
+ if (function.local_float32_count)
+ os << function.local_float32_count << " float32s ";
+ if (function.local_float64_count)
+ os << function.local_float64_count << " float64s ";
+
+ os << " code bytes: "
+ << (function.code_end_offset - function.code_start_offset);
+ return os;
+}
+
+
+// A helper class for compiling multiple wasm functions that offers
+// placeholder code objects for calling functions that are not yet compiled.
+class WasmLinker {
+ public:
+ WasmLinker(Isolate* isolate, size_t size)
+ : isolate_(isolate), placeholder_code_(size), function_code_(size) {}
+
+ // Get the code object for a function, allocating a placeholder if it has
+ // not yet been compiled.
+ Handle<Code> GetFunctionCode(uint32_t index) {
+ DCHECK(index < function_code_.size());
+ if (function_code_[index].is_null()) {
+ // Create a placeholder code object and encode the corresponding index in
+ // the {constant_pool_offset} field of the code object.
+ // TODO(titzer): placeholder code objects are somewhat dangerous.
+ Handle<Code> self(nullptr, isolate_);
+ byte buffer[] = {0, 0, 0, 0, 0, 0, 0, 0}; // fake instructions.
+ CodeDesc desc = {buffer, 8, 8, 0, 0, nullptr};
+ Handle<Code> code = isolate_->factory()->NewCode(
+ desc, Code::KindField::encode(Code::WASM_FUNCTION), self);
+ code->set_constant_pool_offset(index + kPlaceholderMarker);
+ placeholder_code_[index] = code;
+ function_code_[index] = code;
+ }
+ return function_code_[index];
+ }
+
+ void Finish(uint32_t index, Handle<Code> code) {
+ DCHECK(index < function_code_.size());
+ function_code_[index] = code;
+ }
+
+ void Link(Handle<FixedArray> function_table,
+ std::vector<uint16_t>* functions) {
+ for (size_t i = 0; i < function_code_.size(); i++) {
+ LinkFunction(function_code_[i]);
+ }
+ if (functions && !function_table.is_null()) {
+ int table_size = static_cast<int>(functions->size());
+ DCHECK_EQ(function_table->length(), table_size * 2);
+ for (int i = 0; i < table_size; i++) {
+ function_table->set(i + table_size, *function_code_[functions->at(i)]);
+ }
+ }
+ }
+
+ private:
+ static const int kPlaceholderMarker = 1000000000;
+
+ Isolate* isolate_;
+ std::vector<Handle<Code>> placeholder_code_;
+ std::vector<Handle<Code>> function_code_;
+
+ void LinkFunction(Handle<Code> code) {
+ bool modified = false;
+ int mode_mask = RelocInfo::kCodeTargetMask;
+ AllowDeferredHandleDereference embedding_raw_address;
+ for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
+ RelocInfo::Mode mode = it.rinfo()->rmode();
+ if (RelocInfo::IsCodeTarget(mode)) {
+ Code* target =
+ Code::GetCodeFromTargetAddress(it.rinfo()->target_address());
+ if (target->kind() == Code::WASM_FUNCTION &&
+ target->constant_pool_offset() >= kPlaceholderMarker) {
+ // Patch direct calls to placeholder code objects.
+ uint32_t index = target->constant_pool_offset() - kPlaceholderMarker;
+ CHECK(index < function_code_.size());
+ Handle<Code> new_target = function_code_[index];
+ if (target != *new_target) {
+ CHECK_EQ(*placeholder_code_[index], target);
+ it.rinfo()->set_target_address(new_target->instruction_start(),
+ SKIP_WRITE_BARRIER,
+ SKIP_ICACHE_FLUSH);
+ modified = true;
+ }
+ }
+ }
+ }
+ if (modified) {
+ Assembler::FlushICache(isolate_, code->instruction_start(),
+ code->instruction_size());
+ }
+ }
+};
+
+namespace {
+// Internal constants for the layout of the module object.
+const int kWasmModuleInternalFieldCount = 4;
+const int kWasmModuleFunctionTable = 0;
+const int kWasmModuleCodeTable = 1;
+const int kWasmMemArrayBuffer = 2;
+const int kWasmGlobalsArrayBuffer = 3;
+
+
+size_t AllocateGlobalsOffsets(std::vector<WasmGlobal>* globals) {
+ uint32_t offset = 0;
+ if (!globals) return 0;
+ for (WasmGlobal& global : *globals) {
+ byte size = WasmOpcodes::MemSize(global.type);
+ offset = (offset + size - 1) & ~(size - 1); // align
+ global.offset = offset;
+ offset += size;
+ }
+ return offset;
+}
+
+
+void LoadDataSegments(WasmModule* module, byte* mem_addr, size_t mem_size) {
+ for (const WasmDataSegment& segment : *module->data_segments) {
+ if (!segment.init) continue;
+ CHECK_LT(segment.dest_addr, mem_size);
+ CHECK_LE(segment.source_size, mem_size);
+ CHECK_LE(segment.dest_addr + segment.source_size, mem_size);
+ byte* addr = mem_addr + segment.dest_addr;
+ memcpy(addr, module->module_start + segment.source_offset,
+ segment.source_size);
+ }
+}
+
+
+Handle<FixedArray> BuildFunctionTable(Isolate* isolate, WasmModule* module) {
+ if (!module->function_table || module->function_table->size() == 0) {
+ return Handle<FixedArray>::null();
+ }
+ int table_size = static_cast<int>(module->function_table->size());
+ Handle<FixedArray> fixed = isolate->factory()->NewFixedArray(2 * table_size);
+ for (int i = 0; i < table_size; i++) {
+ WasmFunction* function =
+ &module->functions->at(module->function_table->at(i));
+ fixed->set(i, Smi::FromInt(function->sig_index));
+ }
+ return fixed;
+}
+
+
+Handle<JSArrayBuffer> NewArrayBuffer(Isolate* isolate, int size,
+ byte** backing_store) {
+ void* memory = isolate->array_buffer_allocator()->Allocate(size);
+ if (!memory) return Handle<JSArrayBuffer>::null();
+ *backing_store = reinterpret_cast<byte*>(memory);
+
+#if DEBUG
+ // Double check the API allocator actually zero-initialized the memory.
+ for (int i = 0; i < size; i++) {
+ DCHECK_EQ(0, (*backing_store)[i]);
+ }
+#endif
+
+ Handle<JSArrayBuffer> buffer = isolate->factory()->NewJSArrayBuffer();
+ JSArrayBuffer::Setup(buffer, isolate, false, memory, size);
+ buffer->set_is_neuterable(false);
+ return buffer;
+}
+} // namespace
+
+
+WasmModule::WasmModule()
+ : globals(nullptr),
+ signatures(nullptr),
+ functions(nullptr),
+ data_segments(nullptr),
+ function_table(nullptr) {}
+
+
+WasmModule::~WasmModule() {
+ if (globals) delete globals;
+ if (signatures) delete signatures;
+ if (functions) delete functions;
+ if (data_segments) delete data_segments;
+ if (function_table) delete function_table;
+}
+
+
+// Instantiates a wasm module as a JSObject.
+// * allocates a backing store of {mem_size} bytes.
+// * installs a named property "memory" for that buffer if exported
+// * installs named properties on the object for exported functions
+// * compiles wasm code to machine code
+MaybeHandle<JSObject> WasmModule::Instantiate(Isolate* isolate,
+ Handle<JSObject> ffi,
+ Handle<JSArrayBuffer> memory) {
+ this->shared_isolate = isolate; // TODO(titzer): have a real shared isolate.
+ ErrorThrower thrower(isolate, "WasmModule::Instantiate()");
+
+ Factory* factory = isolate->factory();
+ // Memory is bigger than maximum supported size.
+ if (memory.is_null() && min_mem_size_log2 > kMaxMemSize) {
+ thrower.Error("Out of memory: wasm memory too large");
+ return MaybeHandle<JSObject>();
+ }
+
+ Handle<Map> map = factory->NewMap(
+ JS_OBJECT_TYPE,
+ JSObject::kHeaderSize + kWasmModuleInternalFieldCount * kPointerSize);
+
+ //-------------------------------------------------------------------------
+ // Allocate the module object.
+ //-------------------------------------------------------------------------
+ Handle<JSObject> module = factory->NewJSObjectFromMap(map, TENURED);
+ Handle<FixedArray> code_table =
+ factory->NewFixedArray(static_cast<int>(functions->size()), TENURED);
+
+ //-------------------------------------------------------------------------
+ // Allocate the linear memory.
+ //-------------------------------------------------------------------------
+ uint32_t mem_size = 1 << min_mem_size_log2;
+ byte* mem_addr = nullptr;
+ Handle<JSArrayBuffer> mem_buffer;
+ if (!memory.is_null()) {
+ memory->set_is_neuterable(false);
+ mem_addr = reinterpret_cast<byte*>(memory->backing_store());
+ mem_size = memory->byte_length()->Number();
+ mem_buffer = memory;
+ } else {
+ mem_buffer = NewArrayBuffer(isolate, mem_size, &mem_addr);
+ if (!mem_addr) {
+ // Not enough space for backing store of memory
+ thrower.Error("Out of memory: wasm memory");
+ return MaybeHandle<JSObject>();
+ }
+ }
+
+ // Load initialized data segments.
+ LoadDataSegments(this, mem_addr, mem_size);
+
+ module->SetInternalField(kWasmMemArrayBuffer, *mem_buffer);
+
+ if (mem_export) {
+ // Export the memory as a named property.
+ Handle<String> name = factory->InternalizeUtf8String("memory");
+ JSObject::AddProperty(module, name, mem_buffer, READ_ONLY);
+ }
+
+ //-------------------------------------------------------------------------
+ // Allocate the globals area if necessary.
+ //-------------------------------------------------------------------------
+ size_t globals_size = AllocateGlobalsOffsets(globals);
+ byte* globals_addr = nullptr;
+ if (globals_size > 0) {
+ Handle<JSArrayBuffer> globals_buffer =
+        NewArrayBuffer(isolate, static_cast<int>(globals_size), &globals_addr);
+ if (!globals_addr) {
+ // Not enough space for backing store of globals.
+ thrower.Error("Out of memory: wasm globals");
+ return MaybeHandle<JSObject>();
+ }
+
+ module->SetInternalField(kWasmGlobalsArrayBuffer, *globals_buffer);
+ } else {
+ module->SetInternalField(kWasmGlobalsArrayBuffer, Smi::FromInt(0));
+ }
+
+ //-------------------------------------------------------------------------
+ // Compile all functions in the module.
+ //-------------------------------------------------------------------------
+ int index = 0;
+ WasmLinker linker(isolate, functions->size());
+ ModuleEnv module_env;
+ module_env.module = this;
+ module_env.mem_start = reinterpret_cast<uintptr_t>(mem_addr);
+ module_env.mem_end = reinterpret_cast<uintptr_t>(mem_addr) + mem_size;
+ module_env.globals_area = reinterpret_cast<uintptr_t>(globals_addr);
+ module_env.linker = &linker;
+ module_env.function_code = nullptr;
+ module_env.function_table = BuildFunctionTable(isolate, this);
+ module_env.memory = memory;
+ module_env.context = isolate->native_context();
+ module_env.asm_js = false;
+
+ // First pass: compile each function and initialize the code table.
+ for (const WasmFunction& func : *functions) {
+ if (thrower.error()) break;
+
+ const char* cstr = GetName(func.name_offset);
+ Handle<String> name = factory->InternalizeUtf8String(cstr);
+ Handle<Code> code = Handle<Code>::null();
+ Handle<JSFunction> function = Handle<JSFunction>::null();
+ if (func.external) {
+ // Lookup external function in FFI object.
+ if (!ffi.is_null()) {
+ MaybeHandle<Object> result = Object::GetProperty(ffi, name);
+ if (!result.is_null()) {
+ Handle<Object> obj = result.ToHandleChecked();
+ if (obj->IsJSFunction()) {
+ function = Handle<JSFunction>::cast(obj);
+ code = compiler::CompileWasmToJSWrapper(isolate, &module_env,
+ function, index);
+ } else {
+ thrower.Error("FFI function #%d:%s is not a JSFunction.", index,
+ cstr);
+ return MaybeHandle<JSObject>();
+ }
+ } else {
+ thrower.Error("FFI function #%d:%s not found.", index, cstr);
+ return MaybeHandle<JSObject>();
+ }
+ } else {
+ thrower.Error("FFI table is not an object.");
+ return MaybeHandle<JSObject>();
+ }
+ } else {
+ // Compile the function.
+ code = compiler::CompileWasmFunction(thrower, isolate, &module_env, func,
+ index);
+ if (code.is_null()) {
+ thrower.Error("Compilation of #%d:%s failed.", index, cstr);
+ return MaybeHandle<JSObject>();
+ }
+ if (func.exported) {
+ function = compiler::CompileJSToWasmWrapper(isolate, &module_env, name,
+ code, module, index);
+ }
+ }
+ if (!code.is_null()) {
+ // Install the code into the linker table.
+ linker.Finish(index, code);
+ code_table->set(index, *code);
+ }
+ if (func.exported) {
+ // Exported functions are installed as read-only properties on the module.
+ JSObject::AddProperty(module, name, function, READ_ONLY);
+ }
+ index++;
+ }
+
+ // Second pass: patch all direct call sites.
+ linker.Link(module_env.function_table, this->function_table);
+
+ module->SetInternalField(kWasmModuleFunctionTable, Smi::FromInt(0));
+ module->SetInternalField(kWasmModuleCodeTable, *code_table);
+ return module;
+}
+
+
+Handle<Code> ModuleEnv::GetFunctionCode(uint32_t index) {
+ DCHECK(IsValidFunction(index));
+ if (linker) return linker->GetFunctionCode(index);
+ if (function_code) return function_code->at(index);
+ return Handle<Code>::null();
+}
+
+
+compiler::CallDescriptor* ModuleEnv::GetCallDescriptor(Zone* zone,
+ uint32_t index) {
+ DCHECK(IsValidFunction(index));
+ // Always make a direct call to whatever is in the table at that location.
+ // A wrapper will be generated for FFI calls.
+ WasmFunction* function = &module->functions->at(index);
+ return GetWasmCallDescriptor(zone, function->sig);
+}
+
+
+int32_t CompileAndRunWasmModule(Isolate* isolate, const byte* module_start,
+ const byte* module_end, bool asm_js) {
+ HandleScope scope(isolate);
+ Zone zone;
+ // Decode the module, but don't verify function bodies, since we'll
+ // be compiling them anyway.
+ ModuleResult result =
+ DecodeWasmModule(isolate, &zone, module_start, module_end, false, false);
+ if (result.failed()) {
+ // Module verification failed. throw.
+ std::ostringstream str;
+ str << "WASM.compileRun() failed: " << result;
+ isolate->Throw(
+ *isolate->factory()->NewStringFromAsciiChecked(str.str().c_str()));
+ return -1;
+ }
+
+ int32_t retval = CompileAndRunWasmModule(isolate, result.val);
+ delete result.val;
+ return retval;
+}
+
+
+int32_t CompileAndRunWasmModule(Isolate* isolate, WasmModule* module) {
+ ErrorThrower thrower(isolate, "CompileAndRunWasmModule");
+
+ // Allocate temporary linear memory and globals.
+ size_t mem_size = 1 << module->min_mem_size_log2;
+ size_t globals_size = AllocateGlobalsOffsets(module->globals);
+
+ base::SmartArrayPointer<byte> mem_addr(new byte[mem_size]);
+ base::SmartArrayPointer<byte> globals_addr(new byte[globals_size]);
+
+ memset(mem_addr.get(), 0, mem_size);
+ memset(globals_addr.get(), 0, globals_size);
+
+ // Create module environment.
+ WasmLinker linker(isolate, module->functions->size());
+ ModuleEnv module_env;
+ module_env.module = module;
+ module_env.mem_start = reinterpret_cast<uintptr_t>(mem_addr.get());
+ module_env.mem_end = reinterpret_cast<uintptr_t>(mem_addr.get()) + mem_size;
+ module_env.globals_area = reinterpret_cast<uintptr_t>(globals_addr.get());
+ module_env.linker = &linker;
+ module_env.function_code = nullptr;
+ module_env.function_table = BuildFunctionTable(isolate, module);
+ module_env.asm_js = false;
+
+ // Load data segments.
+ // TODO(titzer): throw instead of crashing if segments don't fit in memory?
+ LoadDataSegments(module, mem_addr.get(), mem_size);
+
+ // Compile all functions.
+ Handle<Code> main_code = Handle<Code>::null(); // record last code.
+ int index = 0;
+ for (const WasmFunction& func : *module->functions) {
+ if (!func.external) {
+ // Compile the function and install it in the code table.
+ Handle<Code> code = compiler::CompileWasmFunction(
+ thrower, isolate, &module_env, func, index);
+ if (!code.is_null()) {
+ if (func.exported) main_code = code;
+ linker.Finish(index, code);
+ }
+ if (thrower.error()) return -1;
+ }
+ index++;
+ }
+
+ if (!main_code.is_null()) {
+ linker.Link(module_env.function_table, module->function_table);
+#if USE_SIMULATOR && V8_TARGET_ARCH_ARM64
+ // Run the main code on arm64 simulator.
+ Simulator* simulator = Simulator::current(isolate);
+ Simulator::CallArgument args[] = {Simulator::CallArgument(0),
+ Simulator::CallArgument::End()};
+ return static_cast<int32_t>(simulator->CallInt64(main_code->entry(), args));
+#elif USE_SIMULATOR
+ // Run the main code on simulator.
+ Simulator* simulator = Simulator::current(isolate);
+ return static_cast<int32_t>(
+ simulator->Call(main_code->entry(), 4, 0, 0, 0, 0));
+#else
+ // Run the main code as raw machine code.
+ int32_t (*raw_func)() = reinterpret_cast<int32_t (*)()>(
+ reinterpret_cast<uintptr_t>(main_code->entry()));
+ return raw_func();
+#endif
+ } else {
+ // No main code was found.
+ isolate->Throw(*isolate->factory()->NewStringFromStaticChars(
+ "WASM.compileRun() failed: no valid main code produced."));
+ }
+ return -1;
+}
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/wasm/wasm-module.h b/deps/v8/src/wasm/wasm-module.h
new file mode 100644
index 0000000000..5e2ba58a44
--- /dev/null
+++ b/deps/v8/src/wasm/wasm-module.h
@@ -0,0 +1,192 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_MODULE_H_
+#define V8_WASM_MODULE_H_
+
+#include "src/wasm/wasm-opcodes.h"
+#include "src/wasm/wasm-result.h"
+
+#include "src/api.h"
+#include "src/handles.h"
+
+namespace v8 {
+namespace internal {
+
+namespace compiler {
+class CallDescriptor;
+}
+
+namespace wasm {
+const size_t kMaxModuleSize = 1024 * 1024 * 1024;
+const size_t kMaxFunctionSize = 128 * 1024;
+const size_t kMaxStringSize = 256;
+
+enum WasmSectionDeclCode {
+ kDeclMemory = 0x00,
+ kDeclSignatures = 0x01,
+ kDeclFunctions = 0x02,
+ kDeclGlobals = 0x03,
+ kDeclDataSegments = 0x04,
+ kDeclFunctionTable = 0x05,
+ kDeclWLL = 0x11,
+ kDeclEnd = 0x06,
+};
+
+static const int kMaxModuleSectionCode = 6;
+
+enum WasmFunctionDeclBit {
+ kDeclFunctionName = 0x01,
+ kDeclFunctionImport = 0x02,
+ kDeclFunctionLocals = 0x04,
+ kDeclFunctionExport = 0x08
+};
+
+// Constants for fixed-size elements within a module.
+static const size_t kDeclMemorySize = 3;
+static const size_t kDeclGlobalSize = 6;
+static const size_t kDeclDataSegmentSize = 13;
+
+// Static representation of a wasm function.
+struct WasmFunction {
+ FunctionSig* sig; // signature of the function.
+ uint16_t sig_index; // index into the signature table.
+ uint32_t name_offset; // offset in the module bytes of the name, if any.
+ uint32_t code_start_offset; // offset in the module bytes of code start.
+ uint32_t code_end_offset; // offset in the module bytes of code end.
+ uint16_t local_int32_count; // number of int32 local variables.
+ uint16_t local_int64_count; // number of int64 local variables.
+ uint16_t local_float32_count; // number of float32 local variables.
+ uint16_t local_float64_count; // number of float64 local variables.
+ bool exported; // true if this function is exported.
+ bool external; // true if this function is externally supplied.
+};
+
+struct ModuleEnv; // forward declaration of decoder interface.
+
+// Static representation of a wasm global variable.
+struct WasmGlobal {
+ uint32_t name_offset; // offset in the module bytes of the name, if any.
+ MachineType type; // type of the global.
+ uint32_t offset; // offset from beginning of globals area.
+ bool exported; // true if this global is exported.
+};
+
+// Static representation of a wasm data segment.
+struct WasmDataSegment {
+ uint32_t dest_addr; // destination memory address of the data.
+ uint32_t source_offset; // start offset in the module bytes.
+ uint32_t source_size; // end offset in the module bytes.
+ bool init; // true if loaded upon instantiation.
+};
+
+// Static representation of a module.
+struct WasmModule {
+ static const uint8_t kMinMemSize = 12; // Minimum memory size = 4kb
+ static const uint8_t kMaxMemSize = 30; // Maximum memory size = 1gb
+
+ Isolate* shared_isolate; // isolate for storing shared code.
+ const byte* module_start; // starting address for the module bytes.
+ const byte* module_end; // end address for the module bytes.
+ uint8_t min_mem_size_log2; // minimum size of the memory (log base 2).
+ uint8_t max_mem_size_log2; // maximum size of the memory (log base 2).
+ bool mem_export; // true if the memory is exported.
+ bool mem_external; // true if the memory is external.
+
+ std::vector<WasmGlobal>* globals; // globals in this module.
+ std::vector<FunctionSig*>* signatures; // signatures in this module.
+ std::vector<WasmFunction>* functions; // functions in this module.
+ std::vector<WasmDataSegment>* data_segments; // data segments in this module.
+ std::vector<uint16_t>* function_table; // function table.
+
+ WasmModule();
+ ~WasmModule();
+
+ // Get a pointer to a string stored in the module bytes representing a name.
+ const char* GetName(uint32_t offset) {
+ CHECK(BoundsCheck(offset, offset + 1));
+ if (offset == 0) return "<?>"; // no name.
+ return reinterpret_cast<const char*>(module_start + offset);
+ }
+
+ // Checks the given offset range is contained within the module bytes.
+ bool BoundsCheck(uint32_t start, uint32_t end) {
+ size_t size = module_end - module_start;
+ return start < size && end < size;
+ }
+
+ // Creates a new instantiation of the module in the given isolate.
+ MaybeHandle<JSObject> Instantiate(Isolate* isolate, Handle<JSObject> ffi,
+ Handle<JSArrayBuffer> memory);
+};
+
+// forward declaration.
+class WasmLinker;
+
+// Interface provided to the decoder/graph builder which contains only
+// minimal information about the globals, functions, and function tables.
+struct ModuleEnv {
+ uintptr_t globals_area; // address of the globals area.
+ uintptr_t mem_start; // address of the start of linear memory.
+ uintptr_t mem_end; // address of the end of linear memory.
+
+ WasmModule* module;
+ WasmLinker* linker;
+ std::vector<Handle<Code>>* function_code;
+ Handle<FixedArray> function_table;
+ Handle<JSArrayBuffer> memory;
+ Handle<Context> context;
+ bool asm_js; // true if the module originated from asm.js.
+
+ bool IsValidGlobal(uint32_t index) {
+ return module && index < module->globals->size();
+ }
+ bool IsValidFunction(uint32_t index) {
+ return module && index < module->functions->size();
+ }
+ bool IsValidSignature(uint32_t index) {
+ return module && index < module->signatures->size();
+ }
+ MachineType GetGlobalType(uint32_t index) {
+ DCHECK(IsValidGlobal(index));
+ return module->globals->at(index).type;
+ }
+ FunctionSig* GetFunctionSignature(uint32_t index) {
+ DCHECK(IsValidFunction(index));
+ return module->functions->at(index).sig;
+ }
+ FunctionSig* GetSignature(uint32_t index) {
+ DCHECK(IsValidSignature(index));
+ return module->signatures->at(index);
+ }
+ size_t FunctionTableSize() {
+ return module ? module->function_table->size() : 0;
+ }
+
+ Handle<Code> GetFunctionCode(uint32_t index);
+ Handle<FixedArray> GetFunctionTable();
+
+ compiler::CallDescriptor* GetWasmCallDescriptor(Zone* zone, FunctionSig* sig);
+ compiler::CallDescriptor* GetCallDescriptor(Zone* zone, uint32_t index);
+};
+
+std::ostream& operator<<(std::ostream& os, const WasmModule& module);
+std::ostream& operator<<(std::ostream& os, const WasmFunction& function);
+
+typedef Result<WasmModule*> ModuleResult;
+typedef Result<WasmFunction*> FunctionResult;
+
+// For testing. Decode, verify, and run the last exported function in the
+// given encoded module.
+int32_t CompileAndRunWasmModule(Isolate* isolate, const byte* module_start,
+ const byte* module_end, bool asm_js = false);
+
+// For testing. Decode, verify, and run the last exported function in the
+// given decoded module.
+int32_t CompileAndRunWasmModule(Isolate* isolate, WasmModule* module);
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_MODULE_H_
diff --git a/deps/v8/src/wasm/wasm-opcodes.cc b/deps/v8/src/wasm/wasm-opcodes.cc
new file mode 100644
index 0000000000..25eef034d7
--- /dev/null
+++ b/deps/v8/src/wasm/wasm-opcodes.cc
@@ -0,0 +1,133 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/wasm/wasm-opcodes.h"
+#include "src/signature.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+typedef Signature<LocalType> FunctionSig;
+
+const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
+ switch (opcode) {
+#define DECLARE_NAME_CASE(name, opcode, sig) \
+ case kExpr##name: \
+ return "Expr" #name;
+ FOREACH_OPCODE(DECLARE_NAME_CASE)
+#undef DECLARE_NAME_CASE
+ default:
+ break;
+ }
+ return "Unknown";
+}
+
+
+#define DECLARE_SIG_ENUM(name, ...) kSigEnum_##name,
+
+
+enum WasmOpcodeSig { FOREACH_SIGNATURE(DECLARE_SIG_ENUM) };
+
+
+// TODO(titzer): not static-initializer safe. Wrap in LazyInstance.
+#define DECLARE_SIG(name, ...) \
+ static LocalType kTypes_##name[] = {__VA_ARGS__}; \
+ static const FunctionSig kSig_##name( \
+ 1, static_cast<int>(arraysize(kTypes_##name)) - 1, kTypes_##name);
+
+FOREACH_SIGNATURE(DECLARE_SIG)
+
+#define DECLARE_SIG_ENTRY(name, ...) &kSig_##name,
+
+static const FunctionSig* kSimpleExprSigs[] = {
+ nullptr, FOREACH_SIGNATURE(DECLARE_SIG_ENTRY)};
+
+static byte kSimpleExprSigTable[256];
+
+
+// Initialize the signature table.
+static void InitSigTable() {
+#define SET_SIG_TABLE(name, opcode, sig) \
+ kSimpleExprSigTable[opcode] = static_cast<int>(kSigEnum_##sig) + 1;
+ FOREACH_SIMPLE_OPCODE(SET_SIG_TABLE);
+#undef SET_SIG_TABLE
+}
+
+
+FunctionSig* WasmOpcodes::Signature(WasmOpcode opcode) {
+ // TODO(titzer): use LazyInstance to make this thread safe.
+ if (kSimpleExprSigTable[kExprI32Add] == 0) InitSigTable();
+ return const_cast<FunctionSig*>(
+ kSimpleExprSigs[kSimpleExprSigTable[static_cast<byte>(opcode)]]);
+}
+
+
+// TODO(titzer): pull WASM_64 up to a common header.
+#if !V8_TARGET_ARCH_32_BIT || V8_TARGET_ARCH_X64
+#define WASM_64 1
+#else
+#define WASM_64 0
+#endif
+
+
+bool WasmOpcodes::IsSupported(WasmOpcode opcode) {
+#if !WASM_64
+ switch (opcode) {
+ // Opcodes not supported on 32-bit platforms.
+ case kExprI64Add:
+ case kExprI64Sub:
+ case kExprI64Mul:
+ case kExprI64DivS:
+ case kExprI64DivU:
+ case kExprI64RemS:
+ case kExprI64RemU:
+ case kExprI64And:
+ case kExprI64Ior:
+ case kExprI64Xor:
+ case kExprI64Shl:
+ case kExprI64ShrU:
+ case kExprI64ShrS:
+ case kExprI64Eq:
+ case kExprI64Ne:
+ case kExprI64LtS:
+ case kExprI64LeS:
+ case kExprI64LtU:
+ case kExprI64LeU:
+ case kExprI64GtS:
+ case kExprI64GeS:
+ case kExprI64GtU:
+ case kExprI64GeU:
+
+ case kExprI32ConvertI64:
+ case kExprI64SConvertI32:
+ case kExprI64UConvertI32:
+
+ case kExprF64ReinterpretI64:
+ case kExprI64ReinterpretF64:
+
+ case kExprI64Clz:
+ case kExprI64Ctz:
+ case kExprI64Popcnt:
+
+ case kExprF32SConvertI64:
+ case kExprF32UConvertI64:
+ case kExprF64SConvertI64:
+ case kExprF64UConvertI64:
+ case kExprI64SConvertF32:
+ case kExprI64SConvertF64:
+ case kExprI64UConvertF32:
+ case kExprI64UConvertF64:
+
+ return false;
+ default:
+ return true;
+ }
+#else
+ return true;
+#endif
+}
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/wasm/wasm-opcodes.h b/deps/v8/src/wasm/wasm-opcodes.h
new file mode 100644
index 0000000000..ae2843a6c1
--- /dev/null
+++ b/deps/v8/src/wasm/wasm-opcodes.h
@@ -0,0 +1,476 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_OPCODES_H_
+#define V8_WASM_OPCODES_H_
+
+#include "src/machine-type.h"
+#include "src/signature.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+// Binary encoding of local types.
+// These numeric values are part of the wire format and must not be
+// reordered; presumably the module decoder/encoder rely on them — verify
+// against module-decoder/encoder before changing.
+enum LocalTypeCode {
+  kLocalVoid = 0,
+  kLocalI32 = 1,
+  kLocalI64 = 2,
+  kLocalF32 = 3,
+  kLocalF64 = 4
+};
+
+// Binary encoding of memory types.
+// Signed/unsigned integer widths plus the two float widths.  These numeric
+// values are part of the wire format and must not be reordered.
+enum MemTypeCode {
+  kMemI8 = 0,
+  kMemU8 = 1,
+  kMemI16 = 2,
+  kMemU16 = 3,
+  kMemI32 = 4,
+  kMemU32 = 5,
+  kMemI64 = 6,
+  kMemU64 = 7,
+  kMemF32 = 8,
+  kMemF64 = 9
+};
+
+// We reuse the internal machine type to represent WebAssembly AST types.
+// A typedef improves readability without adding a whole new type system.
+typedef MachineRepresentation LocalType;
+const LocalType kAstStmt = MachineRepresentation::kNone;
+const LocalType kAstI32 = MachineRepresentation::kWord32;
+const LocalType kAstI64 = MachineRepresentation::kWord64;
+const LocalType kAstF32 = MachineRepresentation::kFloat32;
+const LocalType kAstF64 = MachineRepresentation::kFloat64;
+// We use kTagged here because kNone is already used by kAstStmt.
+const LocalType kAstEnd = MachineRepresentation::kTagged;
+
+// Functionality related to encoding memory accesses.
+struct MemoryAccess {
+  // Atomicity annotations for access to the memory and globals.
+  enum Atomicity {
+    kNone = 0,        // non-atomic
+    kSequential = 1,  // sequential consistency
+    kAcquire = 2,     // acquire semantics
+    kRelease = 3      // release semantics
+  };
+
+  // Alignment annotations for memory accesses.
+  enum Alignment { kAligned = 0, kUnaligned = 1 };
+
+  // Bitfields for the various annotations for memory accesses.
+  // Layout of the access byte (from the BitField parameters below):
+  //   bit 7     : alignment flag
+  //   bits 5..6 : atomicity
+  //   bit 4     : "has offset" flag (see LoadStoreAccessOf)
+  // Bits 0..3 are not defined here; presumably they carry the memory type —
+  // TODO confirm against the decoder.
+  typedef BitField<Alignment, 7, 1> AlignmentField;
+  typedef BitField<Atomicity, 5, 2> AtomicityField;
+  typedef BitField<bool, 4, 1> OffsetField;
+};
+
+typedef Signature<LocalType> FunctionSig;
+
+// Control expressions and blocks.
+#define FOREACH_CONTROL_OPCODE(V) \
+ V(Nop, 0x00, _) \
+ V(Block, 0x01, _) \
+ V(Loop, 0x02, _) \
+ V(If, 0x03, _) \
+ V(IfElse, 0x04, _) \
+ V(Select, 0x05, _) \
+ V(Br, 0x06, _) \
+ V(BrIf, 0x07, _) \
+ V(TableSwitch, 0x08, _) \
+ V(Return, 0x14, _) \
+ V(Unreachable, 0x15, _)
+// TODO(titzer): numbering
+
+// Constants, locals, globals, and calls.
+#define FOREACH_MISC_OPCODE(V) \
+ V(I8Const, 0x09, _) \
+ V(I32Const, 0x0a, _) \
+ V(I64Const, 0x0b, _) \
+ V(F64Const, 0x0c, _) \
+ V(F32Const, 0x0d, _) \
+ V(GetLocal, 0x0e, _) \
+ V(SetLocal, 0x0f, _) \
+ V(LoadGlobal, 0x10, _) \
+ V(StoreGlobal, 0x11, _) \
+ V(CallFunction, 0x12, _) \
+ V(CallIndirect, 0x13, _)
+
+// Load memory expressions.
+#define FOREACH_LOAD_MEM_OPCODE(V) \
+ V(I32LoadMem8S, 0x20, i_i) \
+ V(I32LoadMem8U, 0x21, i_i) \
+ V(I32LoadMem16S, 0x22, i_i) \
+ V(I32LoadMem16U, 0x23, i_i) \
+ V(I64LoadMem8S, 0x24, l_i) \
+ V(I64LoadMem8U, 0x25, l_i) \
+ V(I64LoadMem16S, 0x26, l_i) \
+ V(I64LoadMem16U, 0x27, l_i) \
+ V(I64LoadMem32S, 0x28, l_i) \
+ V(I64LoadMem32U, 0x29, l_i) \
+ V(I32LoadMem, 0x2a, i_i) \
+ V(I64LoadMem, 0x2b, l_i) \
+ V(F32LoadMem, 0x2c, f_i) \
+ V(F64LoadMem, 0x2d, d_i)
+
+// Store memory expressions.
+#define FOREACH_STORE_MEM_OPCODE(V) \
+ V(I32StoreMem8, 0x2e, i_ii) \
+ V(I32StoreMem16, 0x2f, i_ii) \
+ V(I64StoreMem8, 0x30, l_il) \
+ V(I64StoreMem16, 0x31, l_il) \
+ V(I64StoreMem32, 0x32, l_il) \
+ V(I32StoreMem, 0x33, i_ii) \
+ V(I64StoreMem, 0x34, l_il) \
+ V(F32StoreMem, 0x35, f_if) \
+ V(F64StoreMem, 0x36, d_id)
+
+// Miscellaneous memory expressions.
+#define FOREACH_MISC_MEM_OPCODE(V) \
+ V(MemorySize, 0x3b, i_v) \
+ V(GrowMemory, 0x39, i_i)
+
+// Expressions with signatures.
+#define FOREACH_SIMPLE_OPCODE(V) \
+ V(I32Add, 0x40, i_ii) \
+ V(I32Sub, 0x41, i_ii) \
+ V(I32Mul, 0x42, i_ii) \
+ V(I32DivS, 0x43, i_ii) \
+ V(I32DivU, 0x44, i_ii) \
+ V(I32RemS, 0x45, i_ii) \
+ V(I32RemU, 0x46, i_ii) \
+ V(I32And, 0x47, i_ii) \
+ V(I32Ior, 0x48, i_ii) \
+ V(I32Xor, 0x49, i_ii) \
+ V(I32Shl, 0x4a, i_ii) \
+ V(I32ShrU, 0x4b, i_ii) \
+ V(I32ShrS, 0x4c, i_ii) \
+ V(I32Eq, 0x4d, i_ii) \
+ V(I32Ne, 0x4e, i_ii) \
+ V(I32LtS, 0x4f, i_ii) \
+ V(I32LeS, 0x50, i_ii) \
+ V(I32LtU, 0x51, i_ii) \
+ V(I32LeU, 0x52, i_ii) \
+ V(I32GtS, 0x53, i_ii) \
+ V(I32GeS, 0x54, i_ii) \
+ V(I32GtU, 0x55, i_ii) \
+ V(I32GeU, 0x56, i_ii) \
+ V(I32Clz, 0x57, i_i) \
+ V(I32Ctz, 0x58, i_i) \
+ V(I32Popcnt, 0x59, i_i) \
+ V(BoolNot, 0x5a, i_i) \
+ V(I64Add, 0x5b, l_ll) \
+ V(I64Sub, 0x5c, l_ll) \
+ V(I64Mul, 0x5d, l_ll) \
+ V(I64DivS, 0x5e, l_ll) \
+ V(I64DivU, 0x5f, l_ll) \
+ V(I64RemS, 0x60, l_ll) \
+ V(I64RemU, 0x61, l_ll) \
+ V(I64And, 0x62, l_ll) \
+ V(I64Ior, 0x63, l_ll) \
+ V(I64Xor, 0x64, l_ll) \
+ V(I64Shl, 0x65, l_ll) \
+ V(I64ShrU, 0x66, l_ll) \
+ V(I64ShrS, 0x67, l_ll) \
+ V(I64Eq, 0x68, i_ll) \
+ V(I64Ne, 0x69, i_ll) \
+ V(I64LtS, 0x6a, i_ll) \
+ V(I64LeS, 0x6b, i_ll) \
+ V(I64LtU, 0x6c, i_ll) \
+ V(I64LeU, 0x6d, i_ll) \
+ V(I64GtS, 0x6e, i_ll) \
+ V(I64GeS, 0x6f, i_ll) \
+ V(I64GtU, 0x70, i_ll) \
+ V(I64GeU, 0x71, i_ll) \
+ V(I64Clz, 0x72, l_l) \
+ V(I64Ctz, 0x73, l_l) \
+ V(I64Popcnt, 0x74, l_l) \
+ V(F32Add, 0x75, f_ff) \
+ V(F32Sub, 0x76, f_ff) \
+ V(F32Mul, 0x77, f_ff) \
+ V(F32Div, 0x78, f_ff) \
+ V(F32Min, 0x79, f_ff) \
+ V(F32Max, 0x7a, f_ff) \
+ V(F32Abs, 0x7b, f_f) \
+ V(F32Neg, 0x7c, f_f) \
+ V(F32CopySign, 0x7d, f_ff) \
+ V(F32Ceil, 0x7e, f_f) \
+ V(F32Floor, 0x7f, f_f) \
+ V(F32Trunc, 0x80, f_f) \
+ V(F32NearestInt, 0x81, f_f) \
+ V(F32Sqrt, 0x82, f_f) \
+ V(F32Eq, 0x83, i_ff) \
+ V(F32Ne, 0x84, i_ff) \
+ V(F32Lt, 0x85, i_ff) \
+ V(F32Le, 0x86, i_ff) \
+ V(F32Gt, 0x87, i_ff) \
+ V(F32Ge, 0x88, i_ff) \
+ V(F64Add, 0x89, d_dd) \
+ V(F64Sub, 0x8a, d_dd) \
+ V(F64Mul, 0x8b, d_dd) \
+ V(F64Div, 0x8c, d_dd) \
+ V(F64Min, 0x8d, d_dd) \
+ V(F64Max, 0x8e, d_dd) \
+ V(F64Abs, 0x8f, d_d) \
+ V(F64Neg, 0x90, d_d) \
+ V(F64CopySign, 0x91, d_dd) \
+ V(F64Ceil, 0x92, d_d) \
+ V(F64Floor, 0x93, d_d) \
+ V(F64Trunc, 0x94, d_d) \
+ V(F64NearestInt, 0x95, d_d) \
+ V(F64Sqrt, 0x96, d_d) \
+ V(F64Eq, 0x97, i_dd) \
+ V(F64Ne, 0x98, i_dd) \
+ V(F64Lt, 0x99, i_dd) \
+ V(F64Le, 0x9a, i_dd) \
+ V(F64Gt, 0x9b, i_dd) \
+ V(F64Ge, 0x9c, i_dd) \
+ V(I32SConvertF32, 0x9d, i_f) \
+ V(I32SConvertF64, 0x9e, i_d) \
+ V(I32UConvertF32, 0x9f, i_f) \
+ V(I32UConvertF64, 0xa0, i_d) \
+ V(I32ConvertI64, 0xa1, i_l) \
+ V(I64SConvertF32, 0xa2, l_f) \
+ V(I64SConvertF64, 0xa3, l_d) \
+ V(I64UConvertF32, 0xa4, l_f) \
+ V(I64UConvertF64, 0xa5, l_d) \
+ V(I64SConvertI32, 0xa6, l_i) \
+ V(I64UConvertI32, 0xa7, l_i) \
+ V(F32SConvertI32, 0xa8, f_i) \
+ V(F32UConvertI32, 0xa9, f_i) \
+ V(F32SConvertI64, 0xaa, f_l) \
+ V(F32UConvertI64, 0xab, f_l) \
+ V(F32ConvertF64, 0xac, f_d) \
+ V(F32ReinterpretI32, 0xad, f_i) \
+ V(F64SConvertI32, 0xae, d_i) \
+ V(F64UConvertI32, 0xaf, d_i) \
+ V(F64SConvertI64, 0xb0, d_l) \
+ V(F64UConvertI64, 0xb1, d_l) \
+ V(F64ConvertF32, 0xb2, d_f) \
+ V(F64ReinterpretI64, 0xb3, d_l) \
+ V(I32ReinterpretF32, 0xb4, i_f) \
+ V(I64ReinterpretF64, 0xb5, l_d)
+
+// All opcodes.
+#define FOREACH_OPCODE(V) \
+ FOREACH_CONTROL_OPCODE(V) \
+ FOREACH_MISC_OPCODE(V) \
+ FOREACH_SIMPLE_OPCODE(V) \
+ FOREACH_STORE_MEM_OPCODE(V) \
+ FOREACH_LOAD_MEM_OPCODE(V) \
+ FOREACH_MISC_MEM_OPCODE(V)
+
+// All signatures.
+#define FOREACH_SIGNATURE(V) \
+ V(i_ii, kAstI32, kAstI32, kAstI32) \
+ V(i_i, kAstI32, kAstI32) \
+ V(i_v, kAstI32) \
+ V(i_ff, kAstI32, kAstF32, kAstF32) \
+ V(i_f, kAstI32, kAstF32) \
+ V(i_dd, kAstI32, kAstF64, kAstF64) \
+ V(i_d, kAstI32, kAstF64) \
+ V(i_l, kAstI32, kAstI64) \
+ V(l_ll, kAstI64, kAstI64, kAstI64) \
+ V(i_ll, kAstI32, kAstI64, kAstI64) \
+ V(l_l, kAstI64, kAstI64) \
+ V(l_i, kAstI64, kAstI32) \
+ V(l_f, kAstI64, kAstF32) \
+ V(l_d, kAstI64, kAstF64) \
+ V(f_ff, kAstF32, kAstF32, kAstF32) \
+ V(f_f, kAstF32, kAstF32) \
+ V(f_d, kAstF32, kAstF64) \
+ V(f_i, kAstF32, kAstI32) \
+ V(f_l, kAstF32, kAstI64) \
+ V(d_dd, kAstF64, kAstF64, kAstF64) \
+ V(d_d, kAstF64, kAstF64) \
+ V(d_f, kAstF64, kAstF32) \
+ V(d_i, kAstF64, kAstI32) \
+ V(d_l, kAstF64, kAstI64) \
+ V(d_id, kAstF64, kAstI32, kAstF64) \
+ V(f_if, kAstF32, kAstI32, kAstF32) \
+ V(l_il, kAstI64, kAstI32, kAstI64)
+
+// One enumerator kExpr<Name> per opcode, with its value taken from the
+// second column of the FOREACH_OPCODE tables above.
+enum WasmOpcode {
+// Declare expression opcodes.
+#define DECLARE_NAMED_ENUM(name, opcode, sig) kExpr##name = opcode,
+  FOREACH_OPCODE(DECLARE_NAMED_ENUM)
+#undef DECLARE_NAMED_ENUM
+};
+
+// A collection of opcode-related static methods.
+class WasmOpcodes {
+ public:
+  // Whether the opcode is implemented on the current target architecture
+  // (most i64 opcodes are unsupported on 32-bit platforms; see the .cc).
+  static bool IsSupported(WasmOpcode opcode);
+  // Printable mnemonic for the opcode.
+  static const char* OpcodeName(WasmOpcode opcode);
+  // Signature of a fixed-signature ("simple") opcode.
+  static FunctionSig* Signature(WasmOpcode opcode);
+
+  // Size in bytes of one element of the given machine type.
+  static byte MemSize(MachineType type) {
+    return 1 << ElementSizeLog2Of(type.representation());
+  }
+
+  // Maps an AST local type to its binary encoding.
+  static LocalTypeCode LocalTypeCodeFor(LocalType type) {
+    switch (type) {
+      case kAstI32:
+        return kLocalI32;
+      case kAstI64:
+        return kLocalI64;
+      case kAstF32:
+        return kLocalF32;
+      case kAstF64:
+        return kLocalF64;
+      case kAstStmt:
+        return kLocalVoid;
+      default:
+        UNREACHABLE();
+        return kLocalVoid;
+    }
+  }
+
+  // Maps a machine type to its binary memory-type encoding.
+  static MemTypeCode MemTypeCodeFor(MachineType type) {
+    if (type == MachineType::Int8()) {
+      return kMemI8;
+    } else if (type == MachineType::Uint8()) {
+      return kMemU8;
+    } else if (type == MachineType::Int16()) {
+      return kMemI16;
+    } else if (type == MachineType::Uint16()) {
+      return kMemU16;
+    } else if (type == MachineType::Int32()) {
+      return kMemI32;
+    } else if (type == MachineType::Uint32()) {
+      return kMemU32;
+    } else if (type == MachineType::Int64()) {
+      return kMemI64;
+    } else if (type == MachineType::Uint64()) {
+      return kMemU64;
+    } else if (type == MachineType::Float32()) {
+      return kMemF32;
+    } else if (type == MachineType::Float64()) {
+      return kMemF64;
+    } else {
+      UNREACHABLE();
+      return kMemI32;
+    }
+  }
+
+  // Maps an AST local type to the machine type used to represent it.
+  static MachineType MachineTypeFor(LocalType type) {
+    switch (type) {
+      case kAstI32:
+        return MachineType::Int32();
+      case kAstI64:
+        return MachineType::Int64();
+      case kAstF32:
+        return MachineType::Float32();
+      case kAstF64:
+        return MachineType::Float64();
+      case kAstStmt:
+        return MachineType::None();
+      default:
+        UNREACHABLE();
+        return MachineType::None();
+    }
+  }
+
+  // Maps a machine type to the AST local type it widens to.  Note that all
+  // integer types narrower than 32 bits map to kAstI32.
+  static LocalType LocalTypeFor(MachineType type) {
+    if (type == MachineType::Int8()) {
+      return kAstI32;
+    } else if (type == MachineType::Uint8()) {
+      return kAstI32;
+    } else if (type == MachineType::Int16()) {
+      return kAstI32;
+    } else if (type == MachineType::Uint16()) {
+      return kAstI32;
+    } else if (type == MachineType::Int32()) {
+      return kAstI32;
+    } else if (type == MachineType::Uint32()) {
+      return kAstI32;
+    } else if (type == MachineType::Int64()) {
+      return kAstI64;
+    } else if (type == MachineType::Uint64()) {
+      return kAstI64;
+    } else if (type == MachineType::Float32()) {
+      return kAstF32;
+    } else if (type == MachineType::Float64()) {
+      return kAstF64;
+    } else {
+      UNREACHABLE();
+      return kAstI32;
+    }
+  }
+
+  // Selects the load or store opcode matching the given machine type.
+  // Signedness only matters for sub-word loads (8/16-bit), which have
+  // separate sign- and zero-extending variants.
+  // TODO(titzer): remove this method
+  static WasmOpcode LoadStoreOpcodeOf(MachineType type, bool store) {
+    if (type == MachineType::Int8()) {
+      return store ? kExprI32StoreMem8 : kExprI32LoadMem8S;
+    } else if (type == MachineType::Uint8()) {
+      return store ? kExprI32StoreMem8 : kExprI32LoadMem8U;
+    } else if (type == MachineType::Int16()) {
+      return store ? kExprI32StoreMem16 : kExprI32LoadMem16S;
+    } else if (type == MachineType::Uint16()) {
+      return store ? kExprI32StoreMem16 : kExprI32LoadMem16U;
+    } else if (type == MachineType::Int32()) {
+      return store ? kExprI32StoreMem : kExprI32LoadMem;
+    } else if (type == MachineType::Uint32()) {
+      return store ? kExprI32StoreMem : kExprI32LoadMem;
+    } else if (type == MachineType::Int64()) {
+      return store ? kExprI64StoreMem : kExprI64LoadMem;
+    } else if (type == MachineType::Uint64()) {
+      return store ? kExprI64StoreMem : kExprI64LoadMem;
+    } else if (type == MachineType::Float32()) {
+      return store ? kExprF32StoreMem : kExprF32LoadMem;
+    } else if (type == MachineType::Float64()) {
+      return store ? kExprF64StoreMem : kExprF64LoadMem;
+    } else {
+      UNREACHABLE();
+      return kExprNop;
+    }
+  }
+
+  // Encodes the "has offset" flag of a memory-access byte (bit 4;
+  // see MemoryAccess::OffsetField).
+  static byte LoadStoreAccessOf(bool with_offset) {
+    return MemoryAccess::OffsetField::encode(with_offset);
+  }
+
+  // Single-character type code used in textual signatures (e.g. "i_ii").
+  static char ShortNameOf(LocalType type) {
+    switch (type) {
+      case kAstI32:
+        return 'i';
+      case kAstI64:
+        return 'l';
+      case kAstF32:
+        return 'f';
+      case kAstF64:
+        return 'd';
+      case kAstStmt:
+        return 'v';
+      case kAstEnd:
+        return 'x';
+      default:
+        UNREACHABLE();
+        return '?';
+    }
+  }
+
+  // Human-readable name of a local type; never fails (unknown types get a
+  // placeholder rather than hitting UNREACHABLE).
+  static const char* TypeName(LocalType type) {
+    switch (type) {
+      case kAstI32:
+        return "i32";
+      case kAstI64:
+        return "i64";
+      case kAstF32:
+        return "f32";
+      case kAstF64:
+        return "f64";
+      case kAstStmt:
+        return "<stmt>";
+      case kAstEnd:
+        return "<end>";
+      default:
+        return "<unknown>";
+    }
+  }
+};
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_OPCODES_H_
diff --git a/deps/v8/src/wasm/wasm-result.cc b/deps/v8/src/wasm/wasm-result.cc
new file mode 100644
index 0000000000..4fd17ee364
--- /dev/null
+++ b/deps/v8/src/wasm/wasm-result.cc
@@ -0,0 +1,53 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/wasm/wasm-result.h"
+
+#include "src/factory.h"
+#include "src/heap/heap.h"
+#include "src/isolate.h"
+#include "src/objects-inl.h" // TODO(mstarzinger): Temporary cycle breaker!
+#include "src/objects.h"
+
+#include "src/base/platform/platform.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+// Renders an error code for diagnostics.  Only kSuccess is distinguished;
+// all other codes currently print the generic "Error".
+std::ostream& operator<<(std::ostream& os, const ErrorCode& error_code) {
+  switch (error_code) {
+    case kSuccess:
+      os << "Success";
+      break;
+    default:  // TODO(titzer): render error codes
+      os << "Error";
+      break;
+  }
+  return os;
+}
+
+
+// Formats a printf-style message (truncated to 255 characters), prefixes it
+// with the context string if one was given, and schedules a JavaScript
+// exception on the isolate.  Only the first error per thrower is reported;
+// subsequent calls are ignored.
+void ErrorThrower::Error(const char* format, ...) {
+  if (error_) return;  // only report the first error.
+  error_ = true;
+  char buffer[256];
+
+  va_list arguments;
+  va_start(arguments, format);
+  base::OS::VSNPrintF(buffer, 255, format, arguments);
+  va_end(arguments);
+
+  std::ostringstream str;
+  if (context_ != nullptr) {
+    str << context_ << ": ";
+  }
+  str << buffer;
+
+  // ScheduleThrow defers the actual throw until control returns to JS.
+  isolate_->ScheduleThrow(
+      *isolate_->factory()->NewStringFromAsciiChecked(str.str().c_str()));
+}
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/wasm/wasm-result.h b/deps/v8/src/wasm/wasm-result.h
new file mode 100644
index 0000000000..59ab29ebe4
--- /dev/null
+++ b/deps/v8/src/wasm/wasm-result.h
@@ -0,0 +1,116 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_RESULT_H_
+#define V8_WASM_RESULT_H_
+
+#include "src/base/smart-pointers.h"
+
+#include "src/globals.h"
+
+namespace v8 {
+namespace internal {
+
+class Isolate;
+
+namespace wasm {
+
+// Error codes for programmatic checking of the decoder's verification.
+// kSuccess must remain 0 / first so that Result::ok() style checks stay
+// cheap; the remaining codes identify specific decoder failures.
+enum ErrorCode {
+  kSuccess,
+  kError,                 // TODO(titzer): remove me
+  kOutOfMemory,           // decoder ran out of memory
+  kEndOfCode,             // end of code reached prematurely
+  kInvalidOpcode,         // found invalid opcode
+  kUnreachableCode,       // found unreachable code
+  kImproperContinue,      // improperly nested continue
+  kImproperBreak,         // improperly nested break
+  kReturnCount,           // return count mismatch
+  kTypeError,             // type mismatch
+  kInvalidLocalIndex,     // invalid local
+  kInvalidGlobalIndex,    // invalid global
+  kInvalidFunctionIndex,  // invalid function
+  kInvalidMemType         // invalid memory type
+};
+
+// The overall result of decoding a function or a module.
+template <typename T>
+struct Result {
+  // Default constructor produces a successful, empty result.
+  // Fix: |error_pt| was previously left uninitialized here, so CopyFrom()
+  // and operator<< could read an indeterminate pointer; it is now
+  // initialized like the other pointer members.
+  Result()
+      : val(nullptr),
+        error_code(kSuccess),
+        start(nullptr),
+        error_pc(nullptr),
+        error_pt(nullptr) {
+    error_msg.Reset(nullptr);
+  }
+
+  T val;                 // decoded value; nullptr until set by the decoder.
+  ErrorCode error_code;  // kSuccess iff decoding succeeded.
+  const byte* start;     // start of the decoded byte range.
+  const byte* error_pc;  // position of the error relative to |start|.
+  const byte* error_pt;  // auxiliary error position — semantics set by the
+                         // decoder; TODO confirm against ast-decoder.
+  base::SmartArrayPointer<char> error_msg;  // formatted error message.
+
+  bool ok() const { return error_code == kSuccess; }
+  bool failed() const { return error_code != kSuccess; }
+
+  // Copies the error state (not the value) from another result.  Note that
+  // assigning |error_msg| presumably transfers ownership of the message
+  // buffer from |that| — verify SmartArrayPointer assignment semantics.
+  template <typename V>
+  void CopyFrom(Result<V>& that) {
+    error_code = that.error_code;
+    start = that.start;
+    error_pc = that.error_pc;
+    error_pt = that.error_pt;
+    error_msg = that.error_msg;
+  }
+};
+
+// Renders a result for diagnostics: the value (or "success (no value)") on
+// success; on failure, the message with the byte offset of the error
+// ("@+N" for offsets into the range, "@-N" if error_pc precedes start),
+// falling back to the bare error code when no message was recorded.
+template <typename T>
+std::ostream& operator<<(std::ostream& os, const Result<T>& result) {
+  os << "Result = ";
+  if (result.ok()) {
+    if (result.val != nullptr) {
+      os << *result.val;
+    } else {
+      os << "success (no value)";
+    }
+  } else if (result.error_msg.get() != nullptr) {
+    ptrdiff_t offset = result.error_pc - result.start;
+    if (offset < 0) {
+      os << result.error_msg.get() << " @" << offset;
+    } else {
+      os << result.error_msg.get() << " @+" << offset;
+    }
+  } else {
+    os << result.error_code;
+  }
+  os << std::endl;
+  return os;
+}
+
+std::ostream& operator<<(std::ostream& os, const ErrorCode& error_code);
+
+// A helper for generating error messages that bubble up to JS exceptions.
+class ErrorThrower {
+ public:
+  // |context| is an optional prefix for all messages (may be nullptr);
+  // both pointers must outlive the thrower — it does not take ownership.
+  ErrorThrower(Isolate* isolate, const char* context)
+      : isolate_(isolate), context_(context), error_(false) {}
+
+  // Printf-style; schedules a JS exception.  Only the first error per
+  // thrower is reported (see the definition in wasm-result.cc).
+  void Error(const char* fmt, ...);
+
+  // Convenience: reports |error| followed by the rendered |result|.
+  // (The "return" of the void Error(...) call is a void expression —
+  // legal, though it reads oddly.)
+  template <typename T>
+  void Failed(const char* error, Result<T>& result) {
+    std::ostringstream str;
+    str << error << result;
+    return Error(str.str().c_str());
+  }
+
+  // True once any error has been reported.
+  bool error() const { return error_; }
+
+ private:
+  Isolate* isolate_;
+  const char* context_;
+  bool error_;
+};
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif