summaryrefslogtreecommitdiff
path: root/deps/v8/src/wasm
diff options
context:
space:
mode:
authorMichaël Zasso <targos@protonmail.com>2017-06-06 10:28:14 +0200
committerMichaël Zasso <targos@protonmail.com>2017-06-07 10:33:31 +0200
commit3dc8c3bed4cf3a77607edbb0b015e33f8b60fc09 (patch)
tree9dee56e142638b34f1eccbd0ad88c3bce5377c29 /deps/v8/src/wasm
parent91a1bbe3055a660194ca4d403795aa0c03e9d056 (diff)
downloadnode-new-3dc8c3bed4cf3a77607edbb0b015e33f8b60fc09.tar.gz
deps: update V8 to 5.9.211.32
PR-URL: https://github.com/nodejs/node/pull/13263 Reviewed-By: Gibson Fahnestock <gibfahn@gmail.com> Reviewed-By: Ben Noordhuis <info@bnoordhuis.nl> Reviewed-By: Franziska Hinkelmann <franziska.hinkelmann@gmail.com> Reviewed-By: Myles Borins <myles.borins@gmail.com>
Diffstat (limited to 'deps/v8/src/wasm')
-rw-r--r--deps/v8/src/wasm/decoder.h427
-rw-r--r--deps/v8/src/wasm/function-body-decoder-impl.h162
-rw-r--r--deps/v8/src/wasm/function-body-decoder.cc286
-rw-r--r--deps/v8/src/wasm/function-body-decoder.h3
-rw-r--r--deps/v8/src/wasm/module-decoder.cc402
-rw-r--r--deps/v8/src/wasm/module-decoder.h12
-rw-r--r--deps/v8/src/wasm/wasm-code-specialization.cc89
-rw-r--r--deps/v8/src/wasm/wasm-debug.cc239
-rw-r--r--deps/v8/src/wasm/wasm-interpreter.cc1184
-rw-r--r--deps/v8/src/wasm/wasm-interpreter.h61
-rw-r--r--deps/v8/src/wasm/wasm-js.cc108
-rw-r--r--deps/v8/src/wasm/wasm-limits.h49
-rw-r--r--deps/v8/src/wasm/wasm-macro-gen.h30
-rw-r--r--deps/v8/src/wasm/wasm-module-builder.cc110
-rw-r--r--deps/v8/src/wasm/wasm-module-builder.h18
-rw-r--r--deps/v8/src/wasm/wasm-module.cc1763
-rw-r--r--deps/v8/src/wasm/wasm-module.h128
-rw-r--r--deps/v8/src/wasm/wasm-objects.cc507
-rw-r--r--deps/v8/src/wasm/wasm-objects.h286
-rw-r--r--deps/v8/src/wasm/wasm-opcodes.cc54
-rw-r--r--deps/v8/src/wasm/wasm-opcodes.h136
-rw-r--r--deps/v8/src/wasm/wasm-result.cc33
-rw-r--r--deps/v8/src/wasm/wasm-result.h118
-rw-r--r--deps/v8/src/wasm/wasm-text.cc34
24 files changed, 4017 insertions, 2222 deletions
diff --git a/deps/v8/src/wasm/decoder.h b/deps/v8/src/wasm/decoder.h
index bfd14366e1..d9d25175ef 100644
--- a/deps/v8/src/wasm/decoder.h
+++ b/deps/v8/src/wasm/decoder.h
@@ -5,6 +5,7 @@
#ifndef V8_WASM_DECODER_H_
#define V8_WASM_DECODER_H_
+#include <cstdarg>
#include <memory>
#include "src/base/compiler-specific.h"
@@ -23,8 +24,13 @@ namespace wasm {
do { \
if (FLAG_trace_wasm_decoder) PrintF(__VA_ARGS__); \
} while (false)
+#define TRACE_IF(cond, ...) \
+ do { \
+ if (FLAG_trace_wasm_decoder && (cond)) PrintF(__VA_ARGS__); \
+ } while (false)
#else
#define TRACE(...)
+#define TRACE_IF(...)
#endif
// A helper utility to decode bytes, integers, fields, varints, etc, from
@@ -32,171 +38,108 @@ namespace wasm {
class Decoder {
public:
Decoder(const byte* start, const byte* end)
- : start_(start),
- pc_(start),
- end_(end),
- error_pc_(nullptr),
- error_pt_(nullptr) {}
+ : start_(start), pc_(start), end_(end), error_pc_(nullptr) {}
Decoder(const byte* start, const byte* pc, const byte* end)
- : start_(start),
- pc_(pc),
- end_(end),
- error_pc_(nullptr),
- error_pt_(nullptr) {}
+ : start_(start), pc_(pc), end_(end), error_pc_(nullptr) {}
virtual ~Decoder() {}
- inline bool check(const byte* base, unsigned offset, unsigned length,
- const char* msg) {
- DCHECK_GE(base, start_);
- if ((base + offset + length) > end_) {
- error(base, base + offset, "%s", msg);
+ inline bool check(const byte* pc, unsigned length, const char* msg) {
+ DCHECK_LE(start_, pc);
+ if (V8_UNLIKELY(pc + length > end_)) {
+ error(pc, msg);
return false;
}
return true;
}
- // Reads a single 8-bit byte, reporting an error if out of bounds.
- inline uint8_t checked_read_u8(const byte* base, unsigned offset,
- const char* msg = "expected 1 byte") {
- return check(base, offset, 1, msg) ? base[offset] : 0;
+ // Reads an 8-bit unsigned integer.
+ template <bool checked>
+ inline uint8_t read_u8(const byte* pc, const char* msg = "expected 1 byte") {
+ return read_little_endian<uint8_t, checked>(pc, msg);
}
- // Reads 16-bit word, reporting an error if out of bounds.
- inline uint16_t checked_read_u16(const byte* base, unsigned offset,
- const char* msg = "expected 2 bytes") {
- return check(base, offset, 2, msg) ? read_u16(base + offset) : 0;
+ // Reads a 16-bit unsigned integer (little endian).
+ template <bool checked>
+ inline uint16_t read_u16(const byte* pc,
+ const char* msg = "expected 2 bytes") {
+ return read_little_endian<uint16_t, checked>(pc, msg);
}
- // Reads 32-bit word, reporting an error if out of bounds.
- inline uint32_t checked_read_u32(const byte* base, unsigned offset,
- const char* msg = "expected 4 bytes") {
- return check(base, offset, 4, msg) ? read_u32(base + offset) : 0;
+ // Reads a 32-bit unsigned integer (little endian).
+ template <bool checked>
+ inline uint32_t read_u32(const byte* pc,
+ const char* msg = "expected 4 bytes") {
+ return read_little_endian<uint32_t, checked>(pc, msg);
}
- // Reads 64-bit word, reporting an error if out of bounds.
- inline uint64_t checked_read_u64(const byte* base, unsigned offset,
- const char* msg = "expected 8 bytes") {
- return check(base, offset, 8, msg) ? read_u64(base + offset) : 0;
+ // Reads a 64-bit unsigned integer (little endian).
+ template <bool checked>
+ inline uint64_t read_u64(const byte* pc,
+ const char* msg = "expected 8 bytes") {
+ return read_little_endian<uint64_t, checked>(pc, msg);
}
// Reads a variable-length unsigned integer (little endian).
- uint32_t checked_read_u32v(const byte* base, unsigned offset,
- unsigned* length,
- const char* msg = "expected LEB32") {
- return checked_read_leb<uint32_t, false>(base, offset, length, msg);
+ template <bool checked>
+ uint32_t read_u32v(const byte* pc, unsigned* length,
+ const char* name = "LEB32") {
+ return read_leb<uint32_t, checked, false, false>(pc, length, name);
}
// Reads a variable-length signed integer (little endian).
- int32_t checked_read_i32v(const byte* base, unsigned offset, unsigned* length,
- const char* msg = "expected SLEB32") {
- uint32_t result =
- checked_read_leb<uint32_t, true>(base, offset, length, msg);
- if (*length == 5) return bit_cast<int32_t>(result);
- if (*length > 0) {
- int shift = 32 - 7 * *length;
- // Perform sign extension.
- return bit_cast<int32_t>(result << shift) >> shift;
- }
- return 0;
+ template <bool checked>
+ int32_t read_i32v(const byte* pc, unsigned* length,
+ const char* name = "signed LEB32") {
+ return read_leb<int32_t, checked, false, false>(pc, length, name);
}
// Reads a variable-length unsigned integer (little endian).
- uint64_t checked_read_u64v(const byte* base, unsigned offset,
- unsigned* length,
- const char* msg = "expected LEB64") {
- return checked_read_leb<uint64_t, false>(base, offset, length, msg);
+ template <bool checked>
+ uint64_t read_u64v(const byte* pc, unsigned* length,
+ const char* name = "LEB64") {
+ return read_leb<uint64_t, checked, false, false>(pc, length, name);
}
// Reads a variable-length signed integer (little endian).
- int64_t checked_read_i64v(const byte* base, unsigned offset, unsigned* length,
- const char* msg = "expected SLEB64") {
- uint64_t result =
- checked_read_leb<uint64_t, true>(base, offset, length, msg);
- if (*length == 10) return bit_cast<int64_t>(result);
- if (*length > 0) {
- int shift = 64 - 7 * *length;
- // Perform sign extension.
- return bit_cast<int64_t>(result << shift) >> shift;
- }
- return 0;
- }
-
- // Reads a single 16-bit unsigned integer (little endian).
- inline uint16_t read_u16(const byte* ptr) {
- DCHECK(ptr >= start_ && (ptr + 2) <= end_);
- return ReadLittleEndianValue<uint16_t>(ptr);
- }
-
- // Reads a single 32-bit unsigned integer (little endian).
- inline uint32_t read_u32(const byte* ptr) {
- DCHECK(ptr >= start_ && (ptr + 4) <= end_);
- return ReadLittleEndianValue<uint32_t>(ptr);
- }
-
- // Reads a single 64-bit unsigned integer (little endian).
- inline uint64_t read_u64(const byte* ptr) {
- DCHECK(ptr >= start_ && (ptr + 8) <= end_);
- return ReadLittleEndianValue<uint64_t>(ptr);
+ template <bool checked>
+ int64_t read_i64v(const byte* pc, unsigned* length,
+ const char* name = "signed LEB64") {
+ return read_leb<int64_t, checked, false, false>(pc, length, name);
}
// Reads a 8-bit unsigned integer (byte) and advances {pc_}.
- uint8_t consume_u8(const char* name = nullptr) {
- TRACE(" +%d %-20s: ", static_cast<int>(pc_ - start_),
- name ? name : "uint8_t");
- if (checkAvailable(1)) {
- byte val = *(pc_++);
- TRACE("%02x = %d\n", val, val);
- return val;
- }
- return traceOffEnd<uint8_t>();
+ uint8_t consume_u8(const char* name = "uint8_t") {
+ return consume_little_endian<uint8_t>(name);
}
// Reads a 16-bit unsigned integer (little endian) and advances {pc_}.
- uint16_t consume_u16(const char* name = nullptr) {
- TRACE(" +%d %-20s: ", static_cast<int>(pc_ - start_),
- name ? name : "uint16_t");
- if (checkAvailable(2)) {
- uint16_t val = read_u16(pc_);
- TRACE("%02x %02x = %d\n", pc_[0], pc_[1], val);
- pc_ += 2;
- return val;
- }
- return traceOffEnd<uint16_t>();
+ uint16_t consume_u16(const char* name = "uint16_t") {
+ return consume_little_endian<uint16_t>(name);
}
// Reads a single 32-bit unsigned integer (little endian) and advances {pc_}.
- uint32_t consume_u32(const char* name = nullptr) {
- TRACE(" +%d %-20s: ", static_cast<int>(pc_ - start_),
- name ? name : "uint32_t");
- if (checkAvailable(4)) {
- uint32_t val = read_u32(pc_);
- TRACE("%02x %02x %02x %02x = %u\n", pc_[0], pc_[1], pc_[2], pc_[3], val);
- pc_ += 4;
- return val;
- }
- return traceOffEnd<uint32_t>();
+ uint32_t consume_u32(const char* name = "uint32_t") {
+ return consume_little_endian<uint32_t>(name);
}
// Reads a LEB128 variable-length unsigned 32-bit integer and advances {pc_}.
uint32_t consume_u32v(const char* name = nullptr) {
- return consume_leb<uint32_t, false>(name);
+ unsigned length = 0;
+ return read_leb<uint32_t, true, true, true>(pc_, &length, name);
}
// Reads a LEB128 variable-length signed 32-bit integer and advances {pc_}.
int32_t consume_i32v(const char* name = nullptr) {
- return consume_leb<int32_t, true>(name);
+ unsigned length = 0;
+ return read_leb<int32_t, true, true, true>(pc_, &length, name);
}
// Consume {size} bytes and send them to the bit bucket, advancing {pc_}.
void consume_bytes(uint32_t size, const char* name = "skip") {
-#if DEBUG
- if (name) {
- // Only trace if the name is not null.
- TRACE(" +%d %-20s: %d bytes\n", static_cast<int>(pc_ - start_), name,
- size);
- }
-#endif
+ // Only trace if the name is not null.
+ TRACE_IF(name, " +%d %-20s: %d bytes\n", static_cast<int>(pc_ - start_),
+ name, size);
if (checkAvailable(size)) {
pc_ += size;
} else {
@@ -208,73 +151,69 @@ class Decoder {
bool checkAvailable(int size) {
intptr_t pc_overflow_value = std::numeric_limits<intptr_t>::max() - size;
if (size < 0 || (intptr_t)pc_ > pc_overflow_value) {
- error(pc_, nullptr, "reading %d bytes would underflow/overflow", size);
+ errorf(pc_, "reading %d bytes would underflow/overflow", size);
return false;
} else if (pc_ < start_ || end_ < (pc_ + size)) {
- error(pc_, nullptr, "expected %d bytes, fell off end", size);
+ errorf(pc_, "expected %d bytes, fell off end", size);
return false;
} else {
return true;
}
}
- void error(const char* msg) { error(pc_, nullptr, "%s", msg); }
+ void error(const char* msg) { errorf(pc_, "%s", msg); }
- void error(const byte* pc, const char* msg) { error(pc, nullptr, "%s", msg); }
+ void error(const byte* pc, const char* msg) { errorf(pc, "%s", msg); }
// Sets internal error state.
- void PRINTF_FORMAT(4, 5)
- error(const byte* pc, const byte* pt, const char* format, ...) {
- if (ok()) {
+ void PRINTF_FORMAT(3, 4) errorf(const byte* pc, const char* format, ...) {
+ // Only report the first error.
+ if (!ok()) return;
#if DEBUG
- if (FLAG_wasm_break_on_decoder_error) {
- base::OS::DebugBreak();
- }
-#endif
- const int kMaxErrorMsg = 256;
- char* buffer = new char[kMaxErrorMsg];
- va_list arguments;
- va_start(arguments, format);
- base::OS::VSNPrintF(buffer, kMaxErrorMsg - 1, format, arguments);
- va_end(arguments);
- error_msg_.reset(buffer);
- error_pc_ = pc;
- error_pt_ = pt;
- onFirstError();
+ if (FLAG_wasm_break_on_decoder_error) {
+ base::OS::DebugBreak();
}
+#endif
+ constexpr int kMaxErrorMsg = 256;
+ EmbeddedVector<char, kMaxErrorMsg> buffer;
+ va_list arguments;
+ va_start(arguments, format);
+ int len = VSNPrintF(buffer, format, arguments);
+ CHECK_LT(0, len);
+ va_end(arguments);
+ error_msg_.assign(buffer.start(), len);
+ error_pc_ = pc;
+ onFirstError();
}
// Behavior triggered on first error, overridden in subclasses.
virtual void onFirstError() {}
+ // Debugging helper to print a bytes range as hex bytes.
+ void traceByteRange(const byte* start, const byte* end) {
+ DCHECK_LE(start, end);
+ for (const byte* p = start; p < end; ++p) TRACE("%02x ", *p);
+ }
+
// Debugging helper to print bytes up to the end.
- template <typename T>
- T traceOffEnd() {
- T t = 0;
- for (const byte* ptr = pc_; ptr < end_; ptr++) {
- TRACE("%02x ", *ptr);
- }
+ void traceOffEnd() {
+ traceByteRange(pc_, end_);
TRACE("<end>\n");
- pc_ = end_;
- return t;
}
// Converts the given value to a {Result}, copying the error if necessary.
- template <typename T>
- Result<T> toResult(T val) {
- Result<T> result;
+ template <typename T, typename U = typename std::remove_reference<T>::type>
+ Result<U> toResult(T&& val) {
+ Result<U> result(std::forward<T>(val));
if (failed()) {
- TRACE("Result error: %s\n", error_msg_.get());
- result.error_code = kError;
- result.start = start_;
- result.error_pc = error_pc_;
- result.error_pt = error_pt_;
- // transfer ownership of the error to the result.
- result.error_msg.reset(error_msg_.release());
- } else {
- result.error_code = kSuccess;
+ // The error message must not be empty, otherwise Result::failed() will be
+ // false.
+ DCHECK(!error_msg_.empty());
+ TRACE("Result error: %s\n", error_msg_.c_str());
+ DCHECK_GE(error_pc_, start_);
+ result.error_offset = static_cast<uint32_t>(error_pc_ - start_);
+ result.error_msg = std::move(error_msg_);
}
- result.val = std::move(val);
return result;
}
@@ -284,11 +223,10 @@ class Decoder {
pc_ = start;
end_ = end;
error_pc_ = nullptr;
- error_pt_ = nullptr;
- error_msg_.reset();
+ error_msg_.clear();
}
- bool ok() const { return error_msg_ == nullptr; }
+ bool ok() const { return error_msg_.empty(); }
bool failed() const { return !ok(); }
bool more() const { return pc_ < end_; }
@@ -302,104 +240,97 @@ class Decoder {
const byte* pc_;
const byte* end_;
const byte* error_pc_;
- const byte* error_pt_;
- std::unique_ptr<char[]> error_msg_;
+ std::string error_msg_;
private:
- template <typename IntType, bool is_signed>
- IntType checked_read_leb(const byte* base, unsigned offset, unsigned* length,
- const char* msg) {
- if (!check(base, offset, 1, msg)) {
- *length = 0;
- return 0;
+ template <typename IntType, bool checked>
+ inline IntType read_little_endian(const byte* pc, const char* msg) {
+ if (!checked) {
+ DCHECK(check(pc, sizeof(IntType), msg));
+ } else if (!check(pc, sizeof(IntType), msg)) {
+ return IntType{0};
}
+ return ReadLittleEndianValue<IntType>(pc);
+ }
- const int kMaxLength = (sizeof(IntType) * 8 + 6) / 7;
- const byte* ptr = base + offset;
- const byte* end = ptr + kMaxLength;
- if (end > end_) end = end_;
+ template <typename IntType>
+ inline IntType consume_little_endian(const char* name) {
+ TRACE(" +%d %-20s: ", static_cast<int>(pc_ - start_), name);
+ if (!checkAvailable(sizeof(IntType))) {
+ traceOffEnd();
+ pc_ = end_;
+ return IntType{0};
+ }
+ IntType val = read_little_endian<IntType, false>(pc_, name);
+ traceByteRange(pc_, pc_ + sizeof(IntType));
+ TRACE("= %d\n", val);
+ pc_ += sizeof(IntType);
+ return val;
+ }
+
+ template <typename IntType, bool checked, bool advance_pc, bool trace>
+ inline IntType read_leb(const byte* pc, unsigned* length,
+ const char* name = "varint") {
+ DCHECK_IMPLIES(advance_pc, pc == pc_);
+ constexpr bool is_signed = std::is_signed<IntType>::value;
+ TRACE_IF(trace, " +%d %-20s: ", static_cast<int>(pc - start_), name);
+ constexpr int kMaxLength = (sizeof(IntType) * 8 + 6) / 7;
+ const byte* ptr = pc;
+ const byte* end = Min(end_, ptr + kMaxLength);
+ // The end variable is only used if checked == true. MSVC recognizes this.
+ USE(end);
int shift = 0;
byte b = 0;
IntType result = 0;
- while (ptr < end) {
+ do {
+ if (checked && V8_UNLIKELY(ptr >= end)) {
+ TRACE_IF(trace,
+ ptr == pc + kMaxLength ? "<length overflow> " : "<end> ");
+ errorf(ptr, "expected %s", name);
+ result = 0;
+ break;
+ }
+ DCHECK_GT(end, ptr);
b = *ptr++;
- result = result | (static_cast<IntType>(b & 0x7F) << shift);
- if ((b & 0x80) == 0) break;
+ TRACE_IF(trace, "%02x ", b);
+ result = result | ((static_cast<IntType>(b) & 0x7F) << shift);
shift += 7;
- }
- DCHECK_LE(ptr - (base + offset), kMaxLength);
- *length = static_cast<unsigned>(ptr - (base + offset));
- if (ptr == end) {
- // Check there are no bits set beyond the bitwidth of {IntType}.
- const int kExtraBits = (1 + kMaxLength * 7) - (sizeof(IntType) * 8);
- const byte kExtraBitsMask =
- static_cast<byte>((0xFF << (8 - kExtraBits)) & 0xFF);
- int extra_bits_value;
- if (is_signed) {
- // A signed-LEB128 must sign-extend the final byte, excluding its
- // most-signifcant bit. e.g. for a 32-bit LEB128:
- // kExtraBits = 4
- // kExtraBitsMask = 0xf0
- // If b is 0x0f, the value is negative, so extra_bits_value is 0x70.
- // If b is 0x03, the value is positive, so extra_bits_value is 0x00.
- extra_bits_value = (static_cast<int8_t>(b << kExtraBits) >> 8) &
- kExtraBitsMask & ~0x80;
- } else {
- extra_bits_value = 0;
- }
- if (*length == kMaxLength && (b & kExtraBitsMask) != extra_bits_value) {
- error(base, ptr, "extra bits in varint");
- return 0;
- }
- if ((b & 0x80) != 0) {
- error(base, ptr, "%s", msg);
- return 0;
+ } while (b & 0x80);
+ DCHECK_LE(ptr - pc, kMaxLength);
+ *length = static_cast<unsigned>(ptr - pc);
+ if (advance_pc) pc_ = ptr;
+ if (*length == kMaxLength) {
+ // A signed-LEB128 must sign-extend the final byte, excluding its
+ // most-significant bit; e.g. for a 32-bit LEB128:
+ // kExtraBits = 4 (== 32 - (5-1) * 7)
+ // For unsigned values, the extra bits must be all zero.
+ // For signed values, the extra bits *plus* the most significant bit must
+ // either be 0, or all ones.
+ constexpr int kExtraBits = (sizeof(IntType) * 8) - ((kMaxLength - 1) * 7);
+ constexpr int kSignExtBits = kExtraBits - (is_signed ? 1 : 0);
+ const byte checked_bits = b & (0xFF << kSignExtBits);
+ constexpr byte kSignExtendedExtraBits = 0x7f & (0xFF << kSignExtBits);
+ bool valid_extra_bits =
+ checked_bits == 0 ||
+ (is_signed && checked_bits == kSignExtendedExtraBits);
+ if (!checked) {
+ DCHECK(valid_extra_bits);
+ } else if (!valid_extra_bits) {
+ error(ptr, "extra bits in varint");
+ result = 0;
}
}
- return result;
- }
-
- template <typename IntType, bool is_signed>
- IntType consume_leb(const char* name = nullptr) {
- TRACE(" +%d %-20s: ", static_cast<int>(pc_ - start_),
- name ? name : "varint");
- if (checkAvailable(1)) {
- const int kMaxLength = (sizeof(IntType) * 8 + 6) / 7;
- const byte* pos = pc_;
- const byte* end = pc_ + kMaxLength;
- if (end > end_) end = end_;
-
- IntType result = 0;
- int shift = 0;
- byte b = 0;
- while (pc_ < end) {
- b = *pc_++;
- TRACE("%02x ", b);
- result = result | (static_cast<IntType>(b & 0x7F) << shift);
- shift += 7;
- if ((b & 0x80) == 0) break;
- }
-
- int length = static_cast<int>(pc_ - pos);
- if (pc_ == end && (b & 0x80)) {
- TRACE("\n");
- error(pc_ - 1, "varint too large");
- } else if (length == 0) {
- TRACE("\n");
- error(pc_, "varint of length 0");
- } else if (is_signed) {
- if (length < kMaxLength) {
- int sign_ext_shift = 8 * sizeof(IntType) - shift;
- // Perform sign extension.
- result = (result << sign_ext_shift) >> sign_ext_shift;
- }
- TRACE("= %" PRIi64 "\n", static_cast<int64_t>(result));
- } else {
- TRACE("= %" PRIu64 "\n", static_cast<uint64_t>(result));
- }
- return result;
+ if (is_signed && *length < kMaxLength) {
+ int sign_ext_shift = 8 * sizeof(IntType) - shift;
+ // Perform sign extension.
+ result = (result << sign_ext_shift) >> sign_ext_shift;
}
- return traceOffEnd<uint32_t>();
+ if (trace && is_signed) {
+ TRACE("= %" PRIi64 "\n", static_cast<int64_t>(result));
+ } else if (trace) {
+ TRACE("= %" PRIu64 "\n", static_cast<uint64_t>(result));
+ }
+ return result;
}
};
diff --git a/deps/v8/src/wasm/function-body-decoder-impl.h b/deps/v8/src/wasm/function-body-decoder-impl.h
index 6759ed6f2a..27e95b2138 100644
--- a/deps/v8/src/wasm/function-body-decoder-impl.h
+++ b/deps/v8/src/wasm/function-body-decoder-impl.h
@@ -14,96 +14,103 @@ namespace wasm {
struct WasmGlobal;
+// Use this macro to check a condition if checked == true, and DCHECK the
+// condition otherwise.
+#define CHECKED_COND(cond) \
+ (checked ? (cond) : ([&] { \
+ DCHECK(cond); \
+ return true; \
+ })())
+
// Helpers for decoding different kinds of operands which follow bytecodes.
+template <bool checked>
struct LocalIndexOperand {
uint32_t index;
- ValueType type;
+ ValueType type = kWasmStmt;
unsigned length;
inline LocalIndexOperand(Decoder* decoder, const byte* pc) {
- index = decoder->checked_read_u32v(pc, 1, &length, "local index");
- type = kWasmStmt;
+ index = decoder->read_u32v<checked>(pc + 1, &length, "local index");
}
};
+template <bool checked>
struct ImmI32Operand {
int32_t value;
unsigned length;
inline ImmI32Operand(Decoder* decoder, const byte* pc) {
- value = decoder->checked_read_i32v(pc, 1, &length, "immi32");
+ value = decoder->read_i32v<checked>(pc + 1, &length, "immi32");
}
};
+template <bool checked>
struct ImmI64Operand {
int64_t value;
unsigned length;
inline ImmI64Operand(Decoder* decoder, const byte* pc) {
- value = decoder->checked_read_i64v(pc, 1, &length, "immi64");
+ value = decoder->read_i64v<checked>(pc + 1, &length, "immi64");
}
};
+template <bool checked>
struct ImmF32Operand {
float value;
- unsigned length;
+ unsigned length = 4;
inline ImmF32Operand(Decoder* decoder, const byte* pc) {
// Avoid bit_cast because it might not preserve the signalling bit of a NaN.
- uint32_t tmp = decoder->checked_read_u32(pc, 1, "immf32");
+ uint32_t tmp = decoder->read_u32<checked>(pc + 1, "immf32");
memcpy(&value, &tmp, sizeof(value));
- length = 4;
}
};
+template <bool checked>
struct ImmF64Operand {
double value;
- unsigned length;
+ unsigned length = 8;
inline ImmF64Operand(Decoder* decoder, const byte* pc) {
// Avoid bit_cast because it might not preserve the signalling bit of a NaN.
- uint64_t tmp = decoder->checked_read_u64(pc, 1, "immf64");
+ uint64_t tmp = decoder->read_u64<checked>(pc + 1, "immf64");
memcpy(&value, &tmp, sizeof(value));
- length = 8;
}
};
+template <bool checked>
struct GlobalIndexOperand {
uint32_t index;
- ValueType type;
- const WasmGlobal* global;
+ ValueType type = kWasmStmt;
+ const WasmGlobal* global = nullptr;
unsigned length;
inline GlobalIndexOperand(Decoder* decoder, const byte* pc) {
- index = decoder->checked_read_u32v(pc, 1, &length, "global index");
- global = nullptr;
- type = kWasmStmt;
+ index = decoder->read_u32v<checked>(pc + 1, &length, "global index");
}
};
+template <bool checked>
struct BlockTypeOperand {
- uint32_t arity;
- const byte* types; // pointer to encoded types for the block.
- unsigned length;
+ uint32_t arity = 0;
+ const byte* types = nullptr; // pointer to encoded types for the block.
+ unsigned length = 1;
inline BlockTypeOperand(Decoder* decoder, const byte* pc) {
- uint8_t val = decoder->checked_read_u8(pc, 1, "block type");
+ uint8_t val = decoder->read_u8<checked>(pc + 1, "block type");
ValueType type = kWasmStmt;
- length = 1;
- arity = 0;
- types = nullptr;
if (decode_local_type(val, &type)) {
arity = type == kWasmStmt ? 0 : 1;
types = pc + 1;
} else {
// Handle multi-value blocks.
- if (!FLAG_wasm_mv_prototype) {
- decoder->error(pc, pc + 1, "invalid block arity > 1");
+ if (!CHECKED_COND(FLAG_wasm_mv_prototype)) {
+ decoder->error(pc + 1, "invalid block arity > 1");
return;
}
- if (val != kMultivalBlock) {
- decoder->error(pc, pc + 1, "invalid block type");
+ if (!CHECKED_COND(val == kMultivalBlock)) {
+ decoder->error(pc + 1, "invalid block type");
return;
}
// Decode and check the types vector of the block.
unsigned len = 0;
- uint32_t count = decoder->checked_read_u32v(pc, 2, &len, "block arity");
+ uint32_t count = decoder->read_u32v<checked>(pc + 2, &len, "block arity");
// {count} is encoded as {arity-2}, so that a {0} count here corresponds
// to a block with 2 values. This makes invalid/redundant encodings
// impossible.
@@ -113,18 +120,19 @@ struct BlockTypeOperand {
for (uint32_t i = 0; i < arity; i++) {
uint32_t offset = 1 + 1 + len + i;
- val = decoder->checked_read_u8(pc, offset, "block type");
+ val = decoder->read_u8<checked>(pc + offset, "block type");
decode_local_type(val, &type);
- if (type == kWasmStmt) {
- decoder->error(pc, pc + offset, "invalid block type");
+ if (!CHECKED_COND(type != kWasmStmt)) {
+ decoder->error(pc + offset, "invalid block type");
return;
}
}
}
}
+
// Decode a byte representing a local type. Return {false} if the encoded
// byte was invalid or {kMultivalBlock}.
- bool decode_local_type(uint8_t val, ValueType* result) {
+ inline bool decode_local_type(uint8_t val, ValueType* result) {
switch (static_cast<ValueTypeCode>(val)) {
case kLocalVoid:
*result = kWasmStmt;
@@ -167,77 +175,72 @@ struct BlockTypeOperand {
};
struct Control;
+template <bool checked>
struct BreakDepthOperand {
uint32_t depth;
- Control* target;
+ Control* target = nullptr;
unsigned length;
inline BreakDepthOperand(Decoder* decoder, const byte* pc) {
- depth = decoder->checked_read_u32v(pc, 1, &length, "break depth");
- target = nullptr;
+ depth = decoder->read_u32v<checked>(pc + 1, &length, "break depth");
}
};
+template <bool checked>
struct CallIndirectOperand {
uint32_t table_index;
uint32_t index;
- FunctionSig* sig;
+ FunctionSig* sig = nullptr;
unsigned length;
inline CallIndirectOperand(Decoder* decoder, const byte* pc) {
unsigned len = 0;
- index = decoder->checked_read_u32v(pc, 1, &len, "signature index");
- table_index = decoder->checked_read_u8(pc, 1 + len, "table index");
- if (table_index != 0) {
- decoder->error(pc, pc + 1 + len, "expected table index 0, found %u",
- table_index);
+ index = decoder->read_u32v<checked>(pc + 1, &len, "signature index");
+ table_index = decoder->read_u8<checked>(pc + 1 + len, "table index");
+ if (!CHECKED_COND(table_index == 0)) {
+ decoder->errorf(pc + 1 + len, "expected table index 0, found %u",
+ table_index);
}
length = 1 + len;
- sig = nullptr;
}
};
+template <bool checked>
struct CallFunctionOperand {
uint32_t index;
- FunctionSig* sig;
+ FunctionSig* sig = nullptr;
unsigned length;
inline CallFunctionOperand(Decoder* decoder, const byte* pc) {
- unsigned len1 = 0;
- unsigned len2 = 0;
- index = decoder->checked_read_u32v(pc, 1 + len1, &len2, "function index");
- length = len1 + len2;
- sig = nullptr;
+ index = decoder->read_u32v<checked>(pc + 1, &length, "function index");
}
};
+template <bool checked>
struct MemoryIndexOperand {
uint32_t index;
- unsigned length;
+ unsigned length = 1;
inline MemoryIndexOperand(Decoder* decoder, const byte* pc) {
- index = decoder->checked_read_u8(pc, 1, "memory index");
- if (index != 0) {
- decoder->error(pc, pc + 1, "expected memory index 0, found %u", index);
+ index = decoder->read_u8<checked>(pc + 1, "memory index");
+ if (!CHECKED_COND(index == 0)) {
+ decoder->errorf(pc + 1, "expected memory index 0, found %u", index);
}
- length = 1;
}
};
+template <bool checked>
struct BranchTableOperand {
uint32_t table_count;
const byte* start;
const byte* table;
inline BranchTableOperand(Decoder* decoder, const byte* pc) {
- DCHECK_EQ(kExprBrTable, decoder->checked_read_u8(pc, 0, "opcode"));
+ DCHECK_EQ(kExprBrTable, decoder->read_u8<checked>(pc, "opcode"));
start = pc + 1;
- unsigned len1 = 0;
- table_count = decoder->checked_read_u32v(pc, 1, &len1, "table count");
- if (table_count > (UINT_MAX / sizeof(uint32_t)) - 1 ||
- len1 > UINT_MAX - (table_count + 1) * sizeof(uint32_t)) {
- decoder->error(pc, "branch table size overflow");
- }
- table = pc + 1 + len1;
+ unsigned len = 0;
+ table_count = decoder->read_u32v<checked>(pc + 1, &len, "table count");
+ table = pc + 1 + len;
}
};
// A helper to iterate over a branch table.
+template <bool checked>
class BranchTableIterator {
public:
unsigned cur_index() { return index_; }
@@ -245,9 +248,9 @@ class BranchTableIterator {
uint32_t next() {
DCHECK(has_next());
index_++;
- unsigned length = 0;
+ unsigned length;
uint32_t result =
- decoder_->checked_read_u32v(pc_, 0, &length, "branch table entry");
+ decoder_->read_u32v<checked>(pc_, &length, "branch table entry");
pc_ += length;
return result;
}
@@ -259,7 +262,7 @@ class BranchTableIterator {
}
const byte* pc() { return pc_; }
- BranchTableIterator(Decoder* decoder, BranchTableOperand& operand)
+ BranchTableIterator(Decoder* decoder, BranchTableOperand<checked>& operand)
: decoder_(decoder),
start_(operand.start),
pc_(operand.table),
@@ -274,6 +277,7 @@ class BranchTableIterator {
uint32_t table_count_; // the count of entries, not including default.
};
+template <bool checked>
struct MemoryAccessOperand {
uint32_t alignment;
uint32_t offset;
@@ -282,42 +286,44 @@ struct MemoryAccessOperand {
uint32_t max_alignment) {
unsigned alignment_length;
alignment =
- decoder->checked_read_u32v(pc, 1, &alignment_length, "alignment");
- if (max_alignment < alignment) {
- decoder->error(pc, pc + 1,
- "invalid alignment; expected maximum alignment is %u, "
- "actual alignment is %u",
- max_alignment, alignment);
+ decoder->read_u32v<checked>(pc + 1, &alignment_length, "alignment");
+ if (!CHECKED_COND(alignment <= max_alignment)) {
+ decoder->errorf(pc + 1,
+ "invalid alignment; expected maximum alignment is %u, "
+ "actual alignment is %u",
+ max_alignment, alignment);
}
unsigned offset_length;
- offset = decoder->checked_read_u32v(pc, 1 + alignment_length,
- &offset_length, "offset");
+ offset = decoder->read_u32v<checked>(pc + 1 + alignment_length,
+ &offset_length, "offset");
length = alignment_length + offset_length;
}
};
// Operand for SIMD lane operations.
+template <bool checked>
struct SimdLaneOperand {
uint8_t lane;
- unsigned length;
+ unsigned length = 1;
inline SimdLaneOperand(Decoder* decoder, const byte* pc) {
- lane = decoder->checked_read_u8(pc, 2, "lane");
- length = 1;
+ lane = decoder->read_u8<checked>(pc + 2, "lane");
}
};
// Operand for SIMD shift operations.
+template <bool checked>
struct SimdShiftOperand {
uint8_t shift;
- unsigned length;
+ unsigned length = 1;
inline SimdShiftOperand(Decoder* decoder, const byte* pc) {
- shift = decoder->checked_read_u8(pc, 2, "shift");
- length = 1;
+ shift = decoder->read_u8<checked>(pc + 2, "shift");
}
};
+#undef CHECKED_COND
+
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/wasm/function-body-decoder.cc b/deps/v8/src/wasm/function-body-decoder.cc
index dc2f83b459..cae2fcca78 100644
--- a/deps/v8/src/wasm/function-body-decoder.cc
+++ b/deps/v8/src/wasm/function-body-decoder.cc
@@ -35,13 +35,13 @@ namespace wasm {
#define TRACE(...)
#endif
-#define CHECK_PROTOTYPE_OPCODE(flag) \
- if (module_ != nullptr && module_->origin == kAsmJsOrigin) { \
- error("Opcode not supported for asmjs modules"); \
- } \
- if (!FLAG_##flag) { \
- error("Invalid opcode (enable with --" #flag ")"); \
- break; \
+#define CHECK_PROTOTYPE_OPCODE(flag) \
+ if (module_ != nullptr && module_->is_asm_js()) { \
+ error("Opcode not supported for asmjs modules"); \
+ } \
+ if (!FLAG_##flag) { \
+ error("Invalid opcode (enable with --" #flag ")"); \
+ break; \
}
// An SsaEnv environment carries the current local variable renaming
@@ -177,10 +177,7 @@ class WasmDecoder : public Decoder {
DCHECK_NOT_NULL(type_list);
// Initialize from signature.
if (sig != nullptr) {
- type_list->reserve(sig->parameter_count());
- for (size_t i = 0; i < sig->parameter_count(); ++i) {
- type_list->push_back(sig->GetParam(i));
- }
+ type_list->assign(sig->parameters().begin(), sig->parameters().end());
}
// Decode local declarations, if any.
uint32_t entries = decoder->consume_u32v("local decls count");
@@ -255,7 +252,7 @@ class WasmDecoder : public Decoder {
break;
case kExprSetLocal: // fallthru
case kExprTeeLocal: {
- LocalIndexOperand operand(decoder, pc);
+ LocalIndexOperand<true> operand(decoder, pc);
if (assigned->length() > 0 &&
operand.index < static_cast<uint32_t>(assigned->length())) {
// Unverified code might have an out-of-bounds index.
@@ -277,7 +274,7 @@ class WasmDecoder : public Decoder {
return decoder->ok() ? assigned : nullptr;
}
- inline bool Validate(const byte* pc, LocalIndexOperand& operand) {
+ inline bool Validate(const byte* pc, LocalIndexOperand<true>& operand) {
if (operand.index < total_locals()) {
if (local_types_) {
operand.type = local_types_->at(operand.index);
@@ -286,21 +283,21 @@ class WasmDecoder : public Decoder {
}
return true;
}
- error(pc, pc + 1, "invalid local index: %u", operand.index);
+ errorf(pc + 1, "invalid local index: %u", operand.index);
return false;
}
- inline bool Validate(const byte* pc, GlobalIndexOperand& operand) {
+ inline bool Validate(const byte* pc, GlobalIndexOperand<true>& operand) {
if (module_ != nullptr && operand.index < module_->globals.size()) {
operand.global = &module_->globals[operand.index];
operand.type = operand.global->type;
return true;
}
- error(pc, pc + 1, "invalid global index: %u", operand.index);
+ errorf(pc + 1, "invalid global index: %u", operand.index);
return false;
}
- inline bool Complete(const byte* pc, CallFunctionOperand& operand) {
+ inline bool Complete(const byte* pc, CallFunctionOperand<true>& operand) {
if (module_ != nullptr && operand.index < module_->functions.size()) {
operand.sig = module_->functions[operand.index].sig;
return true;
@@ -308,15 +305,15 @@ class WasmDecoder : public Decoder {
return false;
}
- inline bool Validate(const byte* pc, CallFunctionOperand& operand) {
+ inline bool Validate(const byte* pc, CallFunctionOperand<true>& operand) {
if (Complete(pc, operand)) {
return true;
}
- error(pc, pc + 1, "invalid function index: %u", operand.index);
+ errorf(pc + 1, "invalid function index: %u", operand.index);
return false;
}
- inline bool Complete(const byte* pc, CallIndirectOperand& operand) {
+ inline bool Complete(const byte* pc, CallIndirectOperand<true>& operand) {
if (module_ != nullptr && operand.index < module_->signatures.size()) {
operand.sig = module_->signatures[operand.index];
return true;
@@ -324,7 +321,7 @@ class WasmDecoder : public Decoder {
return false;
}
- inline bool Validate(const byte* pc, CallIndirectOperand& operand) {
+ inline bool Validate(const byte* pc, CallIndirectOperand<true>& operand) {
if (module_ == nullptr || module_->function_tables.empty()) {
error("function table has to exist to execute call_indirect");
return false;
@@ -332,28 +329,28 @@ class WasmDecoder : public Decoder {
if (Complete(pc, operand)) {
return true;
}
- error(pc, pc + 1, "invalid signature index: #%u", operand.index);
+ errorf(pc + 1, "invalid signature index: #%u", operand.index);
return false;
}
- inline bool Validate(const byte* pc, BreakDepthOperand& operand,
+ inline bool Validate(const byte* pc, BreakDepthOperand<true>& operand,
ZoneVector<Control>& control) {
if (operand.depth < control.size()) {
operand.target = &control[control.size() - operand.depth - 1];
return true;
}
- error(pc, pc + 1, "invalid break depth: %u", operand.depth);
+ errorf(pc + 1, "invalid break depth: %u", operand.depth);
return false;
}
- bool Validate(const byte* pc, BranchTableOperand& operand,
+ bool Validate(const byte* pc, BranchTableOperand<true>& operand,
size_t block_depth) {
// TODO(titzer): add extra redundant validation for br_table here?
return true;
}
inline bool Validate(const byte* pc, WasmOpcode opcode,
- SimdLaneOperand& operand) {
+ SimdLaneOperand<true>& operand) {
uint8_t num_lanes = 0;
switch (opcode) {
case kExprF32x4ExtractLane:
@@ -375,7 +372,7 @@ class WasmDecoder : public Decoder {
break;
}
if (operand.lane < 0 || operand.lane >= num_lanes) {
- error(pc_, pc_ + 2, "invalid lane index");
+ error(pc_ + 2, "invalid lane index");
return false;
} else {
return true;
@@ -383,7 +380,7 @@ class WasmDecoder : public Decoder {
}
inline bool Validate(const byte* pc, WasmOpcode opcode,
- SimdShiftOperand& operand) {
+ SimdShiftOperand<true>& operand) {
uint8_t max_shift = 0;
switch (opcode) {
case kExprI32x4Shl:
@@ -406,7 +403,7 @@ class WasmDecoder : public Decoder {
break;
}
if (operand.shift < 0 || operand.shift >= max_shift) {
- error(pc_, pc_ + 2, "invalid shift amount");
+ error(pc_ + 2, "invalid shift amount");
return false;
} else {
return true;
@@ -420,26 +417,26 @@ class WasmDecoder : public Decoder {
FOREACH_STORE_MEM_OPCODE(DECLARE_OPCODE_CASE)
#undef DECLARE_OPCODE_CASE
{
- MemoryAccessOperand operand(decoder, pc, UINT32_MAX);
+ MemoryAccessOperand<true> operand(decoder, pc, UINT32_MAX);
return 1 + operand.length;
}
case kExprBr:
case kExprBrIf: {
- BreakDepthOperand operand(decoder, pc);
+ BreakDepthOperand<true> operand(decoder, pc);
return 1 + operand.length;
}
case kExprSetGlobal:
case kExprGetGlobal: {
- GlobalIndexOperand operand(decoder, pc);
+ GlobalIndexOperand<true> operand(decoder, pc);
return 1 + operand.length;
}
case kExprCallFunction: {
- CallFunctionOperand operand(decoder, pc);
+ CallFunctionOperand<true> operand(decoder, pc);
return 1 + operand.length;
}
case kExprCallIndirect: {
- CallIndirectOperand operand(decoder, pc);
+ CallIndirectOperand<true> operand(decoder, pc);
return 1 + operand.length;
}
@@ -447,7 +444,7 @@ class WasmDecoder : public Decoder {
case kExprIf: // fall thru
case kExprLoop:
case kExprBlock: {
- BlockTypeOperand operand(decoder, pc);
+ BlockTypeOperand<true> operand(decoder, pc);
return 1 + operand.length;
}
@@ -455,25 +452,25 @@ class WasmDecoder : public Decoder {
case kExprTeeLocal:
case kExprGetLocal:
case kExprCatch: {
- LocalIndexOperand operand(decoder, pc);
+ LocalIndexOperand<true> operand(decoder, pc);
return 1 + operand.length;
}
case kExprBrTable: {
- BranchTableOperand operand(decoder, pc);
- BranchTableIterator iterator(decoder, operand);
+ BranchTableOperand<true> operand(decoder, pc);
+ BranchTableIterator<true> iterator(decoder, operand);
return 1 + iterator.length();
}
case kExprI32Const: {
- ImmI32Operand operand(decoder, pc);
+ ImmI32Operand<true> operand(decoder, pc);
return 1 + operand.length;
}
case kExprI64Const: {
- ImmI64Operand operand(decoder, pc);
+ ImmI64Operand<true> operand(decoder, pc);
return 1 + operand.length;
}
case kExprGrowMemory:
case kExprMemorySize: {
- MemoryIndexOperand operand(decoder, pc);
+ MemoryIndexOperand<true> operand(decoder, pc);
return 1 + operand.length;
}
case kExprF32Const:
@@ -481,7 +478,7 @@ class WasmDecoder : public Decoder {
case kExprF64Const:
return 9;
case kSimdPrefix: {
- byte simd_index = decoder->checked_read_u8(pc, 1, "simd_index");
+ byte simd_index = decoder->read_u8<true>(pc + 1, "simd_index");
WasmOpcode opcode =
static_cast<WasmOpcode>(kSimdPrefix << 8 | simd_index);
switch (opcode) {
@@ -551,7 +548,7 @@ class WasmFullDecoder : public WasmDecoder {
// Generate a better error message whether the unterminated control
// structure is the function body block or an innner structure.
if (control_.size() > 1) {
- error(pc_, control_.back().pc, "unterminated control structure");
+ error(control_.back().pc, "unterminated control structure");
} else {
error("function body must end with \"end\" opcode.");
}
@@ -575,7 +572,7 @@ class WasmFullDecoder : public WasmDecoder {
bool TraceFailed() {
TRACE("wasm-error module+%-6d func+%d: %s\n\n", baserel(error_pc_),
- startrel(error_pc_), error_msg_.get());
+ startrel(error_pc_), error_msg_.c_str());
return false;
}
@@ -660,7 +657,13 @@ class WasmFullDecoder : public WasmDecoder {
case kWasmF64:
return builder_->Float64Constant(0);
case kWasmS128:
- return builder_->CreateS128Value(0);
+ return builder_->S128Zero();
+ case kWasmS1x4:
+ return builder_->S1x4Zero();
+ case kWasmS1x8:
+ return builder_->S1x8Zero();
+ case kWasmS1x16:
+ return builder_->S1x16Zero();
default:
UNREACHABLE();
return nullptr;
@@ -730,7 +733,7 @@ class WasmFullDecoder : public WasmDecoder {
break;
case kExprBlock: {
// The break environment is the outer environment.
- BlockTypeOperand operand(this, pc_);
+ BlockTypeOperand<true> operand(this, pc_);
SsaEnv* break_env = ssa_env_;
PushBlock(break_env);
SetEnv("block:start", Steal(break_env));
@@ -750,7 +753,7 @@ class WasmFullDecoder : public WasmDecoder {
}
case kExprTry: {
CHECK_PROTOTYPE_OPCODE(wasm_eh_prototype);
- BlockTypeOperand operand(this, pc_);
+ BlockTypeOperand<true> operand(this, pc_);
SsaEnv* outer_env = ssa_env_;
SsaEnv* try_env = Steal(outer_env);
SsaEnv* catch_env = UnreachableEnv();
@@ -762,7 +765,7 @@ class WasmFullDecoder : public WasmDecoder {
}
case kExprCatch: {
CHECK_PROTOTYPE_OPCODE(wasm_eh_prototype);
- LocalIndexOperand operand(this, pc_);
+ LocalIndexOperand<true> operand(this, pc_);
len = 1 + operand.length;
if (control_.empty()) {
@@ -801,7 +804,7 @@ class WasmFullDecoder : public WasmDecoder {
break;
}
case kExprLoop: {
- BlockTypeOperand operand(this, pc_);
+ BlockTypeOperand<true> operand(this, pc_);
SsaEnv* finish_try_env = Steal(ssa_env_);
// The continue environment is the inner environment.
SsaEnv* loop_body_env = PrepareForLoop(pc_, finish_try_env);
@@ -814,7 +817,7 @@ class WasmFullDecoder : public WasmDecoder {
}
case kExprIf: {
// Condition on top of stack. Split environments for branches.
- BlockTypeOperand operand(this, pc_);
+ BlockTypeOperand<true> operand(this, pc_);
Value cond = Pop(0, kWasmI32);
TFNode* if_true = nullptr;
TFNode* if_false = nullptr;
@@ -837,11 +840,11 @@ class WasmFullDecoder : public WasmDecoder {
}
Control* c = &control_.back();
if (!c->is_if()) {
- error(pc_, c->pc, "else does not match an if");
+ error(pc_, "else does not match an if");
break;
}
if (c->false_env == nullptr) {
- error(pc_, c->pc, "else already present for if");
+ error(pc_, "else already present for if");
break;
}
FallThruTo(c);
@@ -898,7 +901,7 @@ class WasmFullDecoder : public WasmDecoder {
if (control_.size() == 1) {
// If at the last (implicit) control, check we are at end.
if (pc_ + 1 != end_) {
- error(pc_, pc_ + 1, "trailing code after function end");
+ error(pc_ + 1, "trailing code after function end");
break;
}
last_end_found_ = true;
@@ -932,7 +935,7 @@ class WasmFullDecoder : public WasmDecoder {
break;
}
case kExprBr: {
- BreakDepthOperand operand(this, pc_);
+ BreakDepthOperand<true> operand(this, pc_);
if (Validate(pc_, operand, control_)) {
BreakTo(operand.depth);
}
@@ -941,7 +944,7 @@ class WasmFullDecoder : public WasmDecoder {
break;
}
case kExprBrIf: {
- BreakDepthOperand operand(this, pc_);
+ BreakDepthOperand<true> operand(this, pc_);
Value cond = Pop(0, kWasmI32);
if (ok() && Validate(pc_, operand, control_)) {
SsaEnv* fenv = ssa_env_;
@@ -956,8 +959,8 @@ class WasmFullDecoder : public WasmDecoder {
break;
}
case kExprBrTable: {
- BranchTableOperand operand(this, pc_);
- BranchTableIterator iterator(this, operand);
+ BranchTableOperand<true> operand(this, pc_);
+ BranchTableIterator<true> iterator(this, operand);
if (Validate(pc_, operand, control_.size())) {
Value key = Pop(0, kWasmI32);
if (failed()) break;
@@ -985,21 +988,24 @@ class WasmFullDecoder : public WasmDecoder {
BreakTo(target);
// Check that label types match up.
+ static MergeValues loop_dummy = {0, {nullptr}};
Control* c = &control_[control_.size() - target - 1];
+ MergeValues* current = c->is_loop() ? &loop_dummy : &c->merge;
if (i == 0) {
- merge = &c->merge;
- } else if (merge->arity != c->merge.arity) {
- error(pos, pos, "inconsistent arity in br_table target %d"
- " (previous was %u, this one %u)",
- i, merge->arity, c->merge.arity);
+ merge = current;
+ } else if (merge->arity != current->arity) {
+ errorf(pos,
+ "inconsistent arity in br_table target %d"
+ " (previous was %u, this one %u)",
+ i, merge->arity, current->arity);
} else if (control_.back().unreachable) {
for (uint32_t j = 0; ok() && j < merge->arity; ++j) {
- if ((*merge)[j].type != c->merge[j].type) {
- error(pos, pos,
- "type error in br_table target %d operand %d"
- " (previous expected %s, this one %s)", i, j,
- WasmOpcodes::TypeName((*merge)[j].type),
- WasmOpcodes::TypeName(c->merge[j].type));
+ if ((*merge)[j].type != (*current)[j].type) {
+ errorf(pos,
+ "type error in br_table target %d operand %d"
+ " (previous expected %s, this one %s)",
+ i, j, WasmOpcodes::TypeName((*merge)[j].type),
+ WasmOpcodes::TypeName((*current)[j].type));
}
}
}
@@ -1032,31 +1038,31 @@ class WasmFullDecoder : public WasmDecoder {
break;
}
case kExprI32Const: {
- ImmI32Operand operand(this, pc_);
+ ImmI32Operand<true> operand(this, pc_);
Push(kWasmI32, BUILD(Int32Constant, operand.value));
len = 1 + operand.length;
break;
}
case kExprI64Const: {
- ImmI64Operand operand(this, pc_);
+ ImmI64Operand<true> operand(this, pc_);
Push(kWasmI64, BUILD(Int64Constant, operand.value));
len = 1 + operand.length;
break;
}
case kExprF32Const: {
- ImmF32Operand operand(this, pc_);
+ ImmF32Operand<true> operand(this, pc_);
Push(kWasmF32, BUILD(Float32Constant, operand.value));
len = 1 + operand.length;
break;
}
case kExprF64Const: {
- ImmF64Operand operand(this, pc_);
+ ImmF64Operand<true> operand(this, pc_);
Push(kWasmF64, BUILD(Float64Constant, operand.value));
len = 1 + operand.length;
break;
}
case kExprGetLocal: {
- LocalIndexOperand operand(this, pc_);
+ LocalIndexOperand<true> operand(this, pc_);
if (Validate(pc_, operand)) {
if (build()) {
Push(operand.type, ssa_env_->locals[operand.index]);
@@ -1068,7 +1074,7 @@ class WasmFullDecoder : public WasmDecoder {
break;
}
case kExprSetLocal: {
- LocalIndexOperand operand(this, pc_);
+ LocalIndexOperand<true> operand(this, pc_);
if (Validate(pc_, operand)) {
Value val = Pop(0, local_type_vec_[operand.index]);
if (ssa_env_->locals) ssa_env_->locals[operand.index] = val.node;
@@ -1077,7 +1083,7 @@ class WasmFullDecoder : public WasmDecoder {
break;
}
case kExprTeeLocal: {
- LocalIndexOperand operand(this, pc_);
+ LocalIndexOperand<true> operand(this, pc_);
if (Validate(pc_, operand)) {
Value val = Pop(0, local_type_vec_[operand.index]);
if (ssa_env_->locals) ssa_env_->locals[operand.index] = val.node;
@@ -1091,7 +1097,7 @@ class WasmFullDecoder : public WasmDecoder {
break;
}
case kExprGetGlobal: {
- GlobalIndexOperand operand(this, pc_);
+ GlobalIndexOperand<true> operand(this, pc_);
if (Validate(pc_, operand)) {
Push(operand.type, BUILD(GetGlobal, operand.index));
}
@@ -1099,14 +1105,14 @@ class WasmFullDecoder : public WasmDecoder {
break;
}
case kExprSetGlobal: {
- GlobalIndexOperand operand(this, pc_);
+ GlobalIndexOperand<true> operand(this, pc_);
if (Validate(pc_, operand)) {
if (operand.global->mutability) {
Value val = Pop(0, operand.type);
BUILD(SetGlobal, operand.index, val.node);
} else {
- error(pc_, pc_ + 1, "immutable global #%u cannot be assigned",
- operand.index);
+ errorf(pc_, "immutable global #%u cannot be assigned",
+ operand.index);
}
}
len = 1 + operand.length;
@@ -1154,6 +1160,10 @@ class WasmFullDecoder : public WasmDecoder {
case kExprF64LoadMem:
len = DecodeLoadMem(kWasmF64, MachineType::Float64());
break;
+ case kExprS128LoadMem:
+ CHECK_PROTOTYPE_OPCODE(wasm_simd_prototype);
+ len = DecodeLoadMem(kWasmS128, MachineType::Simd128());
+ break;
case kExprI32StoreMem8:
len = DecodeStoreMem(kWasmI32, MachineType::Int8());
break;
@@ -1181,11 +1191,15 @@ class WasmFullDecoder : public WasmDecoder {
case kExprF64StoreMem:
len = DecodeStoreMem(kWasmF64, MachineType::Float64());
break;
+ case kExprS128StoreMem:
+ CHECK_PROTOTYPE_OPCODE(wasm_simd_prototype);
+ len = DecodeStoreMem(kWasmS128, MachineType::Simd128());
+ break;
case kExprGrowMemory: {
if (!CheckHasMemory()) break;
- MemoryIndexOperand operand(this, pc_);
+ MemoryIndexOperand<true> operand(this, pc_);
DCHECK_NOT_NULL(module_);
- if (module_->origin != kAsmJsOrigin) {
+ if (module_->is_wasm()) {
Value val = Pop(0, kWasmI32);
Push(kWasmI32, BUILD(GrowMemory, val.node));
} else {
@@ -1196,13 +1210,13 @@ class WasmFullDecoder : public WasmDecoder {
}
case kExprMemorySize: {
if (!CheckHasMemory()) break;
- MemoryIndexOperand operand(this, pc_);
+ MemoryIndexOperand<true> operand(this, pc_);
Push(kWasmI32, BUILD(CurrentMemoryPages));
len = 1 + operand.length;
break;
}
case kExprCallFunction: {
- CallFunctionOperand operand(this, pc_);
+ CallFunctionOperand<true> operand(this, pc_);
if (Validate(pc_, operand)) {
TFNode** buffer = PopArgs(operand.sig);
TFNode** rets = nullptr;
@@ -1213,7 +1227,7 @@ class WasmFullDecoder : public WasmDecoder {
break;
}
case kExprCallIndirect: {
- CallIndirectOperand operand(this, pc_);
+ CallIndirectOperand<true> operand(this, pc_);
if (Validate(pc_, operand)) {
Value index = Pop(0, kWasmI32);
TFNode** buffer = PopArgs(operand.sig);
@@ -1228,7 +1242,7 @@ class WasmFullDecoder : public WasmDecoder {
case kSimdPrefix: {
CHECK_PROTOTYPE_OPCODE(wasm_simd_prototype);
len++;
- byte simd_index = checked_read_u8(pc_, 1, "simd index");
+ byte simd_index = read_u8<true>(pc_ + 1, "simd index");
opcode = static_cast<WasmOpcode>(opcode << 8 | simd_index);
TRACE(" @%-4d #%-20s|", startrel(pc_),
WasmOpcodes::OpcodeName(opcode));
@@ -1236,7 +1250,7 @@ class WasmFullDecoder : public WasmDecoder {
break;
}
case kAtomicPrefix: {
- if (module_ == nullptr || module_->origin != kAsmJsOrigin) {
+ if (module_ == nullptr || !module_->is_asm_js()) {
error("Atomics are allowed only in AsmJs modules");
break;
}
@@ -1245,7 +1259,7 @@ class WasmFullDecoder : public WasmDecoder {
break;
}
len = 2;
- byte atomic_opcode = checked_read_u8(pc_, 1, "atomic index");
+ byte atomic_opcode = read_u8<true>(pc_ + 1, "atomic index");
opcode = static_cast<WasmOpcode>(opcode << 8 | atomic_opcode);
sig = WasmOpcodes::AtomicSignature(opcode);
if (sig) {
@@ -1255,7 +1269,7 @@ class WasmFullDecoder : public WasmDecoder {
}
default: {
// Deal with special asmjs opcodes.
- if (module_ != nullptr && module_->origin == kAsmJsOrigin) {
+ if (module_ != nullptr && module_->is_asm_js()) {
sig = WasmOpcodes::AsmjsSignature(opcode);
if (sig) {
BuildSimpleOperator(opcode, sig);
@@ -1310,18 +1324,18 @@ class WasmFullDecoder : public WasmDecoder {
WasmOpcodes::OpcodeName(opcode));
switch (opcode) {
case kExprI32Const: {
- ImmI32Operand operand(this, val.pc);
+ ImmI32Operand<true> operand(this, val.pc);
PrintF("[%d]", operand.value);
break;
}
case kExprGetLocal: {
- LocalIndexOperand operand(this, val.pc);
+ LocalIndexOperand<true> operand(this, val.pc);
PrintF("[%u]", operand.index);
break;
}
case kExprSetLocal: // fallthru
case kExprTeeLocal: {
- LocalIndexOperand operand(this, val.pc);
+ LocalIndexOperand<true> operand(this, val.pc);
PrintF("[%u]", operand.index);
break;
}
@@ -1346,7 +1360,7 @@ class WasmFullDecoder : public WasmDecoder {
}
}
- void SetBlockType(Control* c, BlockTypeOperand& operand) {
+ void SetBlockType(Control* c, BlockTypeOperand<true>& operand) {
c->merge.arity = operand.arity;
if (c->merge.arity == 1) {
c->merge.vals.first = {pc_, nullptr, operand.read_entry(0)};
@@ -1405,8 +1419,8 @@ class WasmFullDecoder : public WasmDecoder {
int DecodeLoadMem(ValueType type, MachineType mem_type) {
if (!CheckHasMemory()) return 0;
- MemoryAccessOperand operand(this, pc_,
- ElementSizeLog2Of(mem_type.representation()));
+ MemoryAccessOperand<true> operand(
+ this, pc_, ElementSizeLog2Of(mem_type.representation()));
Value index = Pop(0, kWasmI32);
TFNode* node = BUILD(LoadMem, type, mem_type, index.node, operand.offset,
@@ -1417,8 +1431,8 @@ class WasmFullDecoder : public WasmDecoder {
int DecodeStoreMem(ValueType type, MachineType mem_type) {
if (!CheckHasMemory()) return 0;
- MemoryAccessOperand operand(this, pc_,
- ElementSizeLog2Of(mem_type.representation()));
+ MemoryAccessOperand<true> operand(
+ this, pc_, ElementSizeLog2Of(mem_type.representation()));
Value val = Pop(1, type);
Value index = Pop(0, kWasmI32);
BUILD(StoreMem, mem_type, index.node, operand.offset, operand.alignment,
@@ -1427,7 +1441,7 @@ class WasmFullDecoder : public WasmDecoder {
}
unsigned SimdExtractLane(WasmOpcode opcode, ValueType type) {
- SimdLaneOperand operand(this, pc_);
+ SimdLaneOperand<true> operand(this, pc_);
if (Validate(pc_, opcode, operand)) {
compiler::NodeVector inputs(1, zone_);
inputs[0] = Pop(0, ValueType::kSimd128).node;
@@ -1438,7 +1452,7 @@ class WasmFullDecoder : public WasmDecoder {
}
unsigned SimdReplaceLane(WasmOpcode opcode, ValueType type) {
- SimdLaneOperand operand(this, pc_);
+ SimdLaneOperand<true> operand(this, pc_);
if (Validate(pc_, opcode, operand)) {
compiler::NodeVector inputs(2, zone_);
inputs[1] = Pop(1, type).node;
@@ -1450,7 +1464,7 @@ class WasmFullDecoder : public WasmDecoder {
}
unsigned SimdShiftOp(WasmOpcode opcode) {
- SimdShiftOperand operand(this, pc_);
+ SimdShiftOperand<true> operand(this, pc_);
if (Validate(pc_, opcode, operand)) {
compiler::NodeVector inputs(1, zone_);
inputs[0] = Pop(0, ValueType::kSimd128).node;
@@ -1564,9 +1578,9 @@ class WasmFullDecoder : public WasmDecoder {
Value Pop(int index, ValueType expected) {
Value val = Pop();
if (val.type != expected && val.type != kWasmVar && expected != kWasmVar) {
- error(pc_, val.pc, "%s[%d] expected type %s, found %s of type %s",
- SafeOpcodeNameAt(pc_), index, WasmOpcodes::TypeName(expected),
- SafeOpcodeNameAt(val.pc), WasmOpcodes::TypeName(val.type));
+ errorf(val.pc, "%s[%d] expected type %s, found %s of type %s",
+ SafeOpcodeNameAt(pc_), index, WasmOpcodes::TypeName(expected),
+ SafeOpcodeNameAt(val.pc), WasmOpcodes::TypeName(val.type));
}
return val;
}
@@ -1577,7 +1591,7 @@ class WasmFullDecoder : public WasmDecoder {
// Popping past the current control start in reachable code.
Value val = {pc_, nullptr, kWasmVar};
if (!control_.back().unreachable) {
- error(pc_, pc_, "%s found empty stack", SafeOpcodeNameAt(pc_));
+ errorf(pc_, "%s found empty stack", SafeOpcodeNameAt(pc_));
}
return val;
}
@@ -1601,8 +1615,8 @@ class WasmFullDecoder : public WasmDecoder {
// Merge the value(s) into the end of the block.
size_t expected = control_.back().stack_depth + c->merge.arity;
if (stack_.size() < expected && !control_.back().unreachable) {
- error(
- pc_, pc_,
+ errorf(
+ pc_,
"expected at least %u values on the stack for br to @%d, found %d",
c->merge.arity, startrel(c->pc),
static_cast<int>(stack_.size() - c->stack_depth));
@@ -1622,8 +1636,8 @@ class WasmFullDecoder : public WasmDecoder {
c->unreachable = false;
return;
}
- error(pc_, pc_, "expected %u elements on the stack for fallthru to @%d",
- c->merge.arity, startrel(c->pc));
+ errorf(pc_, "expected %u elements on the stack for fallthru to @%d",
+ c->merge.arity, startrel(c->pc));
}
inline Value& GetMergeValueFromStack(Control* c, size_t i) {
@@ -1636,8 +1650,8 @@ class WasmFullDecoder : public WasmDecoder {
int arity = static_cast<int>(c->merge.arity);
if (c->stack_depth + arity < stack_.size() ||
(c->stack_depth + arity != stack_.size() && !c->unreachable)) {
- error(pc_, pc_, "expected %d elements on the stack for fallthru to @%d",
- arity, startrel(c->pc));
+ errorf(pc_, "expected %d elements on the stack for fallthru to @%d",
+ arity, startrel(c->pc));
return;
}
// Typecheck the values left on the stack.
@@ -1647,8 +1661,9 @@ class WasmFullDecoder : public WasmDecoder {
Value& val = GetMergeValueFromStack(c, i);
Value& old = c->merge[i];
if (val.type != old.type) {
- error(pc_, pc_, "type error in merge[%zu] (expected %s, got %s)", i,
- WasmOpcodes::TypeName(old.type), WasmOpcodes::TypeName(val.type));
+ errorf(pc_, "type error in merge[%zu] (expected %s, got %s)", i,
+ WasmOpcodes::TypeName(old.type),
+ WasmOpcodes::TypeName(val.type));
return;
}
}
@@ -1666,8 +1681,9 @@ class WasmFullDecoder : public WasmDecoder {
Value& val = GetMergeValueFromStack(c, i);
Value& old = c->merge[i];
if (val.type != old.type && val.type != kWasmVar) {
- error(pc_, pc_, "type error in merge[%zu] (expected %s, got %s)", i,
- WasmOpcodes::TypeName(old.type), WasmOpcodes::TypeName(val.type));
+ errorf(pc_, "type error in merge[%zu] (expected %s, got %s)", i,
+ WasmOpcodes::TypeName(old.type),
+ WasmOpcodes::TypeName(val.type));
return;
}
if (builder_ && reachable) {
@@ -1853,22 +1869,20 @@ class WasmFullDecoder : public WasmDecoder {
env->control = builder_->Loop(env->control);
env->effect = builder_->EffectPhi(1, &env->effect, env->control);
builder_->Terminate(env->effect, env->control);
- if (FLAG_wasm_loop_assignment_analysis) {
- BitVector* assigned = AnalyzeLoopAssignment(
- this, pc, static_cast<int>(total_locals()), zone_);
- if (failed()) return env;
- if (assigned != nullptr) {
- // Only introduce phis for variables assigned in this loop.
- for (int i = EnvironmentCount() - 1; i >= 0; i--) {
- if (!assigned->Contains(i)) continue;
- env->locals[i] = builder_->Phi(local_type_vec_[i], 1, &env->locals[i],
- env->control);
- }
- SsaEnv* loop_body_env = Split(env);
- builder_->StackCheck(position(), &(loop_body_env->effect),
- &(loop_body_env->control));
- return loop_body_env;
+ BitVector* assigned = AnalyzeLoopAssignment(
+ this, pc, static_cast<int>(total_locals()), zone_);
+ if (failed()) return env;
+ if (assigned != nullptr) {
+ // Only introduce phis for variables assigned in this loop.
+ for (int i = EnvironmentCount() - 1; i >= 0; i--) {
+ if (!assigned->Contains(i)) continue;
+ env->locals[i] =
+ builder_->Phi(local_type_vec_[i], 1, &env->locals[i], env->control);
}
+ SsaEnv* loop_body_env = Split(env);
+ builder_->StackCheck(position(), &(loop_body_env->effect),
+ &(loop_body_env->control));
+ return loop_body_env;
}
// Conservatively introduce phis for all local variables.
@@ -1934,9 +1948,9 @@ class WasmFullDecoder : public WasmDecoder {
}
virtual void onFirstError() {
- end_ = start_; // Terminate decoding loop.
+ end_ = pc_; // Terminate decoding loop.
builder_ = nullptr; // Don't build any more nodes.
- TRACE(" !%s\n", error_msg_.get());
+ TRACE(" !%s\n", error_msg_.c_str());
}
inline wasm::WasmCodePosition position() {
@@ -2104,7 +2118,7 @@ bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
case kExprIf:
case kExprBlock:
case kExprTry: {
- BlockTypeOperand operand(&i, i.pc());
+ BlockTypeOperand<true> operand(&i, i.pc());
os << " // @" << i.pc_offset();
for (unsigned i = 0; i < operand.arity; i++) {
os << " " << WasmOpcodes::TypeName(operand.read_entry(i));
@@ -2117,22 +2131,22 @@ bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
control_depth--;
break;
case kExprBr: {
- BreakDepthOperand operand(&i, i.pc());
+ BreakDepthOperand<true> operand(&i, i.pc());
os << " // depth=" << operand.depth;
break;
}
case kExprBrIf: {
- BreakDepthOperand operand(&i, i.pc());
+ BreakDepthOperand<true> operand(&i, i.pc());
os << " // depth=" << operand.depth;
break;
}
case kExprBrTable: {
- BranchTableOperand operand(&i, i.pc());
+ BranchTableOperand<true> operand(&i, i.pc());
os << " // entries=" << operand.table_count;
break;
}
case kExprCallIndirect: {
- CallIndirectOperand operand(&i, i.pc());
+ CallIndirectOperand<true> operand(&i, i.pc());
os << " // sig #" << operand.index;
if (decoder.Complete(i.pc(), operand)) {
os << ": " << *operand.sig;
@@ -2140,7 +2154,7 @@ bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
break;
}
case kExprCallFunction: {
- CallFunctionOperand operand(&i, i.pc());
+ CallFunctionOperand<true> operand(&i, i.pc());
os << " // function #" << operand.index;
if (decoder.Complete(i.pc(), operand)) {
os << ": " << *operand.sig;
diff --git a/deps/v8/src/wasm/function-body-decoder.h b/deps/v8/src/wasm/function-body-decoder.h
index 6e6b824727..336b78afd9 100644
--- a/deps/v8/src/wasm/function-body-decoder.h
+++ b/deps/v8/src/wasm/function-body-decoder.h
@@ -170,8 +170,7 @@ class V8_EXPORT_PRIVATE BytecodeIterator : public NON_EXPORTED_BASE(Decoder) {
}
WasmOpcode current() {
- return static_cast<WasmOpcode>(
- checked_read_u8(pc_, 0, "expected bytecode"));
+ return static_cast<WasmOpcode>(read_u8<false>(pc_, "expected bytecode"));
}
void next() {
diff --git a/deps/v8/src/wasm/module-decoder.cc b/deps/v8/src/wasm/module-decoder.cc
index 440e5dcbb9..2b58065e77 100644
--- a/deps/v8/src/wasm/module-decoder.cc
+++ b/deps/v8/src/wasm/module-decoder.cc
@@ -30,7 +30,7 @@ namespace wasm {
#define TRACE(...)
#endif
-const char* SectionName(WasmSectionCode code) {
+const char* SectionName(SectionCode code) {
switch (code) {
case kUnknownSectionCode:
return "Unknown";
@@ -90,6 +90,24 @@ ValueType TypeOf(const WasmModule* module, const WasmInitExpr& expr) {
}
}
+// Reads a length-prefixed string, checking that it is within bounds. Returns
+// the offset of the string, and the length as an out parameter.
+uint32_t consume_string(Decoder& decoder, uint32_t* length, bool validate_utf8,
+ const char* name) {
+ *length = decoder.consume_u32v("string length");
+ uint32_t offset = decoder.pc_offset();
+ const byte* string_start = decoder.pc();
+ // Consume bytes before validation to guarantee that the string is not oob.
+ if (*length > 0) {
+ decoder.consume_bytes(*length, name);
+ if (decoder.ok() && validate_utf8 &&
+ !unibrow::Utf8::Validate(string_start, *length)) {
+ decoder.errorf(string_start, "%s: no valid UTF-8 string", name);
+ }
+ }
+ return offset;
+}
+
// An iterator over the sections in a WASM binary module.
// Automatically skips all unknown sections.
class WasmSectionIterator {
@@ -106,7 +124,7 @@ class WasmSectionIterator {
return section_code_ != kUnknownSectionCode && decoder_.more();
}
- inline WasmSectionCode section_code() const { return section_code_; }
+ inline SectionCode section_code() const { return section_code_; }
inline const byte* section_start() const { return section_start_; }
@@ -127,24 +145,24 @@ class WasmSectionIterator {
void advance() {
if (decoder_.pc() != section_end_) {
const char* msg = decoder_.pc() < section_end_ ? "shorter" : "longer";
- decoder_.error(decoder_.pc(), decoder_.pc(),
- "section was %s than expected size "
- "(%u bytes expected, %zu decoded)",
- msg, section_length(),
- static_cast<size_t>(decoder_.pc() - section_start_));
+ decoder_.errorf(decoder_.pc(),
+ "section was %s than expected size "
+ "(%u bytes expected, %zu decoded)",
+ msg, section_length(),
+ static_cast<size_t>(decoder_.pc() - section_start_));
}
next();
}
private:
Decoder& decoder_;
- WasmSectionCode section_code_;
+ SectionCode section_code_;
const byte* section_start_;
const byte* payload_start_;
const byte* section_end_;
// Reads the section code/name at the current position and sets up
- // the internal fields.
+ // the embedder fields.
void next() {
while (true) {
if (!decoder_.more()) {
@@ -166,14 +184,14 @@ class WasmSectionIterator {
if (section_code == kUnknownSectionCode) {
// Check for the known "name" section.
- uint32_t string_length = decoder_.consume_u32v("section name length");
- const byte* section_name_start = decoder_.pc();
- decoder_.consume_bytes(string_length, "section name");
+ uint32_t string_length;
+ uint32_t string_offset = wasm::consume_string(decoder_, &string_length,
+ true, "section name");
if (decoder_.failed() || decoder_.pc() > section_end_) {
- TRACE("Section name of length %u couldn't be read\n", string_length);
section_code_ = kUnknownSectionCode;
return;
}
+ const byte* section_name_start = decoder_.start() + string_offset;
payload_start_ = decoder_.pc();
TRACE(" +%d section name : \"%.*s\"\n",
@@ -184,15 +202,15 @@ class WasmSectionIterator {
strncmp(reinterpret_cast<const char*>(section_name_start),
kNameString, kNameStringLength) == 0) {
section_code = kNameSectionCode;
- } else {
- section_code = kUnknownSectionCode;
}
} else if (!IsValidSectionCode(section_code)) {
- decoder_.error(decoder_.pc(), decoder_.pc(),
- "unknown section code #0x%02x", section_code);
+ decoder_.errorf(decoder_.pc(), "unknown section code #0x%02x",
+ section_code);
section_code = kUnknownSectionCode;
}
- section_code_ = static_cast<WasmSectionCode>(section_code);
+ section_code_ = decoder_.failed()
+ ? kUnknownSectionCode
+ : static_cast<SectionCode>(section_code);
TRACE("Section: %s\n", SectionName(section_code_));
if (section_code_ == kUnknownSectionCode &&
@@ -215,9 +233,8 @@ class ModuleDecoder : public Decoder {
ModuleDecoder(Zone* zone, const byte* module_start, const byte* module_end,
ModuleOrigin origin)
: Decoder(module_start, module_end),
- module_zone(zone),
+ module_zone_(zone),
origin_(FLAG_assume_asmjs_origin ? kAsmJsOrigin : origin) {
- result_.start = start_;
if (end_ < start_) {
error(start_, "end is less than start");
end_ = start_;
@@ -255,30 +272,30 @@ class ModuleDecoder : public Decoder {
// Decodes an entire module.
ModuleResult DecodeModule(bool verify_functions = true) {
pc_ = start_;
- WasmModule* module = new WasmModule(module_zone);
+ WasmModule* module = new WasmModule(module_zone_);
module->min_mem_pages = 0;
module->max_mem_pages = 0;
module->mem_export = false;
- module->origin = origin_;
+ module->set_origin(origin_);
const byte* pos = pc_;
uint32_t magic_word = consume_u32("wasm magic");
#define BYTES(x) (x & 0xff), (x >> 8) & 0xff, (x >> 16) & 0xff, (x >> 24) & 0xff
if (magic_word != kWasmMagic) {
- error(pos, pos,
- "expected magic word %02x %02x %02x %02x, "
- "found %02x %02x %02x %02x",
- BYTES(kWasmMagic), BYTES(magic_word));
+ errorf(pos,
+ "expected magic word %02x %02x %02x %02x, "
+ "found %02x %02x %02x %02x",
+ BYTES(kWasmMagic), BYTES(magic_word));
}
pos = pc_;
{
uint32_t magic_version = consume_u32("wasm version");
if (magic_version != kWasmVersion) {
- error(pos, pos,
- "expected version %02x %02x %02x %02x, "
- "found %02x %02x %02x %02x",
- BYTES(kWasmVersion), BYTES(magic_version));
+ errorf(pos,
+ "expected version %02x %02x %02x %02x, "
+ "found %02x %02x %02x %02x",
+ BYTES(kWasmVersion), BYTES(magic_version));
}
}
@@ -317,9 +334,9 @@ class ModuleDecoder : public Decoder {
WasmImport* import = &module->import_table.back();
const byte* pos = pc_;
import->module_name_offset =
- consume_string(&import->module_name_length, true);
+ consume_string(&import->module_name_length, true, "module name");
import->field_name_offset =
- consume_string(&import->field_name_length, true);
+ consume_string(&import->field_name_length, true, "field name");
import->kind = static_cast<WasmExternalKind>(consume_u8("import kind"));
switch (import->kind) {
@@ -379,7 +396,7 @@ class ModuleDecoder : public Decoder {
break;
}
default:
- error(pos, pos, "unknown import kind 0x%02x", import->kind);
+ errorf(pos, "unknown import kind 0x%02x", import->kind);
break;
}
}
@@ -476,7 +493,8 @@ class ModuleDecoder : public Decoder {
});
WasmExport* exp = &module->export_table.back();
- exp->name_offset = consume_string(&exp->name_length, true);
+ exp->name_offset =
+ consume_string(&exp->name_length, true, "field name");
const byte* pos = pc();
exp->kind = static_cast<WasmExternalKind>(consume_u8("export kind"));
switch (exp->kind) {
@@ -515,7 +533,7 @@ class ModuleDecoder : public Decoder {
break;
}
default:
- error(pos, pos, "invalid export kind 0x%02x", exp->kind);
+ errorf(pos, "invalid export kind 0x%02x", exp->kind);
break;
}
}
@@ -539,9 +557,8 @@ class ModuleDecoder : public Decoder {
DCHECK(!cmp_less(*it, *last)); // Vector must be sorted.
if (!cmp_less(*last, *it)) {
const byte* pc = start_ + it->name_offset;
- error(pc, pc,
- "Duplicate export name '%.*s' for functions %d and %d",
- it->name_length, pc, last->index, it->index);
+ errorf(pc, "Duplicate export name '%.*s' for functions %d and %d",
+ it->name_length, pc, last->index, it->index);
break;
}
}
@@ -570,14 +587,14 @@ class ModuleDecoder : public Decoder {
const byte* pos = pc();
uint32_t table_index = consume_u32v("table index");
if (table_index != 0) {
- error(pos, pos, "illegal table index %u != 0", table_index);
+ errorf(pos, "illegal table index %u != 0", table_index);
}
WasmIndirectFunctionTable* table = nullptr;
if (table_index >= module->function_tables.size()) {
- error(pos, pos, "out of bounds table index %u", table_index);
- } else {
- table = &module->function_tables[table_index];
+ errorf(pos, "out of bounds table index %u", table_index);
+ break;
}
+ table = &module->function_tables[table_index];
WasmInitExpr offset = consume_init_expr(module, kWasmI32);
uint32_t num_elem =
consume_count("number of elements", kV8MaxWasmTableEntries);
@@ -587,11 +604,12 @@ class ModuleDecoder : public Decoder {
for (uint32_t j = 0; ok() && j < num_elem; j++) {
WasmFunction* func = nullptr;
uint32_t index = consume_func_index(module, &func);
+ DCHECK_EQ(func != nullptr, ok());
+ if (!func) break;
+ DCHECK_EQ(index, func->func_index);
init->entries.push_back(index);
- if (table && index < module->functions.size()) {
- // Canonicalize signature indices during decoding.
- table->map.FindOrInsert(module->functions[index].sig);
- }
+ // Canonicalize signature indices during decoding.
+ table->map.FindOrInsert(func->sig);
}
}
@@ -603,8 +621,8 @@ class ModuleDecoder : public Decoder {
const byte* pos = pc_;
uint32_t functions_count = consume_u32v("functions count");
if (functions_count != module->num_declared_functions) {
- error(pos, pos, "function body count %u mismatch (%u expected)",
- functions_count, module->num_declared_functions);
+ errorf(pos, "function body count %u mismatch (%u expected)",
+ functions_count, module->num_declared_functions);
}
for (uint32_t i = 0; ok() && i < functions_count; ++i) {
WasmFunction* function =
@@ -651,22 +669,37 @@ class ModuleDecoder : public Decoder {
// TODO(titzer): find a way to report name errors as warnings.
// Use an inner decoder so that errors don't fail the outer decoder.
Decoder inner(start_, pc_, end_);
- uint32_t functions_count = inner.consume_u32v("functions count");
-
- for (uint32_t i = 0; inner.ok() && i < functions_count; ++i) {
- uint32_t function_name_length = 0;
- uint32_t name_offset =
- consume_string(inner, &function_name_length, false);
- uint32_t func_index = i;
- if (inner.ok() && func_index < module->functions.size()) {
- module->functions[func_index].name_offset = name_offset;
- module->functions[func_index].name_length = function_name_length;
- }
-
- uint32_t local_names_count = inner.consume_u32v("local names count");
- for (uint32_t j = 0; inner.ok() && j < local_names_count; j++) {
- uint32_t length = inner.consume_u32v("string length");
- inner.consume_bytes(length, "string");
+ // Decode all name subsections.
+ // Be lenient with their order.
+ while (inner.ok() && inner.more()) {
+ uint8_t name_type = inner.consume_u8("name type");
+ if (name_type & 0x80) inner.error("name type if not varuint7");
+
+ uint32_t name_payload_len = inner.consume_u32v("name payload length");
+ if (!inner.checkAvailable(name_payload_len)) break;
+
+ // Decode function names, ignore the rest.
+ // Local names will be decoded when needed.
+ if (name_type == NameSectionType::kFunction) {
+ uint32_t functions_count = inner.consume_u32v("functions count");
+
+ for (; inner.ok() && functions_count > 0; --functions_count) {
+ uint32_t function_index = inner.consume_u32v("function index");
+ uint32_t name_length = 0;
+ uint32_t name_offset = wasm::consume_string(inner, &name_length,
+ false, "function name");
+ // Be lenient with errors in the name section: Ignore illegal
+ // or out-of-order indexes and non-UTF8 names. You can even assign
+ // to the same function multiple times (last valid one wins).
+ if (inner.ok() && function_index < module->functions.size() &&
+ unibrow::Utf8::Validate(inner.start() + name_offset,
+ name_length)) {
+ module->functions[function_index].name_offset = name_offset;
+ module->functions[function_index].name_length = name_length;
+ }
+ }
+ } else {
+ inner.consume_bytes(name_payload_len, "name subsection payload");
}
}
// Skip the whole names section in the outer decoder.
@@ -676,8 +709,8 @@ class ModuleDecoder : public Decoder {
// ===== Remaining sections ==============================================
if (section_iter.more() && ok()) {
- error(pc(), pc(), "unexpected section: %s",
- SectionName(section_iter.section_code()));
+ errorf(pc(), "unexpected section: %s",
+ SectionName(section_iter.section_code()));
}
if (ok()) {
@@ -686,7 +719,8 @@ class ModuleDecoder : public Decoder {
const WasmModule* finished_module = module;
ModuleResult result = toResult(finished_module);
if (verify_functions && result.ok()) {
- result.MoveFrom(result_); // Copy error code and location.
+ // Copy error code and location.
+ result.MoveErrorFrom(intermediate_result_);
}
if (FLAG_dump_wasm_module) DumpModule(result);
return result;
@@ -705,7 +739,8 @@ class ModuleDecoder : public Decoder {
if (ok()) VerifyFunctionBody(0, module_env, function);
FunctionResult result;
- result.MoveFrom(result_); // Copy error code and location.
+ // Copy error code and location.
+ result.MoveErrorFrom(intermediate_result_);
result.val = function;
return result;
}
@@ -723,8 +758,8 @@ class ModuleDecoder : public Decoder {
}
private:
- Zone* module_zone;
- ModuleResult result_;
+ Zone* module_zone_;
+ Result<bool> intermediate_result_;
ModuleOrigin origin_;
uint32_t off(const byte* ptr) { return static_cast<uint32_t>(ptr - start_); }
@@ -759,25 +794,25 @@ class ModuleDecoder : public Decoder {
case WasmInitExpr::kGlobalIndex: {
uint32_t other_index = global->init.val.global_index;
if (other_index >= index) {
- error(pos, pos,
- "invalid global index in init expression, "
- "index %u, other_index %u",
- index, other_index);
+ errorf(pos,
+ "invalid global index in init expression, "
+ "index %u, other_index %u",
+ index, other_index);
} else if (module->globals[other_index].type != global->type) {
- error(pos, pos,
- "type mismatch in global initialization "
- "(from global #%u), expected %s, got %s",
- other_index, WasmOpcodes::TypeName(global->type),
- WasmOpcodes::TypeName(module->globals[other_index].type));
+ errorf(pos,
+ "type mismatch in global initialization "
+ "(from global #%u), expected %s, got %s",
+ other_index, WasmOpcodes::TypeName(global->type),
+ WasmOpcodes::TypeName(module->globals[other_index].type));
}
break;
}
default:
if (global->type != TypeOf(module, global->init)) {
- error(pos, pos,
- "type error in global initialization, expected %s, got %s",
- WasmOpcodes::TypeName(global->type),
- WasmOpcodes::TypeName(TypeOf(module, global->init)));
+ errorf(pos,
+ "type error in global initialization, expected %s, got %s",
+ WasmOpcodes::TypeName(global->type),
+ WasmOpcodes::TypeName(TypeOf(module, global->init)));
}
}
}
@@ -836,52 +871,31 @@ class ModuleDecoder : public Decoder {
start_ + function->code_start_offset,
start_ + function->code_end_offset};
DecodeResult result = VerifyWasmCode(
- module_zone->allocator(),
+ module_zone_->allocator(),
menv == nullptr ? nullptr : menv->module_env.module, body);
if (result.failed()) {
// Wrap the error message from the function decoder.
std::ostringstream str;
- str << "in function " << func_name << ": ";
- str << result;
- std::string strval = str.str();
- const char* raw = strval.c_str();
- size_t len = strlen(raw);
- char* buffer = new char[len];
- strncpy(buffer, raw, len);
- buffer[len - 1] = 0;
+ str << "in function " << func_name << ": " << result.error_msg;
- // Copy error code and location.
- result_.MoveFrom(result);
- result_.error_msg.reset(buffer);
+ // Set error code and location, if this is the first error.
+ if (intermediate_result_.ok()) {
+ intermediate_result_.MoveErrorFrom(result);
+ }
}
}
- uint32_t consume_string(uint32_t* length, bool validate_utf8) {
- return consume_string(*this, length, validate_utf8);
- }
-
- // Reads a length-prefixed string, checking that it is within bounds. Returns
- // the offset of the string, and the length as an out parameter.
- uint32_t consume_string(Decoder& decoder, uint32_t* length,
- bool validate_utf8) {
- *length = decoder.consume_u32v("string length");
- uint32_t offset = decoder.pc_offset();
- const byte* string_start = decoder.pc();
- // Consume bytes before validation to guarantee that the string is not oob.
- if (*length > 0) decoder.consume_bytes(*length, "string");
- if (decoder.ok() && validate_utf8 &&
- !unibrow::Utf8::Validate(string_start, *length)) {
- decoder.error(string_start, "no valid UTF-8 string");
- }
- return offset;
+ uint32_t consume_string(uint32_t* length, bool validate_utf8,
+ const char* name) {
+ return wasm::consume_string(*this, length, validate_utf8, name);
}
uint32_t consume_sig_index(WasmModule* module, FunctionSig** sig) {
const byte* pos = pc_;
uint32_t sig_index = consume_u32v("signature index");
if (sig_index >= module->signatures.size()) {
- error(pos, pos, "signature index %u out of bounds (%d signatures)",
- sig_index, static_cast<int>(module->signatures.size()));
+ errorf(pos, "signature index %u out of bounds (%d signatures)", sig_index,
+ static_cast<int>(module->signatures.size()));
*sig = nullptr;
return 0;
}
@@ -893,8 +907,7 @@ class ModuleDecoder : public Decoder {
const byte* p = pc_;
uint32_t count = consume_u32v(name);
if (count > maximum) {
- error(p, p, "%s of %u exceeds internal limit of %zu", name, count,
- maximum);
+ errorf(p, "%s of %u exceeds internal limit of %zu", name, count, maximum);
return static_cast<uint32_t>(maximum);
}
return count;
@@ -918,8 +931,8 @@ class ModuleDecoder : public Decoder {
const byte* pos = pc_;
uint32_t index = consume_u32v(name);
if (index >= vector.size()) {
- error(pos, pos, "%s %u out of bounds (%d entries)", name, index,
- static_cast<int>(vector.size()));
+ errorf(pos, "%s %u out of bounds (%d entr%s)", name, index,
+ static_cast<int>(vector.size()), vector.size() == 1 ? "y" : "ies");
*ptr = nullptr;
return 0;
}
@@ -936,23 +949,23 @@ class ModuleDecoder : public Decoder {
*initial = consume_u32v("initial size");
*has_max = false;
if (*initial > max_initial) {
- error(pos, pos,
- "initial %s size (%u %s) is larger than implementation limit (%u)",
- name, *initial, units, max_initial);
+ errorf(pos,
+ "initial %s size (%u %s) is larger than implementation limit (%u)",
+ name, *initial, units, max_initial);
}
if (flags & 1) {
*has_max = true;
pos = pc();
*maximum = consume_u32v("maximum size");
if (*maximum > max_maximum) {
- error(
- pos, pos,
+ errorf(
+ pos,
"maximum %s size (%u %s) is larger than implementation limit (%u)",
name, *maximum, units, max_maximum);
}
if (*maximum < *initial) {
- error(pos, pos, "maximum %s size (%u %s) is less than initial (%u %s)",
- name, *maximum, units, *initial, units);
+ errorf(pos, "maximum %s size (%u %s) is less than initial (%u %s)",
+ name, *maximum, units, *initial, units);
}
} else {
*has_max = false;
@@ -964,7 +977,7 @@ class ModuleDecoder : public Decoder {
const byte* pos = pc();
uint8_t value = consume_u8(name);
if (value != expected) {
- error(pos, pos, "expected %s 0x%02x, got 0x%02x", name, expected, value);
+ errorf(pos, "expected %s 0x%02x, got 0x%02x", name, expected, value);
return false;
}
return true;
@@ -977,7 +990,7 @@ class ModuleDecoder : public Decoder {
unsigned len = 0;
switch (opcode) {
case kExprGetGlobal: {
- GlobalIndexOperand operand(this, pc() - 1);
+ GlobalIndexOperand<true> operand(this, pc() - 1);
if (module->globals.size() <= operand.index) {
error("global index is out of bounds");
expr.kind = WasmInitExpr::kNone;
@@ -999,28 +1012,28 @@ class ModuleDecoder : public Decoder {
break;
}
case kExprI32Const: {
- ImmI32Operand operand(this, pc() - 1);
+ ImmI32Operand<true> operand(this, pc() - 1);
expr.kind = WasmInitExpr::kI32Const;
expr.val.i32_const = operand.value;
len = operand.length;
break;
}
case kExprF32Const: {
- ImmF32Operand operand(this, pc() - 1);
+ ImmF32Operand<true> operand(this, pc() - 1);
expr.kind = WasmInitExpr::kF32Const;
expr.val.f32_const = operand.value;
len = operand.length;
break;
}
case kExprI64Const: {
- ImmI64Operand operand(this, pc() - 1);
+ ImmI64Operand<true> operand(this, pc() - 1);
expr.kind = WasmInitExpr::kI64Const;
expr.val.i64_const = operand.value;
len = operand.length;
break;
}
case kExprF64Const: {
- ImmF64Operand operand(this, pc() - 1);
+ ImmF64Operand<true> operand(this, pc() - 1);
expr.kind = WasmInitExpr::kF64Const;
expr.val.f64_const = operand.value;
len = operand.length;
@@ -1037,9 +1050,9 @@ class ModuleDecoder : public Decoder {
expr.kind = WasmInitExpr::kNone;
}
if (expected != kWasmStmt && TypeOf(module, expr) != kWasmI32) {
- error(pos, pos, "type error in init expression, expected %s, got %s",
- WasmOpcodes::TypeName(expected),
- WasmOpcodes::TypeName(TypeOf(module, expr)));
+ errorf(pos, "type error in init expression, expected %s, got %s",
+ WasmOpcodes::TypeName(expected),
+ WasmOpcodes::TypeName(TypeOf(module, expr)));
}
return expr;
}
@@ -1113,80 +1126,31 @@ class ModuleDecoder : public Decoder {
// FunctionSig stores the return types first.
ValueType* buffer =
- module_zone->NewArray<ValueType>(param_count + return_count);
+ module_zone_->NewArray<ValueType>(param_count + return_count);
uint32_t b = 0;
for (uint32_t i = 0; i < return_count; ++i) buffer[b++] = returns[i];
for (uint32_t i = 0; i < param_count; ++i) buffer[b++] = params[i];
- return new (module_zone) FunctionSig(return_count, param_count, buffer);
- }
-};
-
-// Helpers for nice error messages.
-class ModuleError : public ModuleResult {
- public:
- explicit ModuleError(const char* msg) {
- error_code = kError;
- size_t len = strlen(msg) + 1;
- char* result = new char[len];
- strncpy(result, msg, len);
- result[len - 1] = 0;
- error_msg.reset(result);
+ return new (module_zone_) FunctionSig(return_count, param_count, buffer);
}
};
-// Helpers for nice error messages.
-class FunctionError : public FunctionResult {
- public:
- explicit FunctionError(const char* msg) {
- error_code = kError;
- size_t len = strlen(msg) + 1;
- char* result = new char[len];
- strncpy(result, msg, len);
- result[len - 1] = 0;
- error_msg.reset(result);
- }
-};
-
-// Find section with given section code. Return Vector of the payload, or null
-// Vector if section is not found or module bytes are invalid.
-Vector<const byte> FindSection(const byte* module_start, const byte* module_end,
- WasmSectionCode code) {
- Decoder decoder(module_start, module_end);
-
- uint32_t magic_word = decoder.consume_u32("wasm magic");
- if (magic_word != kWasmMagic) decoder.error("wrong magic word");
-
- uint32_t magic_version = decoder.consume_u32("wasm version");
- if (magic_version != kWasmVersion) decoder.error("wrong wasm version");
-
- WasmSectionIterator section_iter(decoder);
- while (section_iter.more()) {
- if (section_iter.section_code() == code) {
- return Vector<const uint8_t>(section_iter.payload_start(),
- section_iter.payload_length());
- }
- decoder.consume_bytes(section_iter.payload_length(), "section payload");
- section_iter.advance();
- }
-
- return Vector<const uint8_t>();
-}
-
} // namespace
ModuleResult DecodeWasmModule(Isolate* isolate, const byte* module_start,
const byte* module_end, bool verify_functions,
ModuleOrigin origin) {
HistogramTimerScope wasm_decode_module_time_scope(
- isolate->counters()->wasm_decode_module_time());
+ IsWasm(origin) ? isolate->counters()->wasm_decode_wasm_module_time()
+ : isolate->counters()->wasm_decode_asm_module_time());
size_t size = module_end - module_start;
- if (module_start > module_end) return ModuleError("start > end");
+ if (module_start > module_end) return ModuleResult::Error("start > end");
if (size >= kV8MaxWasmModuleSize)
- return ModuleError("size > maximum module size");
+ return ModuleResult::Error("size > maximum module size: %zu", size);
// TODO(bradnelson): Improve histogram handling of size_t.
- isolate->counters()->wasm_module_size_bytes()->AddSample(
- static_cast<int>(size));
+ (IsWasm(origin) ? isolate->counters()->wasm_wasm_module_size_bytes()
+ : isolate->counters()->wasm_asm_module_size_bytes())
+ ->AddSample(static_cast<int>(size));
// Signatures are stored in zone memory, which have the same lifetime
// as the {module}.
Zone* zone = new Zone(isolate->allocator(), ZONE_NAME);
@@ -1196,8 +1160,10 @@ ModuleResult DecodeWasmModule(Isolate* isolate, const byte* module_start,
// TODO(titzer): this isn't accurate, since it doesn't count the data
// allocated on the C++ heap.
// https://bugs.chromium.org/p/chromium/issues/detail?id=657320
- isolate->counters()->wasm_decode_module_peak_memory_bytes()->AddSample(
- static_cast<int>(zone->allocation_size()));
+ (IsWasm(origin)
+ ? isolate->counters()->wasm_decode_wasm_module_peak_memory_bytes()
+ : isolate->counters()->wasm_decode_asm_module_peak_memory_bytes())
+ ->AddSample(static_cast<int>(zone->allocation_size()));
return result;
}
@@ -1218,51 +1184,23 @@ FunctionResult DecodeWasmFunction(Isolate* isolate, Zone* zone,
ModuleBytesEnv* module_env,
const byte* function_start,
const byte* function_end) {
+ bool is_wasm = module_env->module_env.is_wasm();
HistogramTimerScope wasm_decode_function_time_scope(
- isolate->counters()->wasm_decode_function_time());
+ is_wasm ? isolate->counters()->wasm_decode_wasm_function_time()
+ : isolate->counters()->wasm_decode_asm_function_time());
size_t size = function_end - function_start;
- if (function_start > function_end) return FunctionError("start > end");
+ if (function_start > function_end)
+ return FunctionResult::Error("start > end");
if (size > kV8MaxWasmFunctionSize)
- return FunctionError("size > maximum function size");
- isolate->counters()->wasm_function_size_bytes()->AddSample(
- static_cast<int>(size));
+ return FunctionResult::Error("size > maximum function size: %zu", size);
+ (is_wasm ? isolate->counters()->wasm_wasm_function_size_bytes()
+ : isolate->counters()->wasm_asm_function_size_bytes())
+ ->AddSample(static_cast<int>(size));
WasmFunction* function = new WasmFunction();
ModuleDecoder decoder(zone, function_start, function_end, kWasmOrigin);
return decoder.DecodeSingleFunction(module_env, function);
}
-FunctionOffsetsResult DecodeWasmFunctionOffsets(const byte* module_start,
- const byte* module_end) {
- // Find and decode the code section.
- Vector<const byte> code_section =
- FindSection(module_start, module_end, kCodeSectionCode);
- Decoder decoder(code_section.start(), code_section.end());
- FunctionOffsets table;
- if (!code_section.start()) {
- decoder.error("no code section");
- return decoder.toResult(std::move(table));
- }
-
- uint32_t functions_count = decoder.consume_u32v("functions count");
- // Reserve space for the entries, taking care of invalid input.
- if (functions_count < static_cast<unsigned>(code_section.length()) / 2) {
- table.reserve(functions_count);
- }
-
- int section_offset = static_cast<int>(code_section.start() - module_start);
- DCHECK_LE(0, section_offset);
- for (uint32_t i = 0; i < functions_count && decoder.ok(); ++i) {
- uint32_t size = decoder.consume_u32v("body size");
- int offset = static_cast<int>(section_offset + decoder.pc_offset());
- table.emplace_back(offset, static_cast<int>(size));
- DCHECK(table.back().first >= 0 && table.back().second >= 0);
- decoder.consume_bytes(size);
- }
- if (decoder.more()) decoder.error("unexpected additional bytes");
-
- return decoder.toResult(std::move(table));
-}
-
AsmJsOffsetsResult DecodeAsmJsOffsets(const byte* tables_start,
const byte* tables_end) {
AsmJsOffsets table;
diff --git a/deps/v8/src/wasm/module-decoder.h b/deps/v8/src/wasm/module-decoder.h
index 446883fd6b..b29dfb196b 100644
--- a/deps/v8/src/wasm/module-decoder.h
+++ b/deps/v8/src/wasm/module-decoder.h
@@ -20,7 +20,7 @@ const uint8_t kWasmFunctionTypeForm = 0x60;
const uint8_t kWasmAnyFunctionTypeForm = 0x70;
const uint8_t kResizableMaximumFlag = 1;
-enum WasmSectionCode {
+enum SectionCode {
kUnknownSectionCode = 0, // code for unknown sections
kTypeSectionCode = 1, // Function signature declarations
kImportSectionCode = 2, // Import declarations
@@ -36,11 +36,13 @@ enum WasmSectionCode {
kNameSectionCode = 12, // Name section (encoded as a string)
};
+enum NameSectionType : uint8_t { kFunction = 1, kLocal = 2 };
+
inline bool IsValidSectionCode(uint8_t byte) {
return kTypeSectionCode <= byte && byte <= kDataSectionCode;
}
-const char* SectionName(WasmSectionCode code);
+const char* SectionName(SectionCode code);
typedef Result<const WasmModule*> ModuleResult;
typedef Result<WasmFunction*> FunctionResult;
@@ -75,12 +77,6 @@ V8_EXPORT_PRIVATE FunctionResult DecodeWasmFunction(Isolate* isolate,
const byte* function_start,
const byte* function_end);
-// Extracts the function offset table from the wasm module bytes.
-// Returns a vector with <offset, length> entries, or failure if the wasm bytes
-// are detected as invalid. Note that this validation is not complete.
-FunctionOffsetsResult DecodeWasmFunctionOffsets(const byte* module_start,
- const byte* module_end);
-
V8_EXPORT_PRIVATE WasmInitExpr DecodeWasmInitExprForTesting(const byte* start,
const byte* end);
diff --git a/deps/v8/src/wasm/wasm-code-specialization.cc b/deps/v8/src/wasm/wasm-code-specialization.cc
index 1147899ef5..1b6a81900b 100644
--- a/deps/v8/src/wasm/wasm-code-specialization.cc
+++ b/deps/v8/src/wasm/wasm-code-specialization.cc
@@ -56,6 +56,16 @@ class PatchDirectCallsHelper {
const byte* func_bytes;
};
+bool IsAtWasmDirectCallTarget(RelocIterator& it) {
+ DCHECK(RelocInfo::IsCodeTarget(it.rinfo()->rmode()));
+ Code* code = Code::GetCodeFromTargetAddress(it.rinfo()->target_address());
+ return code->kind() == Code::WASM_FUNCTION ||
+ code->kind() == Code::WASM_TO_JS_FUNCTION ||
+ code->kind() == Code::WASM_INTERPRETER_ENTRY ||
+ code->builtin_index() == Builtins::kIllegal ||
+ code->builtin_index() == Builtins::kWasmCompileLazy;
+}
+
} // namespace
CodeSpecialization::CodeSpecialization(Isolate* isolate, Zone* zone)
@@ -123,38 +133,38 @@ bool CodeSpecialization::ApplyToWholeInstance(
for (int num_wasm_functions = static_cast<int>(wasm_functions->size());
func_index < num_wasm_functions; ++func_index) {
Code* wasm_function = Code::cast(code_table->get(func_index));
+ if (wasm_function->builtin_index() == Builtins::kWasmCompileLazy) continue;
changed |= ApplyToWasmCode(wasm_function, icache_flush_mode);
}
- // Patch all exported functions.
- for (auto exp : module->export_table) {
- if (exp.kind != kExternalFunction) continue;
- Code* export_wrapper = Code::cast(code_table->get(func_index));
- DCHECK_EQ(Code::JS_TO_WASM_FUNCTION, export_wrapper->kind());
- // There must be exactly one call to WASM_FUNCTION or WASM_TO_JS_FUNCTION.
- int num_wasm_calls = 0;
- for (RelocIterator it(export_wrapper,
- RelocInfo::ModeMask(RelocInfo::CODE_TARGET));
- !it.done(); it.next()) {
- DCHECK(RelocInfo::IsCodeTarget(it.rinfo()->rmode()));
- Code* code = Code::GetCodeFromTargetAddress(it.rinfo()->target_address());
- // Ignore calls to other builtins like ToNumber.
- if (code->kind() != Code::WASM_FUNCTION &&
- code->kind() != Code::WASM_TO_JS_FUNCTION &&
- code->builtin_index() != Builtins::kIllegal)
- continue;
- ++num_wasm_calls;
- Code* new_code = Code::cast(code_table->get(exp.index));
- DCHECK(new_code->kind() == Code::WASM_FUNCTION ||
- new_code->kind() == Code::WASM_TO_JS_FUNCTION);
- it.rinfo()->set_target_address(new_code->instruction_start(),
- UPDATE_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
+ // Patch all exported functions (if we shall relocate direct calls).
+ if (!relocate_direct_calls_instance.is_null()) {
+ // If we patch direct calls, the instance registered for that
+ // (relocate_direct_calls_instance) should match the instance we currently
+ // patch (instance).
+ DCHECK_EQ(instance, *relocate_direct_calls_instance);
+ for (auto exp : module->export_table) {
+ if (exp.kind != kExternalFunction) continue;
+ Code* export_wrapper = Code::cast(code_table->get(func_index));
+ DCHECK_EQ(Code::JS_TO_WASM_FUNCTION, export_wrapper->kind());
+ // There must be exactly one call to WASM_FUNCTION or WASM_TO_JS_FUNCTION.
+ for (RelocIterator it(export_wrapper,
+ RelocInfo::ModeMask(RelocInfo::CODE_TARGET));
+ ; it.next()) {
+ DCHECK(!it.done());
+ // Ignore calls to other builtins like ToNumber.
+ if (!IsAtWasmDirectCallTarget(it)) continue;
+ Code* new_code = Code::cast(code_table->get(exp.index));
+ it.rinfo()->set_target_address(new_code->GetIsolate(),
+ new_code->instruction_start(),
+ UPDATE_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
+ break;
+ }
changed = true;
+ func_index++;
}
- DCHECK_EQ(1, num_wasm_calls);
- func_index++;
+ DCHECK_EQ(code_table->length(), func_index);
}
- DCHECK_EQ(code_table->length(), func_index);
return changed;
}
@@ -189,31 +199,28 @@ bool CodeSpecialization::ApplyToWasmCode(Code* code,
switch (mode) {
case RelocInfo::WASM_MEMORY_REFERENCE:
DCHECK(reloc_mem_addr);
- it.rinfo()->update_wasm_memory_reference(old_mem_start, new_mem_start,
+ it.rinfo()->update_wasm_memory_reference(code->GetIsolate(),
+ old_mem_start, new_mem_start,
icache_flush_mode);
changed = true;
break;
case RelocInfo::WASM_MEMORY_SIZE_REFERENCE:
DCHECK(reloc_mem_size);
- it.rinfo()->update_wasm_memory_size(old_mem_size, new_mem_size,
- icache_flush_mode);
+ it.rinfo()->update_wasm_memory_size(code->GetIsolate(), old_mem_size,
+ new_mem_size, icache_flush_mode);
changed = true;
break;
case RelocInfo::WASM_GLOBAL_REFERENCE:
DCHECK(reloc_globals);
it.rinfo()->update_wasm_global_reference(
- old_globals_start, new_globals_start, icache_flush_mode);
+ code->GetIsolate(), old_globals_start, new_globals_start,
+ icache_flush_mode);
changed = true;
break;
case RelocInfo::CODE_TARGET: {
DCHECK(reloc_direct_calls);
- Code* old_code =
- Code::GetCodeFromTargetAddress(it.rinfo()->target_address());
// Skip everything which is not a wasm call (stack checks, traps, ...).
- if (old_code->kind() != Code::WASM_FUNCTION &&
- old_code->kind() != Code::WASM_TO_JS_FUNCTION &&
- old_code->builtin_index() != Builtins::kIllegal)
- continue;
+ if (!IsAtWasmDirectCallTarget(it)) continue;
// Iterate simultaneously over the relocation information and the source
// position table. For each call in the reloc info, move the source
// position iterator forward to that position to find the byte offset of
@@ -233,7 +240,8 @@ bool CodeSpecialization::ApplyToWasmCode(Code* code,
relocate_direct_calls_instance->compiled_module()
->ptr_to_code_table();
Code* new_code = Code::cast(code_table->get(called_func_index));
- it.rinfo()->set_target_address(new_code->instruction_start(),
+ it.rinfo()->set_target_address(new_code->GetIsolate(),
+ new_code->instruction_start(),
UPDATE_WRITE_BARRIER, icache_flush_mode);
changed = true;
} break;
@@ -242,7 +250,8 @@ bool CodeSpecialization::ApplyToWasmCode(Code* code,
Object* old = it.rinfo()->target_object();
Handle<Object>* new_obj = objects_to_relocate.Find(old);
if (new_obj) {
- it.rinfo()->set_target_object(**new_obj, UPDATE_WRITE_BARRIER,
+ it.rinfo()->set_target_object(HeapObject::cast(**new_obj),
+ UPDATE_WRITE_BARRIER,
icache_flush_mode);
changed = true;
}
@@ -250,8 +259,8 @@ bool CodeSpecialization::ApplyToWasmCode(Code* code,
case RelocInfo::WASM_FUNCTION_TABLE_SIZE_REFERENCE:
DCHECK(patch_table_size);
it.rinfo()->update_wasm_function_table_size_reference(
- old_function_table_size, new_function_table_size,
- icache_flush_mode);
+ code->GetIsolate(), old_function_table_size,
+ new_function_table_size, icache_flush_mode);
changed = true;
break;
default:
diff --git a/deps/v8/src/wasm/wasm-debug.cc b/deps/v8/src/wasm/wasm-debug.cc
index 769f57d951..adfbd0c168 100644
--- a/deps/v8/src/wasm/wasm-debug.cc
+++ b/deps/v8/src/wasm/wasm-debug.cc
@@ -2,12 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include <unordered_map>
+
#include "src/assembler-inl.h"
#include "src/assert-scope.h"
#include "src/compiler/wasm-compiler.h"
#include "src/debug/debug.h"
#include "src/factory.h"
#include "src/frames-inl.h"
+#include "src/identity-map.h"
#include "src/isolate.h"
#include "src/wasm/module-decoder.h"
#include "src/wasm/wasm-interpreter.h"
@@ -26,30 +29,72 @@ class InterpreterHandle;
InterpreterHandle* GetInterpreterHandle(WasmDebugInfo* debug_info);
class InterpreterHandle {
- AccountingAllocator allocator_;
WasmInstance instance_;
WasmInterpreter interpreter_;
Isolate* isolate_;
StepAction next_step_action_ = StepNone;
int last_step_stack_depth_ = 0;
+ std::unordered_map<Address, uint32_t> activations_;
+
+ uint32_t StartActivation(Address frame_pointer) {
+ WasmInterpreter::Thread* thread = interpreter_.GetThread(0);
+ uint32_t activation_id = thread->StartActivation();
+ DCHECK_EQ(0, activations_.count(frame_pointer));
+ activations_.insert(std::make_pair(frame_pointer, activation_id));
+ return activation_id;
+ }
+
+ void FinishActivation(Address frame_pointer, uint32_t activation_id) {
+ WasmInterpreter::Thread* thread = interpreter_.GetThread(0);
+ thread->FinishActivation(activation_id);
+ DCHECK_EQ(1, activations_.count(frame_pointer));
+ activations_.erase(frame_pointer);
+ }
+
+ std::pair<uint32_t, uint32_t> GetActivationFrameRange(
+ WasmInterpreter::Thread* thread, Address frame_pointer) {
+ DCHECK_EQ(1, activations_.count(frame_pointer));
+ uint32_t activation_id = activations_.find(frame_pointer)->second;
+ uint32_t num_activations = static_cast<uint32_t>(activations_.size() - 1);
+ uint32_t frame_base = thread->ActivationFrameBase(activation_id);
+ uint32_t frame_limit = activation_id == num_activations
+ ? thread->GetFrameCount()
+ : thread->ActivationFrameBase(activation_id + 1);
+ DCHECK_LE(frame_base, frame_limit);
+ DCHECK_LE(frame_limit, thread->GetFrameCount());
+ return {frame_base, frame_limit};
+ }
public:
// Initialize in the right order, using helper methods to make this possible.
// WasmInterpreter has to be allocated in place, since it is not movable.
InterpreterHandle(Isolate* isolate, WasmDebugInfo* debug_info)
: instance_(debug_info->wasm_instance()->compiled_module()->module()),
- interpreter_(GetBytesEnv(&instance_, debug_info), &allocator_),
+ interpreter_(isolate, GetBytesEnv(&instance_, debug_info)),
isolate_(isolate) {
- if (debug_info->wasm_instance()->has_memory_buffer()) {
- JSArrayBuffer* mem_buffer = debug_info->wasm_instance()->memory_buffer();
- instance_.mem_start =
- reinterpret_cast<byte*>(mem_buffer->backing_store());
- CHECK(mem_buffer->byte_length()->ToUint32(&instance_.mem_size));
+ DisallowHeapAllocation no_gc;
+
+ WasmInstanceObject* instance = debug_info->wasm_instance();
+
+ // Store a global handle to the wasm instance in the interpreter.
+ interpreter_.SetInstanceObject(instance);
+
+ // Set memory start pointer and size.
+ instance_.mem_start = nullptr;
+ instance_.mem_size = 0;
+ if (instance->has_memory_buffer()) {
+ UpdateMemory(instance->memory_buffer());
} else {
DCHECK_EQ(0, instance_.module->min_mem_pages);
- instance_.mem_start = nullptr;
- instance_.mem_size = 0;
}
+
+ // Set pointer to globals storage.
+ instance_.globals_start =
+ debug_info->wasm_instance()->compiled_module()->GetGlobalsStartOrNull();
+ }
+
+ ~InterpreterHandle() {
+ DCHECK_EQ(0, activations_.size());
}
static ModuleBytesEnv GetBytesEnv(WasmInstance* instance,
@@ -77,7 +122,11 @@ class InterpreterHandle {
return interpreter()->GetThread(0)->GetFrameCount();
}
- void Execute(uint32_t func_index, uint8_t* arg_buffer) {
+  // Returns true if exited regularly, false if a trap/exception occurred and was
+ // not handled inside this activation. In the latter case, a pending exception
+ // will have been set on the isolate.
+ bool Execute(Address frame_pointer, uint32_t func_index,
+ uint8_t* arg_buffer) {
DCHECK_GE(module()->functions.size(), func_index);
FunctionSig* sig = module()->functions[func_index].sig;
DCHECK_GE(kMaxInt, sig->parameter_count());
@@ -85,11 +134,11 @@ class InterpreterHandle {
ScopedVector<WasmVal> wasm_args(num_params);
uint8_t* arg_buf_ptr = arg_buffer;
for (int i = 0; i < num_params; ++i) {
- int param_size = 1 << ElementSizeLog2Of(sig->GetParam(i));
+ uint32_t param_size = 1 << ElementSizeLog2Of(sig->GetParam(i));
#define CASE_ARG_TYPE(type, ctype) \
case type: \
DCHECK_EQ(param_size, sizeof(ctype)); \
- wasm_args[i] = WasmVal(*reinterpret_cast<ctype*>(arg_buf_ptr)); \
+ wasm_args[i] = WasmVal(ReadUnalignedValue<ctype>(arg_buf_ptr)); \
break;
switch (sig->GetParam(i)) {
CASE_ARG_TYPE(kWasmI32, uint32_t)
@@ -100,16 +149,13 @@ class InterpreterHandle {
default:
UNREACHABLE();
}
- arg_buf_ptr += RoundUpToMultipleOfPowOf2(param_size, 8);
+ arg_buf_ptr += param_size;
}
+ uint32_t activation_id = StartActivation(frame_pointer);
+
WasmInterpreter::Thread* thread = interpreter_.GetThread(0);
- // We do not support reentering an already running interpreter at the moment
- // (like INTERPRETER -> JS -> WASM -> INTERPRETER).
- DCHECK(thread->state() == WasmInterpreter::STOPPED ||
- thread->state() == WasmInterpreter::FINISHED);
- thread->Reset();
- thread->PushFrame(&module()->functions[func_index], wasm_args.start());
+ thread->InitFrame(&module()->functions[func_index], wasm_args.start());
bool finished = false;
while (!finished) {
// TODO(clemensh): Add occasional StackChecks.
@@ -122,12 +168,23 @@ class InterpreterHandle {
// Perfect, just break the switch and exit the loop.
finished = true;
break;
- case WasmInterpreter::State::TRAPPED:
- // TODO(clemensh): Generate appropriate JS exception.
- UNIMPLEMENTED();
- break;
- // STOPPED and RUNNING should never occur here.
+ case WasmInterpreter::State::TRAPPED: {
+ int message_id =
+ WasmOpcodes::TrapReasonToMessageId(thread->GetTrapReason());
+ Handle<Object> exception = isolate_->factory()->NewWasmRuntimeError(
+ static_cast<MessageTemplate::Template>(message_id));
+ isolate_->Throw(*exception);
+ // Handle this exception. Return without trying to read back the
+ // return value.
+ auto result = thread->HandleException(isolate_);
+ return result == WasmInterpreter::Thread::HANDLED;
+ } break;
case WasmInterpreter::State::STOPPED:
+ // An exception happened, and the current activation was unwound.
+ DCHECK_EQ(thread->ActivationFrameBase(activation_id),
+ thread->GetFrameCount());
+ return false;
+ // RUNNING should never occur here.
case WasmInterpreter::State::RUNNING:
default:
UNREACHABLE();
@@ -143,7 +200,7 @@ class InterpreterHandle {
#define CASE_RET_TYPE(type, ctype) \
case type: \
DCHECK_EQ(1 << ElementSizeLog2Of(sig->GetReturn(0)), sizeof(ctype)); \
- *reinterpret_cast<ctype*>(arg_buffer) = ret_val.to<ctype>(); \
+ WriteUnalignedValue<ctype>(arg_buffer, ret_val.to<ctype>()); \
break;
switch (sig->GetReturn(0)) {
CASE_RET_TYPE(kWasmI32, uint32_t)
@@ -155,6 +212,10 @@ class InterpreterHandle {
UNREACHABLE();
}
}
+
+ FinishActivation(frame_pointer, activation_id);
+
+ return true;
}
WasmInterpreter::State ContinueExecution(WasmInterpreter::Thread* thread) {
@@ -250,34 +311,64 @@ class InterpreterHandle {
std::vector<std::pair<uint32_t, int>> GetInterpretedStack(
Address frame_pointer) {
- // TODO(clemensh): Use frame_pointer.
- USE(frame_pointer);
-
DCHECK_EQ(1, interpreter()->GetThreadCount());
WasmInterpreter::Thread* thread = interpreter()->GetThread(0);
- std::vector<std::pair<uint32_t, int>> stack(thread->GetFrameCount());
- for (int i = 0, e = thread->GetFrameCount(); i < e; ++i) {
- wasm::InterpretedFrame frame = thread->GetFrame(i);
- stack[i] = {frame.function()->func_index, frame.pc()};
+
+ std::pair<uint32_t, uint32_t> frame_range =
+ GetActivationFrameRange(thread, frame_pointer);
+
+ std::vector<std::pair<uint32_t, int>> stack;
+ stack.reserve(frame_range.second - frame_range.first);
+ for (uint32_t fp = frame_range.first; fp < frame_range.second; ++fp) {
+ wasm::InterpretedFrame frame = thread->GetFrame(fp);
+ stack.emplace_back(frame.function()->func_index, frame.pc());
}
return stack;
}
std::unique_ptr<wasm::InterpretedFrame> GetInterpretedFrame(
Address frame_pointer, int idx) {
- // TODO(clemensh): Use frame_pointer.
- USE(frame_pointer);
-
DCHECK_EQ(1, interpreter()->GetThreadCount());
WasmInterpreter::Thread* thread = interpreter()->GetThread(0);
- return std::unique_ptr<wasm::InterpretedFrame>(
- new wasm::InterpretedFrame(thread->GetMutableFrame(idx)));
+
+ std::pair<uint32_t, uint32_t> frame_range =
+ GetActivationFrameRange(thread, frame_pointer);
+ DCHECK_LE(0, idx);
+ DCHECK_GT(frame_range.second - frame_range.first, idx);
+
+ return std::unique_ptr<wasm::InterpretedFrame>(new wasm::InterpretedFrame(
+ thread->GetMutableFrame(frame_range.first + idx)));
+ }
+
+ void Unwind(Address frame_pointer) {
+ // Find the current activation.
+ DCHECK_EQ(1, activations_.count(frame_pointer));
+ // Activations must be properly stacked:
+ DCHECK_EQ(activations_.size() - 1, activations_[frame_pointer]);
+ uint32_t activation_id = static_cast<uint32_t>(activations_.size() - 1);
+
+ // Unwind the frames of the current activation if not already unwound.
+ WasmInterpreter::Thread* thread = interpreter()->GetThread(0);
+ if (static_cast<uint32_t>(thread->GetFrameCount()) >
+ thread->ActivationFrameBase(activation_id)) {
+ using ExceptionResult = WasmInterpreter::Thread::ExceptionHandlingResult;
+ ExceptionResult result = thread->HandleException(isolate_);
+ // TODO(wasm): Handle exceptions caught in wasm land.
+ CHECK_EQ(ExceptionResult::UNWOUND, result);
+ }
+
+ FinishActivation(frame_pointer, activation_id);
}
uint64_t NumInterpretedCalls() {
DCHECK_EQ(1, interpreter()->GetThreadCount());
return interpreter()->GetThread(0)->NumInterpretedCalls();
}
+
+ void UpdateMemory(JSArrayBuffer* new_memory) {
+ instance_.mem_start = reinterpret_cast<byte*>(new_memory->backing_store());
+ CHECK(new_memory->byte_length()->ToUint32(&instance_.mem_size));
+ }
};
InterpreterHandle* GetOrCreateInterpreterHandle(
@@ -324,25 +415,28 @@ Handle<FixedArray> GetOrCreateInterpretedFunctions(
return new_arr;
}
-void RedirectCallsitesInCode(Code* code, Code* old_target, Code* new_target) {
+using CodeRelocationMap = IdentityMap<Handle<Code>, FreeStoreAllocationPolicy>;
+
+void RedirectCallsitesInCode(Code* code, CodeRelocationMap& map) {
DisallowHeapAllocation no_gc;
for (RelocIterator it(code, RelocInfo::kCodeTargetMask); !it.done();
it.next()) {
DCHECK(RelocInfo::IsCodeTarget(it.rinfo()->rmode()));
Code* target = Code::GetCodeFromTargetAddress(it.rinfo()->target_address());
- if (target != old_target) continue;
- it.rinfo()->set_target_address(new_target->instruction_start());
+ Handle<Code>* new_target = map.Find(target);
+ if (!new_target) continue;
+ it.rinfo()->set_target_address(code->GetIsolate(),
+ (*new_target)->instruction_start());
}
}
void RedirectCallsitesInInstance(Isolate* isolate, WasmInstanceObject* instance,
- Code* old_target, Code* new_target) {
+ CodeRelocationMap& map) {
DisallowHeapAllocation no_gc;
// Redirect all calls in wasm functions.
FixedArray* code_table = instance->compiled_module()->ptr_to_code_table();
for (int i = 0, e = GetNumFunctions(instance); i < e; ++i) {
- RedirectCallsitesInCode(Code::cast(code_table->get(i)), old_target,
- new_target);
+ RedirectCallsitesInCode(Code::cast(code_table->get(i)), map);
}
// Redirect all calls in exported functions.
@@ -352,7 +446,7 @@ void RedirectCallsitesInInstance(Isolate* isolate, WasmInstanceObject* instance,
WeakCell* weak_function = WeakCell::cast(weak_exported_functions->get(i));
if (weak_function->cleared()) continue;
Code* code = JSFunction::cast(weak_function->value())->code();
- RedirectCallsitesInCode(code, old_target, new_target);
+ RedirectCallsitesInCode(code, map);
}
}
@@ -392,44 +486,49 @@ void WasmDebugInfo::SetBreakpoint(Handle<WasmDebugInfo> debug_info,
int func_index, int offset) {
Isolate* isolate = debug_info->GetIsolate();
InterpreterHandle* handle = GetOrCreateInterpreterHandle(isolate, debug_info);
- RedirectToInterpreter(debug_info, func_index);
+ RedirectToInterpreter(debug_info, Vector<int>(&func_index, 1));
const WasmFunction* func = &handle->module()->functions[func_index];
handle->interpreter()->SetBreakpoint(func, offset, true);
}
void WasmDebugInfo::RedirectToInterpreter(Handle<WasmDebugInfo> debug_info,
- int func_index) {
+ Vector<int> func_indexes) {
Isolate* isolate = debug_info->GetIsolate();
- DCHECK_LE(0, func_index);
- DCHECK_GT(debug_info->wasm_instance()->module()->functions.size(),
- func_index);
- Handle<FixedArray> interpreted_functions =
- GetOrCreateInterpretedFunctions(isolate, debug_info);
- if (!interpreted_functions->get(func_index)->IsUndefined(isolate)) return;
-
// Ensure that the interpreter is instantiated.
GetOrCreateInterpreterHandle(isolate, debug_info);
+ Handle<FixedArray> interpreted_functions =
+ GetOrCreateInterpretedFunctions(isolate, debug_info);
Handle<WasmInstanceObject> instance(debug_info->wasm_instance(), isolate);
- Handle<Code> new_code = compiler::CompileWasmInterpreterEntry(
- isolate, func_index,
- instance->compiled_module()->module()->functions[func_index].sig,
- instance);
-
Handle<FixedArray> code_table = instance->compiled_module()->code_table();
- Handle<Code> old_code(Code::cast(code_table->get(func_index)), isolate);
- interpreted_functions->set(func_index, *new_code);
-
- RedirectCallsitesInInstance(isolate, *instance, *old_code, *new_code);
+ CodeRelocationMap code_to_relocate(isolate->heap());
+ for (int func_index : func_indexes) {
+ DCHECK_LE(0, func_index);
+ DCHECK_GT(debug_info->wasm_instance()->module()->functions.size(),
+ func_index);
+ if (!interpreted_functions->get(func_index)->IsUndefined(isolate)) continue;
+
+ Handle<Code> new_code = compiler::CompileWasmInterpreterEntry(
+ isolate, func_index,
+ instance->compiled_module()->module()->functions[func_index].sig,
+ instance);
+
+ Code* old_code = Code::cast(code_table->get(func_index));
+ interpreted_functions->set(func_index, *new_code);
+ DCHECK_NULL(code_to_relocate.Find(old_code));
+ code_to_relocate.Set(old_code, new_code);
+ }
+ RedirectCallsitesInInstance(isolate, *instance, code_to_relocate);
}
void WasmDebugInfo::PrepareStep(StepAction step_action) {
GetInterpreterHandle(this)->PrepareStep(step_action);
}
-void WasmDebugInfo::RunInterpreter(int func_index, uint8_t* arg_buffer) {
+bool WasmDebugInfo::RunInterpreter(Address frame_pointer, int func_index,
+ uint8_t* arg_buffer) {
DCHECK_LE(0, func_index);
- GetInterpreterHandle(this)->Execute(static_cast<uint32_t>(func_index),
- arg_buffer);
+ return GetInterpreterHandle(this)->Execute(
+ frame_pointer, static_cast<uint32_t>(func_index), arg_buffer);
}
std::vector<std::pair<uint32_t, int>> WasmDebugInfo::GetInterpretedStack(
@@ -442,7 +541,17 @@ std::unique_ptr<wasm::InterpretedFrame> WasmDebugInfo::GetInterpretedFrame(
return GetInterpreterHandle(this)->GetInterpretedFrame(frame_pointer, idx);
}
+void WasmDebugInfo::Unwind(Address frame_pointer) {
+ return GetInterpreterHandle(this)->Unwind(frame_pointer);
+}
+
uint64_t WasmDebugInfo::NumInterpretedCalls() {
auto handle = GetInterpreterHandleOrNull(this);
return handle ? handle->NumInterpretedCalls() : 0;
}
+
+void WasmDebugInfo::UpdateMemory(JSArrayBuffer* new_memory) {
+ InterpreterHandle* interp_handle = GetInterpreterHandleOrNull(this);
+ if (!interp_handle) return;
+ interp_handle->UpdateMemory(new_memory);
+}
diff --git a/deps/v8/src/wasm/wasm-interpreter.cc b/deps/v8/src/wasm/wasm-interpreter.cc
index f32b5e617b..66e4172850 100644
--- a/deps/v8/src/wasm/wasm-interpreter.cc
+++ b/deps/v8/src/wasm/wasm-interpreter.cc
@@ -6,7 +6,9 @@
#include "src/wasm/wasm-interpreter.h"
+#include "src/assembler-inl.h"
#include "src/conversions.h"
+#include "src/identity-map.h"
#include "src/objects-inl.h"
#include "src/utils.h"
#include "src/wasm/decoder.h"
@@ -15,6 +17,7 @@
#include "src/wasm/wasm-external-refs.h"
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-module.h"
+#include "src/wasm/wasm-objects.h"
#include "src/zone/accounting-allocator.h"
#include "src/zone/zone-containers.h"
@@ -34,6 +37,9 @@ namespace wasm {
#define FOREACH_INTERNAL_OPCODE(V) V(Breakpoint, 0xFF)
+#define WASM_CTYPES(V) \
+ V(I32, int32_t) V(I64, int64_t) V(F32, float) V(F64, double)
+
#define FOREACH_SIMPLE_BINOP(V) \
V(I32Add, uint32_t, +) \
V(I32Sub, uint32_t, -) \
@@ -167,7 +173,9 @@ namespace wasm {
V(F32Sqrt, float) \
V(F64Sqrt, double)
-static inline int32_t ExecuteI32DivS(int32_t a, int32_t b, TrapReason* trap) {
+namespace {
+
+inline int32_t ExecuteI32DivS(int32_t a, int32_t b, TrapReason* trap) {
if (b == 0) {
*trap = kTrapDivByZero;
return 0;
@@ -179,8 +187,7 @@ static inline int32_t ExecuteI32DivS(int32_t a, int32_t b, TrapReason* trap) {
return a / b;
}
-static inline uint32_t ExecuteI32DivU(uint32_t a, uint32_t b,
- TrapReason* trap) {
+inline uint32_t ExecuteI32DivU(uint32_t a, uint32_t b, TrapReason* trap) {
if (b == 0) {
*trap = kTrapDivByZero;
return 0;
@@ -188,7 +195,7 @@ static inline uint32_t ExecuteI32DivU(uint32_t a, uint32_t b,
return a / b;
}
-static inline int32_t ExecuteI32RemS(int32_t a, int32_t b, TrapReason* trap) {
+inline int32_t ExecuteI32RemS(int32_t a, int32_t b, TrapReason* trap) {
if (b == 0) {
*trap = kTrapRemByZero;
return 0;
@@ -197,8 +204,7 @@ static inline int32_t ExecuteI32RemS(int32_t a, int32_t b, TrapReason* trap) {
return a % b;
}
-static inline uint32_t ExecuteI32RemU(uint32_t a, uint32_t b,
- TrapReason* trap) {
+inline uint32_t ExecuteI32RemU(uint32_t a, uint32_t b, TrapReason* trap) {
if (b == 0) {
*trap = kTrapRemByZero;
return 0;
@@ -206,20 +212,19 @@ static inline uint32_t ExecuteI32RemU(uint32_t a, uint32_t b,
return a % b;
}
-static inline uint32_t ExecuteI32Shl(uint32_t a, uint32_t b, TrapReason* trap) {
+inline uint32_t ExecuteI32Shl(uint32_t a, uint32_t b, TrapReason* trap) {
return a << (b & 0x1f);
}
-static inline uint32_t ExecuteI32ShrU(uint32_t a, uint32_t b,
- TrapReason* trap) {
+inline uint32_t ExecuteI32ShrU(uint32_t a, uint32_t b, TrapReason* trap) {
return a >> (b & 0x1f);
}
-static inline int32_t ExecuteI32ShrS(int32_t a, int32_t b, TrapReason* trap) {
+inline int32_t ExecuteI32ShrS(int32_t a, int32_t b, TrapReason* trap) {
return a >> (b & 0x1f);
}
-static inline int64_t ExecuteI64DivS(int64_t a, int64_t b, TrapReason* trap) {
+inline int64_t ExecuteI64DivS(int64_t a, int64_t b, TrapReason* trap) {
if (b == 0) {
*trap = kTrapDivByZero;
return 0;
@@ -231,8 +236,7 @@ static inline int64_t ExecuteI64DivS(int64_t a, int64_t b, TrapReason* trap) {
return a / b;
}
-static inline uint64_t ExecuteI64DivU(uint64_t a, uint64_t b,
- TrapReason* trap) {
+inline uint64_t ExecuteI64DivU(uint64_t a, uint64_t b, TrapReason* trap) {
if (b == 0) {
*trap = kTrapDivByZero;
return 0;
@@ -240,7 +244,7 @@ static inline uint64_t ExecuteI64DivU(uint64_t a, uint64_t b,
return a / b;
}
-static inline int64_t ExecuteI64RemS(int64_t a, int64_t b, TrapReason* trap) {
+inline int64_t ExecuteI64RemS(int64_t a, int64_t b, TrapReason* trap) {
if (b == 0) {
*trap = kTrapRemByZero;
return 0;
@@ -249,8 +253,7 @@ static inline int64_t ExecuteI64RemS(int64_t a, int64_t b, TrapReason* trap) {
return a % b;
}
-static inline uint64_t ExecuteI64RemU(uint64_t a, uint64_t b,
- TrapReason* trap) {
+inline uint64_t ExecuteI64RemU(uint64_t a, uint64_t b, TrapReason* trap) {
if (b == 0) {
*trap = kTrapRemByZero;
return 0;
@@ -258,65 +261,63 @@ static inline uint64_t ExecuteI64RemU(uint64_t a, uint64_t b,
return a % b;
}
-static inline uint64_t ExecuteI64Shl(uint64_t a, uint64_t b, TrapReason* trap) {
+inline uint64_t ExecuteI64Shl(uint64_t a, uint64_t b, TrapReason* trap) {
return a << (b & 0x3f);
}
-static inline uint64_t ExecuteI64ShrU(uint64_t a, uint64_t b,
- TrapReason* trap) {
+inline uint64_t ExecuteI64ShrU(uint64_t a, uint64_t b, TrapReason* trap) {
return a >> (b & 0x3f);
}
-static inline int64_t ExecuteI64ShrS(int64_t a, int64_t b, TrapReason* trap) {
+inline int64_t ExecuteI64ShrS(int64_t a, int64_t b, TrapReason* trap) {
return a >> (b & 0x3f);
}
-static inline uint32_t ExecuteI32Ror(uint32_t a, uint32_t b, TrapReason* trap) {
+inline uint32_t ExecuteI32Ror(uint32_t a, uint32_t b, TrapReason* trap) {
uint32_t shift = (b & 0x1f);
return (a >> shift) | (a << (32 - shift));
}
-static inline uint32_t ExecuteI32Rol(uint32_t a, uint32_t b, TrapReason* trap) {
+inline uint32_t ExecuteI32Rol(uint32_t a, uint32_t b, TrapReason* trap) {
uint32_t shift = (b & 0x1f);
return (a << shift) | (a >> (32 - shift));
}
-static inline uint64_t ExecuteI64Ror(uint64_t a, uint64_t b, TrapReason* trap) {
+inline uint64_t ExecuteI64Ror(uint64_t a, uint64_t b, TrapReason* trap) {
uint32_t shift = (b & 0x3f);
return (a >> shift) | (a << (64 - shift));
}
-static inline uint64_t ExecuteI64Rol(uint64_t a, uint64_t b, TrapReason* trap) {
+inline uint64_t ExecuteI64Rol(uint64_t a, uint64_t b, TrapReason* trap) {
uint32_t shift = (b & 0x3f);
return (a << shift) | (a >> (64 - shift));
}
-static inline float ExecuteF32Min(float a, float b, TrapReason* trap) {
+inline float ExecuteF32Min(float a, float b, TrapReason* trap) {
return JSMin(a, b);
}
-static inline float ExecuteF32Max(float a, float b, TrapReason* trap) {
+inline float ExecuteF32Max(float a, float b, TrapReason* trap) {
return JSMax(a, b);
}
-static inline float ExecuteF32CopySign(float a, float b, TrapReason* trap) {
+inline float ExecuteF32CopySign(float a, float b, TrapReason* trap) {
return copysignf(a, b);
}
-static inline double ExecuteF64Min(double a, double b, TrapReason* trap) {
+inline double ExecuteF64Min(double a, double b, TrapReason* trap) {
return JSMin(a, b);
}
-static inline double ExecuteF64Max(double a, double b, TrapReason* trap) {
+inline double ExecuteF64Max(double a, double b, TrapReason* trap) {
return JSMax(a, b);
}
-static inline double ExecuteF64CopySign(double a, double b, TrapReason* trap) {
+inline double ExecuteF64CopySign(double a, double b, TrapReason* trap) {
return copysign(a, b);
}
-static inline int32_t ExecuteI32AsmjsDivS(int32_t a, int32_t b,
- TrapReason* trap) {
+inline int32_t ExecuteI32AsmjsDivS(int32_t a, int32_t b, TrapReason* trap) {
if (b == 0) return 0;
if (b == -1 && a == std::numeric_limits<int32_t>::min()) {
return std::numeric_limits<int32_t>::min();
@@ -324,131 +325,114 @@ static inline int32_t ExecuteI32AsmjsDivS(int32_t a, int32_t b,
return a / b;
}
-static inline uint32_t ExecuteI32AsmjsDivU(uint32_t a, uint32_t b,
- TrapReason* trap) {
+inline uint32_t ExecuteI32AsmjsDivU(uint32_t a, uint32_t b, TrapReason* trap) {
if (b == 0) return 0;
return a / b;
}
-static inline int32_t ExecuteI32AsmjsRemS(int32_t a, int32_t b,
- TrapReason* trap) {
+inline int32_t ExecuteI32AsmjsRemS(int32_t a, int32_t b, TrapReason* trap) {
if (b == 0) return 0;
if (b == -1) return 0;
return a % b;
}
-static inline uint32_t ExecuteI32AsmjsRemU(uint32_t a, uint32_t b,
- TrapReason* trap) {
+inline uint32_t ExecuteI32AsmjsRemU(uint32_t a, uint32_t b, TrapReason* trap) {
if (b == 0) return 0;
return a % b;
}
-static inline int32_t ExecuteI32AsmjsSConvertF32(float a, TrapReason* trap) {
+inline int32_t ExecuteI32AsmjsSConvertF32(float a, TrapReason* trap) {
return DoubleToInt32(a);
}
-static inline uint32_t ExecuteI32AsmjsUConvertF32(float a, TrapReason* trap) {
+inline uint32_t ExecuteI32AsmjsUConvertF32(float a, TrapReason* trap) {
return DoubleToUint32(a);
}
-static inline int32_t ExecuteI32AsmjsSConvertF64(double a, TrapReason* trap) {
+inline int32_t ExecuteI32AsmjsSConvertF64(double a, TrapReason* trap) {
return DoubleToInt32(a);
}
-static inline uint32_t ExecuteI32AsmjsUConvertF64(double a, TrapReason* trap) {
+inline uint32_t ExecuteI32AsmjsUConvertF64(double a, TrapReason* trap) {
return DoubleToUint32(a);
}
-static int32_t ExecuteI32Clz(uint32_t val, TrapReason* trap) {
+int32_t ExecuteI32Clz(uint32_t val, TrapReason* trap) {
return base::bits::CountLeadingZeros32(val);
}
-static uint32_t ExecuteI32Ctz(uint32_t val, TrapReason* trap) {
+uint32_t ExecuteI32Ctz(uint32_t val, TrapReason* trap) {
return base::bits::CountTrailingZeros32(val);
}
-static uint32_t ExecuteI32Popcnt(uint32_t val, TrapReason* trap) {
+uint32_t ExecuteI32Popcnt(uint32_t val, TrapReason* trap) {
return word32_popcnt_wrapper(&val);
}
-static inline uint32_t ExecuteI32Eqz(uint32_t val, TrapReason* trap) {
+inline uint32_t ExecuteI32Eqz(uint32_t val, TrapReason* trap) {
return val == 0 ? 1 : 0;
}
-static int64_t ExecuteI64Clz(uint64_t val, TrapReason* trap) {
+int64_t ExecuteI64Clz(uint64_t val, TrapReason* trap) {
return base::bits::CountLeadingZeros64(val);
}
-static inline uint64_t ExecuteI64Ctz(uint64_t val, TrapReason* trap) {
+inline uint64_t ExecuteI64Ctz(uint64_t val, TrapReason* trap) {
return base::bits::CountTrailingZeros64(val);
}
-static inline int64_t ExecuteI64Popcnt(uint64_t val, TrapReason* trap) {
+inline int64_t ExecuteI64Popcnt(uint64_t val, TrapReason* trap) {
return word64_popcnt_wrapper(&val);
}
-static inline int32_t ExecuteI64Eqz(uint64_t val, TrapReason* trap) {
+inline int32_t ExecuteI64Eqz(uint64_t val, TrapReason* trap) {
return val == 0 ? 1 : 0;
}
-static inline float ExecuteF32Abs(float a, TrapReason* trap) {
+inline float ExecuteF32Abs(float a, TrapReason* trap) {
return bit_cast<float>(bit_cast<uint32_t>(a) & 0x7fffffff);
}
-static inline float ExecuteF32Neg(float a, TrapReason* trap) {
+inline float ExecuteF32Neg(float a, TrapReason* trap) {
return bit_cast<float>(bit_cast<uint32_t>(a) ^ 0x80000000);
}
-static inline float ExecuteF32Ceil(float a, TrapReason* trap) {
- return ceilf(a);
-}
+inline float ExecuteF32Ceil(float a, TrapReason* trap) { return ceilf(a); }
-static inline float ExecuteF32Floor(float a, TrapReason* trap) {
- return floorf(a);
-}
+inline float ExecuteF32Floor(float a, TrapReason* trap) { return floorf(a); }
-static inline float ExecuteF32Trunc(float a, TrapReason* trap) {
- return truncf(a);
-}
+inline float ExecuteF32Trunc(float a, TrapReason* trap) { return truncf(a); }
-static inline float ExecuteF32NearestInt(float a, TrapReason* trap) {
+inline float ExecuteF32NearestInt(float a, TrapReason* trap) {
return nearbyintf(a);
}
-static inline float ExecuteF32Sqrt(float a, TrapReason* trap) {
+inline float ExecuteF32Sqrt(float a, TrapReason* trap) {
float result = sqrtf(a);
return result;
}
-static inline double ExecuteF64Abs(double a, TrapReason* trap) {
+inline double ExecuteF64Abs(double a, TrapReason* trap) {
return bit_cast<double>(bit_cast<uint64_t>(a) & 0x7fffffffffffffff);
}
-static inline double ExecuteF64Neg(double a, TrapReason* trap) {
+inline double ExecuteF64Neg(double a, TrapReason* trap) {
return bit_cast<double>(bit_cast<uint64_t>(a) ^ 0x8000000000000000);
}
-static inline double ExecuteF64Ceil(double a, TrapReason* trap) {
- return ceil(a);
-}
+inline double ExecuteF64Ceil(double a, TrapReason* trap) { return ceil(a); }
-static inline double ExecuteF64Floor(double a, TrapReason* trap) {
- return floor(a);
-}
+inline double ExecuteF64Floor(double a, TrapReason* trap) { return floor(a); }
-static inline double ExecuteF64Trunc(double a, TrapReason* trap) {
- return trunc(a);
-}
+inline double ExecuteF64Trunc(double a, TrapReason* trap) { return trunc(a); }
-static inline double ExecuteF64NearestInt(double a, TrapReason* trap) {
+inline double ExecuteF64NearestInt(double a, TrapReason* trap) {
return nearbyint(a);
}
-static inline double ExecuteF64Sqrt(double a, TrapReason* trap) {
- return sqrt(a);
-}
+inline double ExecuteF64Sqrt(double a, TrapReason* trap) { return sqrt(a); }
-static int32_t ExecuteI32SConvertF32(float a, TrapReason* trap) {
+int32_t ExecuteI32SConvertF32(float a, TrapReason* trap) {
// The upper bound is (INT32_MAX + 1), which is the lowest float-representable
// number above INT32_MAX which cannot be represented as int32.
float upper_bound = 2147483648.0f;
@@ -463,7 +447,7 @@ static int32_t ExecuteI32SConvertF32(float a, TrapReason* trap) {
return 0;
}
-static int32_t ExecuteI32SConvertF64(double a, TrapReason* trap) {
+int32_t ExecuteI32SConvertF64(double a, TrapReason* trap) {
// The upper bound is (INT32_MAX + 1), which is the lowest double-
// representable number above INT32_MAX which cannot be represented as int32.
double upper_bound = 2147483648.0;
@@ -477,7 +461,7 @@ static int32_t ExecuteI32SConvertF64(double a, TrapReason* trap) {
return 0;
}
-static uint32_t ExecuteI32UConvertF32(float a, TrapReason* trap) {
+uint32_t ExecuteI32UConvertF32(float a, TrapReason* trap) {
// The upper bound is (UINT32_MAX + 1), which is the lowest
// float-representable number above UINT32_MAX which cannot be represented as
// uint32.
@@ -490,7 +474,7 @@ static uint32_t ExecuteI32UConvertF32(float a, TrapReason* trap) {
return 0;
}
-static uint32_t ExecuteI32UConvertF64(double a, TrapReason* trap) {
+uint32_t ExecuteI32UConvertF64(double a, TrapReason* trap) {
// The upper bound is (UINT32_MAX + 1), which is the lowest
// double-representable number above UINT32_MAX which cannot be represented as
// uint32.
@@ -503,11 +487,11 @@ static uint32_t ExecuteI32UConvertF64(double a, TrapReason* trap) {
return 0;
}
-static inline uint32_t ExecuteI32ConvertI64(int64_t a, TrapReason* trap) {
+inline uint32_t ExecuteI32ConvertI64(int64_t a, TrapReason* trap) {
return static_cast<uint32_t>(a & 0xFFFFFFFF);
}
-static int64_t ExecuteI64SConvertF32(float a, TrapReason* trap) {
+int64_t ExecuteI64SConvertF32(float a, TrapReason* trap) {
int64_t output;
if (!float32_to_int64_wrapper(&a, &output)) {
*trap = kTrapFloatUnrepresentable;
@@ -515,7 +499,7 @@ static int64_t ExecuteI64SConvertF32(float a, TrapReason* trap) {
return output;
}
-static int64_t ExecuteI64SConvertF64(double a, TrapReason* trap) {
+int64_t ExecuteI64SConvertF64(double a, TrapReason* trap) {
int64_t output;
if (!float64_to_int64_wrapper(&a, &output)) {
*trap = kTrapFloatUnrepresentable;
@@ -523,7 +507,7 @@ static int64_t ExecuteI64SConvertF64(double a, TrapReason* trap) {
return output;
}
-static uint64_t ExecuteI64UConvertF32(float a, TrapReason* trap) {
+uint64_t ExecuteI64UConvertF32(float a, TrapReason* trap) {
uint64_t output;
if (!float32_to_uint64_wrapper(&a, &output)) {
*trap = kTrapFloatUnrepresentable;
@@ -531,7 +515,7 @@ static uint64_t ExecuteI64UConvertF32(float a, TrapReason* trap) {
return output;
}
-static uint64_t ExecuteI64UConvertF64(double a, TrapReason* trap) {
+uint64_t ExecuteI64UConvertF64(double a, TrapReason* trap) {
uint64_t output;
if (!float64_to_uint64_wrapper(&a, &output)) {
*trap = kTrapFloatUnrepresentable;
@@ -539,114 +523,134 @@ static uint64_t ExecuteI64UConvertF64(double a, TrapReason* trap) {
return output;
}
-static inline int64_t ExecuteI64SConvertI32(int32_t a, TrapReason* trap) {
+inline int64_t ExecuteI64SConvertI32(int32_t a, TrapReason* trap) {
return static_cast<int64_t>(a);
}
-static inline int64_t ExecuteI64UConvertI32(uint32_t a, TrapReason* trap) {
+inline int64_t ExecuteI64UConvertI32(uint32_t a, TrapReason* trap) {
return static_cast<uint64_t>(a);
}
-static inline float ExecuteF32SConvertI32(int32_t a, TrapReason* trap) {
+inline float ExecuteF32SConvertI32(int32_t a, TrapReason* trap) {
return static_cast<float>(a);
}
-static inline float ExecuteF32UConvertI32(uint32_t a, TrapReason* trap) {
+inline float ExecuteF32UConvertI32(uint32_t a, TrapReason* trap) {
return static_cast<float>(a);
}
-static inline float ExecuteF32SConvertI64(int64_t a, TrapReason* trap) {
+inline float ExecuteF32SConvertI64(int64_t a, TrapReason* trap) {
float output;
int64_to_float32_wrapper(&a, &output);
return output;
}
-static inline float ExecuteF32UConvertI64(uint64_t a, TrapReason* trap) {
+inline float ExecuteF32UConvertI64(uint64_t a, TrapReason* trap) {
float output;
uint64_to_float32_wrapper(&a, &output);
return output;
}
-static inline float ExecuteF32ConvertF64(double a, TrapReason* trap) {
+inline float ExecuteF32ConvertF64(double a, TrapReason* trap) {
return static_cast<float>(a);
}
-static inline float ExecuteF32ReinterpretI32(int32_t a, TrapReason* trap) {
+inline float ExecuteF32ReinterpretI32(int32_t a, TrapReason* trap) {
return bit_cast<float>(a);
}
-static inline double ExecuteF64SConvertI32(int32_t a, TrapReason* trap) {
+inline double ExecuteF64SConvertI32(int32_t a, TrapReason* trap) {
return static_cast<double>(a);
}
-static inline double ExecuteF64UConvertI32(uint32_t a, TrapReason* trap) {
+inline double ExecuteF64UConvertI32(uint32_t a, TrapReason* trap) {
return static_cast<double>(a);
}
-static inline double ExecuteF64SConvertI64(int64_t a, TrapReason* trap) {
+inline double ExecuteF64SConvertI64(int64_t a, TrapReason* trap) {
double output;
int64_to_float64_wrapper(&a, &output);
return output;
}
-static inline double ExecuteF64UConvertI64(uint64_t a, TrapReason* trap) {
+inline double ExecuteF64UConvertI64(uint64_t a, TrapReason* trap) {
double output;
uint64_to_float64_wrapper(&a, &output);
return output;
}
-static inline double ExecuteF64ConvertF32(float a, TrapReason* trap) {
+inline double ExecuteF64ConvertF32(float a, TrapReason* trap) {
return static_cast<double>(a);
}
-static inline double ExecuteF64ReinterpretI64(int64_t a, TrapReason* trap) {
+inline double ExecuteF64ReinterpretI64(int64_t a, TrapReason* trap) {
return bit_cast<double>(a);
}
-static inline int32_t ExecuteI32ReinterpretF32(WasmVal a) {
+inline int32_t ExecuteI32ReinterpretF32(WasmVal a) {
return a.to_unchecked<int32_t>();
}
-static inline int64_t ExecuteI64ReinterpretF64(WasmVal a) {
+inline int64_t ExecuteI64ReinterpretF64(WasmVal a) {
return a.to_unchecked<int64_t>();
}
-static inline int32_t ExecuteGrowMemory(uint32_t delta_pages,
- WasmInstance* instance) {
+inline int32_t ExecuteGrowMemory(uint32_t delta_pages,
+ MaybeHandle<WasmInstanceObject> instance_obj,
+ WasmInstance* instance) {
+ DCHECK_EQ(0, instance->mem_size % WasmModule::kPageSize);
+ uint32_t old_pages = instance->mem_size / WasmModule::kPageSize;
+
+ // If an instance is set, execute GrowMemory on the instance. This will also
+ // update the WasmInstance struct used here.
+ if (!instance_obj.is_null()) {
+ Isolate* isolate = instance_obj.ToHandleChecked()->GetIsolate();
+ int32_t ret = WasmInstanceObject::GrowMemory(
+ isolate, instance_obj.ToHandleChecked(), delta_pages);
+ // Some sanity checks.
+ DCHECK_EQ(ret == -1 ? old_pages : old_pages + delta_pages,
+ instance->mem_size / WasmModule::kPageSize);
+ DCHECK(ret == -1 || static_cast<uint32_t>(ret) == old_pages);
+ return ret;
+ }
+
// TODO(ahaas): Move memory allocation to wasm-module.cc for better
// encapsulation.
if (delta_pages > FLAG_wasm_max_mem_pages ||
delta_pages > instance->module->max_mem_pages) {
return -1;
}
- uint32_t old_size = instance->mem_size;
- uint32_t new_size;
+
+ uint32_t new_pages = old_pages + delta_pages;
+ if (new_pages > FLAG_wasm_max_mem_pages ||
+ new_pages > instance->module->max_mem_pages) {
+ return -1;
+ }
+
byte* new_mem_start;
if (instance->mem_size == 0) {
// TODO(gdeepti): Fix bounds check to take into account size of memtype.
- new_size = delta_pages * wasm::WasmModule::kPageSize;
- new_mem_start = static_cast<byte*>(calloc(new_size, sizeof(byte)));
- if (!new_mem_start) {
- return -1;
- }
+ new_mem_start = static_cast<byte*>(
+ calloc(new_pages * WasmModule::kPageSize, sizeof(byte)));
+ if (!new_mem_start) return -1;
} else {
DCHECK_NOT_NULL(instance->mem_start);
- new_size = old_size + delta_pages * wasm::WasmModule::kPageSize;
- if (new_size / wasm::WasmModule::kPageSize > FLAG_wasm_max_mem_pages ||
- new_size / wasm::WasmModule::kPageSize >
- instance->module->max_mem_pages) {
- return -1;
- }
- new_mem_start = static_cast<byte*>(realloc(instance->mem_start, new_size));
- if (!new_mem_start) {
- return -1;
+ if (EnableGuardRegions()) {
+ v8::base::OS::Unprotect(instance->mem_start,
+ new_pages * WasmModule::kPageSize);
+ new_mem_start = instance->mem_start;
+ } else {
+ new_mem_start = static_cast<byte*>(
+ realloc(instance->mem_start, new_pages * WasmModule::kPageSize));
+ if (!new_mem_start) return -1;
}
// Zero initializing uninitialized memory from realloc
- memset(new_mem_start + old_size, 0, new_size - old_size);
+ memset(new_mem_start + old_pages * WasmModule::kPageSize, 0,
+ delta_pages * WasmModule::kPageSize);
}
instance->mem_start = new_mem_start;
- instance->mem_size = new_size;
- return static_cast<int32_t>(old_size / WasmModule::kPageSize);
+ instance->mem_size = new_pages * WasmModule::kPageSize;
+ return static_cast<int32_t>(old_pages);
}
enum InternalOpcode {
@@ -655,7 +659,7 @@ enum InternalOpcode {
#undef DECL_INTERNAL_ENUM
};
-static const char* OpcodeName(uint32_t val) {
+const char* OpcodeName(uint32_t val) {
switch (val) {
#define DECL_INTERNAL_CASE(name, value) \
case kInternal##name: \
@@ -666,7 +670,29 @@ static const char* OpcodeName(uint32_t val) {
return WasmOpcodes::OpcodeName(static_cast<WasmOpcode>(val));
}
-static const int kRunSteps = 1000;
+// Unwrap a wasm to js wrapper, return the callable heap object.
+// If the wrapper would throw a TypeError, return a null handle.
+Handle<HeapObject> UnwrapWasmToJSWrapper(Isolate* isolate,
+ Handle<Code> js_wrapper) {
+ DCHECK_EQ(Code::WASM_TO_JS_FUNCTION, js_wrapper->kind());
+ int mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
+ for (RelocIterator it(*js_wrapper, mask); !it.done(); it.next()) {
+ HeapObject* obj = it.rinfo()->target_object();
+ if (!obj->IsCallable()) continue;
+#ifdef DEBUG
+ // There should only be this one reference to a callable object.
+ for (it.next(); !it.done(); it.next()) {
+ HeapObject* other = it.rinfo()->target_object();
+ DCHECK(!other->IsCallable());
+ }
+#endif
+ return handle(obj, isolate);
+ }
+ // If we did not find a callable object, then there must be a reference to
+ // the WasmThrowTypeError runtime function.
+ // TODO(clemensh): Check that this is the case.
+ return Handle<HeapObject>::null();
+}
// A helper class to compute the control transfers for each bytecode offset.
// Control transfers allow Br, BrIf, BrTable, If, Else, and End bytecodes to
@@ -780,22 +806,22 @@ class ControlTransfers : public ZoneObject {
break;
}
case kExprBr: {
- BreakDepthOperand operand(&i, i.pc());
+ BreakDepthOperand<false> operand(&i, i.pc());
TRACE("control @%u: Br[depth=%u]\n", i.pc_offset(), operand.depth);
Control* c = &control_stack[control_stack.size() - operand.depth - 1];
c->Ref(&map_, start, i.pc());
break;
}
case kExprBrIf: {
- BreakDepthOperand operand(&i, i.pc());
+ BreakDepthOperand<false> operand(&i, i.pc());
TRACE("control @%u: BrIf[depth=%u]\n", i.pc_offset(), operand.depth);
Control* c = &control_stack[control_stack.size() - operand.depth - 1];
c->Ref(&map_, start, i.pc());
break;
}
case kExprBrTable: {
- BranchTableOperand operand(&i, i.pc());
- BranchTableIterator iterator(&i, operand);
+ BranchTableOperand<false> operand(&i, i.pc());
+ BranchTableIterator<false> iterator(&i, operand);
TRACE("control @%u: BrTable[count=%u]\n", i.pc_offset(),
operand.table_count);
while (iterator.has_next()) {
@@ -836,36 +862,109 @@ struct InterpreterCode {
const byte* at(pc_t pc) { return start + pc; }
};
+struct ExternalCallResult {
+ enum Type {
+ // The function should be executed inside this interpreter.
+ INTERNAL,
+ // For indirect calls: Table or function does not exist.
+ INVALID_FUNC,
+ // For indirect calls: Signature does not match expected signature.
+ SIGNATURE_MISMATCH,
+ // The function was executed and returned normally.
+ EXTERNAL_RETURNED,
+ // The function was executed, threw an exception, and the stack was unwound.
+ EXTERNAL_UNWOUND
+ };
+ Type type;
+ // If type is INTERNAL, this field holds the function to call internally.
+ InterpreterCode* interpreter_code;
+
+ ExternalCallResult(Type type) : type(type) { // NOLINT
+ DCHECK_NE(INTERNAL, type);
+ }
+ ExternalCallResult(Type type, InterpreterCode* code)
+ : type(type), interpreter_code(code) {
+ DCHECK_EQ(INTERNAL, type);
+ }
+};
+
// The main storage for interpreter code. It maps {WasmFunction} to the
// metadata needed to execute each function.
class CodeMap {
- public:
Zone* zone_;
const WasmModule* module_;
ZoneVector<InterpreterCode> interpreter_code_;
+ // Global handle to the wasm instance.
+ Handle<WasmInstanceObject> instance_;
+ // Global handle to array of unwrapped imports.
+ Handle<FixedArray> imported_functions_;
+ // Map from WASM_TO_JS wrappers to unwrapped imports (indexes into
+ // imported_functions_).
+ IdentityMap<int, ZoneAllocationPolicy> unwrapped_imports_;
- CodeMap(const WasmModule* module, const uint8_t* module_start, Zone* zone)
- : zone_(zone), module_(module), interpreter_code_(zone) {
+ public:
+ CodeMap(Isolate* isolate, const WasmModule* module,
+ const uint8_t* module_start, Zone* zone)
+ : zone_(zone),
+ module_(module),
+ interpreter_code_(zone),
+ unwrapped_imports_(isolate->heap(), ZoneAllocationPolicy(zone)) {
if (module == nullptr) return;
- for (size_t i = 0; i < module->functions.size(); ++i) {
- const WasmFunction* function = &module->functions[i];
- const byte* code_start = module_start + function->code_start_offset;
- const byte* code_end = module_start + function->code_end_offset;
- AddFunction(function, code_start, code_end);
+ interpreter_code_.reserve(module->functions.size());
+ for (const WasmFunction& function : module->functions) {
+ if (function.imported) {
+ DCHECK_EQ(function.code_start_offset, function.code_end_offset);
+ AddFunction(&function, nullptr, nullptr);
+ } else {
+ const byte* code_start = module_start + function.code_start_offset;
+ const byte* code_end = module_start + function.code_end_offset;
+ AddFunction(&function, code_start, code_end);
+ }
}
}
- InterpreterCode* FindCode(const WasmFunction* function) {
- if (function->func_index < interpreter_code_.size()) {
- InterpreterCode* code = &interpreter_code_[function->func_index];
- DCHECK_EQ(function, code->function);
- return Preprocess(code);
- }
- return nullptr;
+ ~CodeMap() {
+ // Destroy the global handles.
+ // Cast the location, not the handle, because the handle cast might access
+ // the object behind the handle.
+ GlobalHandles::Destroy(reinterpret_cast<Object**>(instance_.location()));
+ GlobalHandles::Destroy(
+ reinterpret_cast<Object**>(imported_functions_.location()));
+ }
+
+ const WasmModule* module() const { return module_; }
+ bool has_instance() const { return !instance_.is_null(); }
+ Handle<WasmInstanceObject> instance() const {
+ DCHECK(has_instance());
+ return instance_;
+ }
+ MaybeHandle<WasmInstanceObject> maybe_instance() const {
+ return has_instance() ? instance_ : MaybeHandle<WasmInstanceObject>();
+ }
+
+ void SetInstanceObject(WasmInstanceObject* instance) {
+ // Only set the instance once (otherwise we have to destroy the global
+ // handle first).
+ DCHECK(instance_.is_null());
+ DCHECK_EQ(instance->module(), module_);
+ instance_ = instance->GetIsolate()->global_handles()->Create(instance);
+ }
+
+ Code* GetImportedFunction(uint32_t function_index) {
+ DCHECK(!instance_.is_null());
+ DCHECK_GT(module_->num_imported_functions, function_index);
+ FixedArray* code_table = instance_->compiled_module()->ptr_to_code_table();
+ return Code::cast(code_table->get(static_cast<int>(function_index)));
+ }
+
+ InterpreterCode* GetCode(const WasmFunction* function) {
+ InterpreterCode* code = GetCode(function->func_index);
+ DCHECK_EQ(function, code->function);
+ return code;
}
InterpreterCode* GetCode(uint32_t function_index) {
- CHECK_LT(function_index, interpreter_code_.size());
+ DCHECK_LT(function_index, interpreter_code_.size());
return Preprocess(&interpreter_code_[function_index]);
}
@@ -880,7 +979,8 @@ class CodeMap {
}
InterpreterCode* Preprocess(InterpreterCode* code) {
- if (code->targets == nullptr && code->start) {
+ DCHECK_EQ(code->function->imported, code->start == nullptr);
+ if (code->targets == nullptr && code->start != nullptr) {
// Compute the control targets map and the local declarations.
CHECK(DecodeLocalDecls(&code->locals, code->start, code->end));
code->targets = new (zone_) ControlTransfers(
@@ -889,8 +989,8 @@ class CodeMap {
return code;
}
- int AddFunction(const WasmFunction* function, const byte* code_start,
- const byte* code_end) {
+ void AddFunction(const WasmFunction* function, const byte* code_start,
+ const byte* code_end) {
InterpreterCode code = {
function, BodyLocalDecls(zone_), code_start,
code_end, const_cast<byte*>(code_start), const_cast<byte*>(code_end),
@@ -898,33 +998,133 @@ class CodeMap {
DCHECK_EQ(interpreter_code_.size(), function->func_index);
interpreter_code_.push_back(code);
- return static_cast<int>(interpreter_code_.size()) - 1;
}
- bool SetFunctionCode(const WasmFunction* function, const byte* start,
+ void SetFunctionCode(const WasmFunction* function, const byte* start,
const byte* end) {
- InterpreterCode* code = FindCode(function);
- if (code == nullptr) return false;
+ DCHECK_LT(function->func_index, interpreter_code_.size());
+ InterpreterCode* code = &interpreter_code_[function->func_index];
+ DCHECK_EQ(function, code->function);
code->targets = nullptr;
code->orig_start = start;
code->orig_end = end;
code->start = const_cast<byte*>(start);
code->end = const_cast<byte*>(end);
Preprocess(code);
- return true;
+ }
+
+ // Returns a callable object if the imported function has a JS-compatible
+ // signature, or a null handle otherwise.
+ Handle<HeapObject> GetCallableObjectForJSImport(Isolate* isolate,
+ Handle<Code> code) {
+ DCHECK_EQ(Code::WASM_TO_JS_FUNCTION, code->kind());
+ int* unwrapped_index = unwrapped_imports_.Find(code);
+ if (unwrapped_index) {
+ return handle(
+ HeapObject::cast(imported_functions_->get(*unwrapped_index)),
+ isolate);
+ }
+ Handle<HeapObject> called_obj = UnwrapWasmToJSWrapper(isolate, code);
+ if (!called_obj.is_null()) {
+ // Cache the unwrapped callable object.
+ if (imported_functions_.is_null()) {
+ // This is the first call to an imported function. Allocate the
+ // FixedArray to cache unwrapped objects.
+ constexpr int kInitialCacheSize = 8;
+ Handle<FixedArray> new_imported_functions =
+ isolate->factory()->NewFixedArray(kInitialCacheSize, TENURED);
+ // First entry: Number of occupied slots.
+ new_imported_functions->set(0, Smi::kZero);
+ imported_functions_ =
+ isolate->global_handles()->Create(*new_imported_functions);
+ }
+ int this_idx = Smi::cast(imported_functions_->get(0))->value() + 1;
+ if (this_idx == imported_functions_->length()) {
+ Handle<FixedArray> new_imported_functions =
+ isolate->factory()->CopyFixedArrayAndGrow(imported_functions_,
+ this_idx / 2, TENURED);
+ // Update the existing global handle:
+ *imported_functions_.location() = *new_imported_functions;
+ }
+ DCHECK_GT(imported_functions_->length(), this_idx);
+ DCHECK(imported_functions_->get(this_idx)->IsUndefined(isolate));
+ imported_functions_->set(0, Smi::FromInt(this_idx));
+ imported_functions_->set(this_idx, *called_obj);
+ unwrapped_imports_.Set(code, this_idx);
+ }
+ return called_obj;
}
};
-namespace {
+Handle<Object> WasmValToNumber(Factory* factory, WasmVal val,
+ wasm::ValueType type) {
+ switch (type) {
+ case kWasmI32:
+ return factory->NewNumberFromInt(val.to<int32_t>());
+ case kWasmI64:
+ // wasm->js and js->wasm is illegal for i64 type.
+ UNREACHABLE();
+ return Handle<Object>::null();
+ case kWasmF32:
+ return factory->NewNumber(val.to<float>());
+ case kWasmF64:
+ return factory->NewNumber(val.to<double>());
+ default:
+ // TODO(wasm): Implement simd.
+ UNIMPLEMENTED();
+ return Handle<Object>::null();
+ }
+}
+
+// Convert JS value to WebAssembly, spec here:
+// https://github.com/WebAssembly/design/blob/master/JS.md#towebassemblyvalue
+WasmVal ToWebAssemblyValue(Isolate* isolate, Handle<Object> value,
+ wasm::ValueType type) {
+ switch (type) {
+ case kWasmI32: {
+ MaybeHandle<Object> maybe_i32 = Object::ToInt32(isolate, value);
+ // TODO(clemensh): Handle failure here (unwind).
+ int32_t value;
+ CHECK(maybe_i32.ToHandleChecked()->ToInt32(&value));
+ return WasmVal(value);
+ }
+ case kWasmI64:
+ // If the signature contains i64, a type error was thrown before.
+ UNREACHABLE();
+ case kWasmF32: {
+ MaybeHandle<Object> maybe_number = Object::ToNumber(value);
+ // TODO(clemensh): Handle failure here (unwind).
+ return WasmVal(
+ static_cast<float>(maybe_number.ToHandleChecked()->Number()));
+ }
+ case kWasmF64: {
+ MaybeHandle<Object> maybe_number = Object::ToNumber(value);
+ // TODO(clemensh): Handle failure here (unwind).
+ return WasmVal(maybe_number.ToHandleChecked()->Number());
+ }
+ default:
+ // TODO(wasm): Handle simd.
+ UNIMPLEMENTED();
+ return WasmVal();
+ }
+}
+
// Responsible for executing code directly.
class ThreadImpl {
+ struct Activation {
+ uint32_t fp;
+ uint32_t sp;
+ Activation(uint32_t fp, uint32_t sp) : fp(fp), sp(sp) {}
+ };
+
public:
ThreadImpl(Zone* zone, CodeMap* codemap, WasmInstance* instance)
: codemap_(codemap),
instance_(instance),
stack_(zone),
frames_(zone),
- blocks_(zone) {}
+ blocks_(zone),
+ activations_(zone) {}
//==========================================================================
// Implementation of public interface for WasmInterpreter::Thread.
@@ -932,41 +1132,31 @@ class ThreadImpl {
WasmInterpreter::State state() { return state_; }
- void PushFrame(const WasmFunction* function, WasmVal* args) {
- InterpreterCode* code = codemap()->FindCode(function);
- CHECK_NOT_NULL(code);
- ++num_interpreted_calls_;
- frames_.push_back({code, 0, 0, stack_.size()});
+ void InitFrame(const WasmFunction* function, WasmVal* args) {
+ DCHECK_EQ(current_activation().fp, frames_.size());
+ InterpreterCode* code = codemap()->GetCode(function);
for (size_t i = 0; i < function->sig->parameter_count(); ++i) {
stack_.push_back(args[i]);
}
- frames_.back().ret_pc = InitLocals(code);
- blocks_.push_back(
- {0, stack_.size(), frames_.size(),
- static_cast<uint32_t>(code->function->sig->return_count())});
- TRACE(" => PushFrame(#%u @%zu)\n", code->function->func_index,
- frames_.back().ret_pc);
+ PushFrame(code);
}
- WasmInterpreter::State Run() {
- do {
+ WasmInterpreter::State Run(int num_steps = -1) {
+ DCHECK(state_ == WasmInterpreter::STOPPED ||
+ state_ == WasmInterpreter::PAUSED);
+ DCHECK(num_steps == -1 || num_steps > 0);
+ if (num_steps == -1) {
TRACE(" => Run()\n");
- if (state_ == WasmInterpreter::STOPPED ||
- state_ == WasmInterpreter::PAUSED) {
- state_ = WasmInterpreter::RUNNING;
- Execute(frames_.back().code, frames_.back().ret_pc, kRunSteps);
- }
- } while (state_ == WasmInterpreter::STOPPED);
- return state_;
- }
-
- WasmInterpreter::State Step() {
- TRACE(" => Step()\n");
- if (state_ == WasmInterpreter::STOPPED ||
- state_ == WasmInterpreter::PAUSED) {
- state_ = WasmInterpreter::RUNNING;
- Execute(frames_.back().code, frames_.back().ret_pc, 1);
+ } else if (num_steps == 1) {
+ TRACE(" => Step()\n");
+ } else {
+ TRACE(" => Run(%d)\n", num_steps);
}
+ state_ = WasmInterpreter::RUNNING;
+ Execute(frames_.back().code, frames_.back().pc, num_steps);
+ // If state_ is STOPPED, the current activation must be fully unwound.
+ DCHECK_IMPLIES(state_ == WasmInterpreter::STOPPED,
+ current_activation().fp == frames_.size());
return state_;
}
@@ -991,21 +1181,26 @@ class ThreadImpl {
DCHECK_LE(0, index);
DCHECK_GT(frames_.size(), index);
Frame* frame = &frames_[index];
- DCHECK_GE(kMaxInt, frame->ret_pc);
+ DCHECK_GE(kMaxInt, frame->pc);
DCHECK_GE(kMaxInt, frame->sp);
DCHECK_GE(kMaxInt, frame->llimit());
- return frame_cons(frame->code->function, static_cast<int>(frame->ret_pc),
+ return frame_cons(frame->code->function, static_cast<int>(frame->pc),
static_cast<int>(frame->sp),
static_cast<int>(frame->llimit()));
}
- WasmVal GetReturnValue(int index) {
+ WasmVal GetReturnValue(uint32_t index) {
if (state_ == WasmInterpreter::TRAPPED) return WasmVal(0xdeadbeef);
- CHECK_EQ(WasmInterpreter::FINISHED, state_);
- CHECK_LT(static_cast<size_t>(index), stack_.size());
- return stack_[index];
+ DCHECK_EQ(WasmInterpreter::FINISHED, state_);
+ Activation act = current_activation();
+ // Current activation must be finished.
+ DCHECK_EQ(act.fp, frames_.size());
+ DCHECK_GT(stack_.size(), act.sp + index);
+ return stack_[act.sp + index];
}
+ TrapReason GetTrapReason() { return trap_reason_; }
+
pc_t GetBreakpointPc() { return break_pc_; }
bool PossibleNondeterminism() { return possible_nondeterminism_; }
@@ -1016,12 +1211,62 @@ class ThreadImpl {
void ClearBreakFlags() { break_flags_ = WasmInterpreter::BreakFlag::None; }
+ uint32_t NumActivations() {
+ return static_cast<uint32_t>(activations_.size());
+ }
+
+ uint32_t StartActivation() {
+ TRACE("----- START ACTIVATION %zu -----\n", activations_.size());
+ // If you use activations, use them consistently:
+ DCHECK_IMPLIES(activations_.empty(), frames_.empty());
+ DCHECK_IMPLIES(activations_.empty(), stack_.empty());
+ uint32_t activation_id = static_cast<uint32_t>(activations_.size());
+ activations_.emplace_back(static_cast<uint32_t>(frames_.size()),
+ static_cast<uint32_t>(stack_.size()));
+ state_ = WasmInterpreter::STOPPED;
+ return activation_id;
+ }
+
+ void FinishActivation(uint32_t id) {
+ TRACE("----- FINISH ACTIVATION %zu -----\n", activations_.size() - 1);
+ DCHECK_LT(0, activations_.size());
+ DCHECK_EQ(activations_.size() - 1, id);
+ // Stack height must match the start of this activation (otherwise unwind
+ // first).
+ DCHECK_EQ(activations_.back().fp, frames_.size());
+ DCHECK_LE(activations_.back().sp, stack_.size());
+ stack_.resize(activations_.back().sp);
+ activations_.pop_back();
+ }
+
+ uint32_t ActivationFrameBase(uint32_t id) {
+ DCHECK_GT(activations_.size(), id);
+ return activations_[id].fp;
+ }
+
+ // Handle a thrown exception. Returns whether the exception was handled inside
+ // the current activation. Unwinds the interpreted stack accordingly.
+ WasmInterpreter::Thread::ExceptionHandlingResult HandleException(
+ Isolate* isolate) {
+ DCHECK(isolate->has_pending_exception());
+ // TODO(wasm): Add wasm exception handling (would return true).
+ USE(isolate->pending_exception());
+ TRACE("----- UNWIND -----\n");
+ DCHECK_LT(0, activations_.size());
+ Activation& act = activations_.back();
+ DCHECK_LE(act.fp, frames_.size());
+ frames_.resize(act.fp);
+ DCHECK_LE(act.sp, stack_.size());
+ stack_.resize(act.sp);
+ state_ = WasmInterpreter::STOPPED;
+ return WasmInterpreter::Thread::UNWOUND;
+ }
+
private:
// Entries on the stack of functions being evaluated.
struct Frame {
InterpreterCode* code;
- pc_t call_pc;
- pc_t ret_pc;
+ pc_t pc;
sp_t sp;
// Limit of parameters.
@@ -1048,6 +1293,9 @@ class ThreadImpl {
bool possible_nondeterminism_ = false;
uint8_t break_flags_ = 0; // a combination of WasmInterpreter::BreakFlag
uint64_t num_interpreted_calls_ = 0;
+ // Store the stack height of each activation (for unwind and frame
+ // inspection).
+ ZoneVector<Activation> activations_;
CodeMap* codemap() { return codemap_; }
WasmInstance* instance() { return instance_; }
@@ -1060,40 +1308,31 @@ class ThreadImpl {
}
// Push a frame with arguments already on the stack.
- void PushFrame(InterpreterCode* code, pc_t call_pc, pc_t ret_pc) {
- CHECK_NOT_NULL(code);
- DCHECK(!frames_.empty());
+ void PushFrame(InterpreterCode* code) {
+ DCHECK_NOT_NULL(code);
++num_interpreted_calls_;
- frames_.back().call_pc = call_pc;
- frames_.back().ret_pc = ret_pc;
size_t arity = code->function->sig->parameter_count();
- DCHECK_GE(stack_.size(), arity);
// The parameters will overlap the arguments already on the stack.
- frames_.push_back({code, 0, 0, stack_.size() - arity});
+ DCHECK_GE(stack_.size(), arity);
+ frames_.push_back({code, 0, stack_.size() - arity});
blocks_.push_back(
{0, stack_.size(), frames_.size(),
static_cast<uint32_t>(code->function->sig->return_count())});
- frames_.back().ret_pc = InitLocals(code);
- TRACE(" => push func#%u @%zu\n", code->function->func_index,
- frames_.back().ret_pc);
+ frames_.back().pc = InitLocals(code);
+ TRACE(" => PushFrame #%zu (#%u @%zu)\n", frames_.size() - 1,
+ code->function->func_index, frames_.back().pc);
}
pc_t InitLocals(InterpreterCode* code) {
for (auto p : code->locals.type_list) {
WasmVal val;
switch (p) {
- case kWasmI32:
- val = WasmVal(static_cast<int32_t>(0));
- break;
- case kWasmI64:
- val = WasmVal(static_cast<int64_t>(0));
- break;
- case kWasmF32:
- val = WasmVal(static_cast<float>(0));
- break;
- case kWasmF64:
- val = WasmVal(static_cast<double>(0));
- break;
+#define CASE_TYPE(wasm, ctype) \
+ case kWasm##wasm: \
+ val = WasmVal(static_cast<ctype>(0)); \
+ break;
+ WASM_CTYPES(CASE_TYPE)
+#undef CASE_TYPE
default:
UNREACHABLE();
break;
@@ -1104,9 +1343,8 @@ class ThreadImpl {
}
void CommitPc(pc_t pc) {
- if (!frames_.empty()) {
- frames_.back().ret_pc = pc;
- }
+ DCHECK(!frames_.empty());
+ frames_.back().pc = pc;
}
bool SkipBreakpoint(InterpreterCode* code, pc_t pc) {
@@ -1130,7 +1368,24 @@ class ThreadImpl {
return LookupTarget(code, pc);
}
- bool DoReturn(InterpreterCode** code, pc_t* pc, pc_t* limit, size_t arity) {
+ pc_t ReturnPc(Decoder* decoder, InterpreterCode* code, pc_t pc) {
+ switch (code->orig_start[pc]) {
+ case kExprCallFunction: {
+ CallFunctionOperand<false> operand(decoder, code->at(pc));
+ return pc + 1 + operand.length;
+ }
+ case kExprCallIndirect: {
+ CallIndirectOperand<false> operand(decoder, code->at(pc));
+ return pc + 1 + operand.length;
+ }
+ default:
+ UNREACHABLE();
+ return 0;
+ }
+ }
+
+ bool DoReturn(Decoder* decoder, InterpreterCode** code, pc_t* pc, pc_t* limit,
+ size_t arity) {
DCHECK_GT(frames_.size(), 0);
// Pop all blocks for this frame.
while (!blocks_.empty() && blocks_.back().fp == frames_.size()) {
@@ -1139,28 +1394,37 @@ class ThreadImpl {
sp_t dest = frames_.back().sp;
frames_.pop_back();
- if (frames_.size() == 0) {
+ if (frames_.size() == current_activation().fp) {
// A return from the last frame terminates the execution.
state_ = WasmInterpreter::FINISHED;
- DoStackTransfer(0, arity);
+ DoStackTransfer(dest, arity);
TRACE(" => finish\n");
return false;
} else {
// Return to caller frame.
Frame* top = &frames_.back();
*code = top->code;
- *pc = top->ret_pc;
+ decoder->Reset((*code)->start, (*code)->end);
+ *pc = ReturnPc(decoder, *code, top->pc);
*limit = top->code->end - top->code->start;
- TRACE(" => pop func#%u @%zu\n", (*code)->function->func_index, *pc);
+ TRACE(" => Return to #%zu (#%u @%zu)\n", frames_.size() - 1,
+ (*code)->function->func_index, *pc);
DoStackTransfer(dest, arity);
return true;
}
}
- void DoCall(InterpreterCode* target, pc_t* pc, pc_t ret_pc, pc_t* limit) {
- PushFrame(target, *pc, ret_pc);
- *pc = frames_.back().ret_pc;
+ // Returns true if the call was successful, false if the stack check failed
+ // and the current activation was fully unwound.
+ bool DoCall(Decoder* decoder, InterpreterCode* target, pc_t* pc,
+ pc_t* limit) WARN_UNUSED_RESULT {
+ frames_.back().pc = *pc;
+ PushFrame(target);
+ if (!DoStackCheck()) return false;
+ *pc = frames_.back().pc;
*limit = target->end - target->start;
+ decoder->Reset(target->start, target->end);
+ return true;
}
// Copies {arity} values on the top of the stack down the stack to {dest},
@@ -1180,13 +1444,17 @@ class ThreadImpl {
stack_.resize(stack_.size() - pop_count);
}
+ template <typename mtype>
+ inline bool BoundsCheck(uint32_t mem_size, uint32_t offset, uint32_t index) {
+ return sizeof(mtype) <= mem_size && offset <= mem_size - sizeof(mtype) &&
+ index <= mem_size - sizeof(mtype) - offset;
+ }
+
template <typename ctype, typename mtype>
bool ExecuteLoad(Decoder* decoder, InterpreterCode* code, pc_t pc, int& len) {
- MemoryAccessOperand operand(decoder, code->at(pc), sizeof(ctype));
+ MemoryAccessOperand<false> operand(decoder, code->at(pc), sizeof(ctype));
uint32_t index = Pop().to<uint32_t>();
- size_t effective_mem_size = instance()->mem_size - sizeof(mtype);
- if (operand.offset > effective_mem_size ||
- index > (effective_mem_size - operand.offset)) {
+ if (!BoundsCheck<mtype>(instance()->mem_size, operand.offset, index)) {
DoTrap(kTrapMemOutOfBounds, pc);
return false;
}
@@ -1201,13 +1469,11 @@ class ThreadImpl {
template <typename ctype, typename mtype>
bool ExecuteStore(Decoder* decoder, InterpreterCode* code, pc_t pc,
int& len) {
- MemoryAccessOperand operand(decoder, code->at(pc), sizeof(ctype));
+ MemoryAccessOperand<false> operand(decoder, code->at(pc), sizeof(ctype));
WasmVal val = Pop();
uint32_t index = Pop().to<uint32_t>();
- size_t effective_mem_size = instance()->mem_size - sizeof(mtype);
- if (operand.offset > effective_mem_size ||
- index > (effective_mem_size - operand.offset)) {
+ if (!BoundsCheck<mtype>(instance()->mem_size, operand.offset, index)) {
DoTrap(kTrapMemOutOfBounds, pc);
return false;
}
@@ -1223,15 +1489,46 @@ class ThreadImpl {
return true;
}
+ // Check if our control stack (frames_) exceeds the limit. Trigger stack
+ // overflow if it does, and unwinding the current frame.
+ // Returns true if execution can continue, false if the current activation was
+ // fully unwound.
+ // Do call this function immediately *after* pushing a new frame. The pc of
+ // the top frame will be reset to 0 if the stack check fails.
+ bool DoStackCheck() WARN_UNUSED_RESULT {
+ // Sum up the size of all dynamically growing structures.
+ if (V8_LIKELY(frames_.size() <= kV8MaxWasmInterpretedStackSize)) {
+ return true;
+ }
+ if (!codemap()->has_instance()) {
+ // In test mode: Just abort.
+ FATAL("wasm interpreter: stack overflow");
+ }
+ // The pc of the top frame is initialized to the first instruction. We reset
+ // it to 0 here such that we report the same position as in compiled code.
+ frames_.back().pc = 0;
+ Isolate* isolate = codemap()->instance()->GetIsolate();
+ HandleScope handle_scope(isolate);
+ isolate->StackOverflow();
+ return HandleException(isolate) == WasmInterpreter::Thread::HANDLED;
+ }
+
void Execute(InterpreterCode* code, pc_t pc, int max) {
Decoder decoder(code->start, code->end);
pc_t limit = code->end - code->start;
- while (--max >= 0) {
-#define PAUSE_IF_BREAK_FLAG(flag) \
- if (V8_UNLIKELY(break_flags_ & WasmInterpreter::BreakFlag::flag)) max = 0;
+ bool hit_break = false;
+
+ while (true) {
+#define PAUSE_IF_BREAK_FLAG(flag) \
+ if (V8_UNLIKELY(break_flags_ & WasmInterpreter::BreakFlag::flag)) { \
+ hit_break = true; \
+ max = 0; \
+ }
DCHECK_GT(limit, pc);
+ DCHECK_NOT_NULL(code->start);
+ // Do first check for a breakpoint, in order to set hit_break correctly.
const char* skip = " ";
int len = 1;
byte opcode = code->start[pc];
@@ -1246,10 +1543,15 @@ class ThreadImpl {
WasmOpcodes::OpcodeName(static_cast<WasmOpcode>(orig)));
TraceValueStack();
TRACE("\n");
+ hit_break = true;
break;
}
}
+ // If max is 0, break. If max is positive (a limit is set), decrement it.
+ if (max == 0) break;
+ if (max > 0) --max;
+
USE(skip);
TRACE("@%-3zu: %s%-24s:", pc, skip,
WasmOpcodes::OpcodeName(static_cast<WasmOpcode>(orig)));
@@ -1260,19 +1562,19 @@ class ThreadImpl {
case kExprNop:
break;
case kExprBlock: {
- BlockTypeOperand operand(&decoder, code->at(pc));
+ BlockTypeOperand<false> operand(&decoder, code->at(pc));
blocks_.push_back({pc, stack_.size(), frames_.size(), operand.arity});
len = 1 + operand.length;
break;
}
case kExprLoop: {
- BlockTypeOperand operand(&decoder, code->at(pc));
+ BlockTypeOperand<false> operand(&decoder, code->at(pc));
blocks_.push_back({pc, stack_.size(), frames_.size(), 0});
len = 1 + operand.length;
break;
}
case kExprIf: {
- BlockTypeOperand operand(&decoder, code->at(pc));
+ BlockTypeOperand<false> operand(&decoder, code->at(pc));
WasmVal cond = Pop();
bool is_true = cond.to<uint32_t>() != 0;
blocks_.push_back({pc, stack_.size(), frames_.size(), operand.arity});
@@ -1300,13 +1602,13 @@ class ThreadImpl {
break;
}
case kExprBr: {
- BreakDepthOperand operand(&decoder, code->at(pc));
+ BreakDepthOperand<false> operand(&decoder, code->at(pc));
len = DoBreak(code, pc, operand.depth);
TRACE(" br => @%zu\n", pc + len);
break;
}
case kExprBrIf: {
- BreakDepthOperand operand(&decoder, code->at(pc));
+ BreakDepthOperand<false> operand(&decoder, code->at(pc));
WasmVal cond = Pop();
bool is_true = cond.to<uint32_t>() != 0;
if (is_true) {
@@ -1319,8 +1621,8 @@ class ThreadImpl {
break;
}
case kExprBrTable: {
- BranchTableOperand operand(&decoder, code->at(pc));
- BranchTableIterator iterator(&decoder, operand);
+ BranchTableOperand<false> operand(&decoder, code->at(pc));
+ BranchTableIterator<false> iterator(&decoder, operand);
uint32_t key = Pop().to<uint32_t>();
uint32_t depth = 0;
if (key >= operand.table_count) key = operand.table_count;
@@ -1334,58 +1636,56 @@ class ThreadImpl {
}
case kExprReturn: {
size_t arity = code->function->sig->return_count();
- if (!DoReturn(&code, &pc, &limit, arity)) return;
- decoder.Reset(code->start, code->end);
+ if (!DoReturn(&decoder, &code, &pc, &limit, arity)) return;
PAUSE_IF_BREAK_FLAG(AfterReturn);
continue;
}
case kExprUnreachable: {
- DoTrap(kTrapUnreachable, pc);
- return CommitPc(pc);
+ return DoTrap(kTrapUnreachable, pc);
}
case kExprEnd: {
blocks_.pop_back();
break;
}
case kExprI32Const: {
- ImmI32Operand operand(&decoder, code->at(pc));
+ ImmI32Operand<false> operand(&decoder, code->at(pc));
Push(pc, WasmVal(operand.value));
len = 1 + operand.length;
break;
}
case kExprI64Const: {
- ImmI64Operand operand(&decoder, code->at(pc));
+ ImmI64Operand<false> operand(&decoder, code->at(pc));
Push(pc, WasmVal(operand.value));
len = 1 + operand.length;
break;
}
case kExprF32Const: {
- ImmF32Operand operand(&decoder, code->at(pc));
+ ImmF32Operand<false> operand(&decoder, code->at(pc));
Push(pc, WasmVal(operand.value));
len = 1 + operand.length;
break;
}
case kExprF64Const: {
- ImmF64Operand operand(&decoder, code->at(pc));
+ ImmF64Operand<false> operand(&decoder, code->at(pc));
Push(pc, WasmVal(operand.value));
len = 1 + operand.length;
break;
}
case kExprGetLocal: {
- LocalIndexOperand operand(&decoder, code->at(pc));
+ LocalIndexOperand<false> operand(&decoder, code->at(pc));
Push(pc, stack_[frames_.back().sp + operand.index]);
len = 1 + operand.length;
break;
}
case kExprSetLocal: {
- LocalIndexOperand operand(&decoder, code->at(pc));
+ LocalIndexOperand<false> operand(&decoder, code->at(pc));
WasmVal val = Pop();
stack_[frames_.back().sp + operand.index] = val;
len = 1 + operand.length;
break;
}
case kExprTeeLocal: {
- LocalIndexOperand operand(&decoder, code->at(pc));
+ LocalIndexOperand<false> operand(&decoder, code->at(pc));
WasmVal val = Pop();
stack_[frames_.back().sp + operand.index] = val;
Push(pc, val);
@@ -1397,78 +1697,97 @@ class ThreadImpl {
break;
}
case kExprCallFunction: {
- CallFunctionOperand operand(&decoder, code->at(pc));
+ CallFunctionOperand<false> operand(&decoder, code->at(pc));
InterpreterCode* target = codemap()->GetCode(operand.index);
- DoCall(target, &pc, pc + 1 + operand.length, &limit);
+ if (target->function->imported) {
+ CommitPc(pc);
+ ExternalCallResult result =
+ CallImportedFunction(target->function->func_index);
+ switch (result.type) {
+ case ExternalCallResult::INTERNAL:
+ // The import is a function of this instance. Call it directly.
+ target = result.interpreter_code;
+ DCHECK(!target->function->imported);
+ break;
+ case ExternalCallResult::INVALID_FUNC:
+ case ExternalCallResult::SIGNATURE_MISMATCH:
+ // Direct calls are checked statically.
+ UNREACHABLE();
+ case ExternalCallResult::EXTERNAL_RETURNED:
+ PAUSE_IF_BREAK_FLAG(AfterCall);
+ len = 1 + operand.length;
+ break;
+ case ExternalCallResult::EXTERNAL_UNWOUND:
+ return;
+ }
+ if (result.type != ExternalCallResult::INTERNAL) break;
+ }
+ // Execute an internal call.
+ if (!DoCall(&decoder, target, &pc, &limit)) return;
code = target;
- decoder.Reset(code->start, code->end);
PAUSE_IF_BREAK_FLAG(AfterCall);
- continue;
- }
+ continue; // don't bump pc
+ } break;
case kExprCallIndirect: {
- CallIndirectOperand operand(&decoder, code->at(pc));
+ CallIndirectOperand<false> operand(&decoder, code->at(pc));
uint32_t entry_index = Pop().to<uint32_t>();
// Assume only one table for now.
DCHECK_LE(module()->function_tables.size(), 1u);
- InterpreterCode* target = codemap()->GetIndirectCode(0, entry_index);
- if (target == nullptr) {
- return DoTrap(kTrapFuncInvalid, pc);
- } else if (target->function->sig_index != operand.index) {
- // If not an exact match, we have to do a canonical check.
- // TODO(titzer): make this faster with some kind of caching?
- const WasmIndirectFunctionTable* table =
- &module()->function_tables[0];
- int function_key = table->map.Find(target->function->sig);
- if (function_key < 0 ||
- (function_key !=
- table->map.Find(module()->signatures[operand.index]))) {
+ ExternalCallResult result =
+ CallIndirectFunction(0, entry_index, operand.index);
+ switch (result.type) {
+ case ExternalCallResult::INTERNAL:
+ // The import is a function of this instance. Call it directly.
+ if (!DoCall(&decoder, result.interpreter_code, &pc, &limit))
+ return;
+ code = result.interpreter_code;
+ PAUSE_IF_BREAK_FLAG(AfterCall);
+ continue; // don't bump pc
+ case ExternalCallResult::INVALID_FUNC:
+ return DoTrap(kTrapFuncInvalid, pc);
+ case ExternalCallResult::SIGNATURE_MISMATCH:
return DoTrap(kTrapFuncSigMismatch, pc);
- }
+ case ExternalCallResult::EXTERNAL_RETURNED:
+ PAUSE_IF_BREAK_FLAG(AfterCall);
+ len = 1 + operand.length;
+ break;
+ case ExternalCallResult::EXTERNAL_UNWOUND:
+ return;
}
-
- DoCall(target, &pc, pc + 1 + operand.length, &limit);
- code = target;
- decoder.Reset(code->start, code->end);
- PAUSE_IF_BREAK_FLAG(AfterCall);
- continue;
- }
+ } break;
case kExprGetGlobal: {
- GlobalIndexOperand operand(&decoder, code->at(pc));
+ GlobalIndexOperand<false> operand(&decoder, code->at(pc));
const WasmGlobal* global = &module()->globals[operand.index];
byte* ptr = instance()->globals_start + global->offset;
- ValueType type = global->type;
WasmVal val;
- if (type == kWasmI32) {
- val = WasmVal(*reinterpret_cast<int32_t*>(ptr));
- } else if (type == kWasmI64) {
- val = WasmVal(*reinterpret_cast<int64_t*>(ptr));
- } else if (type == kWasmF32) {
- val = WasmVal(*reinterpret_cast<float*>(ptr));
- } else if (type == kWasmF64) {
- val = WasmVal(*reinterpret_cast<double*>(ptr));
- } else {
- UNREACHABLE();
+ switch (global->type) {
+#define CASE_TYPE(wasm, ctype) \
+ case kWasm##wasm: \
+ val = WasmVal(*reinterpret_cast<ctype*>(ptr)); \
+ break;
+ WASM_CTYPES(CASE_TYPE)
+#undef CASE_TYPE
+ default:
+ UNREACHABLE();
}
Push(pc, val);
len = 1 + operand.length;
break;
}
case kExprSetGlobal: {
- GlobalIndexOperand operand(&decoder, code->at(pc));
+ GlobalIndexOperand<false> operand(&decoder, code->at(pc));
const WasmGlobal* global = &module()->globals[operand.index];
byte* ptr = instance()->globals_start + global->offset;
- ValueType type = global->type;
WasmVal val = Pop();
- if (type == kWasmI32) {
- *reinterpret_cast<int32_t*>(ptr) = val.to<int32_t>();
- } else if (type == kWasmI64) {
- *reinterpret_cast<int64_t*>(ptr) = val.to<int64_t>();
- } else if (type == kWasmF32) {
- *reinterpret_cast<float*>(ptr) = val.to<float>();
- } else if (type == kWasmF64) {
- *reinterpret_cast<double*>(ptr) = val.to<double>();
- } else {
- UNREACHABLE();
+ switch (global->type) {
+#define CASE_TYPE(wasm, ctype) \
+ case kWasm##wasm: \
+ *reinterpret_cast<ctype*>(ptr) = val.to<ctype>(); \
+ break;
+ WASM_CTYPES(CASE_TYPE)
+#undef CASE_TYPE
+ default:
+ UNREACHABLE();
}
len = 1 + operand.length;
break;
@@ -1517,7 +1836,7 @@ class ThreadImpl {
case kExpr##name: { \
uint32_t index = Pop().to<uint32_t>(); \
ctype result; \
- if (index >= (instance()->mem_size - sizeof(mtype))) { \
+ if (!BoundsCheck<mtype>(instance()->mem_size, 0, index)) { \
result = defval; \
} else { \
byte* addr = instance()->mem_start + index; \
@@ -1542,7 +1861,7 @@ class ThreadImpl {
case kExpr##name: { \
WasmVal val = Pop(); \
uint32_t index = Pop().to<uint32_t>(); \
- if (index < (instance()->mem_size - sizeof(mtype))) { \
+ if (BoundsCheck<mtype>(instance()->mem_size, 0, index)) { \
byte* addr = instance()->mem_start + index; \
/* TODO(titzer): alignment for asmjs store mem? */ \
*(reinterpret_cast<mtype*>(addr)) = static_cast<mtype>(val.to<ctype>()); \
@@ -1558,14 +1877,15 @@ class ThreadImpl {
ASMJS_STORE_CASE(F64AsmjsStoreMem, double, double);
#undef ASMJS_STORE_CASE
case kExprGrowMemory: {
- MemoryIndexOperand operand(&decoder, code->at(pc));
+ MemoryIndexOperand<false> operand(&decoder, code->at(pc));
uint32_t delta_pages = Pop().to<uint32_t>();
- Push(pc, WasmVal(ExecuteGrowMemory(delta_pages, instance())));
+ Push(pc, WasmVal(ExecuteGrowMemory(
+ delta_pages, codemap_->maybe_instance(), instance())));
len = 1 + operand.length;
break;
}
case kExprMemorySize: {
- MemoryIndexOperand operand(&decoder, code->at(pc));
+ MemoryIndexOperand<false> operand(&decoder, code->at(pc));
Push(pc, WasmVal(static_cast<uint32_t>(instance()->mem_size /
WasmModule::kPageSize)));
len = 1 + operand.length;
@@ -1656,16 +1976,15 @@ class ThreadImpl {
if (pc == limit) {
// Fell off end of code; do an implicit return.
TRACE("@%-3zu: ImplicitReturn\n", pc);
- if (!DoReturn(&code, &pc, &limit, code->function->sig->return_count()))
+ if (!DoReturn(&decoder, &code, &pc, &limit,
+ code->function->sig->return_count()))
return;
- decoder.Reset(code->start, code->end);
PAUSE_IF_BREAK_FLAG(AfterReturn);
}
}
- // Set break_pc_, even though we might have stopped because max was reached.
- // We don't want to stop after executing zero instructions next time.
- break_pc_ = pc;
+
state_ = WasmInterpreter::PAUSED;
+ break_pc_ = hit_break ? pc : kInvalidPc;
CommitPc(pc);
}
@@ -1694,7 +2013,8 @@ class ThreadImpl {
void Push(pc_t pc, WasmVal val) {
// TODO(titzer): store PC as well?
- if (val.type != kWasmStmt) stack_.push_back(val);
+ DCHECK_NE(kWasmStmt, val.type);
+ stack_.push_back(val);
}
void TraceStack(const char* phase, pc_t pc) {
@@ -1744,6 +2064,174 @@ class ThreadImpl {
}
#endif // DEBUG
}
+
+ ExternalCallResult TryHandleException(Isolate* isolate) {
+ if (HandleException(isolate) == WasmInterpreter::Thread::UNWOUND) {
+ return {ExternalCallResult::EXTERNAL_UNWOUND};
+ }
+ return {ExternalCallResult::EXTERNAL_RETURNED};
+ }
+
+ ExternalCallResult CallCodeObject(Isolate* isolate, Handle<Code> code,
+ FunctionSig* signature) {
+ DCHECK(AllowHandleAllocation::IsAllowed());
+ DCHECK(AllowHeapAllocation::IsAllowed());
+
+ if (code->kind() == Code::WASM_FUNCTION) {
+ FixedArray* deopt_data = code->deoptimization_data();
+ DCHECK_EQ(2, deopt_data->length());
+ WasmInstanceObject* target_instance =
+ WasmInstanceObject::cast(WeakCell::cast(deopt_data->get(0))->value());
+ if (target_instance != *codemap()->instance()) {
+ // TODO(wasm): Implement calling functions of other instances/modules.
+ UNIMPLEMENTED();
+ }
+ int target_func_idx = Smi::cast(deopt_data->get(1))->value();
+ DCHECK_LE(0, target_func_idx);
+ return {ExternalCallResult::INTERNAL,
+ codemap()->GetCode(target_func_idx)};
+ }
+
+ Handle<HeapObject> target =
+ codemap()->GetCallableObjectForJSImport(isolate, code);
+
+ if (target.is_null()) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kWasmTrapTypeError));
+ return TryHandleException(isolate);
+ }
+
+#if DEBUG
+ std::ostringstream oss;
+ target->HeapObjectShortPrint(oss);
+ TRACE(" => Calling imported function %s\n", oss.str().c_str());
+#endif
+
+ int num_args = static_cast<int>(signature->parameter_count());
+
+ // Get all arguments as JS values.
+ std::vector<Handle<Object>> args;
+ args.reserve(num_args);
+ WasmVal* wasm_args = stack_.data() + (stack_.size() - num_args);
+ for (int i = 0; i < num_args; ++i) {
+ args.push_back(WasmValToNumber(isolate->factory(), wasm_args[i],
+ signature->GetParam(i)));
+ }
+
+ // The receiver is the global proxy if in sloppy mode (default), undefined
+ // if in strict mode.
+ Handle<Object> receiver = isolate->global_proxy();
+ if (target->IsJSFunction() &&
+ is_strict(JSFunction::cast(*target)->shared()->language_mode())) {
+ receiver = isolate->factory()->undefined_value();
+ }
+
+ MaybeHandle<Object> maybe_retval =
+ Execution::Call(isolate, target, receiver, num_args, args.data());
+ if (maybe_retval.is_null()) return TryHandleException(isolate);
+
+ Handle<Object> retval = maybe_retval.ToHandleChecked();
+ // Pop arguments off the stack.
+ stack_.resize(stack_.size() - num_args);
+ if (signature->return_count() > 0) {
+ // TODO(wasm): Handle multiple returns.
+ DCHECK_EQ(1, signature->return_count());
+ stack_.push_back(
+ ToWebAssemblyValue(isolate, retval, signature->GetReturn()));
+ }
+ return {ExternalCallResult::EXTERNAL_RETURNED};
+ }
+
+ ExternalCallResult CallImportedFunction(uint32_t function_index) {
+ // Use a new HandleScope to avoid leaking / accumulating handles in the
+ // outer scope.
+ Isolate* isolate = codemap()->instance()->GetIsolate();
+ HandleScope handle_scope(isolate);
+
+ Handle<Code> target(codemap()->GetImportedFunction(function_index),
+ isolate);
+ return CallCodeObject(isolate, target,
+ codemap()->module()->functions[function_index].sig);
+ }
+
+ ExternalCallResult CallIndirectFunction(uint32_t table_index,
+ uint32_t entry_index,
+ uint32_t sig_index) {
+ if (!codemap()->has_instance()) {
+ // No instance. Rely on the information stored in the WasmModule.
+ // TODO(wasm): This is only needed for testing. Refactor testing to use
+ // the same paths as production.
+ InterpreterCode* code =
+ codemap()->GetIndirectCode(table_index, entry_index);
+ if (!code) return {ExternalCallResult::INVALID_FUNC};
+ if (code->function->sig_index != sig_index) {
+ // If not an exact match, we have to do a canonical check.
+ // TODO(titzer): make this faster with some kind of caching?
+ const WasmIndirectFunctionTable* table =
+ &module()->function_tables[table_index];
+ int function_key = table->map.Find(code->function->sig);
+ if (function_key < 0 ||
+ (function_key !=
+ table->map.Find(module()->signatures[sig_index]))) {
+ return {ExternalCallResult::SIGNATURE_MISMATCH};
+ }
+ }
+ return {ExternalCallResult::INTERNAL, code};
+ }
+
+ WasmCompiledModule* compiled_module =
+ codemap()->instance()->compiled_module();
+ Isolate* isolate = compiled_module->GetIsolate();
+
+ Code* target;
+ {
+ DisallowHeapAllocation no_gc;
+ // Get function to be called directly from the live instance to see latest
+ // changes to the tables.
+
+ // Canonicalize signature index.
+ // TODO(titzer): make this faster with some kind of caching?
+ const WasmIndirectFunctionTable* table =
+ &module()->function_tables[table_index];
+ FunctionSig* sig = module()->signatures[sig_index];
+ uint32_t canonical_sig_index = table->map.Find(sig);
+
+ // Check signature.
+ FixedArray* sig_tables = compiled_module->ptr_to_signature_tables();
+ if (table_index >= static_cast<uint32_t>(sig_tables->length())) {
+ return {ExternalCallResult::INVALID_FUNC};
+ }
+ FixedArray* sig_table =
+ FixedArray::cast(sig_tables->get(static_cast<int>(table_index)));
+ if (entry_index >= static_cast<uint32_t>(sig_table->length())) {
+ return {ExternalCallResult::INVALID_FUNC};
+ }
+ int found_sig =
+ Smi::cast(sig_table->get(static_cast<int>(entry_index)))->value();
+ if (static_cast<uint32_t>(found_sig) != canonical_sig_index) {
+ return {ExternalCallResult::SIGNATURE_MISMATCH};
+ }
+
+ // Get code object.
+ FixedArray* fun_tables = compiled_module->ptr_to_function_tables();
+ DCHECK_EQ(sig_tables->length(), fun_tables->length());
+ FixedArray* fun_table =
+ FixedArray::cast(fun_tables->get(static_cast<int>(table_index)));
+ DCHECK_EQ(sig_table->length(), fun_table->length());
+ target = Code::cast(fun_table->get(static_cast<int>(entry_index)));
+ }
+
+ // Call the code object. Use a new HandleScope to avoid leaking /
+ // accumulating handles in the outer scope.
+ HandleScope handle_scope(isolate);
+ FunctionSig* signature =
+ &codemap()->module()->signatures[table_index][sig_index];
+ return CallCodeObject(isolate, handle(target, isolate), signature);
+ }
+
+ inline Activation current_activation() {
+ return activations_.empty() ? Activation(0, 0) : activations_.back();
+ }
};
// Converters between WasmInterpreter::Thread and WasmInterpreter::ThreadImpl.
@@ -1753,9 +2241,10 @@ class ThreadImpl {
WasmInterpreter::Thread* ToThread(ThreadImpl* impl) {
return reinterpret_cast<WasmInterpreter::Thread*>(impl);
}
-static ThreadImpl* ToImpl(WasmInterpreter::Thread* thread) {
+ThreadImpl* ToImpl(WasmInterpreter::Thread* thread) {
return reinterpret_cast<ThreadImpl*>(thread);
}
+
} // namespace
//============================================================================
@@ -1767,18 +2256,19 @@ static ThreadImpl* ToImpl(WasmInterpreter::Thread* thread) {
WasmInterpreter::State WasmInterpreter::Thread::state() {
return ToImpl(this)->state();
}
-void WasmInterpreter::Thread::PushFrame(const WasmFunction* function,
+void WasmInterpreter::Thread::InitFrame(const WasmFunction* function,
WasmVal* args) {
- return ToImpl(this)->PushFrame(function, args);
+ ToImpl(this)->InitFrame(function, args);
}
-WasmInterpreter::State WasmInterpreter::Thread::Run() {
- return ToImpl(this)->Run();
-}
-WasmInterpreter::State WasmInterpreter::Thread::Step() {
- return ToImpl(this)->Step();
+WasmInterpreter::State WasmInterpreter::Thread::Run(int num_steps) {
+ return ToImpl(this)->Run(num_steps);
}
void WasmInterpreter::Thread::Pause() { return ToImpl(this)->Pause(); }
void WasmInterpreter::Thread::Reset() { return ToImpl(this)->Reset(); }
+WasmInterpreter::Thread::ExceptionHandlingResult
+WasmInterpreter::Thread::HandleException(Isolate* isolate) {
+ return ToImpl(this)->HandleException(isolate);
+}
pc_t WasmInterpreter::Thread::GetBreakpointPc() {
return ToImpl(this)->GetBreakpointPc();
}
@@ -1799,6 +2289,9 @@ InterpretedFrame WasmInterpreter::Thread::GetMutableFrame(int index) {
WasmVal WasmInterpreter::Thread::GetReturnValue(int index) {
return ToImpl(this)->GetReturnValue(index);
}
+TrapReason WasmInterpreter::Thread::GetTrapReason() {
+ return ToImpl(this)->GetTrapReason();
+}
bool WasmInterpreter::Thread::PossibleNondeterminism() {
return ToImpl(this)->PossibleNondeterminism();
}
@@ -1811,6 +2304,18 @@ void WasmInterpreter::Thread::AddBreakFlags(uint8_t flags) {
void WasmInterpreter::Thread::ClearBreakFlags() {
ToImpl(this)->ClearBreakFlags();
}
+uint32_t WasmInterpreter::Thread::NumActivations() {
+ return ToImpl(this)->NumActivations();
+}
+uint32_t WasmInterpreter::Thread::StartActivation() {
+ return ToImpl(this)->StartActivation();
+}
+void WasmInterpreter::Thread::FinishActivation(uint32_t id) {
+ ToImpl(this)->FinishActivation(id);
+}
+uint32_t WasmInterpreter::Thread::ActivationFrameBase(uint32_t id) {
+ return ToImpl(this)->ActivationFrameBase(id);
+}
//============================================================================
// The implementation details of the interpreter.
@@ -1824,28 +2329,27 @@ class WasmInterpreterInternals : public ZoneObject {
CodeMap codemap_;
ZoneVector<ThreadImpl> threads_;
- WasmInterpreterInternals(Zone* zone, const ModuleBytesEnv& env)
+ WasmInterpreterInternals(Isolate* isolate, Zone* zone,
+ const ModuleBytesEnv& env)
: instance_(env.module_env.instance),
module_bytes_(env.wire_bytes.start(), env.wire_bytes.end(), zone),
codemap_(
+ isolate,
env.module_env.instance ? env.module_env.instance->module : nullptr,
module_bytes_.data(), zone),
threads_(zone) {
threads_.emplace_back(zone, &codemap_, env.module_env.instance);
}
-
- void Delete() { threads_.clear(); }
};
//============================================================================
// Implementation of the public interface of the interpreter.
//============================================================================
-WasmInterpreter::WasmInterpreter(const ModuleBytesEnv& env,
- AccountingAllocator* allocator)
- : zone_(allocator, ZONE_NAME),
- internals_(new (&zone_) WasmInterpreterInternals(&zone_, env)) {}
+WasmInterpreter::WasmInterpreter(Isolate* isolate, const ModuleBytesEnv& env)
+ : zone_(isolate->allocator(), ZONE_NAME),
+ internals_(new (&zone_) WasmInterpreterInternals(isolate, &zone_, env)) {}
-WasmInterpreter::~WasmInterpreter() { internals_->Delete(); }
+WasmInterpreter::~WasmInterpreter() { internals_->~WasmInterpreterInternals(); }
void WasmInterpreter::Run() { internals_->threads_[0].Run(); }
@@ -1853,8 +2357,7 @@ void WasmInterpreter::Pause() { internals_->threads_[0].Pause(); }
bool WasmInterpreter::SetBreakpoint(const WasmFunction* function, pc_t pc,
bool enabled) {
- InterpreterCode* code = internals_->codemap_.FindCode(function);
- if (!code) return false;
+ InterpreterCode* code = internals_->codemap_.GetCode(function);
size_t size = static_cast<size_t>(code->end - code->start);
// Check bounds for {pc}.
if (pc < code->locals.encoded_size || pc >= size) return false;
@@ -1874,8 +2377,7 @@ bool WasmInterpreter::SetBreakpoint(const WasmFunction* function, pc_t pc,
}
bool WasmInterpreter::GetBreakpoint(const WasmFunction* function, pc_t pc) {
- InterpreterCode* code = internals_->codemap_.FindCode(function);
- if (!code) return false;
+ InterpreterCode* code = internals_->codemap_.GetCode(function);
size_t size = static_cast<size_t>(code->end - code->start);
// Check bounds for {pc}.
if (pc < code->locals.encoded_size || pc >= size) return false;
@@ -1888,6 +2390,10 @@ bool WasmInterpreter::SetTracing(const WasmFunction* function, bool enabled) {
return false;
}
+void WasmInterpreter::SetInstanceObject(WasmInstanceObject* instance) {
+ internals_->codemap_.SetInstanceObject(instance);
+}
+
int WasmInterpreter::GetThreadCount() {
return 1; // only one thread for now.
}
@@ -1910,14 +2416,14 @@ void WasmInterpreter::WriteMemory(size_t offset, WasmVal val) {
UNIMPLEMENTED();
}
-int WasmInterpreter::AddFunctionForTesting(const WasmFunction* function) {
- return internals_->codemap_.AddFunction(function, nullptr, nullptr);
+void WasmInterpreter::AddFunctionForTesting(const WasmFunction* function) {
+ internals_->codemap_.AddFunction(function, nullptr, nullptr);
}
-bool WasmInterpreter::SetFunctionCodeForTesting(const WasmFunction* function,
+void WasmInterpreter::SetFunctionCodeForTesting(const WasmFunction* function,
const byte* start,
const byte* end) {
- return internals_->codemap_.SetFunctionCode(function, start, end);
+ internals_->codemap_.SetFunctionCode(function, start, end);
}
ControlTransferMap WasmInterpreter::ComputeControlTransfersForTesting(
diff --git a/deps/v8/src/wasm/wasm-interpreter.h b/deps/v8/src/wasm/wasm-interpreter.h
index ab11a0883b..76845dc500 100644
--- a/deps/v8/src/wasm/wasm-interpreter.h
+++ b/deps/v8/src/wasm/wasm-interpreter.h
@@ -14,6 +14,8 @@ class AccountingAllocator;
}
namespace internal {
+class WasmInstanceObject;
+
namespace wasm {
// forward declarations.
@@ -110,14 +112,14 @@ class InterpretedFrame {
class V8_EXPORT_PRIVATE WasmInterpreter {
public:
// State machine for a Thread:
- // +---------------Run()-----------+
- // V |
- // STOPPED ---Run()--> RUNNING ------Pause()-----+-> PAUSED <------+
- // | | | / | |
- // | | +---- Breakpoint ---+ +-- Step() --+
- // | |
- // | +------------ Trap --------------> TRAPPED
- // +------------- Finish -------------> FINISHED
+ // +---------Run()/Step()--------+
+ // V |
+ // STOPPED ---Run()--> RUNNING ------Pause()-----+-> PAUSED
+ // ^ | | | | /
+ // +- HandleException -+ | | +--- Breakpoint ---+
+ // | |
+ // | +---------- Trap --------------> TRAPPED
+ // +----------- Finish -------------> FINISHED
enum State { STOPPED, RUNNING, PAUSED, FINISHED, TRAPPED };
// Tells a thread to pause after certain instructions.
@@ -134,20 +136,28 @@ class V8_EXPORT_PRIVATE WasmInterpreter {
Thread() = delete;
public:
+ enum ExceptionHandlingResult { HANDLED, UNWOUND };
+
// Execution control.
State state();
- void PushFrame(const WasmFunction* function, WasmVal* args);
- State Run();
- State Step();
+ void InitFrame(const WasmFunction* function, WasmVal* args);
+ // Pass -1 as num_steps to run till completion, pause or breakpoint.
+ State Run(int num_steps = -1);
+ State Step() { return Run(1); }
void Pause();
void Reset();
+ // Handle the pending exception in the passed isolate. Unwind the stack
+ // accordingly. Return whether the exception was handled inside wasm.
+ ExceptionHandlingResult HandleException(Isolate* isolate);
// Stack inspection and modification.
pc_t GetBreakpointPc();
+ // TODO(clemensh): Make this uint32_t.
int GetFrameCount();
const InterpretedFrame GetFrame(int index);
InterpretedFrame GetMutableFrame(int index);
WasmVal GetReturnValue(int index = 0);
+ TrapReason GetTrapReason();
// Returns true if the thread executed an instruction which may produce
// nondeterministic results, e.g. float div, float sqrt, and float mul,
@@ -164,9 +174,21 @@ class V8_EXPORT_PRIVATE WasmInterpreter {
void AddBreakFlags(uint8_t flags);
void ClearBreakFlags();
+
+ // Each thread can have multiple activations, each represented by a portion
+ // of the stack frames of this thread. StartActivation returns the id
+ // (counting from 0 up) of the started activation.
+ // Activations must be properly stacked, i.e. if FinishActivation is called,
+ // the given id must the the latest activation on the stack.
+ uint32_t NumActivations();
+ uint32_t StartActivation();
+ void FinishActivation(uint32_t activation_id);
+ // Return the frame base of the given activation, i.e. the number of frames
+ // when this activation was started.
+ uint32_t ActivationFrameBase(uint32_t activation_id);
};
- WasmInterpreter(const ModuleBytesEnv& env, AccountingAllocator* allocator);
+ WasmInterpreter(Isolate* isolate, const ModuleBytesEnv& env);
~WasmInterpreter();
//==========================================================================
@@ -185,6 +207,13 @@ class V8_EXPORT_PRIVATE WasmInterpreter {
// Enable or disable tracing for {function}. Return the previous state.
bool SetTracing(const WasmFunction* function, bool enabled);
+ // Set the associated wasm instance object.
+ // If the instance object has been set, some tables stored inside it are used
+ // instead of the tables stored in the WasmModule struct. This allows to call
+ // back and forth between the interpreter and outside code (JS or wasm
+ // compiled) without repeatedly copying information.
+ void SetInstanceObject(WasmInstanceObject*);
+
//==========================================================================
// Thread iteration and inspection.
//==========================================================================
@@ -201,11 +230,11 @@ class V8_EXPORT_PRIVATE WasmInterpreter {
//==========================================================================
// Testing functionality.
//==========================================================================
- // Manually adds a function to this interpreter, returning the index of the
- // function.
- int AddFunctionForTesting(const WasmFunction* function);
+ // Manually adds a function to this interpreter. The func_index of the
+ // function must match the current number of functions.
+ void AddFunctionForTesting(const WasmFunction* function);
// Manually adds code to the interpreter for the given function.
- bool SetFunctionCodeForTesting(const WasmFunction* function,
+ void SetFunctionCodeForTesting(const WasmFunction* function,
const byte* start, const byte* end);
// Computes the control transfers for the given bytecode. Used internally in
diff --git a/deps/v8/src/wasm/wasm-js.cc b/deps/v8/src/wasm/wasm-js.cc
index 281c4e82e6..ab6c71fd37 100644
--- a/deps/v8/src/wasm/wasm-js.cc
+++ b/deps/v8/src/wasm/wasm-js.cc
@@ -32,10 +32,6 @@ namespace v8 {
namespace {
-#define RANGE_ERROR_MSG \
- "Wasm compilation exceeds internal limits in this context for the provided " \
- "arguments"
-
// TODO(wasm): move brand check to the respective types, and don't throw
// in it, rather, use a provided ErrorThrower, or let caller handle it.
static bool HasBrand(i::Handle<i::Object> value, i::Handle<i::Symbol> sym) {
@@ -78,44 +74,6 @@ i::MaybeHandle<i::WasmModuleObject> GetFirstArgumentAsModule(
v8::Utils::OpenHandle(*module_obj));
}
-bool IsCompilationAllowed(i::Isolate* isolate, ErrorThrower* thrower,
- v8::Local<v8::Value> source, bool is_async) {
- // Allow caller to do one final check on thrower state, rather than
- // one at each step. No information is lost - failure reason is captured
- // in the thrower state.
- if (thrower->error()) return false;
-
- AllowWasmCompileCallback callback = isolate->allow_wasm_compile_callback();
- if (callback != nullptr &&
- !callback(reinterpret_cast<v8::Isolate*>(isolate), source, is_async)) {
- thrower->RangeError(RANGE_ERROR_MSG);
- return false;
- }
- return true;
-}
-
-bool IsInstantiationAllowed(i::Isolate* isolate, ErrorThrower* thrower,
- v8::Local<v8::Value> module_or_bytes,
- i::MaybeHandle<i::JSReceiver> ffi, bool is_async) {
- // Allow caller to do one final check on thrower state, rather than
- // one at each step. No information is lost - failure reason is captured
- // in the thrower state.
- if (thrower->error()) return false;
- v8::MaybeLocal<v8::Value> v8_ffi;
- if (!ffi.is_null()) {
- v8_ffi = v8::Local<v8::Value>::Cast(Utils::ToLocal(ffi.ToHandleChecked()));
- }
- AllowWasmInstantiateCallback callback =
- isolate->allow_wasm_instantiate_callback();
- if (callback != nullptr &&
- !callback(reinterpret_cast<v8::Isolate*>(isolate), module_or_bytes,
- v8_ffi, is_async)) {
- thrower->RangeError(RANGE_ERROR_MSG);
- return false;
- }
- return true;
-}
-
i::wasm::ModuleWireBytes GetFirstArgumentAsBytes(
const v8::FunctionCallbackInfo<v8::Value>& args, ErrorThrower* thrower) {
if (args.Length() < 1) {
@@ -176,6 +134,8 @@ i::MaybeHandle<i::JSReceiver> GetSecondArgumentAsImports(
void WebAssemblyCompile(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ if (i_isolate->wasm_compile_callback()(args)) return;
+
HandleScope scope(isolate);
ErrorThrower thrower(i_isolate, "WebAssembly.compile()");
@@ -186,11 +146,12 @@ void WebAssemblyCompile(const v8::FunctionCallbackInfo<v8::Value>& args) {
return_value.Set(resolver->GetPromise());
auto bytes = GetFirstArgumentAsBytes(args, &thrower);
- if (!IsCompilationAllowed(i_isolate, &thrower, args[0], true)) {
- resolver->Reject(context, Utils::ToLocal(thrower.Reify()));
+ if (thrower.error()) {
+ auto maybe = resolver->Reject(context, Utils::ToLocal(thrower.Reify()));
+ CHECK_IMPLIES(!maybe.FromMaybe(false),
+ i_isolate->has_scheduled_exception());
return;
}
- DCHECK(!thrower.error());
i::Handle<i::JSPromise> promise = Utils::OpenHandle(*resolver->GetPromise());
i::wasm::AsyncCompile(i_isolate, promise, bytes);
}
@@ -219,13 +180,16 @@ void WebAssemblyValidate(const v8::FunctionCallbackInfo<v8::Value>& args) {
void WebAssemblyModule(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ if (i_isolate->wasm_module_callback()(args)) return;
+
HandleScope scope(isolate);
ErrorThrower thrower(i_isolate, "WebAssembly.Module()");
auto bytes = GetFirstArgumentAsBytes(args, &thrower);
- if (!IsCompilationAllowed(i_isolate, &thrower, args[0], false)) return;
- DCHECK(!thrower.error());
+ if (thrower.error()) {
+ return;
+ }
i::MaybeHandle<i::Object> module_obj =
i::wasm::SyncCompile(i_isolate, &thrower, bytes);
if (module_obj.is_null()) return;
@@ -294,17 +258,15 @@ void WebAssemblyInstance(const v8::FunctionCallbackInfo<v8::Value>& args) {
HandleScope scope(args.GetIsolate());
v8::Isolate* isolate = args.GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ if (i_isolate->wasm_instance_callback()(args)) return;
+
ErrorThrower thrower(i_isolate, "WebAssembly.Instance()");
auto maybe_module = GetFirstArgumentAsModule(args, &thrower);
if (thrower.error()) return;
auto maybe_imports = GetSecondArgumentAsImports(args, &thrower);
- if (!IsInstantiationAllowed(i_isolate, &thrower, args[0], maybe_imports,
- false)) {
- return;
- }
- DCHECK(!thrower.error());
+ if (thrower.error()) return;
i::MaybeHandle<i::Object> instance_object = i::wasm::SyncInstantiate(
i_isolate, &thrower, maybe_module.ToHandleChecked(), maybe_imports,
@@ -319,6 +281,8 @@ void WebAssemblyInstance(const v8::FunctionCallbackInfo<v8::Value>& args) {
void WebAssemblyInstantiate(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ if (i_isolate->wasm_instantiate_callback()(args)) return;
+
ErrorThrower thrower(i_isolate, "WebAssembly.instantiate()");
HandleScope scope(isolate);
@@ -335,7 +299,9 @@ void WebAssemblyInstantiate(const v8::FunctionCallbackInfo<v8::Value>& args) {
thrower.TypeError(
"Argument 0 must be provided and must be either a buffer source or a "
"WebAssembly.Module object");
- resolver->Reject(context, Utils::ToLocal(thrower.Reify()));
+ auto maybe = resolver->Reject(context, Utils::ToLocal(thrower.Reify()));
+ CHECK_IMPLIES(!maybe.FromMaybe(false),
+ i_isolate->has_scheduled_exception());
return;
}
@@ -343,18 +309,17 @@ void WebAssemblyInstantiate(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (!first_arg->IsJSObject()) {
thrower.TypeError(
"Argument 0 must be a buffer source or a WebAssembly.Module object");
- resolver->Reject(context, Utils::ToLocal(thrower.Reify()));
+ auto maybe = resolver->Reject(context, Utils::ToLocal(thrower.Reify()));
+ CHECK_IMPLIES(!maybe.FromMaybe(false),
+ i_isolate->has_scheduled_exception());
return;
}
auto maybe_imports = GetSecondArgumentAsImports(args, &thrower);
if (thrower.error()) {
- resolver->Reject(context, Utils::ToLocal(thrower.Reify()));
- return;
- }
- if (!IsInstantiationAllowed(i_isolate, &thrower, args[0], maybe_imports,
- true)) {
- resolver->Reject(context, Utils::ToLocal(thrower.Reify()));
+ auto maybe = resolver->Reject(context, Utils::ToLocal(thrower.Reify()));
+ CHECK_IMPLIES(!maybe.FromMaybe(false),
+ i_isolate->has_scheduled_exception());
return;
}
i::Handle<i::JSPromise> promise = Utils::OpenHandle(*resolver->GetPromise());
@@ -367,7 +332,9 @@ void WebAssemblyInstantiate(const v8::FunctionCallbackInfo<v8::Value>& args) {
// WebAssembly.instantiate(bytes, imports) -> {module, instance}
auto bytes = GetFirstArgumentAsBytes(args, &thrower);
if (thrower.error()) {
- resolver->Reject(context, Utils::ToLocal(thrower.Reify()));
+ auto maybe = resolver->Reject(context, Utils::ToLocal(thrower.Reify()));
+ CHECK_IMPLIES(!maybe.FromMaybe(false),
+ i_isolate->has_scheduled_exception());
return;
}
i::wasm::AsyncCompileAndInstantiate(i_isolate, promise, bytes,
@@ -684,13 +651,14 @@ void WebAssemblyMemoryGrow(const v8::FunctionCallbackInfo<v8::Value>& args) {
: "maximum memory size exceeded");
return;
}
- int32_t ret = i::wasm::GrowWebAssemblyMemory(
- i_isolate, receiver, static_cast<uint32_t>(delta_size));
+ int32_t ret = i::WasmMemoryObject::Grow(i_isolate, receiver,
+ static_cast<uint32_t>(delta_size));
if (ret == -1) {
thrower.RangeError("Unable to grow instance memory.");
return;
}
- i::wasm::DetachWebAssemblyMemoryBuffer(i_isolate, old_buffer);
+ bool free_memory = (delta_size != 0);
+ i::wasm::DetachWebAssemblyMemoryBuffer(i_isolate, old_buffer, free_memory);
v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
return_value.Set(ret);
}
@@ -770,16 +738,16 @@ void WasmJs::Install(Isolate* isolate) {
Handle<Map> prev_map = Handle<Map>(context->sloppy_function_map(), isolate);
InstanceType instance_type = prev_map->instance_type();
- int internal_fields = JSObject::GetInternalFieldCount(*prev_map);
- CHECK_EQ(0, internal_fields);
+ int embedder_fields = JSObject::GetEmbedderFieldCount(*prev_map);
+ CHECK_EQ(0, embedder_fields);
int pre_allocated =
prev_map->GetInObjectProperties() - prev_map->unused_property_fields();
int instance_size = 0;
int in_object_properties = 0;
- int wasm_internal_fields = internal_fields + 1 // module instance object
- + 1 // function arity
- + 1; // function signature
- JSFunction::CalculateInstanceSizeHelper(instance_type, wasm_internal_fields,
+ int wasm_embedder_fields = embedder_fields + 1 // module instance object
+ + 1 // function arity
+ + 1; // function signature
+ JSFunction::CalculateInstanceSizeHelper(instance_type, wasm_embedder_fields,
0, &instance_size,
&in_object_properties);
diff --git a/deps/v8/src/wasm/wasm-limits.h b/deps/v8/src/wasm/wasm-limits.h
index bf657a8c5b..c2ecf61657 100644
--- a/deps/v8/src/wasm/wasm-limits.h
+++ b/deps/v8/src/wasm/wasm-limits.h
@@ -5,41 +5,48 @@
#ifndef V8_WASM_WASM_LIMITS_H_
#define V8_WASM_WASM_LIMITS_H_
+#include <cstddef>
+#include <cstdint>
+#include <limits>
+
namespace v8 {
namespace internal {
namespace wasm {
// The following limits are imposed by V8 on WebAssembly modules.
// The limits are agreed upon with other engines for consistency.
-const size_t kV8MaxWasmTypes = 1000000;
-const size_t kV8MaxWasmFunctions = 1000000;
-const size_t kV8MaxWasmImports = 100000;
-const size_t kV8MaxWasmExports = 100000;
-const size_t kV8MaxWasmGlobals = 1000000;
-const size_t kV8MaxWasmDataSegments = 100000;
+constexpr size_t kV8MaxWasmTypes = 1000000;
+constexpr size_t kV8MaxWasmFunctions = 1000000;
+constexpr size_t kV8MaxWasmImports = 100000;
+constexpr size_t kV8MaxWasmExports = 100000;
+constexpr size_t kV8MaxWasmGlobals = 1000000;
+constexpr size_t kV8MaxWasmDataSegments = 100000;
// Don't use this limit directly, but use the value of FLAG_wasm_max_mem_pages.
-const size_t kV8MaxWasmMemoryPages = 16384; // = 1 GiB
-const size_t kV8MaxWasmStringSize = 100000;
-const size_t kV8MaxWasmModuleSize = 1024 * 1024 * 1024; // = 1 GiB
-const size_t kV8MaxWasmFunctionSize = 128 * 1024;
-const size_t kV8MaxWasmFunctionLocals = 50000;
-const size_t kV8MaxWasmFunctionParams = 1000;
-const size_t kV8MaxWasmFunctionMultiReturns = 1000;
-const size_t kV8MaxWasmFunctionReturns = 1;
+constexpr size_t kV8MaxWasmMemoryPages = 16384; // = 1 GiB
+constexpr size_t kV8MaxWasmStringSize = 100000;
+constexpr size_t kV8MaxWasmModuleSize = 1024 * 1024 * 1024; // = 1 GiB
+constexpr size_t kV8MaxWasmFunctionSize = 128 * 1024;
+constexpr size_t kV8MaxWasmFunctionLocals = 50000;
+constexpr size_t kV8MaxWasmFunctionParams = 1000;
+constexpr size_t kV8MaxWasmFunctionMultiReturns = 1000;
+constexpr size_t kV8MaxWasmFunctionReturns = 1;
// Don't use this limit directly, but use the value of FLAG_wasm_max_table_size.
-const size_t kV8MaxWasmTableSize = 10000000;
-const size_t kV8MaxWasmTableEntries = 10000000;
-const size_t kV8MaxWasmTables = 1;
-const size_t kV8MaxWasmMemories = 1;
+constexpr size_t kV8MaxWasmTableSize = 10000000;
+constexpr size_t kV8MaxWasmTableEntries = 10000000;
+constexpr size_t kV8MaxWasmTables = 1;
+constexpr size_t kV8MaxWasmMemories = 1;
-const size_t kSpecMaxWasmMemoryPages = 65536;
-const size_t kSpecMaxWasmTableSize = 0xFFFFFFFFu;
+constexpr size_t kSpecMaxWasmMemoryPages = 65536;
+constexpr size_t kSpecMaxWasmTableSize = 0xFFFFFFFFu;
-const uint64_t kWasmMaxHeapOffset =
+constexpr uint64_t kWasmMaxHeapOffset =
static_cast<uint64_t>(
std::numeric_limits<uint32_t>::max()) // maximum base value
+ std::numeric_limits<uint32_t>::max(); // maximum index value
+// Limit the control stack size of the C++ wasm interpreter.
+constexpr size_t kV8MaxWasmInterpretedStackSize = 64 * 1024;
+
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/wasm/wasm-macro-gen.h b/deps/v8/src/wasm/wasm-macro-gen.h
index 931ad92373..a0b083bbdc 100644
--- a/deps/v8/src/wasm/wasm-macro-gen.h
+++ b/deps/v8/src/wasm/wasm-macro-gen.h
@@ -619,36 +619,6 @@ class LocalDeclEncoder {
#define WASM_GROW_MEMORY(x) x, kExprGrowMemory, 0
#define WASM_MEMORY_SIZE kExprMemorySize, 0
-//------------------------------------------------------------------------------
-// Simd Operations.
-//------------------------------------------------------------------------------
-// TODO(bbudge) Migrate these into tests.
-#define WASM_SIMD_F32x4_SPLAT(x) \
- x, kSimdPrefix, static_cast<byte>(kExprF32x4Splat)
-#define WASM_SIMD_F32x4_EXTRACT_LANE(lane, x) \
- x, kSimdPrefix, static_cast<byte>(kExprF32x4ExtractLane), \
- static_cast<byte>(lane)
-#define WASM_SIMD_F32x4_REPLACE_LANE(lane, x, y) \
- x, y, kSimdPrefix, static_cast<byte>(kExprF32x4ReplaceLane), \
- static_cast<byte>(lane)
-#define WASM_SIMD_F32x4_ADD(x, y) \
- x, y, kSimdPrefix, static_cast<byte>(kExprF32x4Add)
-#define WASM_SIMD_F32x4_SUB(x, y) \
- x, y, kSimdPrefix, static_cast<byte>(kExprF32x4Sub)
-
-#define WASM_SIMD_I32x4_SPLAT(x) \
- x, kSimdPrefix, static_cast<byte>(kExprI32x4Splat)
-#define WASM_SIMD_I32x4_EXTRACT_LANE(lane, x) \
- x, kSimdPrefix, static_cast<byte>(kExprI32x4ExtractLane), \
- static_cast<byte>(lane)
-#define WASM_SIMD_I32x4_REPLACE_LANE(lane, x, y) \
- x, y, kSimdPrefix, static_cast<byte>(kExprI32x4ReplaceLane), \
- static_cast<byte>(lane)
-#define WASM_SIMD_I32x4_ADD(x, y) \
- x, y, kSimdPrefix, static_cast<byte>(kExprI32x4Add)
-#define WASM_SIMD_I32x4_SUB(x, y) \
- x, y, kSimdPrefix, static_cast<byte>(kExprI32x4Sub)
-
#define SIG_ENTRY_v_v kWasmFunctionTypeForm, 0, 0
#define SIZEOF_SIG_ENTRY_v_v 3
diff --git a/deps/v8/src/wasm/wasm-module-builder.cc b/deps/v8/src/wasm/wasm-module-builder.cc
index a9c724a78d..a3d52eba79 100644
--- a/deps/v8/src/wasm/wasm-module-builder.cc
+++ b/deps/v8/src/wasm/wasm-module-builder.cc
@@ -34,7 +34,7 @@ namespace wasm {
// Emit a section code and the size as a padded varint that can be patched
// later.
-size_t EmitSection(WasmSectionCode code, ZoneBuffer& buffer) {
+size_t EmitSection(SectionCode code, ZoneBuffer& buffer) {
// Emit the section code.
buffer.write_u8(code);
@@ -185,6 +185,18 @@ void WasmFunctionBuilder::SetAsmFunctionStartPosition(int position) {
last_asm_source_position_ = position;
}
+void WasmFunctionBuilder::StashCode(std::vector<byte>* dst, size_t position) {
+ if (dst == nullptr) {
+ body_.resize(position);
+ return;
+ }
+ DCHECK_LE(position, body_.size());
+ size_t len = body_.size() - position;
+ dst->resize(len);
+ memcpy(dst->data(), body_.data() + position, len);
+ body_.resize(position);
+}
+
void WasmFunctionBuilder::WriteSignature(ZoneBuffer& buffer) const {
buffer.write_u32v(signature_index_);
}
@@ -194,8 +206,7 @@ void WasmFunctionBuilder::WriteExports(ZoneBuffer& buffer) const {
buffer.write_size(name.size());
buffer.write(reinterpret_cast<const byte*>(name.data()), name.size());
buffer.write_u8(kExternalFunction);
- buffer.write_u32v(func_index_ +
- static_cast<uint32_t>(builder_->imports_.size()));
+ buffer.write_size(func_index_ + builder_->function_imports_.size());
}
}
@@ -212,7 +223,8 @@ void WasmFunctionBuilder::WriteBody(ZoneBuffer& buffer) const {
for (DirectCallIndex call : direct_calls_) {
buffer.patch_u32v(
base + call.offset,
- call.direct_index + static_cast<uint32_t>(builder_->imports_.size()));
+ call.direct_index +
+ static_cast<uint32_t>(builder_->function_imports_.size()));
}
}
}
@@ -237,7 +249,8 @@ void WasmFunctionBuilder::WriteAsmWasmOffsetTable(ZoneBuffer& buffer) const {
WasmModuleBuilder::WasmModuleBuilder(Zone* zone)
: zone_(zone),
signatures_(zone),
- imports_(zone),
+ function_imports_(zone),
+ global_imports_(zone),
functions_(zone),
data_segments_(zone),
indirect_functions_(zone),
@@ -303,8 +316,15 @@ void WasmModuleBuilder::SetIndirectFunction(uint32_t indirect,
uint32_t WasmModuleBuilder::AddImport(const char* name, int name_length,
FunctionSig* sig) {
- imports_.push_back({AddSignature(sig), name, name_length});
- return static_cast<uint32_t>(imports_.size() - 1);
+ function_imports_.push_back({AddSignature(sig), name, name_length});
+ return static_cast<uint32_t>(function_imports_.size() - 1);
+}
+
+uint32_t WasmModuleBuilder::AddGlobalImport(const char* name, int name_length,
+ ValueType type) {
+ global_imports_.push_back(
+ {WasmOpcodes::ValueTypeCodeFor(type), name, name_length});
+ return static_cast<uint32_t>(global_imports_.size() - 1);
}
void WasmModuleBuilder::MarkStartFunction(WasmFunctionBuilder* function) {
@@ -334,22 +354,31 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer& buffer) const {
for (FunctionSig* sig : signatures_) {
buffer.write_u8(kWasmFunctionTypeForm);
buffer.write_size(sig->parameter_count());
- for (size_t j = 0; j < sig->parameter_count(); j++) {
- buffer.write_u8(WasmOpcodes::ValueTypeCodeFor(sig->GetParam(j)));
+ for (auto param : sig->parameters()) {
+ buffer.write_u8(WasmOpcodes::ValueTypeCodeFor(param));
}
buffer.write_size(sig->return_count());
- for (size_t j = 0; j < sig->return_count(); j++) {
- buffer.write_u8(WasmOpcodes::ValueTypeCodeFor(sig->GetReturn(j)));
+ for (auto ret : sig->returns()) {
+ buffer.write_u8(WasmOpcodes::ValueTypeCodeFor(ret));
}
}
FixupSection(buffer, start);
}
// == Emit imports ===========================================================
- if (imports_.size() > 0) {
+ if (global_imports_.size() + function_imports_.size() > 0) {
size_t start = EmitSection(kImportSectionCode, buffer);
- buffer.write_size(imports_.size());
- for (auto import : imports_) {
+ buffer.write_size(global_imports_.size() + function_imports_.size());
+ for (auto import : global_imports_) {
+ buffer.write_u32v(0); // module name length
+ buffer.write_u32v(import.name_length); // field name length
+ buffer.write(reinterpret_cast<const byte*>(import.name), // field name
+ import.name_length);
+ buffer.write_u8(kExternalGlobal);
+ buffer.write_u8(import.type_code);
+ buffer.write_u8(0); // immutable
+ }
+ for (auto import : function_imports_) {
buffer.write_u32v(0); // module name length
buffer.write_u32v(import.name_length); // field name length
buffer.write(reinterpret_cast<const byte*>(import.name), // field name
@@ -361,14 +390,14 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer& buffer) const {
}
// == Emit function signatures ===============================================
- bool has_names = false;
+ uint32_t num_function_names = 0;
if (functions_.size() > 0) {
size_t start = EmitSection(kFunctionSectionCode, buffer);
buffer.write_size(functions_.size());
for (auto function : functions_) {
function->WriteSignature(buffer);
- exports += function->exported_names_.size();
- if (function->name_.size() > 0) has_names = true;
+ exports += static_cast<uint32_t>(function->exported_names_.size());
+ if (!function->name_.empty()) ++num_function_names;
}
FixupSection(buffer, start);
}
@@ -477,8 +506,7 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer& buffer) const {
// == emit start function index ==============================================
if (start_function_index_ >= 0) {
size_t start = EmitSection(kStartSectionCode, buffer);
- buffer.write_u32v(start_function_index_ +
- static_cast<uint32_t>(imports_.size()));
+ buffer.write_size(start_function_index_ + function_imports_.size());
FixupSection(buffer, start);
}
@@ -493,7 +521,7 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer& buffer) const {
buffer.write_size(indirect_functions_.size()); // element count
for (auto index : indirect_functions_) {
- buffer.write_u32v(index + static_cast<uint32_t>(imports_.size()));
+ buffer.write_size(index + function_imports_.size());
}
FixupSection(buffer, start);
@@ -526,7 +554,7 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer& buffer) const {
}
// == Emit names =============================================================
- if (has_names) {
+ if (num_function_names > 0 || !function_imports_.empty()) {
// Emit the section code.
buffer.write_u8(kUnknownSectionCode);
// Emit a placeholder for the length.
@@ -534,19 +562,37 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer& buffer) const {
// Emit the section string.
buffer.write_size(4);
buffer.write(reinterpret_cast<const byte*>("name"), 4);
- // Emit the names.
- size_t count = functions_.size() + imports_.size();
- buffer.write_size(count);
- for (size_t i = 0; i < imports_.size(); i++) {
- buffer.write_u8(0); // empty name for import
- buffer.write_u8(0); // no local variables
+ // Emit a subsection for the function names.
+ buffer.write_u8(NameSectionType::kFunction);
+ // Emit a placeholder for the subsection length.
+ size_t functions_start = buffer.reserve_u32v();
+ // Emit the function names.
+ // Imports are always named.
+ uint32_t num_imports = static_cast<uint32_t>(function_imports_.size());
+ buffer.write_size(num_imports + num_function_names);
+ uint32_t function_index = 0;
+ for (; function_index < num_imports; ++function_index) {
+ const WasmFunctionImport* import = &function_imports_[function_index];
+ DCHECK_NOT_NULL(import->name);
+ buffer.write_u32v(function_index);
+ uint32_t name_len = static_cast<uint32_t>(import->name_length);
+ buffer.write_u32v(name_len);
+ buffer.write(reinterpret_cast<const byte*>(import->name), name_len);
}
- for (auto function : functions_) {
- buffer.write_size(function->name_.size());
- buffer.write(reinterpret_cast<const byte*>(function->name_.data()),
- function->name_.size());
- buffer.write_u8(0);
+ if (num_function_names > 0) {
+ for (auto function : functions_) {
+ DCHECK_EQ(function_index,
+ function->func_index() + function_imports_.size());
+ if (!function->name_.empty()) {
+ buffer.write_u32v(function_index);
+ buffer.write_size(function->name_.size());
+ buffer.write(reinterpret_cast<const byte*>(function->name_.data()),
+ function->name_.size());
+ }
+ ++function_index;
+ }
}
+ FixupSection(buffer, functions_start);
FixupSection(buffer, start);
}
}
diff --git a/deps/v8/src/wasm/wasm-module-builder.h b/deps/v8/src/wasm/wasm-module-builder.h
index c6903cd953..61dd269020 100644
--- a/deps/v8/src/wasm/wasm-module-builder.h
+++ b/deps/v8/src/wasm/wasm-module-builder.h
@@ -139,6 +139,10 @@ class V8_EXPORT_PRIVATE WasmFunctionBuilder : public ZoneObject {
void AddAsmWasmOffset(int call_position, int to_number_position);
void SetAsmFunctionStartPosition(int position);
+ size_t GetPosition() const { return body_.size(); }
+ void FixupByte(size_t position, byte value) { body_[position] = value; }
+ void StashCode(std::vector<byte>* dst, size_t position);
+
void WriteSignature(ZoneBuffer& buffer) const;
void WriteExports(ZoneBuffer& buffer) const;
void WriteBody(ZoneBuffer& buffer) const;
@@ -223,12 +227,13 @@ class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject {
// Building methods.
uint32_t AddImport(const char* name, int name_length, FunctionSig* sig);
void SetImportName(uint32_t index, const char* name, int name_length) {
- imports_[index].name = name;
- imports_[index].name_length = name_length;
+ function_imports_[index].name = name;
+ function_imports_[index].name_length = name_length;
}
WasmFunctionBuilder* AddFunction(FunctionSig* sig = nullptr);
uint32_t AddGlobal(ValueType type, bool exported, bool mutability = true,
const WasmInitExpr& init = WasmInitExpr());
+ uint32_t AddGlobalImport(const char* name, int name_length, ValueType type);
void AddDataSegment(const byte* data, uint32_t size, uint32_t dest);
uint32_t AddSignature(FunctionSig* sig);
uint32_t AllocateIndirectFunctions(uint32_t count);
@@ -257,6 +262,12 @@ class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject {
int name_length;
};
+ struct WasmGlobalImport {
+ ValueTypeCode type_code;
+ const char* name;
+ int name_length;
+ };
+
struct WasmGlobal {
ValueType type;
bool exported;
@@ -272,7 +283,8 @@ class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject {
friend class WasmFunctionBuilder;
Zone* zone_;
ZoneVector<FunctionSig*> signatures_;
- ZoneVector<WasmFunctionImport> imports_;
+ ZoneVector<WasmFunctionImport> function_imports_;
+ ZoneVector<WasmGlobalImport> global_imports_;
ZoneVector<WasmFunctionBuilder*> functions_;
ZoneVector<WasmDataSegment> data_segments_;
ZoneVector<uint32_t> indirect_functions_;
diff --git a/deps/v8/src/wasm/wasm-module.cc b/deps/v8/src/wasm/wasm-module.cc
index 9df236fa9e..65cd79f9ee 100644
--- a/deps/v8/src/wasm/wasm-module.cc
+++ b/deps/v8/src/wasm/wasm-module.cc
@@ -10,10 +10,12 @@
#include "src/code-stubs.h"
#include "src/compiler/wasm-compiler.h"
#include "src/debug/interface-types.h"
+#include "src/frames-inl.h"
#include "src/objects.h"
#include "src/property-descriptor.h"
#include "src/simulator.h"
#include "src/snapshot/snapshot.h"
+#include "src/trap-handler/trap-handler.h"
#include "src/v8.h"
#include "src/asmjs/asm-wasm-builder.h"
@@ -40,6 +42,11 @@ namespace base = v8::base;
instance->PrintInstancesChain(); \
} while (false)
+#define TRACE_COMPILE(...) \
+ do { \
+ if (FLAG_trace_wasm_compiler) PrintF(__VA_ARGS__); \
+ } while (false)
+
namespace {
static const int kInvalidSigIndex = -1;
@@ -66,16 +73,6 @@ static void MemoryFinalizer(const v8::WeakCallbackInfo<void>& data) {
GlobalHandles::Destroy(reinterpret_cast<Object**>(p));
}
-#if V8_TARGET_ARCH_64_BIT
-const bool kGuardRegionsSupported = true;
-#else
-const bool kGuardRegionsSupported = false;
-#endif
-
-bool EnableGuardRegions() {
- return FLAG_wasm_guard_pages && kGuardRegionsSupported;
-}
-
static void RecordStats(Isolate* isolate, Code* code) {
isolate->counters()->wasm_generated_code_size()->Increment(code->body_size());
isolate->counters()->wasm_reloc_size()->Increment(
@@ -182,8 +179,10 @@ class JSToWasmWrapperCache {
Code::GetCodeFromTargetAddress(it.rinfo()->target_address());
if (target->kind() == Code::WASM_FUNCTION ||
target->kind() == Code::WASM_TO_JS_FUNCTION ||
- target->builtin_index() == Builtins::kIllegal) {
- it.rinfo()->set_target_address(wasm_code->instruction_start());
+ target->builtin_index() == Builtins::kIllegal ||
+ target->builtin_index() == Builtins::kWasmCompileLazy) {
+ it.rinfo()->set_target_address(isolate,
+ wasm_code->instruction_start());
break;
}
}
@@ -205,6 +204,101 @@ class JSToWasmWrapperCache {
std::vector<Handle<Code>> code_cache_;
};
+// Ensure that the code object in <code_table> at offset <func_index> has
+// deoptimization data attached. This is needed for lazy compile stubs which are
+// called from JS_TO_WASM functions or via exported function tables. The deopt
+// data is used to determine which function this lazy compile stub belongs to.
+Handle<Code> EnsureExportedLazyDeoptData(Isolate* isolate,
+ Handle<WasmInstanceObject> instance,
+ Handle<FixedArray> code_table,
+ int func_index) {
+ Handle<Code> code(Code::cast(code_table->get(func_index)), isolate);
+ if (code->builtin_index() != Builtins::kWasmCompileLazy) {
+ // No special deopt data needed for compiled functions, and imported
+ // functions, which map to Illegal at this point (they get compiled at
+ // instantiation time).
+ DCHECK(code->kind() == Code::WASM_FUNCTION ||
+ code->kind() == Code::WASM_TO_JS_FUNCTION ||
+ code->builtin_index() == Builtins::kIllegal);
+ return code;
+ }
+ // deopt_data:
+ // #0: weak instance
+ // #1: func_index
+ // might be extended later for table exports (see
+ // EnsureTableExportLazyDeoptData).
+ Handle<FixedArray> deopt_data(code->deoptimization_data());
+ DCHECK_EQ(0, deopt_data->length() % 2);
+ if (deopt_data->length() == 0) {
+ code = isolate->factory()->CopyCode(code);
+ code_table->set(func_index, *code);
+ deopt_data = isolate->factory()->NewFixedArray(2, TENURED);
+ code->set_deoptimization_data(*deopt_data);
+ if (!instance.is_null()) {
+ Handle<WeakCell> weak_instance =
+ isolate->factory()->NewWeakCell(instance);
+ deopt_data->set(0, *weak_instance);
+ }
+ deopt_data->set(1, Smi::FromInt(func_index));
+ }
+ DCHECK_IMPLIES(!instance.is_null(),
+ WeakCell::cast(code->deoptimization_data()->get(0))->value() ==
+ *instance);
+ DCHECK_EQ(func_index,
+ Smi::cast(code->deoptimization_data()->get(1))->value());
+ return code;
+}
+
+// Ensure that the code object in <code_table> at offset <func_index> has
+// deoptimization data attached. This is needed for lazy compile stubs which are
+// called from JS_TO_WASM functions or via exported function tables. The deopt
+// data is used to determine which function this lazy compile stub belongs to.
+Handle<Code> EnsureTableExportLazyDeoptData(
+ Isolate* isolate, Handle<WasmInstanceObject> instance,
+ Handle<FixedArray> code_table, int func_index,
+ Handle<FixedArray> export_table, int export_index,
+ std::unordered_map<uint32_t, uint32_t>& table_export_count) {
+ Handle<Code> code =
+ EnsureExportedLazyDeoptData(isolate, instance, code_table, func_index);
+ if (code->builtin_index() != Builtins::kWasmCompileLazy) return code;
+
+ // deopt_data:
+ // #0: weak instance
+ // #1: func_index
+ // [#2: export table
+ // #3: export table index]
+ // [#4: export table
+ // #5: export table index]
+ // ...
+ // table_export_count counts down and determines the index for the new export
+ // table entry.
+ auto table_export_entry = table_export_count.find(func_index);
+ DCHECK(table_export_entry != table_export_count.end());
+ DCHECK_LT(0, table_export_entry->second);
+ uint32_t this_idx = 2 * table_export_entry->second;
+ --table_export_entry->second;
+ Handle<FixedArray> deopt_data(code->deoptimization_data());
+ DCHECK_EQ(0, deopt_data->length() % 2);
+ if (deopt_data->length() == 2) {
+ // Then only the "header" (#0 and #1) exists. Extend for the export table
+ // entries (make space for this_idx + 2 elements).
+ deopt_data = isolate->factory()->CopyFixedArrayAndGrow(deopt_data, this_idx,
+ TENURED);
+ code->set_deoptimization_data(*deopt_data);
+ }
+ DCHECK_LE(this_idx + 2, deopt_data->length());
+ DCHECK(deopt_data->get(this_idx)->IsUndefined(isolate));
+ DCHECK(deopt_data->get(this_idx + 1)->IsUndefined(isolate));
+ deopt_data->set(this_idx, *export_table);
+ deopt_data->set(this_idx + 1, Smi::FromInt(export_index));
+ return code;
+}
+
+bool compile_lazy(const WasmModule* module) {
+ return FLAG_wasm_lazy_compilation ||
+ (FLAG_asm_wasm_lazy_compilation && module->is_asm_js());
+}
+
// A helper for compiling an entire module.
class CompilationHelper {
public:
@@ -231,6 +325,7 @@ class CompilationHelper {
std::queue<compiler::WasmCompilationUnit*> executed_units_;
base::Mutex result_mutex_;
base::AtomicNumber<size_t> next_unit_;
+ size_t num_background_tasks_ = 0;
// Run by each compilation task and by the main thread.
bool FetchAndExecuteCompilationUnit() {
@@ -246,34 +341,39 @@ class CompilationHelper {
}
compiler::WasmCompilationUnit* unit = compilation_units_.at(index);
- if (unit != nullptr) {
- unit->ExecuteCompilation();
- base::LockGuard<base::Mutex> guard(&result_mutex_);
- executed_units_.push(unit);
- }
+ unit->ExecuteCompilation();
+ base::LockGuard<base::Mutex> guard(&result_mutex_);
+ executed_units_.push(unit);
return true;
}
- void InitializeParallelCompilation(const std::vector<WasmFunction>& functions,
- ModuleBytesEnv& module_env,
- ErrorThrower* thrower) {
- compilation_units_.reserve(functions.size());
- for (uint32_t i = FLAG_skip_compiling_wasm_funcs; i < functions.size();
- ++i) {
+ size_t InitializeParallelCompilation(
+ const std::vector<WasmFunction>& functions, ModuleBytesEnv& module_env) {
+ uint32_t start = module_env.module_env.module->num_imported_functions +
+ FLAG_skip_compiling_wasm_funcs;
+ uint32_t num_funcs = static_cast<uint32_t>(functions.size());
+ uint32_t funcs_to_compile = start > num_funcs ? 0 : num_funcs - start;
+ compilation_units_.reserve(funcs_to_compile);
+ for (uint32_t i = start; i < num_funcs; ++i) {
const WasmFunction* func = &functions[i];
compilation_units_.push_back(
- func->imported ? nullptr
- : new compiler::WasmCompilationUnit(
- thrower, isolate_, &module_env, func, i));
+ new compiler::WasmCompilationUnit(isolate_, &module_env, func));
+ }
+ return funcs_to_compile;
+ }
+
+ void InitializeHandles() {
+ for (auto unit : compilation_units_) {
+ unit->InitializeHandles();
}
}
uint32_t* StartCompilationTasks() {
- const size_t num_tasks =
+ num_background_tasks_ =
Min(static_cast<size_t>(FLAG_wasm_num_compilation_tasks),
V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads());
- uint32_t* task_ids = new uint32_t[num_tasks];
- for (size_t i = 0; i < num_tasks; ++i) {
+ uint32_t* task_ids = new uint32_t[num_background_tasks_];
+ for (size_t i = 0; i < num_background_tasks_; ++i) {
CompilationTask* task = new CompilationTask(this);
task_ids[i] = task->id();
V8::GetCurrentPlatform()->CallOnBackgroundThread(
@@ -283,13 +383,9 @@ class CompilationHelper {
}
void WaitForCompilationTasks(uint32_t* task_ids) {
- const size_t num_tasks =
- Min(static_cast<size_t>(FLAG_wasm_num_compilation_tasks),
- V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads());
- for (size_t i = 0; i < num_tasks; ++i) {
+ for (size_t i = 0; i < num_background_tasks_; ++i) {
// If the task has not started yet, then we abort it. Otherwise we wait
- // for
- // it to finish.
+ // for it to finish.
if (isolate_->cancelable_task_manager()->TryAbort(task_ids[i]) !=
CancelableTaskManager::kTaskAborted) {
module_->pending_tasks.get()->Wait();
@@ -297,23 +393,30 @@ class CompilationHelper {
}
}
- void FinishCompilationUnits(std::vector<Handle<Code>>& results) {
+ void FinishCompilationUnits(std::vector<Handle<Code>>& results,
+ ErrorThrower* thrower) {
while (true) {
- compiler::WasmCompilationUnit* unit = nullptr;
- {
- base::LockGuard<base::Mutex> guard(&result_mutex_);
- if (executed_units_.empty()) {
- break;
- }
- unit = executed_units_.front();
- executed_units_.pop();
- }
- int j = unit->index();
- results[j] = unit->FinishCompilation();
- delete unit;
+ int func_index = -1;
+ Handle<Code> result = FinishCompilationUnit(thrower, &func_index);
+ if (func_index < 0) break;
+ results[func_index] = result;
}
}
+ Handle<Code> FinishCompilationUnit(ErrorThrower* thrower, int* func_index) {
+ compiler::WasmCompilationUnit* unit = nullptr;
+ {
+ base::LockGuard<base::Mutex> guard(&result_mutex_);
+ if (executed_units_.empty()) return Handle<Code>::null();
+ unit = executed_units_.front();
+ executed_units_.pop();
+ }
+ *func_index = unit->func_index();
+ Handle<Code> result = unit->FinishCompilation(thrower);
+ delete unit;
+ return result;
+ }
+
void CompileInParallel(ModuleBytesEnv* module_env,
std::vector<Handle<Code>>& results,
ErrorThrower* thrower) {
@@ -342,7 +445,8 @@ class CompilationHelper {
// 1) The main thread allocates a compilation unit for each wasm function
// and stores them in the vector {compilation_units}.
- InitializeParallelCompilation(module->functions, *module_env, thrower);
+ InitializeParallelCompilation(module->functions, *module_env);
+ InitializeHandles();
// Objects for the synchronization with the background threads.
base::AtomicNumber<size_t> next_unit(
@@ -361,13 +465,13 @@ class CompilationHelper {
// dequeues it and finishes the compilation unit. Compilation units
// are finished concurrently to the background threads to save
// memory.
- FinishCompilationUnits(results);
+ FinishCompilationUnits(results, thrower);
}
// 4) After the parallel phase of all compilation units has started, the
// main thread waits for all {CompilationTask} instances to finish.
WaitForCompilationTasks(task_ids.get());
// Finish the compilation of the remaining compilation units.
- FinishCompilationUnits(results);
+ FinishCompilationUnits(results, thrower);
}
void CompileSequentially(ModuleBytesEnv* module_env,
@@ -382,9 +486,8 @@ class CompilationHelper {
if (func.imported)
continue; // Imports are compiled at instantiation time.
- Handle<Code> code = Handle<Code>::null();
// Compile the function.
- code = compiler::WasmCompilationUnit::CompileWasmFunction(
+ Handle<Code> code = compiler::WasmCompilationUnit::CompileWasmFunction(
thrower, isolate_, module_env, &func);
if (code.is_null()) {
WasmName str = module_env->wire_bytes.GetName(&func);
@@ -426,7 +529,9 @@ class CompilationHelper {
}
HistogramTimerScope wasm_compile_module_time_scope(
- isolate_->counters()->wasm_compile_module_time());
+ module_->is_wasm()
+ ? isolate_->counters()->wasm_compile_wasm_module_time()
+ : isolate_->counters()->wasm_compile_asm_module_time());
ModuleBytesEnv module_env(module_, &temp_instance, wire_bytes);
@@ -437,28 +542,41 @@ class CompilationHelper {
Handle<FixedArray> code_table =
factory->NewFixedArray(static_cast<int>(code_table_size), TENURED);
- // Initialize the code table with the illegal builtin. All call sites will
- // be
- // patched at instantiation.
- Handle<Code> illegal_builtin = isolate_->builtins()->Illegal();
- for (uint32_t i = 0; i < module_->functions.size(); ++i) {
- code_table->set(static_cast<int>(i), *illegal_builtin);
- temp_instance.function_code[i] = illegal_builtin;
- }
+ // Check whether lazy compilation is enabled for this module.
+ bool lazy_compile = compile_lazy(module_);
- isolate_->counters()->wasm_functions_per_module()->AddSample(
- static_cast<int>(module_->functions.size()));
- CompilationHelper helper(isolate_, module_);
- if (!FLAG_trace_wasm_decoder && FLAG_wasm_num_compilation_tasks != 0) {
- // Avoid a race condition by collecting results into a second vector.
- std::vector<Handle<Code>> results(temp_instance.function_code);
- helper.CompileInParallel(&module_env, results, thrower);
- temp_instance.function_code.swap(results);
- } else {
- helper.CompileSequentially(&module_env, temp_instance.function_code,
- thrower);
+ // If lazy compile: Initialize the code table with the lazy compile builtin.
+ // Otherwise: Initialize with the illegal builtin. All call sites will be
+ // patched at instantiation.
+ Handle<Code> init_builtin = lazy_compile
+ ? isolate_->builtins()->WasmCompileLazy()
+ : isolate_->builtins()->Illegal();
+ for (int i = 0, e = static_cast<int>(module_->functions.size()); i < e;
+ ++i) {
+ code_table->set(i, *init_builtin);
+ temp_instance.function_code[i] = init_builtin;
+ }
+
+ (module_->is_wasm() ? isolate_->counters()->wasm_functions_per_wasm_module()
+ : isolate_->counters()->wasm_functions_per_asm_module())
+ ->AddSample(static_cast<int>(module_->functions.size()));
+
+ if (!lazy_compile) {
+ CompilationHelper helper(isolate_, module_);
+ size_t funcs_to_compile =
+ module_->functions.size() - module_->num_imported_functions;
+ if (!FLAG_trace_wasm_decoder && FLAG_wasm_num_compilation_tasks != 0 &&
+ funcs_to_compile > 1) {
+ // Avoid a race condition by collecting results into a second vector.
+ std::vector<Handle<Code>> results(temp_instance.function_code);
+ helper.CompileInParallel(&module_env, results, thrower);
+ temp_instance.function_code.swap(results);
+ } else {
+ helper.CompileSequentially(&module_env, temp_instance.function_code,
+ thrower);
+ }
+ if (thrower->error()) return {};
}
- if (thrower->error()) return {};
// At this point, compilation has completed. Update the code table.
for (size_t i = FLAG_skip_compiling_wasm_funcs;
@@ -499,23 +617,14 @@ class CompilationHelper {
Handle<WasmSharedModuleData> shared = WasmSharedModuleData::New(
isolate_, module_wrapper, Handle<SeqOneByteString>::cast(module_bytes),
script, asm_js_offset_table);
+ if (lazy_compile) WasmSharedModuleData::PrepareForLazyCompilation(shared);
// Create the compiled module object, and populate with compiled functions
// and information needed at instantiation time. This object needs to be
// serializable. Instantiation may occur off a deserialized version of this
// object.
- Handle<WasmCompiledModule> compiled_module =
- WasmCompiledModule::New(isolate_, shared);
- compiled_module->set_num_imported_functions(
- module_->num_imported_functions);
- compiled_module->set_code_table(code_table);
- compiled_module->set_min_mem_pages(module_->min_mem_pages);
- compiled_module->set_max_mem_pages(module_->max_mem_pages);
- if (function_table_count > 0) {
- compiled_module->set_function_tables(function_tables);
- compiled_module->set_signature_tables(signature_tables);
- compiled_module->set_empty_function_tables(function_tables);
- }
+ Handle<WasmCompiledModule> compiled_module = WasmCompiledModule::New(
+ isolate_, shared, code_table, function_tables, signature_tables);
// If we created a wasm script, finish it now and make it public to the
// debugger.
@@ -529,7 +638,8 @@ class CompilationHelper {
int func_index = 0;
for (auto exp : module_->export_table) {
if (exp.kind != kExternalFunction) continue;
- Handle<Code> wasm_code(Code::cast(code_table->get(exp.index)), isolate_);
+ Handle<Code> wasm_code = EnsureExportedLazyDeoptData(
+ isolate_, Handle<WasmInstanceObject>::null(), code_table, exp.index);
Handle<Code> wrapper_code =
js_to_wasm_cache.CloneOrCompileJSToWasmWrapper(isolate_, module_,
wasm_code, exp.index);
@@ -544,75 +654,6 @@ class CompilationHelper {
}
};
-static void ResetCompiledModule(Isolate* isolate, WasmInstanceObject* owner,
- WasmCompiledModule* compiled_module) {
- TRACE("Resetting %d\n", compiled_module->instance_id());
- Object* undefined = *isolate->factory()->undefined_value();
- Object* fct_obj = compiled_module->ptr_to_code_table();
- if (fct_obj != nullptr && fct_obj != undefined) {
- uint32_t old_mem_size = compiled_module->mem_size();
- uint32_t default_mem_size = compiled_module->default_mem_size();
- Object* mem_start = compiled_module->maybe_ptr_to_memory();
-
- // Patch code to update memory references, global references, and function
- // table references.
- Zone specialization_zone(isolate->allocator(), ZONE_NAME);
- CodeSpecialization code_specialization(isolate, &specialization_zone);
-
- if (old_mem_size > 0) {
- CHECK_NE(mem_start, undefined);
- Address old_mem_address =
- static_cast<Address>(JSArrayBuffer::cast(mem_start)->backing_store());
- code_specialization.RelocateMemoryReferences(
- old_mem_address, old_mem_size, nullptr, default_mem_size);
- }
-
- if (owner->has_globals_buffer()) {
- Address globals_start =
- static_cast<Address>(owner->globals_buffer()->backing_store());
- code_specialization.RelocateGlobals(globals_start, nullptr);
- }
-
- // Reset function tables.
- if (compiled_module->has_function_tables()) {
- FixedArray* function_tables = compiled_module->ptr_to_function_tables();
- FixedArray* empty_function_tables =
- compiled_module->ptr_to_empty_function_tables();
- DCHECK_EQ(function_tables->length(), empty_function_tables->length());
- for (int i = 0, e = function_tables->length(); i < e; ++i) {
- code_specialization.RelocateObject(
- handle(function_tables->get(i), isolate),
- handle(empty_function_tables->get(i), isolate));
- }
- compiled_module->set_ptr_to_function_tables(empty_function_tables);
- }
-
- FixedArray* functions = FixedArray::cast(fct_obj);
- for (int i = compiled_module->num_imported_functions(),
- end = functions->length();
- i < end; ++i) {
- Code* code = Code::cast(functions->get(i));
- if (code->kind() != Code::WASM_FUNCTION) {
- // From here on, there should only be wrappers for exported functions.
- for (; i < end; ++i) {
- DCHECK_EQ(Code::JS_TO_WASM_FUNCTION,
- Code::cast(functions->get(i))->kind());
- }
- break;
- }
- bool changed =
- code_specialization.ApplyToWasmCode(code, SKIP_ICACHE_FLUSH);
- // TODO(wasm): Check if this is faster than passing FLUSH_ICACHE_IF_NEEDED
- // above.
- if (changed) {
- Assembler::FlushICache(isolate, code->instruction_start(),
- code->instruction_size());
- }
- }
- }
- compiled_module->reset_memory();
-}
-
static void MemoryInstanceFinalizer(Isolate* isolate,
WasmInstanceObject* instance) {
DisallowHeapAllocation no_gc;
@@ -668,13 +709,25 @@ static void InstanceFinalizer(const v8::WeakCallbackInfo<void>& data) {
DCHECK(compiled_module->has_weak_wasm_module());
WeakCell* weak_wasm_module = compiled_module->ptr_to_weak_wasm_module();
+ if (trap_handler::UseTrapHandler()) {
+ Handle<FixedArray> code_table = compiled_module->code_table();
+ for (int i = 0; i < code_table->length(); ++i) {
+ Handle<Code> code = code_table->GetValueChecked<Code>(isolate, i);
+ int index = code->trap_handler_index()->value();
+ if (index >= 0) {
+ trap_handler::ReleaseHandlerData(index);
+ code->set_trap_handler_index(Smi::FromInt(-1));
+ }
+ }
+ }
+
// weak_wasm_module may have been cleared, meaning the module object
// was GC-ed. In that case, there won't be any new instances created,
// and we don't need to maintain the links between instances.
if (!weak_wasm_module->cleared()) {
JSObject* wasm_module = JSObject::cast(weak_wasm_module->value());
WasmCompiledModule* current_template =
- WasmCompiledModule::cast(wasm_module->GetInternalField(0));
+ WasmCompiledModule::cast(wasm_module->GetEmbedderField(0));
TRACE("chain before {\n");
TRACE_CHAIN(current_template);
@@ -686,10 +739,10 @@ static void InstanceFinalizer(const v8::WeakCallbackInfo<void>& data) {
if (current_template == compiled_module) {
if (next == nullptr) {
- ResetCompiledModule(isolate, owner, compiled_module);
+ WasmCompiledModule::Reset(isolate, compiled_module);
} else {
DCHECK(next->value()->IsFixedArray());
- wasm_module->SetInternalField(0, next->value());
+ wasm_module->SetEmbedderField(0, next->value());
DCHECK_NULL(prev);
WasmCompiledModule::cast(next->value())->reset_weak_prev_instance();
}
@@ -718,7 +771,7 @@ static void InstanceFinalizer(const v8::WeakCallbackInfo<void>& data) {
}
}
TRACE("chain after {\n");
- TRACE_CHAIN(WasmCompiledModule::cast(wasm_module->GetInternalField(0)));
+ TRACE_CHAIN(WasmCompiledModule::cast(wasm_module->GetEmbedderField(0)));
TRACE("}\n");
}
compiled_module->reset_weak_owning_instance();
@@ -726,29 +779,48 @@ static void InstanceFinalizer(const v8::WeakCallbackInfo<void>& data) {
TRACE("}\n");
}
-std::pair<int, int> GetFunctionOffsetAndLength(
- Handle<WasmCompiledModule> compiled_module, int func_index) {
- WasmModule* module = compiled_module->module();
- if (func_index < 0 ||
- static_cast<size_t>(func_index) > module->functions.size()) {
- return {0, 0};
- }
- WasmFunction& func = module->functions[func_index];
- return {static_cast<int>(func.code_start_offset),
- static_cast<int>(func.code_end_offset - func.code_start_offset)};
+int AdvanceSourcePositionTableIterator(SourcePositionTableIterator& iterator,
+ int offset) {
+ DCHECK(!iterator.done());
+ int byte_pos;
+ do {
+ byte_pos = iterator.source_position().ScriptOffset();
+ iterator.Advance();
+ } while (!iterator.done() && iterator.code_offset() <= offset);
+ return byte_pos;
+}
+
+int ExtractDirectCallIndex(wasm::Decoder& decoder, const byte* pc) {
+ DCHECK_EQ(static_cast<int>(kExprCallFunction), static_cast<int>(*pc));
+ // Read the leb128 encoded u32 value (up to 5 bytes starting at pc + 1).
+ decoder.Reset(pc + 1, pc + 6);
+ uint32_t call_idx = decoder.consume_u32v("call index");
+ DCHECK(decoder.ok());
+ DCHECK_GE(kMaxInt, call_idx);
+ return static_cast<int>(call_idx);
+}
+
+void RecordLazyCodeStats(Isolate* isolate, Code* code) {
+ isolate->counters()->wasm_lazily_compiled_functions()->Increment();
+ isolate->counters()->wasm_generated_code_size()->Increment(code->body_size());
+ isolate->counters()->wasm_reloc_size()->Increment(
+ code->relocation_info()->length());
}
+
} // namespace
-Handle<JSArrayBuffer> SetupArrayBuffer(Isolate* isolate, void* backing_store,
- size_t size, bool is_external,
- bool enable_guard_regions) {
+Handle<JSArrayBuffer> wasm::SetupArrayBuffer(Isolate* isolate,
+ void* backing_store, size_t size,
+ bool is_external,
+ bool enable_guard_regions) {
Handle<JSArrayBuffer> buffer = isolate->factory()->NewJSArrayBuffer();
JSArrayBuffer::Setup(buffer, isolate, is_external, backing_store,
static_cast<int>(size));
buffer->set_is_neuterable(false);
+ buffer->set_is_wasm_buffer(true);
buffer->set_has_guard_region(enable_guard_regions);
- if (is_external) {
+ if (enable_guard_regions) {
// We mark the buffer as external if we allocated it here with guard
// pages. That means we need to arrange for it to be freed.
@@ -790,22 +862,40 @@ Handle<JSArrayBuffer> wasm::NewArrayBuffer(Isolate* isolate, size_t size,
enable_guard_regions);
}
-std::ostream& wasm::operator<<(std::ostream& os, const WasmModule& module) {
- os << "WASM module with ";
- os << (module.min_mem_pages * module.kPageSize) << " min mem";
- os << (module.max_mem_pages * module.kPageSize) << " max mem";
- os << module.functions.size() << " functions";
- os << module.functions.size() << " globals";
- os << module.functions.size() << " data segments";
- return os;
-}
+void wasm::UnpackAndRegisterProtectedInstructions(
+ Isolate* isolate, Handle<FixedArray> code_table) {
+ for (int i = 0; i < code_table->length(); ++i) {
+ Handle<Code> code;
+ // This is sometimes undefined when we're called from cctests.
+ if (!code_table->GetValue<Code>(isolate, i).ToHandle(&code)) {
+ continue;
+ }
-std::ostream& wasm::operator<<(std::ostream& os, const WasmFunction& function) {
- os << "WASM function with signature " << *function.sig;
+ if (code->kind() != Code::WASM_FUNCTION) {
+ continue;
+ }
- os << " code bytes: "
- << (function.code_end_offset - function.code_start_offset);
- return os;
+ const intptr_t base = reinterpret_cast<intptr_t>(code->entry());
+
+ Zone zone(isolate->allocator(), "Wasm Module");
+ ZoneVector<trap_handler::ProtectedInstructionData> unpacked(&zone);
+ const int mode_mask =
+ RelocInfo::ModeMask(RelocInfo::WASM_PROTECTED_INSTRUCTION_LANDING);
+ for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
+ trap_handler::ProtectedInstructionData data;
+ data.instr_offset = it.rinfo()->data();
+ data.landing_offset = reinterpret_cast<intptr_t>(it.rinfo()->pc()) - base;
+ unpacked.emplace_back(data);
+ }
+ if (unpacked.size() > 0) {
+ int size = code->CodeSize();
+ const int index = RegisterHandlerData(reinterpret_cast<void*>(base), size,
+ unpacked.size(), &unpacked[0]);
+ // TODO(eholk): if index is negative, fail.
+ DCHECK(index >= 0);
+ code->set_trap_handler_index(Smi::FromInt(index));
+ }
+ }
}
std::ostream& wasm::operator<<(std::ostream& os, const WasmFunctionName& name) {
@@ -826,7 +916,6 @@ WasmInstanceObject* wasm::GetOwningWasmInstance(Code* code) {
DCHECK(code->kind() == Code::WASM_FUNCTION ||
code->kind() == Code::WASM_INTERPRETER_ENTRY);
FixedArray* deopt_data = code->deoptimization_data();
- DCHECK_NOT_NULL(deopt_data);
DCHECK_EQ(code->kind() == Code::WASM_INTERPRETER_ENTRY ? 1 : 2,
deopt_data->length());
Object* weak_link = deopt_data->get(0);
@@ -836,16 +925,13 @@ WasmInstanceObject* wasm::GetOwningWasmInstance(Code* code) {
return WasmInstanceObject::cast(cell->value());
}
-int wasm::GetFunctionCodeOffset(Handle<WasmCompiledModule> compiled_module,
- int func_index) {
- return GetFunctionOffsetAndLength(compiled_module, func_index).first;
-}
-
WasmModule::WasmModule(Zone* owned)
: owned_zone(owned), pending_tasks(new base::Semaphore(0)) {}
-static WasmFunction* GetWasmFunctionForImportWrapper(Isolate* isolate,
- Handle<Object> target) {
+namespace {
+
+WasmFunction* GetWasmFunctionForImportWrapper(Isolate* isolate,
+ Handle<Object> target) {
if (target->IsJSFunction()) {
Handle<JSFunction> func = Handle<JSFunction>::cast(target);
if (func->code()->kind() == Code::JS_TO_WASM_FUNCTION) {
@@ -858,52 +944,53 @@ static WasmFunction* GetWasmFunctionForImportWrapper(Isolate* isolate,
return nullptr;
}
-static Handle<Code> UnwrapImportWrapper(Handle<Object> target) {
- Handle<JSFunction> func = Handle<JSFunction>::cast(target);
+static Handle<Code> UnwrapImportWrapper(Handle<Object> import_wrapper) {
+ Handle<JSFunction> func = Handle<JSFunction>::cast(import_wrapper);
Handle<Code> export_wrapper_code = handle(func->code());
- int found = 0;
int mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET);
- Handle<Code> code;
- for (RelocIterator it(*export_wrapper_code, mask); !it.done(); it.next()) {
- RelocInfo* rinfo = it.rinfo();
- Address target_address = rinfo->target_address();
- Code* target = Code::GetCodeFromTargetAddress(target_address);
- if (target->kind() == Code::WASM_FUNCTION ||
- target->kind() == Code::WASM_TO_JS_FUNCTION) {
- ++found;
- code = handle(target);
- }
- }
- DCHECK_EQ(1, found);
- return code;
+ for (RelocIterator it(*export_wrapper_code, mask);; it.next()) {
+ DCHECK(!it.done());
+ Code* target = Code::GetCodeFromTargetAddress(it.rinfo()->target_address());
+ if (target->kind() != Code::WASM_FUNCTION &&
+ target->kind() != Code::WASM_TO_JS_FUNCTION &&
+ target->kind() != Code::WASM_INTERPRETER_ENTRY)
+ continue;
+// There should only be this one call to wasm code.
+#ifdef DEBUG
+ for (it.next(); !it.done(); it.next()) {
+ Code* code = Code::GetCodeFromTargetAddress(it.rinfo()->target_address());
+ DCHECK(code->kind() != Code::WASM_FUNCTION &&
+ code->kind() != Code::WASM_TO_JS_FUNCTION &&
+ code->kind() != Code::WASM_INTERPRETER_ENTRY);
+ }
+#endif
+ return handle(target);
+ }
+ UNREACHABLE();
+ return Handle<Code>::null();
}
-static Handle<Code> CompileImportWrapper(Isolate* isolate, int index,
- FunctionSig* sig,
- Handle<JSReceiver> target,
- Handle<String> module_name,
- MaybeHandle<String> import_name,
- ModuleOrigin origin) {
+Handle<Code> CompileImportWrapper(Isolate* isolate, int index, FunctionSig* sig,
+ Handle<JSReceiver> target,
+ Handle<String> module_name,
+ MaybeHandle<String> import_name,
+ ModuleOrigin origin) {
WasmFunction* other_func = GetWasmFunctionForImportWrapper(isolate, target);
if (other_func) {
- if (sig->Equals(other_func->sig)) {
- // Signature matched. Unwrap the JS->WASM wrapper and return the raw
- // WASM function code.
- return UnwrapImportWrapper(target);
- } else {
- return Handle<Code>::null();
- }
- } else {
- // Signature mismatch. Compile a new wrapper for the new signature.
- return compiler::CompileWasmToJSWrapper(isolate, target, sig, index,
- module_name, import_name, origin);
- }
+ if (!sig->Equals(other_func->sig)) return Handle<Code>::null();
+ // Signature matched. Unwrap the JS->WASM wrapper and return the raw
+ // WASM function code.
+ return UnwrapImportWrapper(target);
+ }
+ // No wasm function or being debugged. Compile a new wrapper for the new
+ // signature.
+ return compiler::CompileWasmToJSWrapper(isolate, target, sig, index,
+ module_name, import_name, origin);
}
-static void UpdateDispatchTablesInternal(Isolate* isolate,
- Handle<FixedArray> dispatch_tables,
- int index, WasmFunction* function,
- Handle<Code> code) {
+void UpdateDispatchTablesInternal(Isolate* isolate,
+ Handle<FixedArray> dispatch_tables, int index,
+ WasmFunction* function, Handle<Code> code) {
DCHECK_EQ(0, dispatch_tables->length() % 4);
for (int i = 0; i < dispatch_tables->length(); i += 4) {
int table_index = Smi::cast(dispatch_tables->get(i + 1))->value();
@@ -916,19 +1003,19 @@ static void UpdateDispatchTablesInternal(Isolate* isolate,
// a dangling pointer in the signature map.
Handle<WasmInstanceObject> instance(
WasmInstanceObject::cast(dispatch_tables->get(i)), isolate);
- int sig_index = static_cast<int>(
- instance->module()->function_tables[table_index].map.FindOrInsert(
- function->sig));
- signature_table->set(index, Smi::FromInt(sig_index));
+ auto func_table = instance->module()->function_tables[table_index];
+ uint32_t sig_index = func_table.map.FindOrInsert(function->sig);
+ signature_table->set(index, Smi::FromInt(static_cast<int>(sig_index)));
function_table->set(index, *code);
} else {
- Code* code = nullptr;
signature_table->set(index, Smi::FromInt(-1));
- function_table->set(index, code);
+ function_table->set(index, Smi::kZero);
}
}
}
+} // namespace
+
void wasm::UpdateDispatchTables(Isolate* isolate,
Handle<FixedArray> dispatch_tables, int index,
Handle<JSFunction> function) {
@@ -971,14 +1058,20 @@ class InstantiationHelper {
return {};
}
+ // Record build time into correct bucket, then build instance.
HistogramTimerScope wasm_instantiate_module_time_scope(
- isolate_->counters()->wasm_instantiate_module_time());
+ module_->is_wasm()
+ ? isolate_->counters()->wasm_instantiate_wasm_module_time()
+ : isolate_->counters()->wasm_instantiate_asm_module_time());
Factory* factory = isolate_->factory();
//--------------------------------------------------------------------------
// Reuse the compiled module (if no owner), otherwise clone.
//--------------------------------------------------------------------------
Handle<FixedArray> code_table;
+ // We keep around a copy of the old code table, because we'll be replacing
+ // imports for the new instance, and then we need the old imports to be
+ // able to relocate.
Handle<FixedArray> old_code_table;
MaybeHandle<WasmInstanceObject> owner;
@@ -1000,11 +1093,6 @@ class InstantiationHelper {
}
}
DCHECK(!original.is_null());
- // Always make a new copy of the code_table, since the old_code_table
- // may still have placeholders for imports.
- old_code_table = original->code_table();
- code_table = factory->CopyFixedArray(old_code_table);
-
if (original->has_weak_owning_instance()) {
// Clone, but don't insert yet the clone in the instances chain.
// We do that last. Since we are holding on to the owner instance,
@@ -1012,18 +1100,32 @@ class InstantiationHelper {
// won't be mutated by possible finalizer runs.
DCHECK(!owner.is_null());
TRACE("Cloning from %d\n", original->instance_id());
+ old_code_table = original->code_table();
compiled_module_ = WasmCompiledModule::Clone(isolate_, original);
+ code_table = compiled_module_->code_table();
// Avoid creating too many handles in the outer scope.
HandleScope scope(isolate_);
// Clone the code for WASM functions and exports.
for (int i = 0; i < code_table->length(); ++i) {
- Handle<Code> orig_code =
- code_table->GetValueChecked<Code>(isolate_, i);
+ Handle<Code> orig_code(Code::cast(code_table->get(i)), isolate_);
switch (orig_code->kind()) {
case Code::WASM_TO_JS_FUNCTION:
// Imports will be overwritten with newly compiled wrappers.
break;
+ case Code::BUILTIN:
+ DCHECK_EQ(Builtins::kWasmCompileLazy, orig_code->builtin_index());
+ // If this code object has deoptimization data, then we need a
+ // unique copy to attach updated deoptimization data.
+ if (orig_code->deoptimization_data()->length() > 0) {
+ Handle<Code> code = factory->CopyCode(orig_code);
+ Handle<FixedArray> deopt_data =
+ factory->NewFixedArray(2, TENURED);
+ deopt_data->set(1, Smi::FromInt(i));
+ code->set_deoptimization_data(*deopt_data);
+ code_table->set(i, *code);
+ }
+ break;
case Code::JS_TO_WASM_FUNCTION:
case Code::WASM_FUNCTION: {
Handle<Code> code = factory->CopyCode(orig_code);
@@ -1038,10 +1140,12 @@ class InstantiationHelper {
} else {
// There was no owner, so we can reuse the original.
compiled_module_ = original;
+ old_code_table =
+ factory->CopyFixedArray(compiled_module_->code_table());
+ code_table = compiled_module_->code_table();
TRACE("Reusing existing instance %d\n",
compiled_module_->instance_id());
}
- compiled_module_->set_code_table(code_table);
compiled_module_->set_native_context(isolate_->native_context());
}
@@ -1067,15 +1171,17 @@ class InstantiationHelper {
thrower_->RangeError("Out of memory: wasm globals");
return {};
}
- Address old_globals_start = nullptr;
- if (!owner.is_null()) {
- DCHECK(owner.ToHandleChecked()->has_globals_buffer());
- old_globals_start = static_cast<Address>(
- owner.ToHandleChecked()->globals_buffer()->backing_store());
- }
+ Address old_globals_start = compiled_module_->GetGlobalsStartOrNull();
Address new_globals_start =
static_cast<Address>(global_buffer->backing_store());
code_specialization.RelocateGlobals(old_globals_start, new_globals_start);
+ // The address of the backing buffer for the globals is in native memory
+ // and, thus, not moving. We need it saved for
+ // serialization/deserialization purposes - so that the other end
+ // understands how to relocate the references. We still need to save the
+ // JSArrayBuffer on the instance, to keep it all alive.
+ WasmCompiledModule::SetGlobalsStartAddressFrom(factory, compiled_module_,
+ global_buffer);
instance->set_globals_buffer(*global_buffer);
}
@@ -1106,22 +1212,23 @@ class InstantiationHelper {
// Set up the indirect function tables for the new instance.
//--------------------------------------------------------------------------
if (function_table_count > 0)
- InitializeTables(code_table, instance, &code_specialization);
+ InitializeTables(instance, &code_specialization);
//--------------------------------------------------------------------------
// Set up the memory for the new instance.
//--------------------------------------------------------------------------
- MaybeHandle<JSArrayBuffer> old_memory;
-
uint32_t min_mem_pages = module_->min_mem_pages;
- isolate_->counters()->wasm_min_mem_pages_count()->AddSample(min_mem_pages);
+ (module_->is_wasm() ? isolate_->counters()->wasm_wasm_min_mem_pages_count()
+ : isolate_->counters()->wasm_asm_min_mem_pages_count())
+ ->AddSample(min_mem_pages);
if (!memory_.is_null()) {
// Set externally passed ArrayBuffer non neuterable.
memory_->set_is_neuterable(false);
+ memory_->set_is_wasm_buffer(true);
- DCHECK_IMPLIES(EnableGuardRegions(), module_->origin == kAsmJsOrigin ||
- memory_->has_guard_region());
+ DCHECK_IMPLIES(EnableGuardRegions(),
+ module_->is_asm_js() || memory_->has_guard_region());
} else if (min_mem_pages > 0) {
memory_ = AllocateMemory(min_mem_pages);
if (memory_.is_null()) return {}; // failed to allocate memory
@@ -1159,25 +1266,24 @@ class InstantiationHelper {
// Initialize memory.
//--------------------------------------------------------------------------
if (!memory_.is_null()) {
- instance->set_memory_buffer(*memory_);
Address mem_start = static_cast<Address>(memory_->backing_store());
uint32_t mem_size =
static_cast<uint32_t>(memory_->byte_length()->Number());
LoadDataSegments(mem_start, mem_size);
uint32_t old_mem_size = compiled_module_->mem_size();
- Address old_mem_start =
- compiled_module_->has_memory()
- ? static_cast<Address>(
- compiled_module_->memory()->backing_store())
- : nullptr;
+ Address old_mem_start = compiled_module_->GetEmbeddedMemStartOrNull();
// We might get instantiated again with the same memory. No patching
// needed in this case.
if (old_mem_start != mem_start || old_mem_size != mem_size) {
code_specialization.RelocateMemoryReferences(
old_mem_start, old_mem_size, mem_start, mem_size);
}
- compiled_module_->set_memory(memory_);
+ // Just like with globals, we need to keep both the JSArrayBuffer
+ // and save the start pointer.
+ instance->set_memory_buffer(*memory_);
+ WasmCompiledModule::SetSpecializationMemInfoFrom(
+ factory, compiled_module_, memory_);
}
//--------------------------------------------------------------------------
@@ -1185,16 +1291,22 @@ class InstantiationHelper {
//--------------------------------------------------------------------------
Handle<WeakCell> weak_link = factory->NewWeakCell(instance);
- for (int i = num_imported_functions + FLAG_skip_compiling_wasm_funcs;
- i < code_table->length(); ++i) {
- Handle<Code> code = code_table->GetValueChecked<Code>(isolate_, i);
+ for (int i = num_imported_functions + FLAG_skip_compiling_wasm_funcs,
+ num_functions = static_cast<int>(module_->functions.size());
+ i < num_functions; ++i) {
+ Handle<Code> code = handle(Code::cast(code_table->get(i)), isolate_);
if (code->kind() == Code::WASM_FUNCTION) {
Handle<FixedArray> deopt_data = factory->NewFixedArray(2, TENURED);
deopt_data->set(0, *weak_link);
- deopt_data->set(1, Smi::FromInt(static_cast<int>(i)));
- deopt_data->set_length(2);
+ deopt_data->set(1, Smi::FromInt(i));
code->set_deoptimization_data(*deopt_data);
+ continue;
}
+ DCHECK_EQ(Builtins::kWasmCompileLazy, code->builtin_index());
+ if (code->deoptimization_data()->length() == 0) continue;
+ DCHECK_LE(2, code->deoptimization_data()->length());
+ DCHECK_EQ(i, Smi::cast(code->deoptimization_data()->get(1))->value());
+ code->deoptimization_data()->set(0, *weak_link);
}
//--------------------------------------------------------------------------
@@ -1216,40 +1328,16 @@ class InstantiationHelper {
if (function_table_count > 0) LoadTableSegments(code_table, instance);
// Patch all code with the relocations registered in code_specialization.
- {
- code_specialization.RelocateDirectCalls(instance);
- code_specialization.ApplyToWholeInstance(*instance, SKIP_ICACHE_FLUSH);
- }
+ code_specialization.RelocateDirectCalls(instance);
+ code_specialization.ApplyToWholeInstance(*instance, SKIP_ICACHE_FLUSH);
FlushICache(isolate_, code_table);
//--------------------------------------------------------------------------
// Unpack and notify signal handler of protected instructions.
//--------------------------------------------------------------------------
- if (FLAG_wasm_trap_handler) {
- for (int i = 0; i < code_table->length(); ++i) {
- Handle<Code> code = code_table->GetValueChecked<Code>(isolate_, i);
-
- if (code->kind() != Code::WASM_FUNCTION) {
- continue;
- }
-
- const intptr_t base = reinterpret_cast<intptr_t>(code->entry());
-
- Zone zone(isolate_->allocator(), "Wasm Module");
- ZoneVector<trap_handler::ProtectedInstructionData> unpacked(&zone);
- const int mode_mask =
- RelocInfo::ModeMask(RelocInfo::WASM_PROTECTED_INSTRUCTION_LANDING);
- for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
- trap_handler::ProtectedInstructionData data;
- data.instr_offset = it.rinfo()->data();
- data.landing_offset =
- reinterpret_cast<intptr_t>(it.rinfo()->pc()) - base;
- unpacked.emplace_back(data);
- }
- // TODO(eholk): Register the protected instruction information once the
- // trap handler is in place.
- }
+ if (trap_handler::UseTrapHandler()) {
+ UnpackAndRegisterProtectedInstructions(isolate_, code_table);
}
//--------------------------------------------------------------------------
@@ -1281,7 +1369,7 @@ class InstantiationHelper {
compiled_module_->set_weak_wasm_module(
original.ToHandleChecked()->weak_wasm_module());
}
- module_object_->SetInternalField(0, *compiled_module_);
+ module_object_->SetEmbedderField(0, *compiled_module_);
compiled_module_->set_weak_owning_instance(link_to_owning_instance);
GlobalHandles::MakeWeak(global_handle.location(),
global_handle.location(), &InstanceFinalizer,
@@ -1290,19 +1378,34 @@ class InstantiationHelper {
}
//--------------------------------------------------------------------------
- // Set all breakpoints that were set on the shared module.
+ // Debugging support.
//--------------------------------------------------------------------------
+ // Set all breakpoints that were set on the shared module.
WasmSharedModuleData::SetBreakpointsOnNewInstance(
compiled_module_->shared(), instance);
+ if (FLAG_wasm_interpret_all) {
+ Handle<WasmDebugInfo> debug_info =
+ WasmInstanceObject::GetOrCreateDebugInfo(instance);
+ std::vector<int> func_indexes;
+ for (int func_index = num_imported_functions,
+ num_wasm_functions = static_cast<int>(module_->functions.size());
+ func_index < num_wasm_functions; ++func_index) {
+ func_indexes.push_back(func_index);
+ }
+ WasmDebugInfo::RedirectToInterpreter(
+ debug_info, Vector<int>(func_indexes.data(),
+ static_cast<int>(func_indexes.size())));
+ }
+
//--------------------------------------------------------------------------
// Run the start function if one was specified.
//--------------------------------------------------------------------------
if (module_->start_function_index >= 0) {
HandleScope scope(isolate_);
int start_index = module_->start_function_index;
- Handle<Code> startup_code =
- code_table->GetValueChecked<Code>(isolate_, start_index);
+ Handle<Code> startup_code = EnsureExportedLazyDeoptData(
+ isolate_, instance, code_table, start_index);
FunctionSig* sig = module_->functions[start_index].sig;
Handle<Code> wrapper_code =
js_to_wasm_cache_.CloneOrCompileJSToWasmWrapper(
@@ -1354,22 +1457,26 @@ class InstantiationHelper {
std::vector<Handle<JSFunction>> js_wrappers_;
JSToWasmWrapperCache js_to_wasm_cache_;
- // Helper routines to print out errors with imports.
- void ReportLinkError(const char* error, uint32_t index,
- Handle<String> module_name, Handle<String> import_name) {
- thrower_->LinkError(
- "Import #%d module=\"%.*s\" function=\"%.*s\" error: %s", index,
- module_name->length(), module_name->ToCString().get(),
- import_name->length(), import_name->ToCString().get(), error);
- }
-
- MaybeHandle<Object> ReportLinkError(const char* error, uint32_t index,
- Handle<String> module_name) {
- thrower_->LinkError("Import #%d module=\"%.*s\" error: %s", index,
- module_name->length(), module_name->ToCString().get(),
- error);
- return MaybeHandle<Object>();
- }
+// Helper routines to print out errors with imports.
+#define ERROR_THROWER_WITH_MESSAGE(TYPE) \
+ void Report##TYPE(const char* error, uint32_t index, \
+ Handle<String> module_name, Handle<String> import_name) { \
+ thrower_->TYPE("Import #%d module=\"%.*s\" function=\"%.*s\" error: %s", \
+ index, module_name->length(), \
+ module_name->ToCString().get(), import_name->length(), \
+ import_name->ToCString().get(), error); \
+ } \
+ \
+ MaybeHandle<Object> Report##TYPE(const char* error, uint32_t index, \
+ Handle<String> module_name) { \
+ thrower_->TYPE("Import #%d module=\"%.*s\" error: %s", index, \
+ module_name->length(), module_name->ToCString().get(), \
+ error); \
+ return MaybeHandle<Object>(); \
+ }
+
+ ERROR_THROWER_WITH_MESSAGE(LinkError)
+ ERROR_THROWER_WITH_MESSAGE(TypeError)
// Look up an import value in the {ffi_} object.
MaybeHandle<Object> LookupImport(uint32_t index, Handle<String> module_name,
@@ -1382,14 +1489,14 @@ class InstantiationHelper {
MaybeHandle<Object> result =
Object::GetPropertyOrElement(ffi_, module_name);
if (result.is_null()) {
- return ReportLinkError("module not found", index, module_name);
+ return ReportTypeError("module not found", index, module_name);
}
Handle<Object> module = result.ToHandleChecked();
// Look up the value in the module.
if (!module->IsJSReceiver()) {
- return ReportLinkError("module is not an object or function", index,
+ return ReportTypeError("module is not an object or function", index,
module_name);
}
@@ -1439,14 +1546,7 @@ class InstantiationHelper {
}
void WriteGlobalValue(WasmGlobal& global, Handle<Object> value) {
- double num = 0;
- if (value->IsSmi()) {
- num = Smi::cast(*value)->value();
- } else if (value->IsHeapNumber()) {
- num = HeapNumber::cast(*value)->value();
- } else {
- UNREACHABLE();
- }
+ double num = value->Number();
TRACE("init [globals+%u] = %lf, type = %s\n", global.offset, num,
WasmOpcodes::TypeName(global.type));
switch (global.type) {
@@ -1510,7 +1610,7 @@ class InstantiationHelper {
Handle<Code> import_wrapper = CompileImportWrapper(
isolate_, index, module_->functions[import.index].sig,
Handle<JSReceiver>::cast(value), module_name, import_name,
- module_->origin);
+ module_->get_origin());
if (import_wrapper.is_null()) {
ReportLinkError(
"imported function does not match the expected type", index,
@@ -1637,6 +1737,13 @@ class InstantiationHelper {
module_name, import_name);
return -1;
}
+ if (module_->is_asm_js() && FLAG_fast_validate_asm) {
+ if (module_->globals[import.index].type == kWasmI32) {
+ value = Object::ToInt32(isolate_, value).ToHandleChecked();
+ } else {
+ value = Object::ToNumber(value).ToHandleChecked();
+ }
+ }
if (!value->IsNumber()) {
ReportLinkError("global import must be a number", index,
module_name, import_name);
@@ -1737,10 +1844,10 @@ class InstantiationHelper {
}
Handle<JSObject> exports_object;
- if (module_->origin == kWasmOrigin) {
+ if (module_->is_wasm()) {
// Create the "exports" object.
exports_object = isolate_->factory()->NewJSObjectWithNullProto();
- } else if (module_->origin == kAsmJsOrigin) {
+ } else if (module_->is_asm_js()) {
Handle<JSFunction> object_function = Handle<JSFunction>(
isolate_->native_context()->object_function(), isolate_);
exports_object = isolate_->factory()->NewJSObject(object_function);
@@ -1759,7 +1866,7 @@ class InstantiationHelper {
wasm::AsmWasmBuilder::single_function_name);
PropertyDescriptor desc;
- desc.set_writable(module_->origin == kAsmJsOrigin);
+ desc.set_writable(module_->is_asm_js());
desc.set_enumerable(true);
// Count up export indexes.
@@ -1789,7 +1896,7 @@ class InstantiationHelper {
isolate_, compiled_module_, exp.name_offset, exp.name_length)
.ToHandleChecked();
Handle<JSObject> export_to;
- if (module_->origin == kAsmJsOrigin && exp.kind == kExternalFunction &&
+ if (module_->is_asm_js() && exp.kind == kExternalFunction &&
(String::Equals(name, foreign_init_name) ||
String::Equals(name, single_function_name))) {
export_to = instance;
@@ -1809,7 +1916,7 @@ class InstantiationHelper {
Handle<Code> export_code =
code_table->GetValueChecked<Code>(isolate_, func_index);
MaybeHandle<String> func_name;
- if (module_->origin == kAsmJsOrigin) {
+ if (module_->is_asm_js()) {
// For modules arising from asm.js, honor the names section.
func_name = WasmCompiledModule::ExtractUtf8StringFromModuleBytes(
isolate_, compiled_module_, function.name_offset,
@@ -1892,7 +1999,7 @@ class InstantiationHelper {
}
// Skip duplicates for asm.js.
- if (module_->origin == kAsmJsOrigin) {
+ if (module_->is_asm_js()) {
v8::Maybe<bool> status = JSReceiver::HasOwnProperty(export_to, name);
if (status.FromMaybe(false)) {
continue;
@@ -1907,7 +2014,7 @@ class InstantiationHelper {
}
}
- if (module_->origin == kWasmOrigin) {
+ if (module_->is_wasm()) {
v8::Maybe<bool> success = JSReceiver::SetIntegrityLevel(
exports_object, FROZEN, Object::DONT_THROW);
DCHECK(success.FromMaybe(false));
@@ -1915,8 +2022,7 @@ class InstantiationHelper {
}
}
- void InitializeTables(Handle<FixedArray> code_table,
- Handle<WasmInstanceObject> instance,
+ void InitializeTables(Handle<WasmInstanceObject> instance,
CodeSpecialization* code_specialization) {
int function_table_count =
static_cast<int>(module_->function_tables.size());
@@ -1994,13 +2100,31 @@ class InstantiationHelper {
Handle<FixedArray>::null(), Handle<FixedArray>::null());
}
+ // Count the number of table exports for each function (needed for lazy
+ // compilation).
+ std::unordered_map<uint32_t, uint32_t> num_table_exports;
+ if (compile_lazy(module_)) {
+ for (auto table_init : module_->table_inits) {
+ for (uint32_t func_index : table_init.entries) {
+ Code* code =
+ Code::cast(code_table->get(static_cast<int>(func_index)));
+ // Only increase the counter for lazy compile builtins (it's not
+ // needed otherwise).
+ if (code->is_wasm_code()) continue;
+ DCHECK_EQ(Builtins::kWasmCompileLazy, code->builtin_index());
+ ++num_table_exports[func_index];
+ }
+ }
+ }
+
// TODO(titzer): this does redundant work if there are multiple tables,
// since initializations are not sorted by table index.
for (auto table_init : module_->table_inits) {
uint32_t base = EvalUint32InitExpr(table_init.offset);
DCHECK(in_bounds(base, static_cast<uint32_t>(table_init.entries.size()),
table_instance.function_table->length()));
- for (int i = 0; i < static_cast<int>(table_init.entries.size()); ++i) {
+ for (int i = 0, e = static_cast<int>(table_init.entries.size()); i < e;
+ ++i) {
uint32_t func_index = table_init.entries[i];
WasmFunction* function = &module_->functions[func_index];
int table_index = static_cast<int>(i + base);
@@ -2008,12 +2132,12 @@ class InstantiationHelper {
DCHECK_GE(sig_index, 0);
table_instance.signature_table->set(table_index,
Smi::FromInt(sig_index));
- table_instance.function_table->set(table_index,
- code_table->get(func_index));
+ Handle<Code> wasm_code = EnsureTableExportLazyDeoptData(
+ isolate_, instance, code_table, func_index,
+ table_instance.function_table, table_index, num_table_exports);
+ table_instance.function_table->set(table_index, *wasm_code);
if (!all_dispatch_tables.is_null()) {
- Handle<Code> wasm_code(Code::cast(code_table->get(func_index)),
- isolate_);
if (js_wrappers_[func_index].is_null()) {
// No JSFunction entry yet exists for this function. Create one.
// TODO(titzer): We compile JS->WASM wrappers for functions are
@@ -2024,7 +2148,7 @@ class InstantiationHelper {
js_to_wasm_cache_.CloneOrCompileJSToWasmWrapper(
isolate_, module_, wasm_code, func_index);
MaybeHandle<String> func_name;
- if (module_->origin == kAsmJsOrigin) {
+ if (module_->is_asm_js()) {
// For modules arising from asm.js, honor the names section.
func_name =
WasmCompiledModule::ExtractUtf8StringFromModuleBytes(
@@ -2048,6 +2172,14 @@ class InstantiationHelper {
}
}
+#ifdef DEBUG
+ // Check that the count of table exports was accurate. The entries are
+ // decremented on each export, so all should be zero now.
+ for (auto e : num_table_exports) {
+ DCHECK_EQ(0, e.second);
+ }
+#endif
+
// TODO(titzer): we add the new dispatch table at the end to avoid
// redundant work and also because the new instance is not yet fully
// initialized.
@@ -2076,102 +2208,9 @@ bool wasm::IsWasmCodegenAllowed(Isolate* isolate, Handle<Context> context) {
isolate->allow_code_gen_callback()(v8::Utils::ToLocal(context));
}
-MaybeHandle<JSArrayBuffer> wasm::GetInstanceMemory(
- Isolate* isolate, Handle<WasmInstanceObject> object) {
- auto instance = Handle<WasmInstanceObject>::cast(object);
- if (instance->has_memory_buffer()) {
- return Handle<JSArrayBuffer>(instance->memory_buffer(), isolate);
- }
- return MaybeHandle<JSArrayBuffer>();
-}
-
-void SetInstanceMemory(Handle<WasmInstanceObject> instance,
- JSArrayBuffer* buffer) {
- DisallowHeapAllocation no_gc;
- instance->set_memory_buffer(buffer);
- instance->compiled_module()->set_ptr_to_memory(buffer);
-}
-
-int32_t wasm::GetInstanceMemorySize(Isolate* isolate,
- Handle<WasmInstanceObject> instance) {
- DCHECK(IsWasmInstance(*instance));
- MaybeHandle<JSArrayBuffer> maybe_mem_buffer =
- GetInstanceMemory(isolate, instance);
- Handle<JSArrayBuffer> buffer;
- if (!maybe_mem_buffer.ToHandle(&buffer)) {
- return 0;
- } else {
- return buffer->byte_length()->Number() / WasmModule::kPageSize;
- }
-}
-
-uint32_t GetMaxInstanceMemoryPages(Isolate* isolate,
- Handle<WasmInstanceObject> instance) {
- if (instance->has_memory_object()) {
- Handle<WasmMemoryObject> memory_object(instance->memory_object(), isolate);
- if (memory_object->has_maximum_pages()) {
- uint32_t maximum = static_cast<uint32_t>(memory_object->maximum_pages());
- if (maximum < FLAG_wasm_max_mem_pages) return maximum;
- }
- }
- uint32_t compiled_max_pages = instance->compiled_module()->max_mem_pages();
- isolate->counters()->wasm_max_mem_pages_count()->AddSample(
- compiled_max_pages);
- if (compiled_max_pages != 0) return compiled_max_pages;
- return FLAG_wasm_max_mem_pages;
-}
-
-Handle<JSArrayBuffer> GrowMemoryBuffer(Isolate* isolate,
- MaybeHandle<JSArrayBuffer> buffer,
- uint32_t pages, uint32_t max_pages) {
- Handle<JSArrayBuffer> old_buffer;
- Address old_mem_start = nullptr;
- uint32_t old_size = 0;
- if (buffer.ToHandle(&old_buffer) && old_buffer->backing_store() != nullptr &&
- old_buffer->byte_length()->IsNumber()) {
- old_mem_start = static_cast<Address>(old_buffer->backing_store());
- DCHECK_NOT_NULL(old_mem_start);
- old_size = old_buffer->byte_length()->Number();
- }
- DCHECK(old_size + pages * WasmModule::kPageSize <=
- std::numeric_limits<uint32_t>::max());
- uint32_t new_size = old_size + pages * WasmModule::kPageSize;
- if (new_size <= old_size || max_pages * WasmModule::kPageSize < new_size ||
- FLAG_wasm_max_mem_pages * WasmModule::kPageSize < new_size) {
- return Handle<JSArrayBuffer>::null();
- }
-
- // TODO(gdeepti): Change the protection here instead of allocating a new
- // buffer before guard regions are turned on, see issue #5886.
- const bool enable_guard_regions =
- !old_buffer.is_null() && old_buffer->has_guard_region();
- Handle<JSArrayBuffer> new_buffer =
- NewArrayBuffer(isolate, new_size, enable_guard_regions);
- if (new_buffer.is_null()) return new_buffer;
- Address new_mem_start = static_cast<Address>(new_buffer->backing_store());
- if (old_size != 0) {
- memcpy(new_mem_start, old_mem_start, old_size);
- }
- return new_buffer;
-}
-
-void UncheckedUpdateInstanceMemory(Isolate* isolate,
- Handle<WasmInstanceObject> instance,
- Address old_mem_start, uint32_t old_size) {
- DCHECK(instance->has_memory_buffer());
- Handle<JSArrayBuffer> mem_buffer(instance->memory_buffer());
- uint32_t new_size = mem_buffer->byte_length()->Number();
- Address new_mem_start = static_cast<Address>(mem_buffer->backing_store());
- DCHECK_NOT_NULL(new_mem_start);
- Zone specialization_zone(isolate->allocator(), ZONE_NAME);
- CodeSpecialization code_specialization(isolate, &specialization_zone);
- code_specialization.RelocateMemoryReferences(old_mem_start, old_size,
- new_mem_start, new_size);
- code_specialization.ApplyToWholeInstance(*instance);
-}
-
void wasm::DetachWebAssemblyMemoryBuffer(Isolate* isolate,
- Handle<JSArrayBuffer> buffer) {
+ Handle<JSArrayBuffer> buffer,
+ bool free_memory) {
int64_t byte_length =
buffer->byte_length()->IsNumber()
? static_cast<uint32_t>(buffer->byte_length()->Number())
@@ -2187,6 +2226,9 @@ void wasm::DetachWebAssemblyMemoryBuffer(Isolate* isolate,
}
buffer->set_is_neuterable(true);
buffer->Neuter();
+ // Neuter but do not free, as when pages == 0, the backing store is being used
+ // by the new buffer.
+ if (!free_memory) return;
if (has_guard_regions) {
base::OS::Free(backing_store, RoundUp(i::wasm::kWasmMaxHeapOffset,
base::OS::CommitPageSize()));
@@ -2197,114 +2239,6 @@ void wasm::DetachWebAssemblyMemoryBuffer(Isolate* isolate,
}
}
-int32_t wasm::GrowWebAssemblyMemory(Isolate* isolate,
- Handle<WasmMemoryObject> receiver,
- uint32_t pages) {
- DCHECK(WasmJs::IsWasmMemoryObject(isolate, receiver));
- Handle<WasmMemoryObject> memory_object =
- handle(WasmMemoryObject::cast(*receiver));
- MaybeHandle<JSArrayBuffer> memory_buffer = handle(memory_object->buffer());
- Handle<JSArrayBuffer> old_buffer;
- uint32_t old_size = 0;
- Address old_mem_start = nullptr;
- // Force byte_length to 0, if byte_length fails IsNumber() check.
- if (memory_buffer.ToHandle(&old_buffer) &&
- old_buffer->backing_store() != nullptr &&
- old_buffer->byte_length()->IsNumber()) {
- old_size = old_buffer->byte_length()->Number();
- old_mem_start = static_cast<Address>(old_buffer->backing_store());
- }
- Handle<JSArrayBuffer> new_buffer;
- // Return current size if grow by 0
- if (pages == 0) {
- if (!old_buffer.is_null() && old_buffer->backing_store() != nullptr) {
- new_buffer = SetupArrayBuffer(isolate, old_buffer->backing_store(),
- old_size, old_buffer->is_external(),
- old_buffer->has_guard_region());
- memory_object->set_buffer(*new_buffer);
- old_buffer->set_is_neuterable(true);
- if (!old_buffer->has_guard_region()) {
- old_buffer->set_is_external(true);
- isolate->heap()->UnregisterArrayBuffer(*old_buffer);
- }
- // Neuter but don't free the memory because it is now being used by
- // new_buffer.
- old_buffer->Neuter();
- }
- DCHECK(old_size % WasmModule::kPageSize == 0);
- return (old_size / WasmModule::kPageSize);
- }
- if (!memory_object->has_instances_link()) {
- // Memory object does not have an instance associated with it, just grow
- uint32_t max_pages;
- if (memory_object->has_maximum_pages()) {
- max_pages = static_cast<uint32_t>(memory_object->maximum_pages());
- if (FLAG_wasm_max_mem_pages < max_pages) return -1;
- } else {
- max_pages = FLAG_wasm_max_mem_pages;
- }
- new_buffer = GrowMemoryBuffer(isolate, memory_buffer, pages, max_pages);
- if (new_buffer.is_null()) return -1;
- } else {
- Handle<WasmInstanceWrapper> instance_wrapper(
- memory_object->instances_link());
- DCHECK(WasmInstanceWrapper::IsWasmInstanceWrapper(*instance_wrapper));
- DCHECK(instance_wrapper->has_instance());
- Handle<WasmInstanceObject> instance = instance_wrapper->instance_object();
- DCHECK(IsWasmInstance(*instance));
- uint32_t max_pages = GetMaxInstanceMemoryPages(isolate, instance);
-
- // Grow memory object buffer and update instances associated with it.
- new_buffer = GrowMemoryBuffer(isolate, memory_buffer, pages, max_pages);
- if (new_buffer.is_null()) return -1;
- DCHECK(!instance_wrapper->has_previous());
- SetInstanceMemory(instance, *new_buffer);
- UncheckedUpdateInstanceMemory(isolate, instance, old_mem_start, old_size);
- while (instance_wrapper->has_next()) {
- instance_wrapper = instance_wrapper->next_wrapper();
- DCHECK(WasmInstanceWrapper::IsWasmInstanceWrapper(*instance_wrapper));
- Handle<WasmInstanceObject> instance = instance_wrapper->instance_object();
- DCHECK(IsWasmInstance(*instance));
- SetInstanceMemory(instance, *new_buffer);
- UncheckedUpdateInstanceMemory(isolate, instance, old_mem_start, old_size);
- }
- }
- memory_object->set_buffer(*new_buffer);
- DCHECK(old_size % WasmModule::kPageSize == 0);
- return (old_size / WasmModule::kPageSize);
-}
-
-int32_t wasm::GrowMemory(Isolate* isolate, Handle<WasmInstanceObject> instance,
- uint32_t pages) {
- if (!IsWasmInstance(*instance)) return -1;
- if (pages == 0) return GetInstanceMemorySize(isolate, instance);
- Handle<WasmInstanceObject> instance_obj(WasmInstanceObject::cast(*instance));
- if (!instance_obj->has_memory_object()) {
- // No other instances to grow, grow just the one.
- MaybeHandle<JSArrayBuffer> instance_buffer =
- GetInstanceMemory(isolate, instance);
- Handle<JSArrayBuffer> old_buffer;
- uint32_t old_size = 0;
- Address old_mem_start = nullptr;
- if (instance_buffer.ToHandle(&old_buffer) &&
- old_buffer->backing_store() != nullptr) {
- old_size = old_buffer->byte_length()->Number();
- old_mem_start = static_cast<Address>(old_buffer->backing_store());
- }
- uint32_t max_pages = GetMaxInstanceMemoryPages(isolate, instance_obj);
- Handle<JSArrayBuffer> buffer =
- GrowMemoryBuffer(isolate, instance_buffer, pages, max_pages);
- if (buffer.is_null()) return -1;
- SetInstanceMemory(instance, *buffer);
- UncheckedUpdateInstanceMemory(isolate, instance, old_mem_start, old_size);
- DCHECK(old_size % WasmModule::kPageSize == 0);
- return (old_size / WasmModule::kPageSize);
- } else {
- return GrowWebAssemblyMemory(isolate, handle(instance_obj->memory_object()),
- pages);
- }
-}
-
void wasm::GrowDispatchTables(Isolate* isolate,
Handle<FixedArray> dispatch_tables,
uint32_t old_size, uint32_t count) {
@@ -2640,34 +2574,27 @@ MaybeHandle<WasmInstanceObject> wasm::SyncInstantiate(
return helper.Build();
}
-void RejectPromise(Isolate* isolate, ErrorThrower* thrower,
- Handle<JSPromise> promise) {
+namespace {
+
+void RejectPromise(Isolate* isolate, Handle<Context> context,
+ ErrorThrower* thrower, Handle<JSPromise> promise) {
v8::Local<v8::Promise::Resolver> resolver =
v8::Utils::PromiseToLocal(promise).As<v8::Promise::Resolver>();
- Handle<Context> context(isolate->context(), isolate);
- resolver->Reject(v8::Utils::ToLocal(context),
+ auto maybe = resolver->Reject(v8::Utils::ToLocal(context),
v8::Utils::ToLocal(thrower->Reify()));
+ CHECK_IMPLIES(!maybe.FromMaybe(false), isolate->has_scheduled_exception());
}
-void ResolvePromise(Isolate* isolate, Handle<JSPromise> promise,
- Handle<Object> result) {
+void ResolvePromise(Isolate* isolate, Handle<Context> context,
+ Handle<JSPromise> promise, Handle<Object> result) {
v8::Local<v8::Promise::Resolver> resolver =
v8::Utils::PromiseToLocal(promise).As<v8::Promise::Resolver>();
- Handle<Context> context(isolate->context(), isolate);
- resolver->Resolve(v8::Utils::ToLocal(context), v8::Utils::ToLocal(result));
+ auto maybe = resolver->Resolve(v8::Utils::ToLocal(context),
+ v8::Utils::ToLocal(result));
+ CHECK_IMPLIES(!maybe.FromMaybe(false), isolate->has_scheduled_exception());
}
-void wasm::AsyncCompile(Isolate* isolate, Handle<JSPromise> promise,
- const ModuleWireBytes& bytes) {
- ErrorThrower thrower(isolate, nullptr);
- MaybeHandle<WasmModuleObject> module_object =
- SyncCompile(isolate, &thrower, bytes);
- if (thrower.error()) {
- RejectPromise(isolate, &thrower, promise);
- return;
- }
- ResolvePromise(isolate, promise, module_object.ToHandleChecked());
-}
+} // namespace
void wasm::AsyncInstantiate(Isolate* isolate, Handle<JSPromise> promise,
Handle<WasmModuleObject> module_object,
@@ -2676,10 +2603,11 @@ void wasm::AsyncInstantiate(Isolate* isolate, Handle<JSPromise> promise,
MaybeHandle<WasmInstanceObject> instance_object = SyncInstantiate(
isolate, &thrower, module_object, imports, Handle<JSArrayBuffer>::null());
if (thrower.error()) {
- RejectPromise(isolate, &thrower, promise);
+ RejectPromise(isolate, handle(isolate->context()), &thrower, promise);
return;
}
- ResolvePromise(isolate, promise, instance_object.ToHandleChecked());
+ ResolvePromise(isolate, handle(isolate->context()), promise,
+ instance_object.ToHandleChecked());
}
void wasm::AsyncCompileAndInstantiate(Isolate* isolate,
@@ -2692,7 +2620,7 @@ void wasm::AsyncCompileAndInstantiate(Isolate* isolate,
MaybeHandle<WasmModuleObject> module_object =
SyncCompile(isolate, &thrower, bytes);
if (thrower.error()) {
- RejectPromise(isolate, &thrower, promise);
+ RejectPromise(isolate, handle(isolate->context()), &thrower, promise);
return;
}
Handle<WasmModuleObject> module = module_object.ToHandleChecked();
@@ -2701,7 +2629,7 @@ void wasm::AsyncCompileAndInstantiate(Isolate* isolate,
MaybeHandle<WasmInstanceObject> instance_object = SyncInstantiate(
isolate, &thrower, module, imports, Handle<JSArrayBuffer>::null());
if (thrower.error()) {
- RejectPromise(isolate, &thrower, promise);
+ RejectPromise(isolate, handle(isolate->context()), &thrower, promise);
return;
}
@@ -2717,5 +2645,692 @@ void wasm::AsyncCompileAndInstantiate(Isolate* isolate,
JSObject::AddProperty(ret, instance_property_name,
instance_object.ToHandleChecked(), NONE);
- ResolvePromise(isolate, promise, ret);
+ ResolvePromise(isolate, handle(isolate->context()), promise, ret);
+}
+
+// Encapsulates all the state and steps of an asynchronous compilation.
+// An asynchronous compile job consists of a number of tasks that are executed
+// as foreground and background tasks. Any phase that touches the V8 heap or
+// allocates on the V8 heap (e.g. creating the module object) must be a
+// foreground task. All other tasks (e.g. decoding and validating, the majority
+// of the work of compilation) can be background tasks.
+// TODO(wasm): factor out common parts of this with the synchronous pipeline.
+//
+// Note: In predictable mode, DoSync and DoAsync execute the referenced function
+// immediately before returning. Thus we handle the predictable mode specially,
+// e.g. when we synchronizing tasks or when we delete the AyncCompileJob.
+class AsyncCompileJob {
+ public:
+ explicit AsyncCompileJob(Isolate* isolate, std::unique_ptr<byte[]> bytes_copy,
+ int length, Handle<Context> context,
+ Handle<JSPromise> promise)
+ : isolate_(isolate),
+ bytes_copy_(std::move(bytes_copy)),
+ wire_bytes_(bytes_copy_.get(), bytes_copy_.get() + length) {
+ // The handles for the context and promise must be deferred.
+ DeferredHandleScope deferred(isolate);
+ context_ = Handle<Context>(*context);
+ module_promise_ = Handle<JSPromise>(*promise);
+ deferred_handles_.push_back(deferred.Detach());
+ }
+
+ bool Start() {
+ return DoAsync(&AsyncCompileJob::DecodeModule); // --
+ }
+
+ ~AsyncCompileJob() {
+ for (auto d : deferred_handles_) delete d;
+ }
+
+ private:
+ Isolate* isolate_;
+ std::unique_ptr<byte[]> bytes_copy_;
+ ModuleWireBytes wire_bytes_;
+ Handle<Context> context_;
+ Handle<JSPromise> module_promise_;
+ WasmModule* module_ = nullptr;
+ ModuleResult result_;
+ std::unique_ptr<CompilationHelper> helper_ = nullptr;
+ std::unique_ptr<ModuleBytesEnv> module_bytes_env_ = nullptr;
+
+ volatile bool failed_ = false;
+ std::vector<DeferredHandles*> deferred_handles_;
+ Handle<WasmModuleWrapper> module_wrapper_;
+ Handle<WasmModuleObject> module_object_;
+ Handle<FixedArray> function_tables_;
+ Handle<FixedArray> signature_tables_;
+ Handle<WasmCompiledModule> compiled_module_;
+ Handle<FixedArray> code_table_;
+ std::unique_ptr<WasmInstance> temp_instance_ = nullptr;
+ std::unique_ptr<uint32_t[]> task_ids_ = nullptr;
+ size_t outstanding_units_ = 0;
+ size_t num_background_tasks_ = 0;
+
+ void ReopenHandlesInDeferredScope() {
+ DeferredHandleScope deferred(isolate_);
+ module_wrapper_ = handle(*module_wrapper_, isolate_);
+ function_tables_ = handle(*function_tables_, isolate_);
+ signature_tables_ = handle(*signature_tables_, isolate_);
+ code_table_ = handle(*code_table_, isolate_);
+ temp_instance_->ReopenHandles(isolate_);
+ helper_->InitializeHandles();
+ deferred_handles_.push_back(deferred.Detach());
+ }
+
+ //==========================================================================
+ // Step 1: (async) Decode the module.
+ //==========================================================================
+ bool DecodeModule() {
+ {
+ DisallowHandleAllocation no_handle;
+ DisallowHeapAllocation no_allocation;
+ // Decode the module bytes.
+ TRACE_COMPILE("(1) Decoding module...\n");
+ result_ = DecodeWasmModule(isolate_, wire_bytes_.start(),
+ wire_bytes_.end(), true, kWasmOrigin);
+ }
+ if (result_.failed()) {
+ // Decoding failure; reject the promise and clean up.
+ if (result_.val) delete result_.val;
+ return DoSync(&AsyncCompileJob::DecodeFail);
+ } else {
+ // Decode passed.
+ module_ = const_cast<WasmModule*>(result_.val);
+ return DoSync(&AsyncCompileJob::PrepareAndStartCompile);
+ }
+ }
+
+ //==========================================================================
+ // Step 1b: (sync) Fail decoding the module.
+ //==========================================================================
+ bool DecodeFail() {
+ HandleScope scope(isolate_);
+ ErrorThrower thrower(isolate_, nullptr);
+ thrower.CompileFailed("Wasm decoding failed", result_);
+ RejectPromise(isolate_, context_, &thrower, module_promise_);
+ return false;
+ }
+
+ //==========================================================================
+ // Step 2 (sync): Create heap-allocated data and start compile.
+ //==========================================================================
+ bool PrepareAndStartCompile() {
+ TRACE_COMPILE("(2) Prepare and start compile...\n");
+ HandleScope scope(isolate_);
+
+ Factory* factory = isolate_->factory();
+ // The {module_wrapper} will take ownership of the {WasmModule} object,
+ // and it will be destroyed when the GC reclaims the wrapper object.
+ module_wrapper_ = WasmModuleWrapper::New(isolate_, module_);
+ temp_instance_ = std::unique_ptr<WasmInstance>(new WasmInstance(module_));
+ temp_instance_->context = context_;
+ temp_instance_->mem_size = WasmModule::kPageSize * module_->min_mem_pages;
+ temp_instance_->mem_start = nullptr;
+ temp_instance_->globals_start = nullptr;
+
+ // Initialize the indirect tables with placeholders.
+ int function_table_count =
+ static_cast<int>(module_->function_tables.size());
+ function_tables_ = factory->NewFixedArray(function_table_count, TENURED);
+ signature_tables_ = factory->NewFixedArray(function_table_count, TENURED);
+ for (int i = 0; i < function_table_count; ++i) {
+ temp_instance_->function_tables[i] = factory->NewFixedArray(1, TENURED);
+ temp_instance_->signature_tables[i] = factory->NewFixedArray(1, TENURED);
+ function_tables_->set(i, *temp_instance_->function_tables[i]);
+ signature_tables_->set(i, *temp_instance_->signature_tables[i]);
+ }
+
+ // The {code_table} array contains import wrappers and functions (which
+ // are both included in {functions.size()}, and export wrappers.
+ // The results of compilation will be written into it.
+ int code_table_size = static_cast<int>(module_->functions.size() +
+ module_->num_exported_functions);
+ code_table_ = factory->NewFixedArray(code_table_size, TENURED);
+
+ // Initialize {code_table_} with the illegal builtin. All call sites
+ // will be patched at instantiation.
+ Handle<Code> illegal_builtin = isolate_->builtins()->Illegal();
+ // TODO(wasm): Fix this for lazy compilation.
+ for (uint32_t i = 0; i < module_->functions.size(); ++i) {
+ code_table_->set(static_cast<int>(i), *illegal_builtin);
+ temp_instance_->function_code[i] = illegal_builtin;
+ }
+
+ isolate_->counters()->wasm_functions_per_wasm_module()->AddSample(
+ static_cast<int>(module_->functions.size()));
+
+ helper_.reset(new CompilationHelper(isolate_, module_));
+
+ DCHECK_LE(module_->num_imported_functions, module_->functions.size());
+ size_t num_functions =
+ module_->functions.size() - module_->num_imported_functions;
+ if (num_functions == 0) {
+ ReopenHandlesInDeferredScope();
+ // Degenerate case of an empty module.
+ return DoSync(&AsyncCompileJob::FinishCompile);
+ }
+
+ // Start asynchronous compilation tasks.
+ num_background_tasks_ =
+ Max(static_cast<size_t>(1),
+ Min(num_functions,
+ Min(static_cast<size_t>(FLAG_wasm_num_compilation_tasks),
+ V8::GetCurrentPlatform()
+ ->NumberOfAvailableBackgroundThreads())));
+ module_bytes_env_ = std::unique_ptr<ModuleBytesEnv>(
+ new ModuleBytesEnv(module_, temp_instance_.get(), wire_bytes_));
+ outstanding_units_ = helper_->InitializeParallelCompilation(
+ module_->functions, *module_bytes_env_);
+
+ // Reopen all handles which should survive in the DeferredHandleScope.
+ ReopenHandlesInDeferredScope();
+ task_ids_ =
+ std::unique_ptr<uint32_t[]>(new uint32_t[num_background_tasks_]);
+ for (size_t i = 0; i < num_background_tasks_; ++i) {
+ DoAsync(&AsyncCompileJob::ExecuteCompilationUnits, &(task_ids_.get())[i]);
+ }
+ return true;
+ }
+
+ //==========================================================================
+ // Step 3 (async x K tasks): Execute compilation units.
+ //==========================================================================
+ bool ExecuteCompilationUnits() {
+ TRACE_COMPILE("(3) Compiling...\n");
+ while (!failed_) {
+ {
+ DisallowHandleAllocation no_handle;
+ DisallowHeapAllocation no_allocation;
+ if (!helper_->FetchAndExecuteCompilationUnit()) break;
+ }
+ // TODO(ahaas): Create one FinishCompilationUnit job for all compilation
+ // units.
+ DoSync(&AsyncCompileJob::FinishCompilationUnit);
+ // TODO(ahaas): Limit the number of outstanding compilation units to be
+ // finished to reduce memory overhead.
+ }
+ // Special handling for predictable mode, see above.
+ if (!FLAG_verify_predictable)
+ helper_->module_->pending_tasks.get()->Signal();
+ return true;
+ }
+
+ //==========================================================================
+ // Step 4 (sync x each function): Finish a single compilation unit.
+ //==========================================================================
+ bool FinishCompilationUnit() {
+ TRACE_COMPILE("(4a) Finishing compilation unit...\n");
+ HandleScope scope(isolate_);
+ if (failed_) return true; // already failed
+
+ int func_index = -1;
+ ErrorThrower thrower(isolate_, nullptr);
+ Handle<Code> result = helper_->FinishCompilationUnit(&thrower, &func_index);
+ if (thrower.error()) {
+ RejectPromise(isolate_, context_, &thrower, module_promise_);
+ failed_ = true;
+ } else {
+ DCHECK(func_index >= 0);
+ code_table_->set(func_index, *(result));
+ }
+ if (failed_ || --outstanding_units_ == 0) {
+ // All compilation units are done. We still need to wait for the
+ // background tasks to shut down and only then is it safe to finish the
+ // compile and delete this job. We can wait for that to happen also
+ // in a background task.
+ DoAsync(&AsyncCompileJob::WaitForBackgroundTasks);
+ }
+ return true;
+ }
+
+ //==========================================================================
+ // Step 4b (async): Wait for all background tasks to finish.
+ //==========================================================================
+ bool WaitForBackgroundTasks() {
+ TRACE_COMPILE("(4b) Waiting for background tasks...\n");
+ // Special handling for predictable mode, see above.
+ if (!FLAG_verify_predictable) {
+ for (size_t i = 0; i < num_background_tasks_; ++i) {
+ // If the task has not started yet, then we abort it. Otherwise we wait
+ // for it to finish.
+
+ if (isolate_->cancelable_task_manager()->TryAbort(task_ids_.get()[i]) !=
+ CancelableTaskManager::kTaskAborted) {
+ module_->pending_tasks.get()->Wait();
+ }
+ }
+ }
+ if (failed_) {
+ // If {failed_}, we've already rejected the promise and there
+ // is nothing more to do.
+ return false;
+ } else {
+ // Otherwise, post a synchronous task to finish the compile.
+ DoSync(&AsyncCompileJob::FinishCompile);
+ return true;
+ }
+ }
+
+ //==========================================================================
+ // Step 5 (sync): Finish heap-allocated data structures.
+ //==========================================================================
+ bool FinishCompile() {
+ TRACE_COMPILE("(5) Finish compile...\n");
+ HandleScope scope(isolate_);
+ SaveContext saved_context(isolate_);
+ isolate_->set_context(*context_);
+ // At this point, compilation has completed. Update the code table.
+ for (size_t i = FLAG_skip_compiling_wasm_funcs;
+ i < temp_instance_->function_code.size(); ++i) {
+ Code* code = Code::cast(code_table_->get(static_cast<int>(i)));
+ RecordStats(isolate_, code);
+ }
+
+ // Create heap objects for script and module bytes to be stored in the
+ // shared module data. Asm.js is not compiled asynchronously.
+ Handle<Script> script = CreateWasmScript(isolate_, wire_bytes_);
+ Handle<ByteArray> asm_js_offset_table;
+ // TODO(wasm): Improve efficiency of storing module wire bytes.
+ // 1. Only store relevant sections, not function bodies
+ // 2. Don't make a second copy of the bytes here; reuse the copy made
+ // for asynchronous compilation and store it as an external one
+ // byte string for serialization/deserialization.
+ Handle<String> module_bytes =
+ isolate_->factory()
+ ->NewStringFromOneByte({wire_bytes_.start(), wire_bytes_.length()},
+ TENURED)
+ .ToHandleChecked();
+ DCHECK(module_bytes->IsSeqOneByteString());
+
+ // Create the shared module data.
+ // TODO(clemensh): For the same module (same bytes / same hash), we should
+ // only have one WasmSharedModuleData. Otherwise, we might only set
+ // breakpoints on a (potentially empty) subset of the instances.
+
+ Handle<WasmSharedModuleData> shared = WasmSharedModuleData::New(
+ isolate_, module_wrapper_, Handle<SeqOneByteString>::cast(module_bytes),
+ script, asm_js_offset_table);
+
+ // Create the compiled module object and populate with compiled functions
+ // and information needed at instantiation time. This object needs to be
+ // serializable. Instantiation may occur off a deserialized version of this
+ // object.
+ compiled_module_ = WasmCompiledModule::New(
+ isolate_, shared, code_table_, function_tables_, signature_tables_);
+
+ // Finish the WASM script now and make it public to the debugger.
+ script->set_wasm_compiled_module(*compiled_module_);
+ isolate_->debug()->OnAfterCompile(script);
+
+ DeferredHandleScope deferred(isolate_);
+ compiled_module_ = handle(*compiled_module_, isolate_);
+ deferred_handles_.push_back(deferred.Detach());
+ // TODO(wasm): compiling wrappers should be made async as well.
+ return DoSync(&AsyncCompileJob::CompileWrappers);
+ }
+
+ //==========================================================================
+ // Step 6 (sync): Compile JS->WASM wrappers.
+ //==========================================================================
+ bool CompileWrappers() {
+ TRACE_COMPILE("(6) Compile wrappers...\n");
+ // Compile JS->WASM wrappers for exported functions.
+ HandleScope scope(isolate_);
+ JSToWasmWrapperCache js_to_wasm_cache;
+ int func_index = 0;
+ for (auto exp : module_->export_table) {
+ if (exp.kind != kExternalFunction) continue;
+ Handle<Code> wasm_code(Code::cast(code_table_->get(exp.index)), isolate_);
+ Handle<Code> wrapper_code =
+ js_to_wasm_cache.CloneOrCompileJSToWasmWrapper(isolate_, module_,
+ wasm_code, exp.index);
+ int export_index =
+ static_cast<int>(module_->functions.size() + func_index);
+ code_table_->set(export_index, *wrapper_code);
+ RecordStats(isolate_, *wrapper_code);
+ func_index++;
+ }
+
+ return DoSync(&AsyncCompileJob::FinishModule);
+ }
+
+ //==========================================================================
+ // Step 7 (sync): Finish the module and resolve the promise.
+ //==========================================================================
+ bool FinishModule() {
+ TRACE_COMPILE("(7) Finish module...\n");
+ HandleScope scope(isolate_);
+ SaveContext saved_context(isolate_);
+ isolate_->set_context(*context_);
+ Handle<WasmModuleObject> result =
+ WasmModuleObject::New(isolate_, compiled_module_);
+ ResolvePromise(isolate_, context_, module_promise_, result);
+ return false; // no more work to do.
+ }
+
+ // Run the given member method as an asynchronous task.
+ bool DoAsync(bool (AsyncCompileJob::*func)(), uint32_t* task_id = nullptr) {
+ auto task = new Task(this, func);
+ if (task_id) *task_id = task->id();
+ V8::GetCurrentPlatform()->CallOnBackgroundThread(
+ task, v8::Platform::kShortRunningTask);
+ return true; // more work to do.
+ }
+
+ // Run the given member method as a synchronous task.
+ bool DoSync(bool (AsyncCompileJob::*func)()) {
+ V8::GetCurrentPlatform()->CallOnForegroundThread(
+ reinterpret_cast<v8::Isolate*>(isolate_), new Task(this, func));
+ return true; // more work to do.
+ }
+
+ // A helper closure to run a particular member method as a task.
+ class Task : public CancelableTask {
+ public:
+ AsyncCompileJob* job_;
+ bool (AsyncCompileJob::*func_)();
+ explicit Task(AsyncCompileJob* job, bool (AsyncCompileJob::*func)())
+ : CancelableTask(job->isolate_), job_(job), func_(func) {}
+
+ void RunInternal() override {
+ bool more = (job_->*func_)(); // run the task.
+ if (!more) {
+ // If no more work, then this job is done. Predictable mode is handled
+ // specially though, see above.
+ if (!FLAG_verify_predictable) delete job_;
+ }
+ }
+ };
+};
+
+void wasm::AsyncCompile(Isolate* isolate, Handle<JSPromise> promise,
+ const ModuleWireBytes& bytes) {
+ if (!FLAG_wasm_async_compilation) {
+ ErrorThrower thrower(isolate, "WasmCompile");
+ // Compile the module.
+ MaybeHandle<WasmModuleObject> module_object =
+ SyncCompile(isolate, &thrower, bytes);
+ if (thrower.error()) {
+ RejectPromise(isolate, handle(isolate->context()), &thrower, promise);
+ return;
+ }
+ Handle<WasmModuleObject> module = module_object.ToHandleChecked();
+ ResolvePromise(isolate, handle(isolate->context()), promise, module);
+ return;
+ }
+
+ // Make a copy of the wire bytes in case the user program changes them
+ // during asynchronous compilation.
+ std::unique_ptr<byte[]> copy(new byte[bytes.length()]);
+ memcpy(copy.get(), bytes.start(), bytes.length());
+ auto job = new AsyncCompileJob(isolate, std::move(copy), bytes.length(),
+ handle(isolate->context()), promise);
+ job->Start();
+ // Special handling for predictable mode, see above.
+ if (FLAG_verify_predictable) delete job;
+}
+
+Handle<Code> wasm::CompileLazy(Isolate* isolate) {
+ HistogramTimerScope lazy_time_scope(
+ isolate->counters()->wasm_lazy_compilation_time());
+
+ // Find the wasm frame which triggered the lazy compile, to get the wasm
+ // instance.
+ StackFrameIterator it(isolate);
+ // First frame: C entry stub.
+ DCHECK(!it.done());
+ DCHECK_EQ(StackFrame::EXIT, it.frame()->type());
+ it.Advance();
+ // Second frame: WasmCompileLazy builtin.
+ DCHECK(!it.done());
+ Handle<Code> lazy_compile_code(it.frame()->LookupCode(), isolate);
+ DCHECK_EQ(Builtins::kWasmCompileLazy, lazy_compile_code->builtin_index());
+ Handle<WasmInstanceObject> instance;
+ Handle<FixedArray> exp_deopt_data;
+ int func_index = -1;
+ if (lazy_compile_code->deoptimization_data()->length() > 0) {
+ // Then it's an indirect call or via JS->WASM wrapper.
+ DCHECK_LE(2, lazy_compile_code->deoptimization_data()->length());
+ exp_deopt_data = handle(lazy_compile_code->deoptimization_data(), isolate);
+ auto* weak_cell = WeakCell::cast(exp_deopt_data->get(0));
+ instance = handle(WasmInstanceObject::cast(weak_cell->value()), isolate);
+ func_index = Smi::cast(exp_deopt_data->get(1))->value();
+ }
+ it.Advance();
+ // Third frame: The calling wasm code or js-to-wasm wrapper.
+ DCHECK(!it.done());
+ DCHECK(it.frame()->is_js_to_wasm() || it.frame()->is_wasm_compiled());
+ Handle<Code> caller_code = handle(it.frame()->LookupCode(), isolate);
+ if (it.frame()->is_js_to_wasm()) {
+ DCHECK(!instance.is_null());
+ } else if (instance.is_null()) {
+ instance = handle(wasm::GetOwningWasmInstance(*caller_code), isolate);
+ } else {
+ DCHECK(*instance == wasm::GetOwningWasmInstance(*caller_code));
+ }
+ int offset =
+ static_cast<int>(it.frame()->pc() - caller_code->instruction_start());
+ // Only patch the caller code if this is *no* indirect call.
+ // exp_deopt_data will be null if the called function is not exported at all,
+ // and its length will be <= 2 if all entries in tables were already patched.
+ // Note that this check is conservative: If the first call to an exported
+ // function is direct, we will just patch the export tables, and only on the
+ // second call we will patch the caller.
+ bool patch_caller = caller_code->kind() == Code::JS_TO_WASM_FUNCTION ||
+ exp_deopt_data.is_null() || exp_deopt_data->length() <= 2;
+
+ MaybeHandle<Code> maybe_compiled_code = WasmCompiledModule::CompileLazy(
+ isolate, instance, caller_code, offset, func_index, patch_caller);
+ if (maybe_compiled_code.is_null()) {
+ DCHECK(isolate->has_pending_exception());
+ return isolate->builtins()->Illegal();
+ }
+ Handle<Code> compiled_code = maybe_compiled_code.ToHandleChecked();
+ if (!exp_deopt_data.is_null() && exp_deopt_data->length() > 2) {
+ // See EnsureExportedLazyDeoptData: exp_deopt_data[2...(len-1)] are pairs of
+ // <export_table, index> followed by undefined values.
+ // Use this information here to patch all export tables.
+ DCHECK_EQ(0, exp_deopt_data->length() % 2);
+ for (int idx = 2, end = exp_deopt_data->length(); idx < end; idx += 2) {
+ if (exp_deopt_data->get(idx)->IsUndefined(isolate)) break;
+ FixedArray* exp_table = FixedArray::cast(exp_deopt_data->get(idx));
+ int exp_index = Smi::cast(exp_deopt_data->get(idx + 1))->value();
+ DCHECK(exp_table->get(exp_index) == *lazy_compile_code);
+ exp_table->set(exp_index, *compiled_code);
+ }
+ // After processing, remove the list of exported entries, such that we don't
+ // do the patching redundantly.
+ Handle<FixedArray> new_deopt_data =
+ isolate->factory()->CopyFixedArrayUpTo(exp_deopt_data, 2, TENURED);
+ lazy_compile_code->set_deoptimization_data(*new_deopt_data);
+ }
+
+ return compiled_code;
+}
+
+bool LazyCompilationOrchestrator::CompileFunction(
+ Isolate* isolate, Handle<WasmInstanceObject> instance, int func_index) {
+ Handle<WasmCompiledModule> compiled_module(instance->compiled_module(),
+ isolate);
+ if (Code::cast(compiled_module->code_table()->get(func_index))->kind() ==
+ Code::WASM_FUNCTION) {
+ return true;
+ }
+ size_t num_function_tables =
+ compiled_module->module()->function_tables.size();
+ // Store a vector of handles to be embedded in the generated code.
+ // TODO(clemensh): For concurrent compilation, these will have to live in a
+ // DeferredHandleScope.
+ std::vector<Handle<FixedArray>> fun_tables(num_function_tables);
+ std::vector<Handle<FixedArray>> sig_tables(num_function_tables);
+ for (size_t i = 0; i < num_function_tables; ++i) {
+ Object* fun_table =
+ compiled_module->function_tables()->get(static_cast<int>(i));
+ fun_tables[i] = handle(FixedArray::cast(fun_table), isolate);
+ Object* sig_table =
+ compiled_module->signature_tables()->get(static_cast<int>(i));
+ sig_tables[i] = handle(FixedArray::cast(sig_table), isolate);
+ }
+ wasm::ModuleEnv module_env(compiled_module->module(), &fun_tables,
+ &sig_tables);
+ uint8_t* module_start = compiled_module->module_bytes()->GetChars();
+ const WasmFunction* func = &module_env.module->functions[func_index];
+ wasm::FunctionBody body{func->sig, module_start,
+ module_start + func->code_start_offset,
+ module_start + func->code_end_offset};
+ // TODO(wasm): Refactor this to only get the name if it is really needed for
+ // tracing / debugging.
+ std::string func_name;
+ {
+ wasm::WasmName name = Vector<const char>::cast(
+ compiled_module->GetRawFunctionName(func_index));
+ // Copy to std::string, because the underlying string object might move on
+ // the heap.
+ func_name.assign(name.start(), static_cast<size_t>(name.length()));
+ }
+ ErrorThrower thrower(isolate, "WasmLazyCompile");
+ compiler::WasmCompilationUnit unit(isolate, &module_env, body,
+ CStrVector(func_name.c_str()), func_index);
+ unit.InitializeHandles();
+ unit.ExecuteCompilation();
+ Handle<Code> code = unit.FinishCompilation(&thrower);
+
+ Handle<FixedArray> deopt_data = isolate->factory()->NewFixedArray(2, TENURED);
+ Handle<WeakCell> weak_instance = isolate->factory()->NewWeakCell(instance);
+ // TODO(wasm): Introduce constants for the indexes in wasm deopt data.
+ deopt_data->set(0, *weak_instance);
+ deopt_data->set(1, Smi::FromInt(func_index));
+ code->set_deoptimization_data(*deopt_data);
+
+ if (thrower.error()) {
+ if (!isolate->has_pending_exception()) isolate->Throw(*thrower.Reify());
+ return false;
+ }
+
+ DCHECK_EQ(Builtins::kWasmCompileLazy,
+ Code::cast(compiled_module->code_table()->get(func_index))
+ ->builtin_index());
+ compiled_module->code_table()->set(func_index, *code);
+
+ // Now specialize the generated code for this instance.
+ Zone specialization_zone(isolate->allocator(), ZONE_NAME);
+ CodeSpecialization code_specialization(isolate, &specialization_zone);
+ if (module_env.module->globals_size) {
+ Address globals_start =
+ reinterpret_cast<Address>(compiled_module->globals_start());
+ code_specialization.RelocateGlobals(nullptr, globals_start);
+ }
+ if (instance->has_memory_buffer()) {
+ Address mem_start =
+ reinterpret_cast<Address>(instance->memory_buffer()->backing_store());
+ int mem_size = instance->memory_buffer()->byte_length()->Number();
+ DCHECK_IMPLIES(mem_size == 0, mem_start == nullptr);
+ if (mem_size > 0) {
+ code_specialization.RelocateMemoryReferences(nullptr, 0, mem_start,
+ mem_size);
+ }
+ }
+ code_specialization.RelocateDirectCalls(instance);
+ code_specialization.ApplyToWasmCode(*code, SKIP_ICACHE_FLUSH);
+ Assembler::FlushICache(isolate, code->instruction_start(),
+ code->instruction_size());
+ RecordLazyCodeStats(isolate, *code);
+ return true;
+}
+
+MaybeHandle<Code> LazyCompilationOrchestrator::CompileLazy(
+ Isolate* isolate, Handle<WasmInstanceObject> instance, Handle<Code> caller,
+ int call_offset, int exported_func_index, bool patch_caller) {
+ struct NonCompiledFunction {
+ int offset;
+ int func_index;
+ };
+ std::vector<NonCompiledFunction> non_compiled_functions;
+ int func_to_return_idx = exported_func_index;
+ wasm::Decoder decoder(nullptr, nullptr);
+ bool is_js_to_wasm = caller->kind() == Code::JS_TO_WASM_FUNCTION;
+ Handle<WasmCompiledModule> compiled_module(instance->compiled_module(),
+ isolate);
+
+ if (is_js_to_wasm) {
+ non_compiled_functions.push_back({0, exported_func_index});
+ } else if (patch_caller) {
+ DisallowHeapAllocation no_gc;
+ SeqOneByteString* module_bytes = compiled_module->module_bytes();
+ SourcePositionTableIterator source_pos_iterator(
+ caller->source_position_table());
+ DCHECK_EQ(2, caller->deoptimization_data()->length());
+ int caller_func_index =
+ Smi::cast(caller->deoptimization_data()->get(1))->value();
+ const byte* func_bytes =
+ module_bytes->GetChars() + compiled_module->module()
+ ->functions[caller_func_index]
+ .code_start_offset;
+ for (RelocIterator it(*caller, RelocInfo::kCodeTargetMask); !it.done();
+ it.next()) {
+ Code* callee =
+ Code::GetCodeFromTargetAddress(it.rinfo()->target_address());
+ if (callee->builtin_index() != Builtins::kWasmCompileLazy) continue;
+ // TODO(clemensh): Introduce safe_cast<T, bool> which (D)CHECKS
+ // (depending on the bool) against limits of T and then static_casts.
+ size_t offset_l = it.rinfo()->pc() - caller->instruction_start();
+ DCHECK_GE(kMaxInt, offset_l);
+ int offset = static_cast<int>(offset_l);
+ int byte_pos =
+ AdvanceSourcePositionTableIterator(source_pos_iterator, offset);
+ int called_func_index =
+ ExtractDirectCallIndex(decoder, func_bytes + byte_pos);
+ non_compiled_functions.push_back({offset, called_func_index});
+ // Call offset one instruction after the call. Remember the last called
+ // function before that offset.
+ if (offset < call_offset) func_to_return_idx = called_func_index;
+ }
+ }
+
+ // TODO(clemensh): compile all functions in non_compiled_functions in
+ // background, wait for func_to_return_idx.
+ if (!CompileFunction(isolate, instance, func_to_return_idx)) {
+ return {};
+ }
+
+ if (is_js_to_wasm || patch_caller) {
+ DisallowHeapAllocation no_gc;
+ // Now patch the code object with all functions which are now compiled.
+ int idx = 0;
+ for (RelocIterator it(*caller, RelocInfo::kCodeTargetMask); !it.done();
+ it.next()) {
+ Code* callee =
+ Code::GetCodeFromTargetAddress(it.rinfo()->target_address());
+ if (callee->builtin_index() != Builtins::kWasmCompileLazy) continue;
+ DCHECK_GT(non_compiled_functions.size(), idx);
+ int called_func_index = non_compiled_functions[idx].func_index;
+ // Check that the callee agrees with our assumed called_func_index.
+ DCHECK_IMPLIES(
+ callee->deoptimization_data()->length() > 0,
+ Smi::cast(callee->deoptimization_data()->get(1))->value() ==
+ called_func_index);
+ if (is_js_to_wasm) {
+ DCHECK_EQ(func_to_return_idx, called_func_index);
+ } else {
+ DCHECK_EQ(non_compiled_functions[idx].offset,
+ it.rinfo()->pc() - caller->instruction_start());
+ }
+ ++idx;
+ Handle<Code> callee_compiled(
+ Code::cast(compiled_module->code_table()->get(called_func_index)));
+ if (callee_compiled->builtin_index() == Builtins::kWasmCompileLazy) {
+ DCHECK_NE(func_to_return_idx, called_func_index);
+ continue;
+ }
+ DCHECK_EQ(Code::WASM_FUNCTION, callee_compiled->kind());
+ it.rinfo()->set_target_address(isolate,
+ callee_compiled->instruction_start());
+ }
+ DCHECK_EQ(non_compiled_functions.size(), idx);
+ }
+
+ Code* ret =
+ Code::cast(compiled_module->code_table()->get(func_to_return_idx));
+ DCHECK_EQ(Code::WASM_FUNCTION, ret->kind());
+ return handle(ret, isolate);
}
diff --git a/deps/v8/src/wasm/wasm-module.h b/deps/v8/src/wasm/wasm-module.h
index 1aaf9a4e96..98f498b79c 100644
--- a/deps/v8/src/wasm/wasm-module.h
+++ b/deps/v8/src/wasm/wasm-module.h
@@ -137,6 +137,14 @@ struct WasmExport {
};
enum ModuleOrigin : uint8_t { kWasmOrigin, kAsmJsOrigin };
+
+inline bool IsWasm(ModuleOrigin Origin) {
+ return Origin == ModuleOrigin::kWasmOrigin;
+}
+inline bool IsAsmJs(ModuleOrigin Origin) {
+ return Origin == ModuleOrigin::kAsmJsOrigin;
+}
+
struct ModuleWireBytes;
// Static representation of a module.
@@ -154,7 +162,6 @@ struct V8_EXPORT_PRIVATE WasmModule {
// the fact that we index on uint32_t, so we may technically not be
// able to represent some start_function_index -es.
int start_function_index = -1; // start function, if any
- ModuleOrigin origin = kWasmOrigin; // origin of the module
std::vector<WasmGlobal> globals; // globals in this module.
uint32_t globals_size = 0; // size of globals table.
@@ -182,6 +189,15 @@ struct V8_EXPORT_PRIVATE WasmModule {
~WasmModule() {
if (owned_zone) delete owned_zone;
}
+
+ ModuleOrigin get_origin() const { return origin_; }
+ void set_origin(ModuleOrigin new_value) { origin_ = new_value; }
+ bool is_wasm() const { return wasm::IsWasm(origin_); }
+ bool is_asm_js() const { return wasm::IsAsmJs(origin_); }
+
+ private:
+ // TODO(kschimpf) - Encapsulate more fields.
+ ModuleOrigin origin_ = kWasmOrigin; // origin of the module
};
typedef Managed<WasmModule> WasmModuleWrapper;
@@ -194,6 +210,7 @@ struct WasmInstance {
std::vector<Handle<FixedArray>> function_tables; // indirect function tables.
std::vector<Handle<FixedArray>>
signature_tables; // indirect signature tables.
+ // TODO(wasm): Remove this vector, since it is only used for testing.
std::vector<Handle<Code>> function_code; // code objects for each function.
// -- raw memory ------------------------------------------------------------
byte* mem_start = nullptr; // start of linear memory.
@@ -206,6 +223,22 @@ struct WasmInstance {
function_tables(m->function_tables.size()),
signature_tables(m->function_tables.size()),
function_code(m->functions.size()) {}
+
+ void ReopenHandles(Isolate* isolate) {
+ context = handle(*context, isolate);
+
+ for (auto& table : function_tables) {
+ table = handle(*table, isolate);
+ }
+
+ for (auto& table : signature_tables) {
+ table = handle(*table, isolate);
+ }
+
+ for (auto& code : function_code) {
+ code = handle(*code, isolate);
+ }
+ }
};
// Interface to the storage (wire bytes) of a wasm module.
@@ -271,11 +304,24 @@ struct V8_EXPORT_PRIVATE ModuleWireBytes {
// minimal information about the globals, functions, and function tables.
struct V8_EXPORT_PRIVATE ModuleEnv {
ModuleEnv(const WasmModule* module, WasmInstance* instance)
- : module(module), instance(instance) {}
+ : module(module),
+ instance(instance),
+ function_tables(instance ? &instance->function_tables : nullptr),
+ signature_tables(instance ? &instance->signature_tables : nullptr) {}
+ ModuleEnv(const WasmModule* module,
+ std::vector<Handle<FixedArray>>* function_tables,
+ std::vector<Handle<FixedArray>>* signature_tables)
+ : module(module),
+ instance(nullptr),
+ function_tables(function_tables),
+ signature_tables(signature_tables) {}
const WasmModule* module;
WasmInstance* instance;
+ std::vector<Handle<FixedArray>>* function_tables;
+ std::vector<Handle<FixedArray>>* signature_tables;
+
bool IsValidGlobal(uint32_t index) const {
return module && index < module->globals.size();
}
@@ -305,8 +351,10 @@ struct V8_EXPORT_PRIVATE ModuleEnv {
return &module->function_tables[index];
}
- bool asm_js() { return module->origin == kAsmJsOrigin; }
+ bool is_asm_js() const { return module->is_asm_js(); }
+ bool is_wasm() const { return module->is_wasm(); }
+ // Only used for testing.
Handle<Code> GetFunctionCode(uint32_t index) {
DCHECK_NOT_NULL(instance);
return instance->function_code[index];
@@ -343,8 +391,6 @@ struct WasmFunctionName {
WasmName name_;
};
-std::ostream& operator<<(std::ostream& os, const WasmModule& module);
-std::ostream& operator<<(std::ostream& os, const WasmFunction& function);
std::ostream& operator<<(std::ostream& os, const WasmFunctionName& name);
// Get the debug info associated with the given wasm object.
@@ -352,9 +398,9 @@ std::ostream& operator<<(std::ostream& os, const WasmFunctionName& name);
Handle<WasmDebugInfo> GetDebugInfo(Handle<JSObject> wasm);
// Check whether the given object represents a WebAssembly.Instance instance.
-// This checks the number and type of internal fields, so it's not 100 percent
+// This checks the number and type of embedder fields, so it's not 100 percent
// secure. If it turns out that we need more complete checks, we could add a
-// special marker as internal field, which will definitely never occur anywhere
+// special marker as embedder field, which will definitely never occur anywhere
// else.
bool IsWasmInstance(Object* instance);
@@ -379,36 +425,21 @@ V8_EXPORT_PRIVATE Handle<JSArray> GetCustomSections(
Isolate* isolate, Handle<WasmModuleObject> module, Handle<String> name,
ErrorThrower* thrower);
-// Get the offset of the code of a function within a module.
-int GetFunctionCodeOffset(Handle<WasmCompiledModule> compiled_module,
- int func_index);
-
// Assumed to be called with a code object associated to a wasm module instance.
// Intended to be called from runtime functions.
// Returns nullptr on failing to get owning instance.
WasmInstanceObject* GetOwningWasmInstance(Code* code);
-MaybeHandle<JSArrayBuffer> GetInstanceMemory(
- Isolate* isolate, Handle<WasmInstanceObject> instance);
-
-int32_t GetInstanceMemorySize(Isolate* isolate,
- Handle<WasmInstanceObject> instance);
-
-int32_t GrowInstanceMemory(Isolate* isolate,
- Handle<WasmInstanceObject> instance, uint32_t pages);
-
-Handle<JSArrayBuffer> NewArrayBuffer(Isolate* isolate, size_t size,
+Handle<JSArrayBuffer> NewArrayBuffer(Isolate*, size_t size,
bool enable_guard_regions);
-int32_t GrowWebAssemblyMemory(Isolate* isolate,
- Handle<WasmMemoryObject> receiver,
- uint32_t pages);
-
-int32_t GrowMemory(Isolate* isolate, Handle<WasmInstanceObject> instance,
- uint32_t pages);
+Handle<JSArrayBuffer> SetupArrayBuffer(Isolate*, void* backing_store,
+ size_t size, bool is_external,
+ bool enable_guard_regions);
void DetachWebAssemblyMemoryBuffer(Isolate* isolate,
- Handle<JSArrayBuffer> buffer);
+ Handle<JSArrayBuffer> buffer,
+ bool free_memory);
void UpdateDispatchTables(Isolate* isolate, Handle<FixedArray> dispatch_tables,
int index, Handle<JSFunction> js_function);
@@ -446,6 +477,47 @@ V8_EXPORT_PRIVATE void AsyncCompileAndInstantiate(
Isolate* isolate, Handle<JSPromise> promise, const ModuleWireBytes& bytes,
MaybeHandle<JSReceiver> imports);
+#if V8_TARGET_ARCH_64_BIT
+const bool kGuardRegionsSupported = true;
+#else
+const bool kGuardRegionsSupported = false;
+#endif
+
+inline bool EnableGuardRegions() {
+ return FLAG_wasm_guard_pages && kGuardRegionsSupported;
+}
+
+void UnpackAndRegisterProtectedInstructions(Isolate* isolate,
+ Handle<FixedArray> code_table);
+
+// Triggered by the WasmCompileLazy builtin.
+// Walks the stack (top three frames) to determine the wasm instance involved
+// and which function to compile.
+// Then triggers WasmCompiledModule::CompileLazy, taking care of correctly
+// patching the call site or indirect function tables.
+// Returns either the Code object that has been lazily compiled, or Illegal if
+// an error occurred. In the latter case, a pending exception has been set, which
+// will be triggered when returning from the runtime function, i.e. the Illegal
+// builtin will never be called.
+Handle<Code> CompileLazy(Isolate* isolate);
+
+// This class orchestrates the lazy compilation of wasm functions. It is
+// triggered by the WasmCompileLazy builtin.
+// It contains the logic for compiling and specializing wasm functions, and
+// patching the calling wasm code.
+// Once we support concurrent lazy compilation, this class will contain the
+// logic to actually orchestrate parallel execution of wasm compilation jobs.
+// TODO(clemensh): Implement concurrent lazy compilation.
+class LazyCompilationOrchestrator {
+ bool CompileFunction(Isolate*, Handle<WasmInstanceObject>,
+ int func_index) WARN_UNUSED_RESULT;
+
+ public:
+ MaybeHandle<Code> CompileLazy(Isolate*, Handle<WasmInstanceObject>,
+ Handle<Code> caller, int call_offset,
+ int exported_func_index, bool patch_caller);
+};
+
namespace testing {
void ValidateInstancesChain(Isolate* isolate,
Handle<WasmModuleObject> module_obj,
diff --git a/deps/v8/src/wasm/wasm-objects.cc b/deps/v8/src/wasm/wasm-objects.cc
index d74bf0c97c..b83fd7ad4e 100644
--- a/deps/v8/src/wasm/wasm-objects.cc
+++ b/deps/v8/src/wasm/wasm-objects.cc
@@ -5,10 +5,13 @@
#include "src/wasm/wasm-objects.h"
#include "src/utils.h"
+#include "src/assembler-inl.h"
#include "src/base/iterator.h"
+#include "src/compiler/wasm-compiler.h"
#include "src/debug/debug-interface.h"
#include "src/objects-inl.h"
#include "src/wasm/module-decoder.h"
+#include "src/wasm/wasm-code-specialization.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-text.h"
@@ -49,12 +52,12 @@ using namespace v8::internal::wasm;
type* Container::name() { return type::cast(getter(field)); }
#define DEFINE_OBJ_GETTER(Container, name, field, type) \
- DEFINE_GETTER0(GetInternalField, Container, name, field, type)
+ DEFINE_GETTER0(GetEmbedderField, Container, name, field, type)
#define DEFINE_OBJ_ACCESSORS(Container, name, field, type) \
- DEFINE_ACCESSORS0(GetInternalField, SetInternalField, Container, name, \
+ DEFINE_ACCESSORS0(GetEmbedderField, SetEmbedderField, Container, name, \
field, type)
#define DEFINE_OPTIONAL_OBJ_ACCESSORS(Container, name, field, type) \
- DEFINE_OPTIONAL_ACCESSORS0(GetInternalField, SetInternalField, Container, \
+ DEFINE_OPTIONAL_ACCESSORS0(GetEmbedderField, SetEmbedderField, Container, \
name, field, type)
#define DEFINE_ARR_GETTER(Container, name, field, type) \
DEFINE_GETTER0(get, Container, name, field, type)
@@ -67,30 +70,6 @@ using namespace v8::internal::wasm;
namespace {
-uint32_t SafeUint32(Object* value) {
- if (value->IsSmi()) {
- int32_t val = Smi::cast(value)->value();
- CHECK_GE(val, 0);
- return static_cast<uint32_t>(val);
- }
- DCHECK(value->IsHeapNumber());
- HeapNumber* num = HeapNumber::cast(value);
- CHECK_GE(num->value(), 0.0);
- CHECK_LE(num->value(), kMaxUInt32);
- return static_cast<uint32_t>(num->value());
-}
-
-int32_t SafeInt32(Object* value) {
- if (value->IsSmi()) {
- return Smi::cast(value)->value();
- }
- DCHECK(value->IsHeapNumber());
- HeapNumber* num = HeapNumber::cast(value);
- CHECK_GE(num->value(), Smi::kMinValue);
- CHECK_LE(num->value(), Smi::kMaxValue);
- return static_cast<int32_t>(num->value());
-}
-
// An iterator that returns first the module itself, then all modules linked via
// next, then all linked via prev.
class CompiledModulesIterator
@@ -218,10 +197,9 @@ bool IsBreakablePosition(Handle<WasmCompiledModule> compiled_module,
Handle<WasmModuleObject> WasmModuleObject::New(
Isolate* isolate, Handle<WasmCompiledModule> compiled_module) {
- ModuleOrigin origin = compiled_module->module()->origin;
-
+ WasmModule* module = compiled_module->module();
Handle<JSObject> module_object;
- if (origin == ModuleOrigin::kWasmOrigin) {
+ if (module->is_wasm()) {
Handle<JSFunction> module_cons(
isolate->native_context()->wasm_module_constructor());
module_object = isolate->factory()->NewJSObject(module_cons);
@@ -229,13 +207,13 @@ Handle<WasmModuleObject> WasmModuleObject::New(
Object::SetProperty(module_object, module_sym, module_object, STRICT)
.Check();
} else {
- DCHECK(origin == ModuleOrigin::kAsmJsOrigin);
+ DCHECK(module->is_asm_js());
Handle<Map> map = isolate->factory()->NewMap(
JS_OBJECT_TYPE,
JSObject::kHeaderSize + WasmModuleObject::kFieldCount * kPointerSize);
module_object = isolate->factory()->NewJSObjectFromMap(map, TENURED);
}
- module_object->SetInternalField(WasmModuleObject::kCompiledModule,
+ module_object->SetEmbedderField(WasmModuleObject::kCompiledModule,
*compiled_module);
Handle<WeakCell> link_to_module =
isolate->factory()->NewWeakCell(module_object);
@@ -251,7 +229,7 @@ WasmModuleObject* WasmModuleObject::cast(Object* object) {
bool WasmModuleObject::IsWasmModuleObject(Object* object) {
return object->IsJSObject() &&
- JSObject::cast(object)->GetInternalFieldCount() == kFieldCount;
+ JSObject::cast(object)->GetEmbedderFieldCount() == kFieldCount;
}
DEFINE_OBJ_GETTER(WasmModuleObject, compiled_module, kCompiledModule,
@@ -263,19 +241,19 @@ Handle<WasmTableObject> WasmTableObject::New(Isolate* isolate, uint32_t initial,
Handle<JSFunction> table_ctor(
isolate->native_context()->wasm_table_constructor());
Handle<JSObject> table_obj = isolate->factory()->NewJSObject(table_ctor);
- table_obj->SetInternalField(kWrapperTracerHeader, Smi::kZero);
+ table_obj->SetEmbedderField(kWrapperTracerHeader, Smi::kZero);
*js_functions = isolate->factory()->NewFixedArray(initial);
Object* null = isolate->heap()->null_value();
for (int i = 0; i < static_cast<int>(initial); ++i) {
(*js_functions)->set(i, null);
}
- table_obj->SetInternalField(kFunctions, *(*js_functions));
+ table_obj->SetEmbedderField(kFunctions, *(*js_functions));
Handle<Object> max = isolate->factory()->NewNumber(maximum);
- table_obj->SetInternalField(kMaximum, *max);
+ table_obj->SetEmbedderField(kMaximum, *max);
Handle<FixedArray> dispatch_tables = isolate->factory()->NewFixedArray(0);
- table_obj->SetInternalField(kDispatchTables, *dispatch_tables);
+ table_obj->SetEmbedderField(kDispatchTables, *dispatch_tables);
Handle<Symbol> table_sym(isolate->native_context()->wasm_table_sym());
Object::SetProperty(table_obj, table_sym, table_obj, STRICT).Check();
return Handle<WasmTableObject>::cast(table_obj);
@@ -288,7 +266,7 @@ Handle<FixedArray> WasmTableObject::AddDispatchTable(
Handle<WasmInstanceObject> instance, int table_index,
Handle<FixedArray> function_table, Handle<FixedArray> signature_table) {
Handle<FixedArray> dispatch_tables(
- FixedArray::cast(table_obj->GetInternalField(kDispatchTables)), isolate);
+ FixedArray::cast(table_obj->GetEmbedderField(kDispatchTables)), isolate);
DCHECK_EQ(0, dispatch_tables->length() % 4);
if (instance.is_null()) return dispatch_tables;
@@ -304,7 +282,7 @@ Handle<FixedArray> WasmTableObject::AddDispatchTable(
new_dispatch_tables->set(dispatch_tables->length() + 2, *function_table);
new_dispatch_tables->set(dispatch_tables->length() + 3, *signature_table);
- table_obj->SetInternalField(WasmTableObject::kDispatchTables,
+ table_obj->SetEmbedderField(WasmTableObject::kDispatchTables,
*new_dispatch_tables);
return new_dispatch_tables;
@@ -315,11 +293,11 @@ DEFINE_OBJ_ACCESSORS(WasmTableObject, functions, kFunctions, FixedArray)
uint32_t WasmTableObject::current_length() { return functions()->length(); }
bool WasmTableObject::has_maximum_length() {
- return GetInternalField(kMaximum)->Number() >= 0;
+ return GetEmbedderField(kMaximum)->Number() >= 0;
}
int64_t WasmTableObject::maximum_length() {
- return static_cast<int64_t>(GetInternalField(kMaximum)->Number());
+ return static_cast<int64_t>(GetEmbedderField(kMaximum)->Number());
}
WasmTableObject* WasmTableObject::cast(Object* object) {
@@ -335,6 +313,67 @@ void WasmTableObject::Grow(Isolate* isolate, Handle<WasmTableObject> table,
table->functions()->length(), count);
}
+namespace {
+
+Handle<JSArrayBuffer> GrowMemoryBuffer(Isolate* isolate,
+ Handle<JSArrayBuffer> old_buffer,
+ uint32_t pages, uint32_t max_pages) {
+ Address old_mem_start = nullptr;
+ uint32_t old_size = 0;
+ if (!old_buffer.is_null()) {
+ DCHECK(old_buffer->byte_length()->IsNumber());
+ old_mem_start = static_cast<Address>(old_buffer->backing_store());
+ old_size = old_buffer->byte_length()->Number();
+ }
+ DCHECK_GE(std::numeric_limits<uint32_t>::max(),
+ old_size + pages * WasmModule::kPageSize);
+ uint32_t new_size = old_size + pages * WasmModule::kPageSize;
+ if (new_size <= old_size || max_pages * WasmModule::kPageSize < new_size ||
+ FLAG_wasm_max_mem_pages * WasmModule::kPageSize < new_size) {
+ return Handle<JSArrayBuffer>::null();
+ }
+
+ // TODO(gdeepti): Change the protection here instead of allocating a new
+ // buffer before guard regions are turned on, see issue #5886.
+ const bool enable_guard_regions =
+ (old_buffer.is_null() && EnableGuardRegions()) ||
+ (!old_buffer.is_null() && old_buffer->has_guard_region());
+ Handle<JSArrayBuffer> new_buffer =
+ NewArrayBuffer(isolate, new_size, enable_guard_regions);
+ if (new_buffer.is_null()) return new_buffer;
+ Address new_mem_start = static_cast<Address>(new_buffer->backing_store());
+ memcpy(new_mem_start, old_mem_start, old_size);
+ return new_buffer;
+}
+
+// May GC, because SetSpecializationMemInfoFrom may GC
+void SetInstanceMemory(Isolate* isolate, Handle<WasmInstanceObject> instance,
+ Handle<JSArrayBuffer> buffer) {
+ instance->set_memory_buffer(*buffer);
+ WasmCompiledModule::SetSpecializationMemInfoFrom(
+ isolate->factory(), handle(instance->compiled_module()), buffer);
+ if (instance->has_debug_info()) {
+ instance->debug_info()->UpdateMemory(*buffer);
+ }
+}
+
+void UncheckedUpdateInstanceMemory(Isolate* isolate,
+ Handle<WasmInstanceObject> instance,
+ Address old_mem_start, uint32_t old_size) {
+ DCHECK(instance->has_memory_buffer());
+ Handle<JSArrayBuffer> mem_buffer(instance->memory_buffer());
+ uint32_t new_size = mem_buffer->byte_length()->Number();
+ Address new_mem_start = static_cast<Address>(mem_buffer->backing_store());
+ DCHECK_NOT_NULL(new_mem_start);
+ Zone specialization_zone(isolate->allocator(), ZONE_NAME);
+ CodeSpecialization code_specialization(isolate, &specialization_zone);
+ code_specialization.RelocateMemoryReferences(old_mem_start, old_size,
+ new_mem_start, new_size);
+ code_specialization.ApplyToWholeInstance(*instance);
+}
+
+} // namespace
+
Handle<WasmMemoryObject> WasmMemoryObject::New(Isolate* isolate,
Handle<JSArrayBuffer> buffer,
int32_t maximum) {
@@ -342,11 +381,11 @@ Handle<WasmMemoryObject> WasmMemoryObject::New(Isolate* isolate,
isolate->native_context()->wasm_memory_constructor());
Handle<JSObject> memory_obj =
isolate->factory()->NewJSObject(memory_ctor, TENURED);
- memory_obj->SetInternalField(kWrapperTracerHeader, Smi::kZero);
+ memory_obj->SetEmbedderField(kWrapperTracerHeader, Smi::kZero);
- memory_obj->SetInternalField(kArrayBuffer, *buffer);
+ memory_obj->SetEmbedderField(kArrayBuffer, *buffer);
Handle<Object> max = isolate->factory()->NewNumber(maximum);
- memory_obj->SetInternalField(kMaximum, *max);
+ memory_obj->SetEmbedderField(kMaximum, *max);
Handle<Symbol> memory_sym(isolate->native_context()->wasm_memory_sym());
Object::SetProperty(memory_obj, memory_sym, memory_obj, STRICT).Check();
return Handle<WasmMemoryObject>::cast(memory_obj);
@@ -357,15 +396,17 @@ DEFINE_OPTIONAL_OBJ_ACCESSORS(WasmMemoryObject, instances_link, kInstancesLink,
WasmInstanceWrapper)
uint32_t WasmMemoryObject::current_pages() {
- return SafeUint32(buffer()->byte_length()) / wasm::WasmModule::kPageSize;
+ uint32_t byte_length;
+ CHECK(buffer()->byte_length()->ToUint32(&byte_length));
+ return byte_length / wasm::WasmModule::kPageSize;
}
bool WasmMemoryObject::has_maximum_pages() {
- return GetInternalField(kMaximum)->Number() >= 0;
+ return GetEmbedderField(kMaximum)->Number() >= 0;
}
int32_t WasmMemoryObject::maximum_pages() {
- return static_cast<int32_t>(GetInternalField(kMaximum)->Number());
+ return static_cast<int32_t>(GetEmbedderField(kMaximum)->Number());
}
WasmMemoryObject* WasmMemoryObject::cast(Object* object) {
@@ -390,7 +431,73 @@ void WasmMemoryObject::AddInstance(Isolate* isolate,
void WasmMemoryObject::ResetInstancesLink(Isolate* isolate) {
Handle<Object> undefined = isolate->factory()->undefined_value();
- SetInternalField(kInstancesLink, *undefined);
+ SetEmbedderField(kInstancesLink, *undefined);
+}
+
+// static
+int32_t WasmMemoryObject::Grow(Isolate* isolate,
+ Handle<WasmMemoryObject> memory_object,
+ uint32_t pages) {
+ Handle<JSArrayBuffer> old_buffer(memory_object->buffer(), isolate);
+ uint32_t old_size = 0;
+ Address old_mem_start = nullptr;
+ // Force byte_length to 0, if byte_length fails IsNumber() check.
+ if (!old_buffer.is_null()) {
+ old_size = old_buffer->byte_length()->Number();
+ old_mem_start = static_cast<Address>(old_buffer->backing_store());
+ }
+ Handle<JSArrayBuffer> new_buffer;
+ // Return current size if grow by 0.
+ if (pages == 0) {
+ // Even for pages == 0, we need to attach a new JSArrayBuffer with the same
+ // backing store and neuter the old one to be spec compliant.
+ if (!old_buffer.is_null() && old_size != 0) {
+ new_buffer = SetupArrayBuffer(isolate, old_buffer->backing_store(),
+ old_size, old_buffer->is_external(),
+ old_buffer->has_guard_region());
+ memory_object->set_buffer(*new_buffer);
+ }
+ DCHECK_EQ(0, old_size % WasmModule::kPageSize);
+ return old_size / WasmModule::kPageSize;
+ }
+ if (!memory_object->has_instances_link()) {
+ // Memory object does not have an instance associated with it, just grow
+ uint32_t max_pages;
+ if (memory_object->has_maximum_pages()) {
+ max_pages = static_cast<uint32_t>(memory_object->maximum_pages());
+ if (FLAG_wasm_max_mem_pages < max_pages) return -1;
+ } else {
+ max_pages = FLAG_wasm_max_mem_pages;
+ }
+ new_buffer = GrowMemoryBuffer(isolate, old_buffer, pages, max_pages);
+ if (new_buffer.is_null()) return -1;
+ } else {
+ Handle<WasmInstanceWrapper> instance_wrapper(
+ memory_object->instances_link());
+ DCHECK(WasmInstanceWrapper::IsWasmInstanceWrapper(*instance_wrapper));
+ DCHECK(instance_wrapper->has_instance());
+ Handle<WasmInstanceObject> instance = instance_wrapper->instance_object();
+ DCHECK(IsWasmInstance(*instance));
+ uint32_t max_pages = instance->GetMaxMemoryPages();
+
+ // Grow memory object buffer and update instances associated with it.
+ new_buffer = GrowMemoryBuffer(isolate, old_buffer, pages, max_pages);
+ if (new_buffer.is_null()) return -1;
+ DCHECK(!instance_wrapper->has_previous());
+ SetInstanceMemory(isolate, instance, new_buffer);
+ UncheckedUpdateInstanceMemory(isolate, instance, old_mem_start, old_size);
+ while (instance_wrapper->has_next()) {
+ instance_wrapper = instance_wrapper->next_wrapper();
+ DCHECK(WasmInstanceWrapper::IsWasmInstanceWrapper(*instance_wrapper));
+ Handle<WasmInstanceObject> instance = instance_wrapper->instance_object();
+ DCHECK(IsWasmInstance(*instance));
+ SetInstanceMemory(isolate, instance, new_buffer);
+ UncheckedUpdateInstanceMemory(isolate, instance, old_mem_start, old_size);
+ }
+ }
+ memory_object->set_buffer(*new_buffer);
+ DCHECK_EQ(0, old_size % WasmModule::kPageSize);
+ return old_size / WasmModule::kPageSize;
}
DEFINE_OBJ_ACCESSORS(WasmInstanceObject, compiled_module, kCompiledModule,
@@ -430,14 +537,14 @@ bool WasmInstanceObject::IsWasmInstanceObject(Object* object) {
JSObject* obj = JSObject::cast(object);
Isolate* isolate = obj->GetIsolate();
- if (obj->GetInternalFieldCount() != kFieldCount) {
+ if (obj->GetEmbedderFieldCount() != kFieldCount) {
return false;
}
- Object* mem = obj->GetInternalField(kMemoryArrayBuffer);
+ Object* mem = obj->GetEmbedderField(kMemoryArrayBuffer);
if (!(mem->IsUndefined(isolate) || mem->IsJSArrayBuffer()) ||
!WasmCompiledModule::IsWasmCompiledModule(
- obj->GetInternalField(kCompiledModule))) {
+ obj->GetEmbedderField(kCompiledModule))) {
return false;
}
@@ -451,7 +558,7 @@ Handle<WasmInstanceObject> WasmInstanceObject::New(
isolate->native_context()->wasm_instance_constructor());
Handle<JSObject> instance_object =
isolate->factory()->NewJSObject(instance_cons, TENURED);
- instance_object->SetInternalField(kWrapperTracerHeader, Smi::kZero);
+ instance_object->SetEmbedderField(kWrapperTracerHeader, Smi::kZero);
Handle<Symbol> instance_sym(isolate->native_context()->wasm_instance_sym());
Object::SetProperty(instance_object, instance_sym, instance_object, STRICT)
@@ -459,20 +566,75 @@ Handle<WasmInstanceObject> WasmInstanceObject::New(
Handle<WasmInstanceObject> instance(
reinterpret_cast<WasmInstanceObject*>(*instance_object), isolate);
- instance->SetInternalField(kCompiledModule, *compiled_module);
- instance->SetInternalField(kMemoryObject, isolate->heap()->undefined_value());
+ instance->SetEmbedderField(kCompiledModule, *compiled_module);
+ instance->SetEmbedderField(kMemoryObject, isolate->heap()->undefined_value());
Handle<WasmInstanceWrapper> instance_wrapper =
WasmInstanceWrapper::New(isolate, instance);
- instance->SetInternalField(kWasmMemInstanceWrapper, *instance_wrapper);
+ instance->SetEmbedderField(kWasmMemInstanceWrapper, *instance_wrapper);
return instance;
}
+int32_t WasmInstanceObject::GetMemorySize() {
+ if (!has_memory_buffer()) return 0;
+ uint32_t bytes = memory_buffer()->byte_length()->Number();
+ DCHECK_EQ(0, bytes % WasmModule::kPageSize);
+ return bytes / WasmModule::kPageSize;
+}
+
+int32_t WasmInstanceObject::GrowMemory(Isolate* isolate,
+ Handle<WasmInstanceObject> instance,
+ uint32_t pages) {
+ if (pages == 0) return instance->GetMemorySize();
+ if (instance->has_memory_object()) {
+ return WasmMemoryObject::Grow(
+ isolate, handle(instance->memory_object(), isolate), pages);
+ }
+
+ // No other instances to grow, grow just the one.
+ uint32_t old_size = 0;
+ Address old_mem_start = nullptr;
+ Handle<JSArrayBuffer> old_buffer;
+ if (instance->has_memory_buffer()) {
+ old_buffer = handle(instance->memory_buffer(), isolate);
+ old_size = old_buffer->byte_length()->Number();
+ old_mem_start = static_cast<Address>(old_buffer->backing_store());
+ }
+ uint32_t max_pages = instance->GetMaxMemoryPages();
+ Handle<JSArrayBuffer> buffer =
+ GrowMemoryBuffer(isolate, old_buffer, pages, max_pages);
+ if (buffer.is_null()) return -1;
+ SetInstanceMemory(isolate, instance, buffer);
+ UncheckedUpdateInstanceMemory(isolate, instance, old_mem_start, old_size);
+ DCHECK_EQ(0, old_size % WasmModule::kPageSize);
+ return old_size / WasmModule::kPageSize;
+}
+
+uint32_t WasmInstanceObject::GetMaxMemoryPages() {
+ if (has_memory_object()) {
+ if (memory_object()->has_maximum_pages()) {
+ uint32_t maximum =
+ static_cast<uint32_t>(memory_object()->maximum_pages());
+ if (maximum < FLAG_wasm_max_mem_pages) return maximum;
+ }
+ }
+ uint32_t compiled_max_pages = compiled_module()->module()->max_mem_pages;
+ Isolate* isolate = GetIsolate();
+ auto* histogram = (compiled_module()->module()->is_wasm()
+ ? isolate->counters()->wasm_wasm_max_mem_pages_count()
+ : isolate->counters()->wasm_asm_max_mem_pages_count());
+ histogram->AddSample(compiled_max_pages);
+ if (compiled_max_pages != 0) return compiled_max_pages;
+ return FLAG_wasm_max_mem_pages;
+}
+
WasmInstanceObject* WasmExportedFunction::instance() {
- return WasmInstanceObject::cast(GetInternalField(kInstance));
+ return WasmInstanceObject::cast(GetEmbedderField(kInstance));
}
int WasmExportedFunction::function_index() {
- return SafeInt32(GetInternalField(kIndex));
+ int32_t func_index;
+ CHECK(GetEmbedderField(kIndex)->ToInt32(&func_index));
+ return func_index;
}
WasmExportedFunction* WasmExportedFunction::cast(Object* object) {
@@ -492,8 +654,8 @@ Handle<WasmExportedFunction> WasmExportedFunction::New(
EmbeddedVector<char, 16> buffer;
int length = SNPrintF(buffer, "%d", func_index);
name = isolate->factory()
- ->NewStringFromAscii(
- Vector<const char>::cast(buffer.SubVector(0, length)))
+ ->NewStringFromOneByte(
+ Vector<uint8_t>::cast(buffer.SubVector(0, length)))
.ToHandleChecked();
} else {
name = maybe_name.ToHandleChecked();
@@ -505,12 +667,12 @@ Handle<WasmExportedFunction> WasmExportedFunction::New(
shared->set_internal_formal_parameter_count(arity);
Handle<JSFunction> function = isolate->factory()->NewFunction(
isolate->wasm_function_map(), name, export_wrapper);
- function->SetInternalField(kWrapperTracerHeader, Smi::kZero);
+ function->SetEmbedderField(kWrapperTracerHeader, Smi::kZero);
function->set_shared(*shared);
- function->SetInternalField(kInstance, *instance);
- function->SetInternalField(kIndex, Smi::FromInt(func_index));
+ function->SetEmbedderField(kInstance, *instance);
+ function->SetEmbedderField(kIndex, Smi::FromInt(func_index));
return Handle<WasmExportedFunction>::cast(function);
}
@@ -555,6 +717,8 @@ DEFINE_OPTIONAL_ARR_ACCESSORS(WasmSharedModuleData, asm_js_offset_table,
kAsmJsOffsetTable, ByteArray);
DEFINE_OPTIONAL_ARR_GETTER(WasmSharedModuleData, breakpoint_infos,
kBreakPointInfos, FixedArray);
+DEFINE_OPTIONAL_ARR_GETTER(WasmSharedModuleData, lazy_compilation_orchestrator,
+ kLazyCompilationOrchestrator, Foreign);
Handle<WasmSharedModuleData> WasmSharedModuleData::New(
Isolate* isolate, Handle<Foreign> module_wrapper,
@@ -579,8 +743,8 @@ Handle<WasmSharedModuleData> WasmSharedModuleData::New(
}
bool WasmSharedModuleData::is_asm_js() {
- bool asm_js = module()->origin == wasm::ModuleOrigin::kAsmJsOrigin;
- DCHECK_EQ(asm_js, script()->type() == Script::TYPE_NORMAL);
+ bool asm_js = module()->is_asm_js();
+ DCHECK_EQ(asm_js, script()->IsUserJavaScript());
DCHECK_EQ(asm_js, has_asm_js_offset_table());
return asm_js;
}
@@ -744,20 +908,149 @@ void WasmSharedModuleData::SetBreakpointsOnNewInstance(
}
}
+void WasmSharedModuleData::PrepareForLazyCompilation(
+ Handle<WasmSharedModuleData> shared) {
+ if (shared->has_lazy_compilation_orchestrator()) return;
+ Isolate* isolate = shared->GetIsolate();
+ LazyCompilationOrchestrator* orch = new LazyCompilationOrchestrator();
+ Handle<Managed<LazyCompilationOrchestrator>> orch_handle =
+ Managed<LazyCompilationOrchestrator>::New(isolate, orch);
+ shared->set(WasmSharedModuleData::kLazyCompilationOrchestrator, *orch_handle);
+}
+
Handle<WasmCompiledModule> WasmCompiledModule::New(
- Isolate* isolate, Handle<WasmSharedModuleData> shared) {
+ Isolate* isolate, Handle<WasmSharedModuleData> shared,
+ Handle<FixedArray> code_table,
+ MaybeHandle<FixedArray> maybe_empty_function_tables,
+ MaybeHandle<FixedArray> maybe_signature_tables) {
Handle<FixedArray> ret =
isolate->factory()->NewFixedArray(PropertyIndices::Count, TENURED);
// WasmCompiledModule::cast would fail since fields are not set yet.
Handle<WasmCompiledModule> compiled_module(
reinterpret_cast<WasmCompiledModule*>(*ret), isolate);
compiled_module->InitId();
- compiled_module->set_num_imported_functions(0);
compiled_module->set_shared(shared);
compiled_module->set_native_context(isolate->native_context());
+ compiled_module->set_code_table(code_table);
+ int function_table_count =
+ static_cast<int>(shared->module()->function_tables.size());
+ if (function_table_count > 0) {
+ compiled_module->set_signature_tables(
+ maybe_signature_tables.ToHandleChecked());
+ compiled_module->set_empty_function_tables(
+ maybe_empty_function_tables.ToHandleChecked());
+ compiled_module->set_function_tables(
+ maybe_empty_function_tables.ToHandleChecked());
+ }
+ // TODO(mtrofin): we copy these because the order of finalization isn't
+ // reliable, and we need these at Reset (which is called at
+ // finalization). If the order were reliable, and top-down, we could instead
+ // just get them from shared().
+ compiled_module->set_min_mem_pages(shared->module()->min_mem_pages);
+ compiled_module->set_num_imported_functions(
+ shared->module()->num_imported_functions);
return compiled_module;
}
+Handle<WasmCompiledModule> WasmCompiledModule::Clone(
+ Isolate* isolate, Handle<WasmCompiledModule> module) {
+ Handle<FixedArray> code_copy =
+ isolate->factory()->CopyFixedArray(module->code_table());
+ Handle<WasmCompiledModule> ret = Handle<WasmCompiledModule>::cast(
+ isolate->factory()->CopyFixedArray(module));
+ ret->InitId();
+ ret->set_code_table(code_copy);
+ ret->reset_weak_owning_instance();
+ ret->reset_weak_next_instance();
+ ret->reset_weak_prev_instance();
+ ret->reset_weak_exported_functions();
+ if (ret->has_embedded_mem_start()) {
+ WasmCompiledModule::recreate_embedded_mem_start(ret, isolate->factory(),
+ ret->embedded_mem_start());
+ }
+ if (ret->has_globals_start()) {
+ WasmCompiledModule::recreate_globals_start(ret, isolate->factory(),
+ ret->globals_start());
+ }
+ if (ret->has_embedded_mem_size()) {
+ WasmCompiledModule::recreate_embedded_mem_size(ret, isolate->factory(),
+ ret->embedded_mem_size());
+ }
+ return ret;
+}
+
+void WasmCompiledModule::Reset(Isolate* isolate,
+ WasmCompiledModule* compiled_module) {
+ DisallowHeapAllocation no_gc;
+ TRACE("Resetting %d\n", compiled_module->instance_id());
+ Object* undefined = *isolate->factory()->undefined_value();
+ Object* fct_obj = compiled_module->ptr_to_code_table();
+ if (fct_obj != nullptr && fct_obj != undefined) {
+ uint32_t old_mem_size = compiled_module->mem_size();
+ uint32_t default_mem_size = compiled_module->default_mem_size();
+ Address old_mem_start = compiled_module->GetEmbeddedMemStartOrNull();
+
+ // Patch code to update memory references, global references, and function
+ // table references.
+ Zone specialization_zone(isolate->allocator(), ZONE_NAME);
+ CodeSpecialization code_specialization(isolate, &specialization_zone);
+
+ if (old_mem_size > 0 && old_mem_start != nullptr) {
+ code_specialization.RelocateMemoryReferences(old_mem_start, old_mem_size,
+ nullptr, default_mem_size);
+ }
+
+ if (compiled_module->has_globals_start()) {
+ Address globals_start =
+ reinterpret_cast<Address>(compiled_module->globals_start());
+ code_specialization.RelocateGlobals(globals_start, nullptr);
+ compiled_module->set_globals_start(0);
+ }
+
+ // Reset function tables.
+ if (compiled_module->has_function_tables()) {
+ FixedArray* function_tables = compiled_module->ptr_to_function_tables();
+ FixedArray* empty_function_tables =
+ compiled_module->ptr_to_empty_function_tables();
+ if (function_tables != empty_function_tables) {
+ DCHECK_EQ(function_tables->length(), empty_function_tables->length());
+ for (int i = 0, e = function_tables->length(); i < e; ++i) {
+ code_specialization.RelocateObject(
+ handle(function_tables->get(i), isolate),
+ handle(empty_function_tables->get(i), isolate));
+ }
+ compiled_module->set_ptr_to_function_tables(empty_function_tables);
+ }
+ }
+
+ FixedArray* functions = FixedArray::cast(fct_obj);
+ for (int i = compiled_module->num_imported_functions(),
+ end = functions->length();
+ i < end; ++i) {
+ Code* code = Code::cast(functions->get(i));
+ // Skip lazy compile stubs.
+ if (code->builtin_index() == Builtins::kWasmCompileLazy) continue;
+ if (code->kind() != Code::WASM_FUNCTION) {
+ // From here on, there should only be wrappers for exported functions.
+ for (; i < end; ++i) {
+ DCHECK_EQ(Code::JS_TO_WASM_FUNCTION,
+ Code::cast(functions->get(i))->kind());
+ }
+ break;
+ }
+ bool changed =
+ code_specialization.ApplyToWasmCode(code, SKIP_ICACHE_FLUSH);
+ // TODO(wasm): Check if this is faster than passing FLUSH_ICACHE_IF_NEEDED
+ // above.
+ if (changed) {
+ Assembler::FlushICache(isolate, code->instruction_start(),
+ code->instruction_size());
+ }
+ }
+ }
+ compiled_module->ResetSpecializationMemInfoIfNeeded();
+}
+
void WasmCompiledModule::InitId() {
#if DEBUG
static uint32_t instance_id_counter = 0;
@@ -766,6 +1059,45 @@ void WasmCompiledModule::InitId() {
#endif
}
+void WasmCompiledModule::ResetSpecializationMemInfoIfNeeded() {
+ DisallowHeapAllocation no_gc;
+ if (has_embedded_mem_start()) {
+ set_embedded_mem_size(0);
+ set_embedded_mem_start(0);
+ }
+}
+
+void WasmCompiledModule::SetSpecializationMemInfoFrom(
+ Factory* factory, Handle<WasmCompiledModule> compiled_module,
+ Handle<JSArrayBuffer> buffer) {
+ DCHECK(!buffer.is_null());
+ size_t start_address = reinterpret_cast<size_t>(buffer->backing_store());
+ uint32_t size = static_cast<uint32_t>(buffer->byte_length()->Number());
+ if (!compiled_module->has_embedded_mem_start()) {
+ DCHECK(!compiled_module->has_embedded_mem_size());
+ WasmCompiledModule::recreate_embedded_mem_start(compiled_module, factory,
+ start_address);
+ WasmCompiledModule::recreate_embedded_mem_size(compiled_module, factory,
+ size);
+ } else {
+ compiled_module->set_embedded_mem_start(start_address);
+ compiled_module->set_embedded_mem_size(size);
+ }
+}
+
+void WasmCompiledModule::SetGlobalsStartAddressFrom(
+ Factory* factory, Handle<WasmCompiledModule> compiled_module,
+ Handle<JSArrayBuffer> buffer) {
+ DCHECK(!buffer.is_null());
+ size_t start_address = reinterpret_cast<size_t>(buffer->backing_store());
+ if (!compiled_module->has_globals_start()) {
+ WasmCompiledModule::recreate_globals_start(compiled_module, factory,
+ start_address);
+ } else {
+ compiled_module->set_globals_start(start_address);
+ }
+}
+
MaybeHandle<String> WasmCompiledModule::ExtractUtf8StringFromModuleBytes(
Isolate* isolate, Handle<WasmCompiledModule> compiled_module,
uint32_t offset, uint32_t size) {
@@ -774,9 +1106,10 @@ MaybeHandle<String> WasmCompiledModule::ExtractUtf8StringFromModuleBytes(
isolate);
DCHECK_GE(module_bytes->length(), offset);
DCHECK_GE(module_bytes->length() - offset, size);
- Address raw = module_bytes->GetCharsAddress() + offset;
- if (!unibrow::Utf8::Validate(reinterpret_cast<const byte*>(raw), size))
- return {}; // UTF8 decoding error for name.
+ // UTF8 validation happens at decode time.
+ DCHECK(unibrow::Utf8::Validate(
+ reinterpret_cast<const byte*>(module_bytes->GetCharsAddress() + offset),
+ size));
DCHECK_GE(kMaxInt, offset);
DCHECK_GE(kMaxInt, size);
return isolate->factory()->NewStringFromUtf8SubString(
@@ -793,13 +1126,25 @@ bool WasmCompiledModule::IsWasmCompiledModule(Object* obj) {
Object* obj = arr->get(kID_##NAME); \
if (!(TYPE_CHECK)) return false; \
} while (false);
+// We're OK with undefined, generally, because maybe we don't
+// have a value for that item. For example, we may not have a
+// memory, or globals.
+// We're not OK with the const numbers being undefined. They are
+// expected to be initialized at construction.
#define WCM_CHECK_OBJECT(TYPE, NAME) \
WCM_CHECK_TYPE(NAME, obj->IsUndefined(isolate) || obj->Is##TYPE())
+#define WCM_CHECK_CONST_OBJECT(TYPE, NAME) \
+ WCM_CHECK_TYPE(NAME, obj->IsUndefined(isolate) || obj->Is##TYPE())
#define WCM_CHECK_WASM_OBJECT(TYPE, NAME) \
WCM_CHECK_TYPE(NAME, TYPE::Is##TYPE(obj))
#define WCM_CHECK_WEAK_LINK(TYPE, NAME) WCM_CHECK_OBJECT(WeakCell, NAME)
-#define WCM_CHECK_SMALL_NUMBER(TYPE, NAME) WCM_CHECK_TYPE(NAME, obj->IsSmi())
+#define WCM_CHECK_SMALL_NUMBER(TYPE, NAME) \
+ WCM_CHECK_TYPE(NAME, obj->IsUndefined(isolate) || obj->IsSmi())
#define WCM_CHECK(KIND, TYPE, NAME) WCM_CHECK_##KIND(TYPE, NAME)
+#define WCM_CHECK_SMALL_CONST_NUMBER(TYPE, NAME) \
+ WCM_CHECK_TYPE(NAME, obj->IsSmi())
+#define WCM_CHECK_LARGE_NUMBER(TYPE, NAME) \
+ WCM_CHECK_TYPE(NAME, obj->IsUndefined(isolate) || obj->IsMutableHeapNumber())
WCM_PROPERTY_TABLE(WCM_CHECK)
#undef WCM_CHECK
@@ -831,11 +1176,13 @@ void WasmCompiledModule::ReinitializeAfterDeserialization(
isolate);
DCHECK(!WasmSharedModuleData::IsWasmSharedModuleData(*shared));
WasmSharedModuleData::ReinitializeAfterDeserialization(isolate, shared);
+ WasmCompiledModule::Reset(isolate, *compiled_module);
DCHECK(WasmSharedModuleData::IsWasmSharedModuleData(*shared));
}
uint32_t WasmCompiledModule::mem_size() const {
- return has_memory() ? memory()->byte_length()->Number() : default_mem_size();
+ DCHECK(has_embedded_mem_size() == has_embedded_mem_start());
+ return has_embedded_mem_start() ? embedded_mem_size() : default_mem_size();
}
uint32_t WasmCompiledModule::default_mem_size() const {
@@ -847,6 +1194,8 @@ MaybeHandle<String> WasmCompiledModule::GetFunctionNameOrNull(
uint32_t func_index) {
DCHECK_LT(func_index, compiled_module->module()->functions.size());
WasmFunction& function = compiled_module->module()->functions[func_index];
+ DCHECK_IMPLIES(function.name_offset == 0, function.name_length == 0);
+ if (!function.name_offset) return {};
return WasmCompiledModule::ExtractUtf8StringFromModuleBytes(
isolate, compiled_module, function.name_offset, function.name_length);
}
@@ -1049,7 +1398,7 @@ v8::debug::WasmDisassembly WasmCompiledModule::DisassembleFunction(
bool WasmCompiledModule::GetPossibleBreakpoints(
const v8::debug::Location& start, const v8::debug::Location& end,
- std::vector<v8::debug::Location>* locations) {
+ std::vector<v8::debug::BreakLocation>* locations) {
DisallowHeapAllocation no_gc;
std::vector<WasmFunction>& functions = module()->functions;
@@ -1112,7 +1461,7 @@ bool WasmCompiledModule::GetPossibleBreakpoints(
break;
}
if (total_offset < start_offset) continue;
- locations->push_back(v8::debug::Location(func_idx, offset));
+ locations->emplace_back(func_idx, offset, debug::kCommonBreakLocation);
}
}
return true;
@@ -1170,6 +1519,18 @@ MaybeHandle<FixedArray> WasmCompiledModule::CheckBreakPoints(int position) {
return isolate->debug()->GetHitBreakPointObjects(breakpoint_objects);
}
+MaybeHandle<Code> WasmCompiledModule::CompileLazy(
+ Isolate* isolate, Handle<WasmInstanceObject> instance, Handle<Code> caller,
+ int offset, int func_index, bool patch_caller) {
+ isolate->set_context(*instance->compiled_module()->native_context());
+ Object* orch_obj =
+ instance->compiled_module()->shared()->lazy_compilation_orchestrator();
+ LazyCompilationOrchestrator* orch =
+ Managed<LazyCompilationOrchestrator>::cast(orch_obj)->get();
+ return orch->CompileLazy(isolate, instance, caller, offset, func_index,
+ patch_caller);
+}
+
Handle<WasmInstanceWrapper> WasmInstanceWrapper::New(
Isolate* isolate, Handle<WasmInstanceObject> instance) {
Handle<FixedArray> array =
diff --git a/deps/v8/src/wasm/wasm-objects.h b/deps/v8/src/wasm/wasm-objects.h
index b198cf2755..21299878b3 100644
--- a/deps/v8/src/wasm/wasm-objects.h
+++ b/deps/v8/src/wasm/wasm-objects.h
@@ -45,7 +45,7 @@ class WasmInstanceWrapper;
class WasmModuleObject : public JSObject {
public:
// If a second field is added, we need a kWrapperTracerHeader field as well.
- // TODO(titzer): add the brand as an internal field instead of a property.
+ // TODO(titzer): add the brand as an embedder field instead of a property.
enum Fields { kCompiledModule, kFieldCount };
DECLARE_CASTS(WasmModuleObject);
@@ -60,7 +60,7 @@ class WasmModuleObject : public JSObject {
class WasmTableObject : public JSObject {
public:
// The 0-th field is used by the Blink Wrapper Tracer.
- // TODO(titzer): add the brand as an internal field instead of a property.
+ // TODO(titzer): add the brand as an embedder field instead of a property.
enum Fields {
kWrapperTracerHeader,
kFunctions,
@@ -92,7 +92,7 @@ class WasmTableObject : public JSObject {
class WasmMemoryObject : public JSObject {
public:
// The 0-th field is used by the Blink Wrapper Tracer.
- // TODO(titzer): add the brand as an internal field instead of a property.
+ // TODO(titzer): add the brand as an embedder field instead of a property.
enum Fields : uint8_t {
kWrapperTracerHeader,
kArrayBuffer,
@@ -115,15 +115,14 @@ class WasmMemoryObject : public JSObject {
Handle<JSArrayBuffer> buffer,
int32_t maximum);
- static bool Grow(Isolate* isolate, Handle<WasmMemoryObject> memory,
- uint32_t count);
+ static int32_t Grow(Isolate*, Handle<WasmMemoryObject>, uint32_t pages);
};
// Representation of a WebAssembly.Instance JavaScript-level object.
class WasmInstanceObject : public JSObject {
public:
// The 0-th field is used by the Blink Wrapper Tracer.
- // TODO(titzer): add the brand as an internal field instead of a property.
+ // TODO(titzer): add the brand as an embedder field instead of a property.
enum Fields {
kWrapperTracerHeader,
kCompiledModule,
@@ -149,11 +148,16 @@ class WasmInstanceObject : public JSObject {
// Get the debug info associated with the given wasm object.
// If no debug info exists yet, it is created automatically.
- static Handle<WasmDebugInfo> GetOrCreateDebugInfo(
- Handle<WasmInstanceObject> instance);
+ static Handle<WasmDebugInfo> GetOrCreateDebugInfo(Handle<WasmInstanceObject>);
- static Handle<WasmInstanceObject> New(
- Isolate* isolate, Handle<WasmCompiledModule> compiled_module);
+ static Handle<WasmInstanceObject> New(Isolate*, Handle<WasmCompiledModule>);
+
+ int32_t GetMemorySize();
+
+ static int32_t GrowMemory(Isolate*, Handle<WasmInstanceObject>,
+ uint32_t pages);
+
+ uint32_t GetMaxMemoryPages();
};
// Representation of an exported WASM function.
@@ -184,6 +188,7 @@ class WasmSharedModuleData : public FixedArray {
kScript,
kAsmJsOffsetTable,
kBreakPointInfos,
+ kLazyCompilationOrchestrator,
kFieldCount
};
@@ -212,8 +217,38 @@ class WasmSharedModuleData : public FixedArray {
static void SetBreakpointsOnNewInstance(Handle<WasmSharedModuleData>,
Handle<WasmInstanceObject>);
+
+ static void PrepareForLazyCompilation(Handle<WasmSharedModuleData>);
+
+ private:
+ DECLARE_OPTIONAL_GETTER(lazy_compilation_orchestrator, Foreign);
+ friend class WasmCompiledModule;
};
+// This represents the set of wasm compiled functions, together
+// with all the information necessary for re-specializing them.
+//
+// We specialize wasm functions to their instance by embedding:
+// - raw interior pointers into the backing store of the array buffer
+// used as memory of a particular WebAssembly.Instance object.
+// - bounds check limits, computed at compile time, relative to the
+// size of the memory.
+// - the objects representing the function tables and signature tables
+// - raw pointer to the globals buffer.
+//
+// Even without instantiating, we need values for all of these parameters.
+// We need to track these values to be able to create new instances and
+// to be able to serialize/deserialize.
+// The design decisions for how we track these values is not too immediate,
+// and it deserves a summary. The "tricky" ones are: memory, globals, and
+// the tables (signature and functions).
+// The first 2 (memory & globals) are embedded as raw pointers to native
+// buffers. All we need to track them is the start addresses and, in the
+// case of memory, the size. We model all of them as HeapNumbers, because
+// we need to store size_t values (for addresses), and potentially full
+// 32 bit unsigned values for the size. Smis are 31 bits.
+// For tables, we need to hold a reference to the JS Heap object, because
+// we embed them as objects, and they may move.
class WasmCompiledModule : public FixedArray {
public:
enum Fields { kFieldCount };
@@ -223,78 +258,116 @@ class WasmCompiledModule : public FixedArray {
return reinterpret_cast<WasmCompiledModule*>(fixed_array);
}
-#define WCM_OBJECT_OR_WEAK(TYPE, NAME, ID, TYPE_CHECK) \
- Handle<TYPE> NAME() const { return handle(ptr_to_##NAME()); } \
- \
- MaybeHandle<TYPE> maybe_##NAME() const { \
- if (has_##NAME()) return NAME(); \
- return MaybeHandle<TYPE>(); \
- } \
- \
- TYPE* maybe_ptr_to_##NAME() const { \
- Object* obj = get(ID); \
- if (!(TYPE_CHECK)) return nullptr; \
- return TYPE::cast(obj); \
- } \
- \
- TYPE* ptr_to_##NAME() const { \
- Object* obj = get(ID); \
- DCHECK(TYPE_CHECK); \
- return TYPE::cast(obj); \
- } \
- \
- void set_##NAME(Handle<TYPE> value) { set_ptr_to_##NAME(*value); } \
- \
- void set_ptr_to_##NAME(TYPE* value) { set(ID, value); } \
- \
- bool has_##NAME() const { \
- Object* obj = get(ID); \
- return TYPE_CHECK; \
- } \
- \
- void reset_##NAME() { set_undefined(ID); }
+#define WCM_OBJECT_OR_WEAK(TYPE, NAME, ID, TYPE_CHECK, SETTER_MODIFIER) \
+ public: \
+ Handle<TYPE> NAME() const { return handle(ptr_to_##NAME()); } \
+ \
+ MaybeHandle<TYPE> maybe_##NAME() const { \
+ if (has_##NAME()) return NAME(); \
+ return MaybeHandle<TYPE>(); \
+ } \
+ \
+ TYPE* maybe_ptr_to_##NAME() const { \
+ Object* obj = get(ID); \
+ if (!(TYPE_CHECK)) return nullptr; \
+ return TYPE::cast(obj); \
+ } \
+ \
+ TYPE* ptr_to_##NAME() const { \
+ Object* obj = get(ID); \
+ DCHECK(TYPE_CHECK); \
+ return TYPE::cast(obj); \
+ } \
+ \
+ bool has_##NAME() const { \
+ Object* obj = get(ID); \
+ return TYPE_CHECK; \
+ } \
+ \
+ void reset_##NAME() { set_undefined(ID); } \
+ \
+ SETTER_MODIFIER: \
+ void set_##NAME(Handle<TYPE> value) { set_ptr_to_##NAME(*value); } \
+ void set_ptr_to_##NAME(TYPE* value) { set(ID, value); }
#define WCM_OBJECT(TYPE, NAME) \
- WCM_OBJECT_OR_WEAK(TYPE, NAME, kID_##NAME, obj->Is##TYPE())
+ WCM_OBJECT_OR_WEAK(TYPE, NAME, kID_##NAME, obj->Is##TYPE(), public)
+
+#define WCM_CONST_OBJECT(TYPE, NAME) \
+ WCM_OBJECT_OR_WEAK(TYPE, NAME, kID_##NAME, obj->Is##TYPE(), private)
#define WCM_WASM_OBJECT(TYPE, NAME) \
- WCM_OBJECT_OR_WEAK(TYPE, NAME, kID_##NAME, TYPE::Is##TYPE(obj))
+ WCM_OBJECT_OR_WEAK(TYPE, NAME, kID_##NAME, TYPE::Is##TYPE(obj), private)
-#define WCM_SMALL_NUMBER(TYPE, NAME) \
+#define WCM_SMALL_CONST_NUMBER(TYPE, NAME) \
+ public: \
TYPE NAME() const { \
return static_cast<TYPE>(Smi::cast(get(kID_##NAME))->value()); \
} \
+ \
+ private: \
void set_##NAME(TYPE value) { set(kID_##NAME, Smi::FromInt(value)); }
-#define WCM_WEAK_LINK(TYPE, NAME) \
- WCM_OBJECT_OR_WEAK(WeakCell, weak_##NAME, kID_##NAME, obj->IsWeakCell()); \
- \
- Handle<TYPE> NAME() const { \
- return handle(TYPE::cast(weak_##NAME()->value())); \
+#define WCM_WEAK_LINK(TYPE, NAME) \
+ WCM_OBJECT_OR_WEAK(WeakCell, weak_##NAME, kID_##NAME, obj->IsWeakCell(), \
+ public) \
+ \
+ public: \
+ Handle<TYPE> NAME() const { \
+ return handle(TYPE::cast(weak_##NAME()->value())); \
}
-#define CORE_WCM_PROPERTY_TABLE(MACRO) \
- MACRO(WASM_OBJECT, WasmSharedModuleData, shared) \
- MACRO(OBJECT, Context, native_context) \
- MACRO(SMALL_NUMBER, uint32_t, num_imported_functions) \
- MACRO(OBJECT, FixedArray, code_table) \
- MACRO(OBJECT, FixedArray, weak_exported_functions) \
- MACRO(OBJECT, FixedArray, function_tables) \
- MACRO(OBJECT, FixedArray, signature_tables) \
- MACRO(OBJECT, FixedArray, empty_function_tables) \
- MACRO(OBJECT, JSArrayBuffer, memory) \
- MACRO(SMALL_NUMBER, uint32_t, min_mem_pages) \
- MACRO(SMALL_NUMBER, uint32_t, max_mem_pages) \
- MACRO(WEAK_LINK, WasmCompiledModule, next_instance) \
- MACRO(WEAK_LINK, WasmCompiledModule, prev_instance) \
- MACRO(WEAK_LINK, JSObject, owning_instance) \
+#define WCM_LARGE_NUMBER(TYPE, NAME) \
+ public: \
+ TYPE NAME() const { \
+ Object* value = get(kID_##NAME); \
+ DCHECK(value->IsMutableHeapNumber()); \
+ return static_cast<TYPE>(HeapNumber::cast(value)->value()); \
+ } \
+ \
+ void set_##NAME(TYPE value) { \
+ Object* number = get(kID_##NAME); \
+ DCHECK(number->IsMutableHeapNumber()); \
+ HeapNumber::cast(number)->set_value(static_cast<double>(value)); \
+ } \
+ \
+ static void recreate_##NAME(Handle<WasmCompiledModule> obj, \
+ Factory* factory, TYPE init_val) { \
+ Handle<HeapNumber> number = factory->NewHeapNumber( \
+ static_cast<double>(init_val), MutableMode::MUTABLE, TENURED); \
+ obj->set(kID_##NAME, *number); \
+ } \
+ bool has_##NAME() const { return get(kID_##NAME)->IsMutableHeapNumber(); }
+
+// Add values here if they are required for creating new instances or
+// for deserialization, and if they are serializable.
+// By default, instance values go to WasmInstanceObject, however, if
+// we embed the generated code with a value, then we track that value here.
+#define CORE_WCM_PROPERTY_TABLE(MACRO) \
+ MACRO(WASM_OBJECT, WasmSharedModuleData, shared) \
+ MACRO(OBJECT, Context, native_context) \
+ MACRO(SMALL_CONST_NUMBER, uint32_t, num_imported_functions) \
+ MACRO(CONST_OBJECT, FixedArray, code_table) \
+ MACRO(OBJECT, FixedArray, weak_exported_functions) \
+ MACRO(OBJECT, FixedArray, function_tables) \
+ MACRO(OBJECT, FixedArray, signature_tables) \
+ MACRO(CONST_OBJECT, FixedArray, empty_function_tables) \
+ MACRO(LARGE_NUMBER, size_t, embedded_mem_start) \
+ MACRO(LARGE_NUMBER, size_t, globals_start) \
+ MACRO(LARGE_NUMBER, uint32_t, embedded_mem_size) \
+ MACRO(SMALL_CONST_NUMBER, uint32_t, min_mem_pages) \
+ MACRO(WEAK_LINK, WasmCompiledModule, next_instance) \
+ MACRO(WEAK_LINK, WasmCompiledModule, prev_instance) \
+ MACRO(WEAK_LINK, JSObject, owning_instance) \
MACRO(WEAK_LINK, WasmModuleObject, wasm_module)
#if DEBUG
-#define DEBUG_ONLY_TABLE(MACRO) MACRO(SMALL_NUMBER, uint32_t, instance_id)
+#define DEBUG_ONLY_TABLE(MACRO) MACRO(SMALL_CONST_NUMBER, uint32_t, instance_id)
#else
#define DEBUG_ONLY_TABLE(IGNORE)
- uint32_t instance_id() const { return -1; }
+
+ public:
+ uint32_t instance_id() const { return static_cast<uint32_t>(-1); }
#endif
#define WCM_PROPERTY_TABLE(MACRO) \
@@ -309,28 +382,48 @@ class WasmCompiledModule : public FixedArray {
};
public:
- static Handle<WasmCompiledModule> New(Isolate* isolate,
- Handle<WasmSharedModuleData> shared);
+ static Handle<WasmCompiledModule> New(
+ Isolate* isolate, Handle<WasmSharedModuleData> shared,
+ Handle<FixedArray> code_table,
+ MaybeHandle<FixedArray> maybe_empty_function_tables,
+ MaybeHandle<FixedArray> maybe_signature_tables);
static Handle<WasmCompiledModule> Clone(Isolate* isolate,
- Handle<WasmCompiledModule> module) {
- Handle<WasmCompiledModule> ret = Handle<WasmCompiledModule>::cast(
- isolate->factory()->CopyFixedArray(module));
- ret->InitId();
- ret->reset_weak_owning_instance();
- ret->reset_weak_next_instance();
- ret->reset_weak_prev_instance();
- ret->reset_weak_exported_functions();
- return ret;
+ Handle<WasmCompiledModule> module);
+ static void Reset(Isolate* isolate, WasmCompiledModule* module);
+
+ Address GetEmbeddedMemStartOrNull() const {
+ DisallowHeapAllocation no_gc;
+ if (has_embedded_mem_start()) {
+ return reinterpret_cast<Address>(embedded_mem_start());
+ }
+ return nullptr;
+ }
+
+ Address GetGlobalsStartOrNull() const {
+ DisallowHeapAllocation no_gc;
+ if (has_globals_start()) {
+ return reinterpret_cast<Address>(globals_start());
+ }
+ return nullptr;
}
uint32_t mem_size() const;
uint32_t default_mem_size() const;
+ void ResetSpecializationMemInfoIfNeeded();
+ static void SetSpecializationMemInfoFrom(
+ Factory* factory, Handle<WasmCompiledModule> compiled_module,
+ Handle<JSArrayBuffer> buffer);
+ static void SetGlobalsStartAddressFrom(
+ Factory* factory, Handle<WasmCompiledModule> compiled_module,
+ Handle<JSArrayBuffer> buffer);
+
#define DECLARATION(KIND, TYPE, NAME) WCM_##KIND(TYPE, NAME)
WCM_PROPERTY_TABLE(DECLARATION)
#undef DECLARATION
+ public:
// Allow to call method on WasmSharedModuleData also on this object.
#define FORWARD_SHARED(type, name) \
type name() { return shared()->name(); }
@@ -405,7 +498,7 @@ class WasmCompiledModule : public FixedArray {
// Get a list of all possible breakpoints within a given range of this module.
bool GetPossibleBreakpoints(const debug::Location& start,
const debug::Location& end,
- std::vector<debug::Location>* locations);
+ std::vector<debug::BreakLocation>* locations);
// Set a breakpoint on the given byte position inside the given module.
// This will affect all live and future instances of the module.
@@ -420,6 +513,23 @@ class WasmCompiledModule : public FixedArray {
// FixedArray with all hit breakpoint objects.
MaybeHandle<FixedArray> CheckBreakPoints(int position);
+ // Compile lazily the function called in the given caller code object at the
+ // given offset.
+ // If the called function cannot be determined from the caller (indirect
+ // call / exported function), func_index must be set. Otherwise it can be -1.
+ // If patch_caller is set, then all direct calls to functions which were
+ // already lazily compiled are patched (at least the given call site).
+ // Returns the Code to be called at the given call site, or an empty Handle if
+ // an error occured during lazy compilation. In this case, an exception has
+ // been set on the isolate.
+ static MaybeHandle<Code> CompileLazy(Isolate*, Handle<WasmInstanceObject>,
+ Handle<Code> caller, int offset,
+ int func_index, bool patch_caller);
+
+ void ReplaceCodeTableForTesting(Handle<FixedArray> testing_table) {
+ set_code_table(testing_table);
+ }
+
private:
void InitId();
@@ -447,13 +557,21 @@ class WasmDebugInfo : public FixedArray {
// interpreter and will always pause at the given offset.
static void SetBreakpoint(Handle<WasmDebugInfo>, int func_index, int offset);
- // Make a function always execute in the interpreter without setting a
+ // Make a set of functions always execute in the interpreter without setting
// breakpoints.
- static void RedirectToInterpreter(Handle<WasmDebugInfo>, int func_index);
+ static void RedirectToInterpreter(Handle<WasmDebugInfo>,
+ Vector<int> func_indexes);
void PrepareStep(StepAction);
- void RunInterpreter(int func_index, uint8_t* arg_buffer);
+ // Execute the specified funtion in the interpreter. Read arguments from
+ // arg_buffer.
+ // The frame_pointer will be used to identify the new activation of the
+ // interpreter for unwinding and frame inspection.
+ // Returns true if exited regularly, false if a trap occured. In the latter
+ // case, a pending exception will have been set on the isolate.
+ bool RunInterpreter(Address frame_pointer, int func_index,
+ uint8_t* arg_buffer);
// Get the stack of the wasm interpreter as pairs of <function index, byte
// offset>. The list is ordered bottom-to-top, i.e. caller before callee.
@@ -463,10 +581,18 @@ class WasmDebugInfo : public FixedArray {
std::unique_ptr<wasm::InterpretedFrame> GetInterpretedFrame(
Address frame_pointer, int idx);
+ // Unwind the interpreted stack belonging to the passed interpreter entry
+ // frame.
+ void Unwind(Address frame_pointer);
+
// Returns the number of calls / function frames executed in the interpreter.
uint64_t NumInterpretedCalls();
DECLARE_GETTER(wasm_instance, WasmInstanceObject);
+
+ // Update the memory view of the interpreter after executing GrowMemory in
+ // compiled code.
+ void UpdateMemory(JSArrayBuffer* new_memory);
};
class WasmInstanceWrapper : public FixedArray {
diff --git a/deps/v8/src/wasm/wasm-opcodes.cc b/deps/v8/src/wasm/wasm-opcodes.cc
index ec1cbd59b2..10dcfe59a7 100644
--- a/deps/v8/src/wasm/wasm-opcodes.cc
+++ b/deps/v8/src/wasm/wasm-opcodes.cc
@@ -11,8 +11,6 @@ namespace v8 {
namespace internal {
namespace wasm {
-typedef Signature<ValueType> FunctionSig;
-
#define CASE_OP(name, str) \
case kExpr##name: \
return str;
@@ -20,14 +18,17 @@ typedef Signature<ValueType> FunctionSig;
#define CASE_I64_OP(name, str) CASE_OP(I64##name, "i64." str)
#define CASE_F32_OP(name, str) CASE_OP(F32##name, "f32." str)
#define CASE_F64_OP(name, str) CASE_OP(F64##name, "f64." str)
-#define CASE_S128_OP(name, str) CASE_OP(S128##name, "s128." str)
#define CASE_F32x4_OP(name, str) CASE_OP(F32x4##name, "f32x4." str)
#define CASE_I32x4_OP(name, str) CASE_OP(I32x4##name, "i32x4." str)
#define CASE_I16x8_OP(name, str) CASE_OP(I16x8##name, "i16x8." str)
#define CASE_I8x16_OP(name, str) CASE_OP(I8x16##name, "i8x16." str)
+#define CASE_S128_OP(name, str) CASE_OP(S128##name, "s128." str)
#define CASE_S32x4_OP(name, str) CASE_OP(S32x4##name, "s32x4." str)
#define CASE_S16x8_OP(name, str) CASE_OP(S16x8##name, "s16x8." str)
#define CASE_S8x16_OP(name, str) CASE_OP(S8x16##name, "s8x16." str)
+#define CASE_S1x4_OP(name, str) CASE_OP(S1x4##name, "s1x4." str)
+#define CASE_S1x8_OP(name, str) CASE_OP(S1x8##name, "s1x8." str)
+#define CASE_S1x16_OP(name, str) CASE_OP(S1x16##name, "s1x16." str)
#define CASE_INT_OP(name, str) CASE_I32_OP(name, str) CASE_I64_OP(name, str)
#define CASE_FLOAT_OP(name, str) CASE_F32_OP(name, str) CASE_F64_OP(name, str)
#define CASE_ALL_OP(name, str) CASE_FLOAT_OP(name, str) CASE_INT_OP(name, str)
@@ -126,10 +127,12 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_SIGN_OP(INT, LoadMem8, "load8")
CASE_SIGN_OP(INT, LoadMem16, "load16")
CASE_SIGN_OP(I64, LoadMem32, "load32")
+ CASE_S128_OP(LoadMem, "load128")
CASE_ALL_OP(StoreMem, "store")
CASE_INT_OP(StoreMem8, "store8")
CASE_INT_OP(StoreMem16, "store16")
CASE_I64_OP(StoreMem32, "store32")
+ CASE_S128_OP(StoreMem, "store128")
// Non-standard opcodes.
CASE_OP(Try, "try")
@@ -175,7 +178,9 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_F32x4_OP(Sqrt, "sqrt")
CASE_F32x4_OP(Div, "div")
CASE_F32x4_OP(RecipApprox, "recip_approx")
- CASE_F32x4_OP(SqrtApprox, "sqrt_approx")
+ CASE_F32x4_OP(RecipRefine, "recip_refine")
+ CASE_F32x4_OP(RecipSqrtApprox, "recip_sqrt_approx")
+ CASE_F32x4_OP(RecipSqrtRefine, "recip_sqrt_refine")
CASE_F32x4_OP(Min, "min")
CASE_F32x4_OP(Max, "max")
CASE_F32x4_OP(MinNum, "min_num")
@@ -186,6 +191,12 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_F32x4_OP(Ge, "ge")
CASE_CONVERT_OP(Convert, F32x4, I32x4, "i32", "convert")
CASE_CONVERT_OP(Convert, I32x4, F32x4, "f32", "convert")
+ CASE_CONVERT_OP(Convert, I32x4, I16x8Low, "i32", "convert")
+ CASE_CONVERT_OP(Convert, I32x4, I16x8High, "i32", "convert")
+ CASE_CONVERT_OP(Convert, I16x8, I32x4, "i32", "convert")
+ CASE_CONVERT_OP(Convert, I16x8, I8x16Low, "i32", "convert")
+ CASE_CONVERT_OP(Convert, I16x8, I8x16High, "i32", "convert")
+ CASE_CONVERT_OP(Convert, I8x16, I16x8, "i32", "convert")
CASE_F32x4_OP(ExtractLane, "extract_lane")
CASE_F32x4_OP(ReplaceLane, "replace_lane")
CASE_SIMDI_OP(ExtractLane, "extract_lane")
@@ -202,9 +213,9 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_SIGN_OP(I8x16, AddSaturate, "add_saturate")
CASE_SIGN_OP(I16x8, SubSaturate, "sub_saturate")
CASE_SIGN_OP(I8x16, SubSaturate, "sub_saturate")
+ CASE_S128_OP(And, "and")
CASE_S128_OP(Or, "or")
CASE_S128_OP(Xor, "xor")
- CASE_S128_OP(And, "and")
CASE_S128_OP(Not, "not")
CASE_S32x4_OP(Select, "select")
CASE_S32x4_OP(Swizzle, "swizzle")
@@ -215,6 +226,24 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_S8x16_OP(Select, "select")
CASE_S8x16_OP(Swizzle, "swizzle")
CASE_S8x16_OP(Shuffle, "shuffle")
+ CASE_S1x4_OP(And, "and")
+ CASE_S1x4_OP(Or, "or")
+ CASE_S1x4_OP(Xor, "xor")
+ CASE_S1x4_OP(Not, "not")
+ CASE_S1x4_OP(AnyTrue, "any_true")
+ CASE_S1x4_OP(AllTrue, "all_true")
+ CASE_S1x8_OP(And, "and")
+ CASE_S1x8_OP(Or, "or")
+ CASE_S1x8_OP(Xor, "xor")
+ CASE_S1x8_OP(Not, "not")
+ CASE_S1x8_OP(AnyTrue, "any_true")
+ CASE_S1x8_OP(AllTrue, "all_true")
+ CASE_S1x16_OP(And, "and")
+ CASE_S1x16_OP(Or, "or")
+ CASE_S1x16_OP(Xor, "xor")
+ CASE_S1x16_OP(Not, "not")
+ CASE_S1x16_OP(AnyTrue, "any_true")
+ CASE_S1x16_OP(AllTrue, "all_true")
// Atomic operations.
CASE_L32_OP(AtomicAdd, "atomic_add")
@@ -244,17 +273,24 @@ bool WasmOpcodes::IsPrefixOpcode(WasmOpcode opcode) {
std::ostream& operator<<(std::ostream& os, const FunctionSig& sig) {
if (sig.return_count() == 0) os << "v";
- for (size_t i = 0; i < sig.return_count(); ++i) {
- os << WasmOpcodes::ShortNameOf(sig.GetReturn(i));
+ for (auto ret : sig.returns()) {
+ os << WasmOpcodes::ShortNameOf(ret);
}
os << "_";
if (sig.parameter_count() == 0) os << "v";
- for (size_t i = 0; i < sig.parameter_count(); ++i) {
- os << WasmOpcodes::ShortNameOf(sig.GetParam(i));
+ for (auto param : sig.parameters()) {
+ os << WasmOpcodes::ShortNameOf(param);
}
return os;
}
+bool IsJSCompatibleSignature(const FunctionSig* sig) {
+ for (auto type : sig->all()) {
+ if (type == wasm::kWasmI64 || type == wasm::kWasmS128) return false;
+ }
+ return true;
+}
+
#define DECLARE_SIG_ENUM(name, ...) kSigEnum_##name,
enum WasmOpcodeSig { FOREACH_SIGNATURE(DECLARE_SIG_ENUM) };
diff --git a/deps/v8/src/wasm/wasm-opcodes.h b/deps/v8/src/wasm/wasm-opcodes.h
index a4812f500a..22a84e519a 100644
--- a/deps/v8/src/wasm/wasm-opcodes.h
+++ b/deps/v8/src/wasm/wasm-opcodes.h
@@ -32,25 +32,26 @@ static const uint8_t kMultivalBlock = 0x41;
// We reuse the internal machine type to represent WebAssembly types.
// A typedef improves readability without adding a whole new type system.
-typedef MachineRepresentation ValueType;
-const ValueType kWasmStmt = MachineRepresentation::kNone;
-const ValueType kWasmI32 = MachineRepresentation::kWord32;
-const ValueType kWasmI64 = MachineRepresentation::kWord64;
-const ValueType kWasmF32 = MachineRepresentation::kFloat32;
-const ValueType kWasmF64 = MachineRepresentation::kFloat64;
-const ValueType kWasmS128 = MachineRepresentation::kSimd128;
-const ValueType kWasmS1x4 = MachineRepresentation::kSimd1x4;
-const ValueType kWasmS1x8 = MachineRepresentation::kSimd1x8;
-const ValueType kWasmS1x16 = MachineRepresentation::kSimd1x16;
-const ValueType kWasmVar = MachineRepresentation::kTagged;
-
-typedef Signature<ValueType> FunctionSig;
+using ValueType = MachineRepresentation;
+constexpr ValueType kWasmStmt = MachineRepresentation::kNone;
+constexpr ValueType kWasmI32 = MachineRepresentation::kWord32;
+constexpr ValueType kWasmI64 = MachineRepresentation::kWord64;
+constexpr ValueType kWasmF32 = MachineRepresentation::kFloat32;
+constexpr ValueType kWasmF64 = MachineRepresentation::kFloat64;
+constexpr ValueType kWasmS128 = MachineRepresentation::kSimd128;
+constexpr ValueType kWasmS1x4 = MachineRepresentation::kSimd1x4;
+constexpr ValueType kWasmS1x8 = MachineRepresentation::kSimd1x8;
+constexpr ValueType kWasmS1x16 = MachineRepresentation::kSimd1x16;
+constexpr ValueType kWasmVar = MachineRepresentation::kTagged;
+
+using FunctionSig = Signature<ValueType>;
std::ostream& operator<<(std::ostream& os, const FunctionSig& function);
+bool IsJSCompatibleSignature(const FunctionSig* sig);
-typedef Vector<const char> WasmName;
+using WasmName = Vector<const char>;
-typedef int WasmCodePosition;
-const WasmCodePosition kNoCodePosition = -1;
+using WasmCodePosition = int;
+constexpr WasmCodePosition kNoCodePosition = -1;
// Control expressions and blocks.
#define FOREACH_CONTROL_OPCODE(V) \
@@ -100,7 +101,8 @@ const WasmCodePosition kNoCodePosition = -1;
V(I64LoadMem16S, 0x32, l_i) \
V(I64LoadMem16U, 0x33, l_i) \
V(I64LoadMem32S, 0x34, l_i) \
- V(I64LoadMem32U, 0x35, l_i)
+ V(I64LoadMem32U, 0x35, l_i) \
+ V(S128LoadMem, 0xc0, s_i)
// Store memory expressions.
#define FOREACH_STORE_MEM_OPCODE(V) \
@@ -112,7 +114,8 @@ const WasmCodePosition kNoCodePosition = -1;
V(I32StoreMem16, 0x3b, i_ii) \
V(I64StoreMem8, 0x3c, l_il) \
V(I64StoreMem16, 0x3d, l_il) \
- V(I64StoreMem32, 0x3e, l_il)
+ V(I64StoreMem32, 0x3e, l_il) \
+ V(S128StoreMem, 0xc1, s_is)
// Miscellaneous memory expressions
#define FOREACH_MISC_MEM_OPCODE(V) \
@@ -247,17 +250,17 @@ const WasmCodePosition kNoCodePosition = -1;
// For compatibility with Asm.js.
#define FOREACH_ASMJS_COMPAT_OPCODE(V) \
- V(F64Acos, 0xc0, d_d) \
- V(F64Asin, 0xc1, d_d) \
- V(F64Atan, 0xc2, d_d) \
- V(F64Cos, 0xc3, d_d) \
- V(F64Sin, 0xc4, d_d) \
- V(F64Tan, 0xc5, d_d) \
- V(F64Exp, 0xc6, d_d) \
- V(F64Log, 0xc7, d_d) \
- V(F64Atan2, 0xc8, d_dd) \
- V(F64Pow, 0xc9, d_dd) \
- V(F64Mod, 0xca, d_dd) \
+ V(F64Acos, 0xc2, d_d) \
+ V(F64Asin, 0xc3, d_d) \
+ V(F64Atan, 0xc4, d_d) \
+ V(F64Cos, 0xc5, d_d) \
+ V(F64Sin, 0xc6, d_d) \
+ V(F64Tan, 0xc7, d_d) \
+ V(F64Exp, 0xc8, d_d) \
+ V(F64Log, 0xc9, d_d) \
+ V(F64Atan2, 0xca, d_dd) \
+ V(F64Pow, 0xcb, d_dd) \
+ V(F64Mod, 0xcc, d_dd) \
V(I32AsmjsDivS, 0xd0, i_ii) \
V(I32AsmjsDivU, 0xd1, i_ii) \
V(I32AsmjsRemS, 0xd2, i_ii) \
@@ -285,7 +288,7 @@ const WasmCodePosition kNoCodePosition = -1;
V(F32x4Neg, 0xe504, s_s) \
V(F32x4Sqrt, 0xe505, s_s) \
V(F32x4RecipApprox, 0xe506, s_s) \
- V(F32x4SqrtApprox, 0xe507, s_s) \
+ V(F32x4RecipSqrtApprox, 0xe507, s_s) \
V(F32x4Add, 0xe508, s_ss) \
V(F32x4Sub, 0xe509, s_ss) \
V(F32x4Mul, 0xe50a, s_ss) \
@@ -294,6 +297,8 @@ const WasmCodePosition kNoCodePosition = -1;
V(F32x4Max, 0xe50d, s_ss) \
V(F32x4MinNum, 0xe50e, s_ss) \
V(F32x4MaxNum, 0xe50f, s_ss) \
+ V(F32x4RecipRefine, 0xe592, s_ss) \
+ V(F32x4RecipSqrtRefine, 0xe593, s_ss) \
V(F32x4Eq, 0xe510, s1x4_ss) \
V(F32x4Ne, 0xe511, s1x4_ss) \
V(F32x4Lt, 0xe512, s1x4_ss) \
@@ -316,13 +321,17 @@ const WasmCodePosition kNoCodePosition = -1;
V(I32x4GtS, 0xe52a, s1x4_ss) \
V(I32x4GeS, 0xe52b, s1x4_ss) \
V(I32x4SConvertF32x4, 0xe52f, s_s) \
+ V(I32x4UConvertF32x4, 0xe537, s_s) \
+ V(I32x4SConvertI16x8Low, 0xe594, s_s) \
+ V(I32x4SConvertI16x8High, 0xe595, s_s) \
+ V(I32x4UConvertI16x8Low, 0xe596, s_s) \
+ V(I32x4UConvertI16x8High, 0xe597, s_s) \
V(I32x4MinU, 0xe530, s_ss) \
V(I32x4MaxU, 0xe531, s_ss) \
V(I32x4LtU, 0xe533, s1x4_ss) \
V(I32x4LeU, 0xe534, s1x4_ss) \
V(I32x4GtU, 0xe535, s1x4_ss) \
V(I32x4GeU, 0xe536, s1x4_ss) \
- V(I32x4UConvertF32x4, 0xe537, s_s) \
V(I16x8Splat, 0xe538, s_i) \
V(I16x8Neg, 0xe53b, s_s) \
V(I16x8Add, 0xe53c, s_ss) \
@@ -346,6 +355,12 @@ const WasmCodePosition kNoCodePosition = -1;
V(I16x8LeU, 0xe554, s1x8_ss) \
V(I16x8GtU, 0xe555, s1x8_ss) \
V(I16x8GeU, 0xe556, s1x8_ss) \
+ V(I16x8SConvertI32x4, 0xe598, s_ss) \
+ V(I16x8UConvertI32x4, 0xe599, s_ss) \
+ V(I16x8SConvertI8x16Low, 0xe59a, s_s) \
+ V(I16x8SConvertI8x16High, 0xe59b, s_s) \
+ V(I16x8UConvertI8x16Low, 0xe59c, s_s) \
+ V(I16x8UConvertI8x16High, 0xe59d, s_s) \
V(I8x16Splat, 0xe557, s_i) \
V(I8x16Neg, 0xe55a, s_s) \
V(I8x16Add, 0xe55b, s_ss) \
@@ -369,6 +384,8 @@ const WasmCodePosition kNoCodePosition = -1;
V(I8x16LeU, 0xe573, s1x16_ss) \
V(I8x16GtU, 0xe574, s1x16_ss) \
V(I8x16GeU, 0xe575, s1x16_ss) \
+ V(I8x16SConvertI16x8, 0xe59e, s_ss) \
+ V(I8x16UConvertI16x8, 0xe59f, s_ss) \
V(S128And, 0xe576, s_ss) \
V(S128Or, 0xe577, s_ss) \
V(S128Xor, 0xe578, s_ss) \
@@ -381,7 +398,25 @@ const WasmCodePosition kNoCodePosition = -1;
V(S16x8Shuffle, 0xe54d, s_ss) \
V(S8x16Select, 0xe56a, s_s1x16ss) \
V(S8x16Swizzle, 0xe56b, s_s) \
- V(S8x16Shuffle, 0xe56c, s_ss)
+ V(S8x16Shuffle, 0xe56c, s_ss) \
+ V(S1x4And, 0xe580, s1x4_s1x4s1x4) \
+ V(S1x4Or, 0xe581, s1x4_s1x4s1x4) \
+ V(S1x4Xor, 0xe582, s1x4_s1x4s1x4) \
+ V(S1x4Not, 0xe583, s1x4_s1x4) \
+ V(S1x4AnyTrue, 0xe584, i_s1x4) \
+ V(S1x4AllTrue, 0xe585, i_s1x4) \
+ V(S1x8And, 0xe586, s1x8_s1x8s1x8) \
+ V(S1x8Or, 0xe587, s1x8_s1x8s1x8) \
+ V(S1x8Xor, 0xe588, s1x8_s1x8s1x8) \
+ V(S1x8Not, 0xe589, s1x8_s1x8) \
+ V(S1x8AnyTrue, 0xe58a, i_s1x8) \
+ V(S1x8AllTrue, 0xe58b, i_s1x8) \
+ V(S1x16And, 0xe58c, s1x16_s1x16s1x16) \
+ V(S1x16Or, 0xe58d, s1x16_s1x16s1x16) \
+ V(S1x16Xor, 0xe58e, s1x16_s1x16s1x16) \
+ V(S1x16Not, 0xe58f, s1x16_s1x16) \
+ V(S1x16AnyTrue, 0xe590, i_s1x16) \
+ V(S1x16AllTrue, 0xe591, i_s1x16)
#define FOREACH_SIMD_1_OPERAND_OPCODE(V) \
V(F32x4ExtractLane, 0xe501, _) \
@@ -483,19 +518,28 @@ const WasmCodePosition kNoCodePosition = -1;
V(f_if, kWasmF32, kWasmI32, kWasmF32) \
V(l_il, kWasmI64, kWasmI32, kWasmI64)
-#define FOREACH_SIMD_SIGNATURE(V) \
- V(s_s, kWasmS128, kWasmS128) \
- V(s_f, kWasmS128, kWasmF32) \
- V(s_ss, kWasmS128, kWasmS128, kWasmS128) \
- V(s1x4_ss, kWasmS1x4, kWasmS128, kWasmS128) \
- V(s1x8_ss, kWasmS1x8, kWasmS128, kWasmS128) \
- V(s1x16_ss, kWasmS1x16, kWasmS128, kWasmS128) \
- V(s_i, kWasmS128, kWasmI32) \
- V(s_si, kWasmS128, kWasmS128, kWasmI32) \
- V(i_s, kWasmI32, kWasmS128) \
- V(s_s1x4ss, kWasmS128, kWasmS1x4, kWasmS128, kWasmS128) \
- V(s_s1x8ss, kWasmS128, kWasmS1x8, kWasmS128, kWasmS128) \
- V(s_s1x16ss, kWasmS128, kWasmS1x16, kWasmS128, kWasmS128)
+#define FOREACH_SIMD_SIGNATURE(V) \
+ V(s_s, kWasmS128, kWasmS128) \
+ V(s_f, kWasmS128, kWasmF32) \
+ V(s_ss, kWasmS128, kWasmS128, kWasmS128) \
+ V(s1x4_ss, kWasmS1x4, kWasmS128, kWasmS128) \
+ V(s1x8_ss, kWasmS1x8, kWasmS128, kWasmS128) \
+ V(s1x16_ss, kWasmS1x16, kWasmS128, kWasmS128) \
+ V(s_i, kWasmS128, kWasmI32) \
+ V(s_si, kWasmS128, kWasmS128, kWasmI32) \
+ V(i_s, kWasmI32, kWasmS128) \
+ V(i_s1x4, kWasmI32, kWasmS1x4) \
+ V(i_s1x8, kWasmI32, kWasmS1x8) \
+ V(i_s1x16, kWasmI32, kWasmS1x16) \
+ V(s_s1x4ss, kWasmS128, kWasmS1x4, kWasmS128, kWasmS128) \
+ V(s_s1x8ss, kWasmS128, kWasmS1x8, kWasmS128, kWasmS128) \
+ V(s_s1x16ss, kWasmS128, kWasmS1x16, kWasmS128, kWasmS128) \
+ V(s1x4_s1x4, kWasmS1x4, kWasmS1x4) \
+ V(s1x4_s1x4s1x4, kWasmS1x4, kWasmS1x4, kWasmS1x4) \
+ V(s1x8_s1x8, kWasmS1x8, kWasmS1x8) \
+ V(s1x8_s1x8s1x8, kWasmS1x8, kWasmS1x8, kWasmS1x8) \
+ V(s1x16_s1x16, kWasmS1x16, kWasmS1x16) \
+ V(s1x16_s1x16s1x16, kWasmS1x16, kWasmS1x16, kWasmS1x16)
#define FOREACH_PREFIX(V) \
V(Simd, 0xe5) \
@@ -655,6 +699,8 @@ class V8_EXPORT_PRIVATE WasmOpcodes {
return store ? kExprF32StoreMem : kExprF32LoadMem;
} else if (type == MachineType::Float64()) {
return store ? kExprF64StoreMem : kExprF64LoadMem;
+ } else if (type == MachineType::Simd128()) {
+ return store ? kExprS128StoreMem : kExprS128LoadMem;
} else {
UNREACHABLE();
return kExprNop;
diff --git a/deps/v8/src/wasm/wasm-result.cc b/deps/v8/src/wasm/wasm-result.cc
index e22f9ad442..2f702551ee 100644
--- a/deps/v8/src/wasm/wasm-result.cc
+++ b/deps/v8/src/wasm/wasm-result.cc
@@ -15,34 +15,29 @@ namespace v8 {
namespace internal {
namespace wasm {
-std::ostream& operator<<(std::ostream& os, const ErrorCode& error_code) {
- switch (error_code) {
- case kSuccess:
- os << "Success";
- break;
- default: // TODO(titzer): render error codes
- os << "Error";
- break;
- }
- return os;
-}
-
void ErrorThrower::Format(i::Handle<i::JSFunction> constructor,
const char* format, va_list args) {
// Only report the first error.
if (error()) return;
- char buffer[256];
- base::OS::VSNPrintF(buffer, 255, format, args);
+ constexpr int kMaxErrorMessageLength = 256;
+ EmbeddedVector<char, kMaxErrorMessageLength> buffer;
- std::ostringstream str;
- if (context_ != nullptr) {
- str << context_ << ": ";
+ int context_len = 0;
+ if (context_) {
+ context_len = SNPrintF(buffer, "%s: ", context_);
+ CHECK_LE(0, context_len); // check for overflow.
}
- str << buffer;
+ int message_len =
+ VSNPrintF(buffer.SubVector(context_len, buffer.length()), format, args);
+ CHECK_LE(0, message_len); // check for overflow.
+
+ Vector<char> whole_message = buffer.SubVector(0, context_len + message_len);
i::Handle<i::String> message =
- isolate_->factory()->NewStringFromAsciiChecked(str.str().c_str());
+ isolate_->factory()
+ ->NewStringFromOneByte(Vector<uint8_t>::cast(whole_message))
+ .ToHandleChecked();
exception_ = isolate_->factory()->NewError(constructor, message);
}
diff --git a/deps/v8/src/wasm/wasm-result.h b/deps/v8/src/wasm/wasm-result.h
index 004ac22d33..79d06758b1 100644
--- a/deps/v8/src/wasm/wasm-result.h
+++ b/deps/v8/src/wasm/wasm-result.h
@@ -5,9 +5,11 @@
#ifndef V8_WASM_RESULT_H_
#define V8_WASM_RESULT_H_
+#include <cstdarg>
#include <memory>
#include "src/base/compiler-specific.h"
+#include "src/utils.h"
#include "src/handles.h"
#include "src/globals.h"
@@ -19,71 +21,74 @@ class Isolate;
namespace wasm {
-// Error codes for programmatic checking of the decoder's verification.
-enum ErrorCode {
- kSuccess,
- kError, // TODO(titzer): introduce real error codes
-};
-
// The overall result of decoding a function or a module.
template <typename T>
-struct Result {
- Result() : val(), error_code(kSuccess), start(nullptr), error_pc(nullptr) {}
- Result(Result&& other) { *this = std::move(other); }
- Result& operator=(Result&& other) {
- MoveFrom(other);
- val = other.val;
- return *this;
- }
+class Result {
+ public:
+ Result() = default;
+
+ template <typename S>
+ explicit Result(S&& value) : val(value) {}
- T val;
- ErrorCode error_code;
- const byte* start;
- const byte* error_pc;
- const byte* error_pt;
- std::unique_ptr<char[]> error_msg;
+ template <typename S>
+ Result(Result<S>&& other)
+ : val(std::move(other.val)),
+ error_offset(other.error_offset),
+ error_msg(std::move(other.error_msg)) {}
- bool ok() const { return error_code == kSuccess; }
- bool failed() const { return error_code != kSuccess; }
+ Result& operator=(Result&& other) = default;
+
+ T val = T{};
+ uint32_t error_offset = 0;
+ std::string error_msg;
+
+ bool ok() const { return error_msg.empty(); }
+ bool failed() const { return !ok(); }
template <typename V>
- void MoveFrom(Result<V>& that) {
- error_code = that.error_code;
- start = that.start;
- error_pc = that.error_pc;
- error_pt = that.error_pt;
- error_msg = std::move(that.error_msg);
+ void MoveErrorFrom(Result<V>& that) {
+ error_offset = that.error_offset;
+ // Use {swap()} + {clear()} instead of move assign, as {that} might still be
+ // used afterwards.
+ error_msg.swap(that.error_msg);
+ that.error_msg.clear();
}
- private:
- DISALLOW_COPY_AND_ASSIGN(Result);
-};
+ void PRINTF_FORMAT(2, 3) error(const char* format, ...) {
+ va_list args;
+ va_start(args, format);
+ verror(format, args);
+ va_end(args);
+ }
-template <typename T>
-std::ostream& operator<<(std::ostream& os, const Result<T>& result) {
- os << "Result = ";
- if (result.ok()) {
- if (result.val != nullptr) {
- os << *result.val;
- } else {
- os << "success (no value)";
- }
- } else if (result.error_msg.get() != nullptr) {
- ptrdiff_t offset = result.error_pc - result.start;
- if (offset < 0) {
- os << result.error_msg.get() << " @" << offset;
- } else {
- os << result.error_msg.get() << " @+" << offset;
+ void PRINTF_FORMAT(2, 0) verror(const char* format, va_list args) {
+ size_t len = base::bits::RoundUpToPowerOfTwo32(
+ static_cast<uint32_t>(strlen(format)));
+ // Allocate increasingly large buffers until the message fits.
+ for (;; len *= 2) {
+ DCHECK_GE(kMaxInt, len);
+ error_msg.resize(len);
+ int written =
+ VSNPrintF(Vector<char>(&error_msg.front(), static_cast<int>(len)),
+ format, args);
+ if (written < 0) continue; // not enough space.
+ if (written == 0) error_msg = "Error"; // assign default message.
+ return;
}
- } else {
- os << result.error_code;
}
- os << std::endl;
- return os;
-}
-V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
- const ErrorCode& error_code);
+ static Result<T> PRINTF_FORMAT(1, 2) Error(const char* format, ...) {
+ va_list args;
+ va_start(args, format);
+ Result<T> result;
+ result.verror(format, args);
+ va_end(args);
+ return result;
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(Result);
+};
// A helper for generating error messages that bubble up to JS exceptions.
class V8_EXPORT_PRIVATE ErrorThrower {
@@ -100,9 +105,9 @@ class V8_EXPORT_PRIVATE ErrorThrower {
template <typename T>
void CompileFailed(const char* error, Result<T>& result) {
- std::ostringstream str;
- str << error << result;
- CompileError("%s", str.str().c_str());
+ DCHECK(result.failed());
+ CompileError("%s: %s @+%u", error, result.error_msg.c_str(),
+ result.error_offset);
}
i::Handle<i::Object> Reify() {
@@ -122,6 +127,7 @@ class V8_EXPORT_PRIVATE ErrorThrower {
i::Handle<i::Object> exception_;
bool wasm_error_ = false;
};
+
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/wasm/wasm-text.cc b/deps/v8/src/wasm/wasm-text.cc
index 9ad86fbb14..1656ffbd42 100644
--- a/deps/v8/src/wasm/wasm-text.cc
+++ b/deps/v8/src/wasm/wasm-text.cc
@@ -52,18 +52,16 @@ void wasm::PrintWasmText(const WasmModule *module,
os << " $";
os.write(fun_name.start(), fun_name.length());
}
- size_t param_count = fun->sig->parameter_count();
- if (param_count) {
+ if (fun->sig->parameter_count()) {
os << " (param";
- for (size_t i = 0; i < param_count; ++i)
- os << ' ' << WasmOpcodes::TypeName(fun->sig->GetParam(i));
+ for (auto param : fun->sig->parameters())
+ os << ' ' << WasmOpcodes::TypeName(param);
os << ')';
}
- size_t return_count = fun->sig->return_count();
- if (return_count) {
+ if (fun->sig->return_count()) {
os << " (result";
- for (size_t i = 0; i < return_count; ++i)
- os << ' ' << WasmOpcodes::TypeName(fun->sig->GetReturn(i));
+ for (auto ret : fun->sig->returns())
+ os << ' ' << WasmOpcodes::TypeName(ret);
os << ')';
}
os << "\n";
@@ -104,7 +102,7 @@ void wasm::PrintWasmText(const WasmModule *module,
case kExprIf:
case kExprBlock:
case kExprTry: {
- BlockTypeOperand operand(&i, i.pc());
+ BlockTypeOperand<false> operand(&i, i.pc());
os << WasmOpcodes::OpcodeName(opcode);
for (unsigned i = 0; i < operand.arity; i++) {
os << " " << WasmOpcodes::TypeName(operand.read_entry(i));
@@ -114,7 +112,7 @@ void wasm::PrintWasmText(const WasmModule *module,
}
case kExprBr:
case kExprBrIf: {
- BreakDepthOperand operand(&i, i.pc());
+ BreakDepthOperand<false> operand(&i, i.pc());
os << WasmOpcodes::OpcodeName(opcode) << ' ' << operand.depth;
break;
}
@@ -126,20 +124,20 @@ void wasm::PrintWasmText(const WasmModule *module,
os << "end";
break;
case kExprBrTable: {
- BranchTableOperand operand(&i, i.pc());
- BranchTableIterator iterator(&i, operand);
+ BranchTableOperand<false> operand(&i, i.pc());
+ BranchTableIterator<false> iterator(&i, operand);
os << "br_table";
while (iterator.has_next()) os << ' ' << iterator.next();
break;
}
case kExprCallIndirect: {
- CallIndirectOperand operand(&i, i.pc());
+ CallIndirectOperand<false> operand(&i, i.pc());
DCHECK_EQ(0, operand.table_index);
os << "call_indirect " << operand.index;
break;
}
case kExprCallFunction: {
- CallFunctionOperand operand(&i, i.pc());
+ CallFunctionOperand<false> operand(&i, i.pc());
os << "call " << operand.index;
break;
}
@@ -147,19 +145,19 @@ void wasm::PrintWasmText(const WasmModule *module,
case kExprSetLocal:
case kExprTeeLocal:
case kExprCatch: {
- LocalIndexOperand operand(&i, i.pc());
+ LocalIndexOperand<false> operand(&i, i.pc());
os << WasmOpcodes::OpcodeName(opcode) << ' ' << operand.index;
break;
}
case kExprGetGlobal:
case kExprSetGlobal: {
- GlobalIndexOperand operand(&i, i.pc());
+ GlobalIndexOperand<false> operand(&i, i.pc());
os << WasmOpcodes::OpcodeName(opcode) << ' ' << operand.index;
break;
}
#define CASE_CONST(type, str, cast_type) \
case kExpr##type##Const: { \
- Imm##type##Operand operand(&i, i.pc()); \
+ Imm##type##Operand<false> operand(&i, i.pc()); \
os << #str ".const " << static_cast<cast_type>(operand.value); \
break; \
}
@@ -171,7 +169,7 @@ void wasm::PrintWasmText(const WasmModule *module,
#define CASE_OPCODE(opcode, _, __) case kExpr##opcode:
FOREACH_LOAD_MEM_OPCODE(CASE_OPCODE)
FOREACH_STORE_MEM_OPCODE(CASE_OPCODE) {
- MemoryAccessOperand operand(&i, i.pc(), kMaxUInt32);
+ MemoryAccessOperand<false> operand(&i, i.pc(), kMaxUInt32);
os << WasmOpcodes::OpcodeName(opcode) << " offset=" << operand.offset
<< " align=" << (1ULL << operand.alignment);
break;