summaryrefslogtreecommitdiff
path: root/deps/v8/src/execution
diff options
context:
space:
mode:
Diffstat (limited to 'deps/v8/src/execution')
-rw-r--r--deps/v8/src/execution/arguments-inl.h6
-rw-r--r--deps/v8/src/execution/arguments.h20
-rw-r--r--deps/v8/src/execution/arm/simulator-arm.cc236
-rw-r--r--deps/v8/src/execution/arm64/simulator-arm64.cc130
-rw-r--r--deps/v8/src/execution/frames.cc24
-rw-r--r--deps/v8/src/execution/frames.h20
-rw-r--r--deps/v8/src/execution/isolate-inl.h55
-rw-r--r--deps/v8/src/execution/isolate.cc377
-rw-r--r--deps/v8/src/execution/isolate.h104
-rw-r--r--deps/v8/src/execution/messages.cc6
-rw-r--r--deps/v8/src/execution/protectors.cc23
-rw-r--r--deps/v8/src/execution/protectors.h79
-rw-r--r--deps/v8/src/execution/s390/frame-constants-s390.h16
-rw-r--r--deps/v8/src/execution/simulator.h16
-rw-r--r--deps/v8/src/execution/stack-guard.cc8
-rw-r--r--deps/v8/src/execution/x64/frame-constants-x64.h2
16 files changed, 433 insertions, 689 deletions
diff --git a/deps/v8/src/execution/arguments-inl.h b/deps/v8/src/execution/arguments-inl.h
index ecdc4ef359..4565f5d265 100644
--- a/deps/v8/src/execution/arguments-inl.h
+++ b/deps/v8/src/execution/arguments-inl.h
@@ -14,15 +14,15 @@ namespace v8 {
namespace internal {
template <class S>
-Handle<S> Arguments::at(int index) {
+Handle<S> Arguments::at(int index) const {
return Handle<S>::cast(at<Object>(index));
}
-int Arguments::smi_at(int index) {
+int Arguments::smi_at(int index) const {
return Smi::ToInt(Object(*address_of_arg_at(index)));
}
-double Arguments::number_at(int index) { return (*this)[index].Number(); }
+double Arguments::number_at(int index) const { return (*this)[index].Number(); }
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/execution/arguments.h b/deps/v8/src/execution/arguments.h
index 8f07dd9db3..77bbe62dfc 100644
--- a/deps/v8/src/execution/arguments.h
+++ b/deps/v8/src/execution/arguments.h
@@ -37,24 +37,26 @@ class Arguments {
DCHECK_GE(length_, 0);
}
- Object operator[](int index) { return Object(*address_of_arg_at(index)); }
+ Object operator[](int index) const {
+ return Object(*address_of_arg_at(index));
+ }
template <class S = Object>
- inline Handle<S> at(int index);
+ inline Handle<S> at(int index) const;
- inline int smi_at(int index);
+ inline int smi_at(int index) const;
- inline double number_at(int index);
+ inline double number_at(int index) const;
inline void set_at(int index, Object value) {
*address_of_arg_at(index) = value.ptr();
}
- inline FullObjectSlot slot_at(int index) {
+ inline FullObjectSlot slot_at(int index) const {
return FullObjectSlot(address_of_arg_at(index));
}
- inline Address* address_of_arg_at(int index) {
+ inline Address* address_of_arg_at(int index) const {
DCHECK_LT(static_cast<uint32_t>(index), static_cast<uint32_t>(length_));
return reinterpret_cast<Address*>(reinterpret_cast<Address>(arguments_) -
index * kSystemPointerSize);
@@ -64,8 +66,8 @@ class Arguments {
int length() const { return static_cast<int>(length_); }
// Arguments on the stack are in reverse order (compared to an array).
- FullObjectSlot first_slot() { return slot_at(length() - 1); }
- FullObjectSlot last_slot() { return slot_at(0); }
+ FullObjectSlot first_slot() const { return slot_at(length() - 1); }
+ FullObjectSlot last_slot() const { return slot_at(0); }
private:
intptr_t length_;
@@ -73,7 +75,7 @@ class Arguments {
};
template <>
-inline Handle<Object> Arguments::at(int index) {
+inline Handle<Object> Arguments::at(int index) const {
return Handle<Object>(address_of_arg_at(index));
}
diff --git a/deps/v8/src/execution/arm/simulator-arm.cc b/deps/v8/src/execution/arm/simulator-arm.cc
index 2677135096..841ff4bfd4 100644
--- a/deps/v8/src/execution/arm/simulator-arm.cc
+++ b/deps/v8/src/execution/arm/simulator-arm.cc
@@ -12,6 +12,8 @@
#include "src/base/bits.h"
#include "src/base/lazy-instance.h"
+#include "src/base/memory.h"
+#include "src/base/overflowing-math.h"
#include "src/codegen/arm/constants-arm.h"
#include "src/codegen/assembler-inl.h"
#include "src/codegen/macro-assembler.h"
@@ -899,16 +901,14 @@ int Simulator::ReadW(int32_t addr) {
// check the alignment here.
base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
local_monitor_.NotifyLoad(addr);
- intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
- return *ptr;
+ return base::ReadUnalignedValue<intptr_t>(addr);
}
int Simulator::ReadExW(int32_t addr) {
base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
local_monitor_.NotifyLoadExcl(addr, TransactionSize::Word);
GlobalMonitor::Get()->NotifyLoadExcl_Locked(addr, &global_monitor_processor_);
- intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
- return *ptr;
+ return base::ReadUnalignedValue<intptr_t>(addr);
}
void Simulator::WriteW(int32_t addr, int value) {
@@ -917,8 +917,7 @@ void Simulator::WriteW(int32_t addr, int value) {
base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
local_monitor_.NotifyStore(addr);
GlobalMonitor::Get()->NotifyStore_Locked(addr, &global_monitor_processor_);
- intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
- *ptr = value;
+ base::WriteUnalignedValue<intptr_t>(addr, value);
}
int Simulator::WriteExW(int32_t addr, int value) {
@@ -926,8 +925,7 @@ int Simulator::WriteExW(int32_t addr, int value) {
if (local_monitor_.NotifyStoreExcl(addr, TransactionSize::Word) &&
GlobalMonitor::Get()->NotifyStoreExcl_Locked(
addr, &global_monitor_processor_)) {
- intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
- *ptr = value;
+ base::WriteUnalignedValue<intptr_t>(addr, value);
return 0;
} else {
return 1;
@@ -939,8 +937,7 @@ uint16_t Simulator::ReadHU(int32_t addr) {
// check the alignment here.
base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
local_monitor_.NotifyLoad(addr);
- uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
- return *ptr;
+ return base::ReadUnalignedValue<uint16_t>(addr);
}
int16_t Simulator::ReadH(int32_t addr) {
@@ -948,16 +945,14 @@ int16_t Simulator::ReadH(int32_t addr) {
// check the alignment here.
base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
local_monitor_.NotifyLoad(addr);
- int16_t* ptr = reinterpret_cast<int16_t*>(addr);
- return *ptr;
+ return base::ReadUnalignedValue<int16_t>(addr);
}
uint16_t Simulator::ReadExHU(int32_t addr) {
base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
local_monitor_.NotifyLoadExcl(addr, TransactionSize::HalfWord);
GlobalMonitor::Get()->NotifyLoadExcl_Locked(addr, &global_monitor_processor_);
- uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
- return *ptr;
+ return base::ReadUnalignedValue<uint16_t>(addr);
}
void Simulator::WriteH(int32_t addr, uint16_t value) {
@@ -966,8 +961,7 @@ void Simulator::WriteH(int32_t addr, uint16_t value) {
base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
local_monitor_.NotifyStore(addr);
GlobalMonitor::Get()->NotifyStore_Locked(addr, &global_monitor_processor_);
- uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
- *ptr = value;
+ base::WriteUnalignedValue(addr, value);
}
void Simulator::WriteH(int32_t addr, int16_t value) {
@@ -976,8 +970,7 @@ void Simulator::WriteH(int32_t addr, int16_t value) {
base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
local_monitor_.NotifyStore(addr);
GlobalMonitor::Get()->NotifyStore_Locked(addr, &global_monitor_processor_);
- int16_t* ptr = reinterpret_cast<int16_t*>(addr);
- *ptr = value;
+ base::WriteUnalignedValue(addr, value);
}
int Simulator::WriteExH(int32_t addr, uint16_t value) {
@@ -985,8 +978,7 @@ int Simulator::WriteExH(int32_t addr, uint16_t value) {
if (local_monitor_.NotifyStoreExcl(addr, TransactionSize::HalfWord) &&
GlobalMonitor::Get()->NotifyStoreExcl_Locked(
addr, &global_monitor_processor_)) {
- uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
- *ptr = value;
+ base::WriteUnalignedValue(addr, value);
return 0;
} else {
return 1;
@@ -996,39 +988,34 @@ int Simulator::WriteExH(int32_t addr, uint16_t value) {
uint8_t Simulator::ReadBU(int32_t addr) {
base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
local_monitor_.NotifyLoad(addr);
- uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
- return *ptr;
+ return base::ReadUnalignedValue<uint8_t>(addr);
}
int8_t Simulator::ReadB(int32_t addr) {
base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
local_monitor_.NotifyLoad(addr);
- int8_t* ptr = reinterpret_cast<int8_t*>(addr);
- return *ptr;
+ return base::ReadUnalignedValue<int8_t>(addr);
}
uint8_t Simulator::ReadExBU(int32_t addr) {
base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
local_monitor_.NotifyLoadExcl(addr, TransactionSize::Byte);
GlobalMonitor::Get()->NotifyLoadExcl_Locked(addr, &global_monitor_processor_);
- uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
- return *ptr;
+ return base::ReadUnalignedValue<uint8_t>(addr);
}
void Simulator::WriteB(int32_t addr, uint8_t value) {
base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
local_monitor_.NotifyStore(addr);
GlobalMonitor::Get()->NotifyStore_Locked(addr, &global_monitor_processor_);
- uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
- *ptr = value;
+ base::WriteUnalignedValue(addr, value);
}
void Simulator::WriteB(int32_t addr, int8_t value) {
base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
local_monitor_.NotifyStore(addr);
GlobalMonitor::Get()->NotifyStore_Locked(addr, &global_monitor_processor_);
- int8_t* ptr = reinterpret_cast<int8_t*>(addr);
- *ptr = value;
+ base::WriteUnalignedValue(addr, value);
}
int Simulator::WriteExB(int32_t addr, uint8_t value) {
@@ -1036,8 +1023,7 @@ int Simulator::WriteExB(int32_t addr, uint8_t value) {
if (local_monitor_.NotifyStoreExcl(addr, TransactionSize::Byte) &&
GlobalMonitor::Get()->NotifyStoreExcl_Locked(
addr, &global_monitor_processor_)) {
- uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
- *ptr = value;
+ base::WriteUnalignedValue(addr, value);
return 0;
} else {
return 1;
@@ -1049,16 +1035,14 @@ int32_t* Simulator::ReadDW(int32_t addr) {
// check the alignment here.
base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
local_monitor_.NotifyLoad(addr);
- int32_t* ptr = reinterpret_cast<int32_t*>(addr);
- return ptr;
+ return reinterpret_cast<int32_t*>(addr);
}
int32_t* Simulator::ReadExDW(int32_t addr) {
base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
local_monitor_.NotifyLoadExcl(addr, TransactionSize::DoubleWord);
GlobalMonitor::Get()->NotifyLoadExcl_Locked(addr, &global_monitor_processor_);
- int32_t* ptr = reinterpret_cast<int32_t*>(addr);
- return ptr;
+ return reinterpret_cast<int32_t*>(addr);
}
void Simulator::WriteDW(int32_t addr, int32_t value1, int32_t value2) {
@@ -1067,9 +1051,8 @@ void Simulator::WriteDW(int32_t addr, int32_t value1, int32_t value2) {
base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
local_monitor_.NotifyStore(addr);
GlobalMonitor::Get()->NotifyStore_Locked(addr, &global_monitor_processor_);
- int32_t* ptr = reinterpret_cast<int32_t*>(addr);
- *ptr++ = value1;
- *ptr = value2;
+ base::WriteUnalignedValue(addr, value1);
+ base::WriteUnalignedValue(addr + sizeof(value1), value2);
}
int Simulator::WriteExDW(int32_t addr, int32_t value1, int32_t value2) {
@@ -1077,9 +1060,8 @@ int Simulator::WriteExDW(int32_t addr, int32_t value1, int32_t value2) {
if (local_monitor_.NotifyStoreExcl(addr, TransactionSize::DoubleWord) &&
GlobalMonitor::Get()->NotifyStoreExcl_Locked(
addr, &global_monitor_processor_)) {
- intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
- *ptr++ = value1;
- *ptr = value2;
+ base::WriteUnalignedValue(addr, value1);
+ base::WriteUnalignedValue(addr + sizeof(value1), value2);
return 0;
} else {
return 1;
@@ -1291,9 +1273,9 @@ int32_t Simulator::GetShiftRm(Instruction* instr, bool* carry_out) {
if (shift_amount == 0) {
*carry_out = c_flag_;
} else {
- result <<= (shift_amount - 1);
+ result = static_cast<uint32_t>(result) << (shift_amount - 1);
*carry_out = (result < 0);
- result <<= 1;
+ result = static_cast<uint32_t>(result) << 1;
}
break;
}
@@ -1316,9 +1298,7 @@ int32_t Simulator::GetShiftRm(Instruction* instr, bool* carry_out) {
if (shift_amount == 0) {
*carry_out = c_flag_;
} else {
- uint32_t left = static_cast<uint32_t>(result) >> shift_amount;
- uint32_t right = static_cast<uint32_t>(result) << (32 - shift_amount);
- result = right | left;
+ result = base::bits::RotateRight32(result, shift_amount);
*carry_out = (static_cast<uint32_t>(result) >> 31) != 0;
}
break;
@@ -1358,9 +1338,9 @@ int32_t Simulator::GetShiftRm(Instruction* instr, bool* carry_out) {
if (shift_amount == 0) {
*carry_out = c_flag_;
} else if (shift_amount < 32) {
- result <<= (shift_amount - 1);
+ result = static_cast<uint32_t>(result) << (shift_amount - 1);
*carry_out = (result < 0);
- result <<= 1;
+ result = static_cast<uint32_t>(result) << 1;
} else if (shift_amount == 32) {
*carry_out = (result & 1) == 1;
result = 0;
@@ -1395,9 +1375,8 @@ int32_t Simulator::GetShiftRm(Instruction* instr, bool* carry_out) {
if (shift_amount == 0) {
*carry_out = c_flag_;
} else {
- uint32_t left = static_cast<uint32_t>(result) >> shift_amount;
- uint32_t right = static_cast<uint32_t>(result) << (32 - shift_amount);
- result = right | left;
+ // Avoid undefined behavior. Rotating by multiples of 32 is no-op.
+ result = base::bits::RotateRight32(result, shift_amount & 31);
*carry_out = (static_cast<uint32_t>(result) >> 31) != 0;
}
break;
@@ -1580,6 +1559,34 @@ using SimulatorRuntimeDirectGetterCall = void (*)(int32_t arg0, int32_t arg1);
using SimulatorRuntimeProfilingGetterCall = void (*)(int32_t arg0, int32_t arg1,
void* arg2);
+// Separate for fine-grained UBSan blacklisting. Casting any given C++
+// function to {SimulatorRuntimeCall} is undefined behavior; but since
+// the target function can indeed be any function that's exposed via
+// the "fast C call" mechanism, we can't reconstruct its signature here.
+int64_t UnsafeGenericFunctionCall(intptr_t function, int32_t arg0, int32_t arg1,
+ int32_t arg2, int32_t arg3, int32_t arg4,
+ int32_t arg5, int32_t arg6, int32_t arg7,
+ int32_t arg8, int32_t arg9) {
+ SimulatorRuntimeCall target =
+ reinterpret_cast<SimulatorRuntimeCall>(function);
+ return target(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9);
+}
+void UnsafeDirectApiCall(intptr_t function, int32_t arg0) {
+ SimulatorRuntimeDirectApiCall target =
+ reinterpret_cast<SimulatorRuntimeDirectApiCall>(function);
+ target(arg0);
+}
+void UnsafeProfilingApiCall(intptr_t function, int32_t arg0, int32_t arg1) {
+ SimulatorRuntimeProfilingApiCall target =
+ reinterpret_cast<SimulatorRuntimeProfilingApiCall>(function);
+ target(arg0, Redirection::ReverseRedirection(arg1));
+}
+void UnsafeDirectGetterCall(intptr_t function, int32_t arg0, int32_t arg1) {
+ SimulatorRuntimeDirectGetterCall target =
+ reinterpret_cast<SimulatorRuntimeDirectGetterCall>(function);
+ target(arg0, arg1);
+}
+
// Software interrupt instructions are used by the simulator to call into the
// C-based V8 runtime.
void Simulator::SoftwareInterrupt(Instruction* instr) {
@@ -1710,9 +1717,7 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
PrintF("\n");
}
CHECK(stack_aligned);
- SimulatorRuntimeDirectApiCall target =
- reinterpret_cast<SimulatorRuntimeDirectApiCall>(external);
- target(arg0);
+ UnsafeDirectApiCall(external, arg0);
} else if (redirection->type() == ExternalReference::PROFILING_API_CALL) {
if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
PrintF("Call to host function at %p args %08x %08x",
@@ -1723,9 +1728,7 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
PrintF("\n");
}
CHECK(stack_aligned);
- SimulatorRuntimeProfilingApiCall target =
- reinterpret_cast<SimulatorRuntimeProfilingApiCall>(external);
- target(arg0, Redirection::ReverseRedirection(arg1));
+ UnsafeProfilingApiCall(external, arg0, arg1);
} else if (redirection->type() == ExternalReference::DIRECT_GETTER_CALL) {
if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
PrintF("Call to host function at %p args %08x %08x",
@@ -1736,9 +1739,7 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
PrintF("\n");
}
CHECK(stack_aligned);
- SimulatorRuntimeDirectGetterCall target =
- reinterpret_cast<SimulatorRuntimeDirectGetterCall>(external);
- target(arg0, arg1);
+ UnsafeDirectGetterCall(external, arg0, arg1);
} else if (redirection->type() ==
ExternalReference::PROFILING_GETTER_CALL) {
if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
@@ -1757,14 +1758,12 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
// builtin call.
DCHECK(redirection->type() == ExternalReference::BUILTIN_CALL ||
redirection->type() == ExternalReference::BUILTIN_CALL_PAIR);
- SimulatorRuntimeCall target =
- reinterpret_cast<SimulatorRuntimeCall>(external);
if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
PrintF(
"Call to host function at %p "
"args %08x, %08x, %08x, %08x, %08x, %08x, %08x, %08x, %08x, %08x",
- reinterpret_cast<void*>(FUNCTION_ADDR(target)), arg0, arg1, arg2,
- arg3, arg4, arg5, arg6, arg7, arg8, arg9);
+ reinterpret_cast<void*>(external), arg0, arg1, arg2, arg3, arg4,
+ arg5, arg6, arg7, arg8, arg9);
if (!stack_aligned) {
PrintF(" with unaligned stack %08x\n", get_register(sp));
}
@@ -1772,7 +1771,8 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
}
CHECK(stack_aligned);
int64_t result =
- target(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9);
+ UnsafeGenericFunctionCall(external, arg0, arg1, arg2, arg3, arg4,
+ arg5, arg6, arg7, arg8, arg9);
int32_t lo_res = static_cast<int32_t>(result);
int32_t hi_res = static_cast<int32_t>(result >> 32);
if (::v8::internal::FLAG_trace_sim) {
@@ -1938,7 +1938,7 @@ void Simulator::DecodeType01(Instruction* instr) {
// Rn field to encode it.
// Format(instr, "mul'cond's 'rn, 'rm, 'rs");
int rd = rn; // Remap the rn field to the Rd register.
- int32_t alu_out = rm_val * rs_val;
+ int32_t alu_out = base::MulWithWraparound(rm_val, rs_val);
set_register(rd, alu_out);
if (instr->HasS()) {
SetNZFlags(alu_out);
@@ -1952,13 +1952,13 @@ void Simulator::DecodeType01(Instruction* instr) {
// Rn field to encode the Rd register and the Rd field to encode
// the Rn register.
// Format(instr, "mla'cond's 'rn, 'rm, 'rs, 'rd");
- int32_t mul_out = rm_val * rs_val;
- int32_t result = acc_value + mul_out;
+ int32_t mul_out = base::MulWithWraparound(rm_val, rs_val);
+ int32_t result = base::AddWithWraparound(acc_value, mul_out);
set_register(rn, result);
} else {
// Format(instr, "mls'cond's 'rn, 'rm, 'rs, 'rd");
- int32_t mul_out = rm_val * rs_val;
- int32_t result = acc_value - mul_out;
+ int32_t mul_out = base::MulWithWraparound(rm_val, rs_val);
+ int32_t result = base::SubWithWraparound(acc_value, mul_out);
set_register(rn, result);
}
}
@@ -2096,7 +2096,7 @@ void Simulator::DecodeType01(Instruction* instr) {
// Format(instr, "'memop'cond'sign'h 'rd, ['rn], -'rm");
DCHECK(!instr->HasW());
addr = rn_val;
- rn_val -= rm_val;
+ rn_val = base::SubWithWraparound(rn_val, rm_val);
set_register(rn, rn_val);
break;
}
@@ -2104,13 +2104,13 @@ void Simulator::DecodeType01(Instruction* instr) {
// Format(instr, "'memop'cond'sign'h 'rd, ['rn], +'rm");
DCHECK(!instr->HasW());
addr = rn_val;
- rn_val += rm_val;
+ rn_val = base::AddWithWraparound(rn_val, rm_val);
set_register(rn, rn_val);
break;
}
case db_x: {
// Format(instr, "'memop'cond'sign'h 'rd, ['rn, -'rm]'w");
- rn_val -= rm_val;
+ rn_val = base::SubWithWraparound(rn_val, rm_val);
addr = rn_val;
if (instr->HasW()) {
set_register(rn, rn_val);
@@ -2119,7 +2119,7 @@ void Simulator::DecodeType01(Instruction* instr) {
}
case ib_x: {
// Format(instr, "'memop'cond'sign'h 'rd, ['rn, +'rm]'w");
- rn_val += rm_val;
+ rn_val = base::AddWithWraparound(rn_val, rm_val);
addr = rn_val;
if (instr->HasW()) {
set_register(rn, rn_val);
@@ -2139,7 +2139,7 @@ void Simulator::DecodeType01(Instruction* instr) {
// Format(instr, "'memop'cond'sign'h 'rd, ['rn], #-'off8");
DCHECK(!instr->HasW());
addr = rn_val;
- rn_val -= imm_val;
+ rn_val = base::SubWithWraparound(rn_val, imm_val);
set_register(rn, rn_val);
break;
}
@@ -2147,13 +2147,13 @@ void Simulator::DecodeType01(Instruction* instr) {
// Format(instr, "'memop'cond'sign'h 'rd, ['rn], #+'off8");
DCHECK(!instr->HasW());
addr = rn_val;
- rn_val += imm_val;
+ rn_val = base::AddWithWraparound(rn_val, imm_val);
set_register(rn, rn_val);
break;
}
case db_x: {
// Format(instr, "'memop'cond'sign'h 'rd, ['rn, #-'off8]'w");
- rn_val -= imm_val;
+ rn_val = base::SubWithWraparound(rn_val, imm_val);
addr = rn_val;
if (instr->HasW()) {
set_register(rn, rn_val);
@@ -2162,7 +2162,7 @@ void Simulator::DecodeType01(Instruction* instr) {
}
case ib_x: {
// Format(instr, "'memop'cond'sign'h 'rd, ['rn, #+'off8]'w");
- rn_val += imm_val;
+ rn_val = base::AddWithWraparound(rn_val, imm_val);
addr = rn_val;
if (instr->HasW()) {
set_register(rn, rn_val);
@@ -2328,7 +2328,7 @@ void Simulator::DecodeType01(Instruction* instr) {
case SUB: {
// Format(instr, "sub'cond's 'rd, 'rn, 'shift_rm");
// Format(instr, "sub'cond's 'rd, 'rn, 'imm");
- alu_out = rn_val - shifter_operand;
+ alu_out = base::SubWithWraparound(rn_val, shifter_operand);
set_register(rd, alu_out);
if (instr->HasS()) {
SetNZFlags(alu_out);
@@ -2341,7 +2341,7 @@ void Simulator::DecodeType01(Instruction* instr) {
case RSB: {
// Format(instr, "rsb'cond's 'rd, 'rn, 'shift_rm");
// Format(instr, "rsb'cond's 'rd, 'rn, 'imm");
- alu_out = shifter_operand - rn_val;
+ alu_out = base::SubWithWraparound(shifter_operand, rn_val);
set_register(rd, alu_out);
if (instr->HasS()) {
SetNZFlags(alu_out);
@@ -2354,7 +2354,7 @@ void Simulator::DecodeType01(Instruction* instr) {
case ADD: {
// Format(instr, "add'cond's 'rd, 'rn, 'shift_rm");
// Format(instr, "add'cond's 'rd, 'rn, 'imm");
- alu_out = rn_val + shifter_operand;
+ alu_out = base::AddWithWraparound(rn_val, shifter_operand);
set_register(rd, alu_out);
if (instr->HasS()) {
SetNZFlags(alu_out);
@@ -2367,7 +2367,8 @@ void Simulator::DecodeType01(Instruction* instr) {
case ADC: {
// Format(instr, "adc'cond's 'rd, 'rn, 'shift_rm");
// Format(instr, "adc'cond's 'rd, 'rn, 'imm");
- alu_out = rn_val + shifter_operand + GetCarry();
+ alu_out = base::AddWithWraparound(
+ base::AddWithWraparound(rn_val, shifter_operand), GetCarry());
set_register(rd, alu_out);
if (instr->HasS()) {
SetNZFlags(alu_out);
@@ -2380,7 +2381,9 @@ void Simulator::DecodeType01(Instruction* instr) {
case SBC: {
// Format(instr, "sbc'cond's 'rd, 'rn, 'shift_rm");
// Format(instr, "sbc'cond's 'rd, 'rn, 'imm");
- alu_out = (rn_val - shifter_operand) - (GetCarry() ? 0 : 1);
+ alu_out = base::SubWithWraparound(
+ base::SubWithWraparound(rn_val, shifter_operand),
+ (GetCarry() ? 0 : 1));
set_register(rd, alu_out);
if (instr->HasS()) {
SetNZFlags(alu_out);
@@ -2430,7 +2433,7 @@ void Simulator::DecodeType01(Instruction* instr) {
if (instr->HasS()) {
// Format(instr, "cmp'cond 'rn, 'shift_rm");
// Format(instr, "cmp'cond 'rn, 'imm");
- alu_out = rn_val - shifter_operand;
+ alu_out = base::SubWithWraparound(rn_val, shifter_operand);
SetNZFlags(alu_out);
SetCFlag(!BorrowFrom(rn_val, shifter_operand));
SetVFlag(OverflowFrom(alu_out, rn_val, shifter_operand, false));
@@ -2447,7 +2450,7 @@ void Simulator::DecodeType01(Instruction* instr) {
if (instr->HasS()) {
// Format(instr, "cmn'cond 'rn, 'shift_rm");
// Format(instr, "cmn'cond 'rn, 'imm");
- alu_out = rn_val + shifter_operand;
+ alu_out = base::AddWithWraparound(rn_val, shifter_operand);
SetNZFlags(alu_out);
SetCFlag(CarryFrom(rn_val, shifter_operand));
SetVFlag(OverflowFrom(alu_out, rn_val, shifter_operand, true));
@@ -2937,7 +2940,7 @@ void Simulator::DecodeType3(Instruction* instr) {
} else {
// sbfx - signed bitfield extract.
int32_t rm_val = get_register(instr->RmValue());
- int32_t extr_val = rm_val << (31 - msbit);
+ int32_t extr_val = static_cast<uint32_t>(rm_val) << (31 - msbit);
extr_val = extr_val >> (31 - widthminus1);
set_register(instr->RdValue(), extr_val);
}
@@ -2969,7 +2972,7 @@ void Simulator::DecodeType3(Instruction* instr) {
return;
} else {
// Format(instr, "'memop'cond'b 'rd, ['rn, +'shift_rm]'w");
- addr = rn_val + shifter_operand;
+ addr = base::AddWithWraparound(rn_val, shifter_operand);
if (instr->HasW()) {
set_register(rn, addr);
}
@@ -3010,7 +3013,8 @@ void Simulator::DecodeType4(Instruction* instr) {
void Simulator::DecodeType5(Instruction* instr) {
// Format(instr, "b'l'cond 'target");
- int off = (instr->SImmed24Value() << 2);
+ int off =
+ static_cast<int>(static_cast<uint32_t>(instr->SImmed24Value()) << 2);
intptr_t pc_address = get_pc();
if (instr->HasLink()) {
set_register(lr, pc_address + kInstrSize);
@@ -3259,14 +3263,14 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
if (instr->SzValue() == 0x1) {
double dn_value = get_double_from_d_register(vn).get_scalar();
double dm_value = get_double_from_d_register(vm).get_scalar();
- double dd_value = dn_value / dm_value;
+ double dd_value = base::Divide(dn_value, dm_value);
div_zero_vfp_flag_ = (dm_value == 0);
dd_value = canonicalizeNaN(dd_value);
set_d_register_from_double(vd, dd_value);
} else {
float sn_value = get_float_from_s_register(n).get_scalar();
float sm_value = get_float_from_s_register(m).get_scalar();
- float sd_value = sn_value / sm_value;
+ float sd_value = base::Divide(sn_value, sm_value);
div_zero_vfp_flag_ = (sm_value == 0);
sd_value = canonicalizeNaN(sd_value);
set_s_register_from_float(d, sd_value);
@@ -3594,10 +3598,22 @@ int VFPConversionSaturate(double val, bool unsigned_res) {
int32_t Simulator::ConvertDoubleToInt(double val, bool unsigned_integer,
VFPRoundingMode mode) {
- // TODO(jkummerow): These casts are undefined behavior if the integral
- // part of {val} does not fit into the destination type.
- int32_t result =
- unsigned_integer ? static_cast<uint32_t>(val) : static_cast<int32_t>(val);
+ int32_t result;
+ if (unsigned_integer) {
+ // The FastD2UI helper does not have the rounding behavior we want here
+ // (it doesn't guarantee any particular rounding, and it doesn't check
+ // for or handle overflow), so do the conversion by hand.
+ using limits = std::numeric_limits<uint32_t>;
+ if (val > limits::max()) {
+ result = limits::max();
+ } else if (!(val >= 0)) { // Negation to catch NaNs.
+ result = 0;
+ } else {
+ result = static_cast<uint32_t>(val);
+ }
+ } else {
+ result = FastD2IChecked(val);
+ }
inv_op_vfp_flag_ = get_inv_op_vfp_flag(mode, val, unsigned_integer);
@@ -3617,7 +3633,9 @@ int32_t Simulator::ConvertDoubleToInt(double val, bool unsigned_integer,
result += val_sign;
} else if (abs_diff == 0.5) {
// Round to even if exactly halfway.
- result = ((result % 2) == 0) ? result : result + val_sign;
+ result = ((result % 2) == 0)
+ ? result
+ : base::AddWithWraparound(result, val_sign);
}
break;
}
@@ -3873,7 +3891,11 @@ void Neg(Simulator* simulator, int Vd, int Vm) {
T src[kElems];
simulator->get_neon_register<T, SIZE>(Vm, src);
for (int i = 0; i < kElems; i++) {
- src[i] = -src[i];
+ if (src[i] != std::numeric_limits<T>::min()) {
+ src[i] = -src[i];
+ } else {
+ // The respective minimum (negative) value maps to itself.
+ }
}
simulator->set_neon_register<T, SIZE>(Vd, src);
}
@@ -3998,6 +4020,17 @@ void Sub(Simulator* simulator, int Vd, int Vm, int Vn) {
simulator->set_neon_register<T, SIZE>(Vd, src1);
}
+namespace {
+uint32_t Multiply(uint32_t a, uint32_t b) { return a * b; }
+uint8_t Multiply(uint8_t a, uint8_t b) { return a * b; }
+// 16-bit integers are special due to C++'s implicit conversion rules.
+// See https://bugs.llvm.org/show_bug.cgi?id=25580.
+uint16_t Multiply(uint16_t a, uint16_t b) {
+ uint32_t result = static_cast<uint32_t>(a) * static_cast<uint32_t>(b);
+ return static_cast<uint16_t>(result);
+}
+} // namespace
+
template <typename T, int SIZE>
void Mul(Simulator* simulator, int Vd, int Vm, int Vn) {
static const int kElems = SIZE / sizeof(T);
@@ -4005,7 +4038,7 @@ void Mul(Simulator* simulator, int Vd, int Vm, int Vn) {
simulator->get_neon_register<T, SIZE>(Vn, src1);
simulator->get_neon_register<T, SIZE>(Vm, src2);
for (int i = 0; i < kElems; i++) {
- src1[i] *= src2[i];
+ src1[i] = Multiply(src1[i], src2[i]);
}
simulator->set_neon_register<T, SIZE>(Vd, src1);
}
@@ -4090,7 +4123,8 @@ void ShiftByRegister(Simulator* simulator, int Vd, int Vm, int Vn) {
if (shift_value >= size) {
src[i] = 0;
} else {
- src[i] <<= shift_value;
+ using unsignedT = typename std::make_unsigned<T>::type;
+ src[i] = static_cast<unsignedT>(src[i]) << shift_value;
}
} else {
// If the shift value is greater/equal than size, always end up with -1.
@@ -5721,7 +5755,7 @@ void Simulator::Execute() {
// should be stopping at a particular executed instruction.
while (program_counter != end_sim_pc) {
Instruction* instr = reinterpret_cast<Instruction*>(program_counter);
- icount_++;
+ icount_ = base::AddWithWraparound(icount_, 1);
InstructionDecode(instr);
program_counter = get_pc();
}
@@ -5730,7 +5764,7 @@ void Simulator::Execute() {
// we reach the particular instruction count.
while (program_counter != end_sim_pc) {
Instruction* instr = reinterpret_cast<Instruction*>(program_counter);
- icount_++;
+ icount_ = base::AddWithWraparound(icount_, 1);
if (icount_ == ::v8::internal::FLAG_stop_sim_at) {
ArmDebugger dbg(this);
dbg.Debug();
diff --git a/deps/v8/src/execution/arm64/simulator-arm64.cc b/deps/v8/src/execution/arm64/simulator-arm64.cc
index 71fedd5b2f..d3a73cbad8 100644
--- a/deps/v8/src/execution/arm64/simulator-arm64.cc
+++ b/deps/v8/src/execution/arm64/simulator-arm64.cc
@@ -12,6 +12,7 @@
#include <type_traits>
#include "src/base/lazy-instance.h"
+#include "src/base/overflowing-math.h"
#include "src/codegen/arm64/decoder-arm64-inl.h"
#include "src/codegen/assembler-inl.h"
#include "src/codegen/macro-assembler.h"
@@ -154,6 +155,22 @@ void Simulator::CallImpl(Address entry, CallArgument* args) {
set_sp(original_stack);
}
+#ifdef DEBUG
+namespace {
+int PopLowestIndexAsCode(CPURegList* list) {
+ if (list->IsEmpty()) {
+ return -1;
+ }
+ RegList reg_list = list->list();
+ int index = base::bits::CountTrailingZeros(reg_list);
+ DCHECK((1LL << index) & reg_list);
+ list->Remove(index);
+
+ return index;
+}
+} // namespace
+#endif
+
void Simulator::CheckPCSComplianceAndRun() {
// Adjust JS-based stack limit to C-based stack limit.
isolate_->stack_guard()->AdjustStackLimitForSimulator();
@@ -171,10 +188,10 @@ void Simulator::CheckPCSComplianceAndRun() {
for (int i = 0; i < kNumberOfCalleeSavedRegisters; i++) {
// x31 is not a caller saved register, so no need to specify if we want
// the stack or zero.
- saved_registers[i] = xreg(register_list.PopLowestIndex().code());
+ saved_registers[i] = xreg(PopLowestIndexAsCode(&register_list));
}
for (int i = 0; i < kNumberOfCalleeSavedVRegisters; i++) {
- saved_fpregisters[i] = dreg_bits(fpregister_list.PopLowestIndex().code());
+ saved_fpregisters[i] = dreg_bits(PopLowestIndexAsCode(&fpregister_list));
}
int64_t original_stack = sp();
#endif
@@ -186,11 +203,11 @@ void Simulator::CheckPCSComplianceAndRun() {
register_list = kCalleeSaved;
fpregister_list = kCalleeSavedV;
for (int i = 0; i < kNumberOfCalleeSavedRegisters; i++) {
- DCHECK_EQ(saved_registers[i], xreg(register_list.PopLowestIndex().code()));
+ DCHECK_EQ(saved_registers[i], xreg(PopLowestIndexAsCode(&register_list)));
}
for (int i = 0; i < kNumberOfCalleeSavedVRegisters; i++) {
DCHECK(saved_fpregisters[i] ==
- dreg_bits(fpregister_list.PopLowestIndex().code()));
+ dreg_bits(PopLowestIndexAsCode(&fpregister_list)));
}
// Corrupt caller saved register minus the return regiters.
@@ -217,13 +234,13 @@ void Simulator::CheckPCSComplianceAndRun() {
void Simulator::CorruptRegisters(CPURegList* list, uint64_t value) {
if (list->type() == CPURegister::kRegister) {
while (!list->IsEmpty()) {
- unsigned code = list->PopLowestIndex().code();
+ unsigned code = PopLowestIndexAsCode(list);
set_xreg(code, value | code);
}
} else {
DCHECK_EQ(list->type(), CPURegister::kVRegister);
while (!list->IsEmpty()) {
- unsigned code = list->PopLowestIndex().code();
+ unsigned code = PopLowestIndexAsCode(list);
set_dreg_bits(code, value | code);
}
}
@@ -414,6 +431,34 @@ using SimulatorRuntimeDirectGetterCall = void (*)(int64_t arg0, int64_t arg1);
using SimulatorRuntimeProfilingGetterCall = void (*)(int64_t arg0, int64_t arg1,
void* arg2);
+// Separate for fine-grained UBSan blacklisting. Casting any given C++
+// function to {SimulatorRuntimeCall} is undefined behavior; but since
+// the target function can indeed be any function that's exposed via
+// the "fast C call" mechanism, we can't reconstruct its signature here.
+ObjectPair UnsafeGenericFunctionCall(int64_t function, int64_t arg0,
+ int64_t arg1, int64_t arg2, int64_t arg3,
+ int64_t arg4, int64_t arg5, int64_t arg6,
+ int64_t arg7, int64_t arg8, int64_t arg9) {
+ SimulatorRuntimeCall target =
+ reinterpret_cast<SimulatorRuntimeCall>(function);
+ return target(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9);
+}
+void UnsafeDirectApiCall(int64_t function, int64_t arg0) {
+ SimulatorRuntimeDirectApiCall target =
+ reinterpret_cast<SimulatorRuntimeDirectApiCall>(function);
+ target(arg0);
+}
+void UnsafeProfilingApiCall(int64_t function, int64_t arg0, void* arg1) {
+ SimulatorRuntimeProfilingApiCall target =
+ reinterpret_cast<SimulatorRuntimeProfilingApiCall>(function);
+ target(arg0, arg1);
+}
+void UnsafeDirectGetterCall(int64_t function, int64_t arg0, int64_t arg1) {
+ SimulatorRuntimeDirectGetterCall target =
+ reinterpret_cast<SimulatorRuntimeDirectGetterCall>(function);
+ target(arg0, arg1);
+}
+
void Simulator::DoRuntimeCall(Instruction* instr) {
Redirection* redirection = Redirection::FromInstruction(instr);
@@ -515,10 +560,8 @@ void Simulator::DoRuntimeCall(Instruction* instr) {
", "
"0x%016" PRIx64 ", 0x%016" PRIx64,
arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9);
- SimulatorRuntimeCall target =
- reinterpret_cast<SimulatorRuntimeCall>(external);
- ObjectPair result =
- target(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9);
+ ObjectPair result = UnsafeGenericFunctionCall(
+ external, arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9);
TraceSim("Returned: {%p, %p}\n", reinterpret_cast<void*>(result.x),
reinterpret_cast<void*>(result.y));
#ifdef DEBUG
@@ -532,10 +575,8 @@ void Simulator::DoRuntimeCall(Instruction* instr) {
case ExternalReference::DIRECT_API_CALL: {
// void f(v8::FunctionCallbackInfo&)
TraceSim("Type: DIRECT_API_CALL\n");
- SimulatorRuntimeDirectApiCall target =
- reinterpret_cast<SimulatorRuntimeDirectApiCall>(external);
TraceSim("Arguments: 0x%016" PRIx64 "\n", xreg(0));
- target(xreg(0));
+ UnsafeDirectApiCall(external, xreg(0));
TraceSim("No return value.");
#ifdef DEBUG
CorruptAllCallerSavedCPURegisters();
@@ -606,11 +647,9 @@ void Simulator::DoRuntimeCall(Instruction* instr) {
case ExternalReference::DIRECT_GETTER_CALL: {
// void f(Local<String> property, PropertyCallbackInfo& info)
TraceSim("Type: DIRECT_GETTER_CALL\n");
- SimulatorRuntimeDirectGetterCall target =
- reinterpret_cast<SimulatorRuntimeDirectGetterCall>(external);
TraceSim("Arguments: 0x%016" PRIx64 ", 0x%016" PRIx64 "\n", xreg(0),
xreg(1));
- target(xreg(0), xreg(1));
+ UnsafeDirectGetterCall(external, xreg(0), xreg(1));
TraceSim("No return value.");
#ifdef DEBUG
CorruptAllCallerSavedCPURegisters();
@@ -621,11 +660,9 @@ void Simulator::DoRuntimeCall(Instruction* instr) {
case ExternalReference::PROFILING_API_CALL: {
// void f(v8::FunctionCallbackInfo&, v8::FunctionCallback)
TraceSim("Type: PROFILING_API_CALL\n");
- SimulatorRuntimeProfilingApiCall target =
- reinterpret_cast<SimulatorRuntimeProfilingApiCall>(external);
void* arg1 = Redirection::ReverseRedirection(xreg(1));
TraceSim("Arguments: 0x%016" PRIx64 ", %p\n", xreg(0), arg1);
- target(xreg(0), arg1);
+ UnsafeProfilingApiCall(external, xreg(0), arg1);
TraceSim("No return value.");
#ifdef DEBUG
CorruptAllCallerSavedCPURegisters();
@@ -849,10 +886,12 @@ T Simulator::ShiftOperand(T value, Shift shift_type, unsigned amount) {
if (amount == 0) {
return value;
}
+ // Larger shift {amount}s would be undefined behavior in C++.
+ DCHECK(amount < sizeof(value) * kBitsPerByte);
switch (shift_type) {
case LSL:
- return value << amount;
+ return static_cast<unsignedT>(value) << amount;
case LSR:
return static_cast<unsignedT>(value) >> amount;
case ASR:
@@ -873,6 +912,7 @@ T Simulator::ExtendValue(T value, Extend extend_type, unsigned left_shift) {
const unsigned kSignExtendBShift = (sizeof(T) - 1) * 8;
const unsigned kSignExtendHShift = (sizeof(T) - 2) * 8;
const unsigned kSignExtendWShift = (sizeof(T) - 4) * 8;
+ using unsignedT = typename std::make_unsigned<T>::type;
switch (extend_type) {
case UXTB:
@@ -885,13 +925,19 @@ T Simulator::ExtendValue(T value, Extend extend_type, unsigned left_shift) {
value &= kWordMask;
break;
case SXTB:
- value = (value << kSignExtendBShift) >> kSignExtendBShift;
+ value =
+ static_cast<T>(static_cast<unsignedT>(value) << kSignExtendBShift) >>
+ kSignExtendBShift;
break;
case SXTH:
- value = (value << kSignExtendHShift) >> kSignExtendHShift;
+ value =
+ static_cast<T>(static_cast<unsignedT>(value) << kSignExtendHShift) >>
+ kSignExtendHShift;
break;
case SXTW:
- value = (value << kSignExtendWShift) >> kSignExtendWShift;
+ value =
+ static_cast<T>(static_cast<unsignedT>(value) << kSignExtendWShift) >>
+ kSignExtendWShift;
break;
case UXTX:
case SXTX:
@@ -899,7 +945,7 @@ T Simulator::ExtendValue(T value, Extend extend_type, unsigned left_shift) {
default:
UNREACHABLE();
}
- return value << left_shift;
+ return static_cast<T>(static_cast<unsignedT>(value) << left_shift);
}
template <typename T>
@@ -2283,7 +2329,9 @@ void Simulator::VisitConditionalSelect(Instruction* instr) {
break;
case CSNEG_w:
case CSNEG_x:
- new_val = (uint64_t)(-(int64_t)new_val);
+ // Simulate two's complement (instead of casting to signed and negating)
+ // to avoid undefined behavior on signed overflow.
+ new_val = (~new_val) + 1;
break;
default:
UNIMPLEMENTED();
@@ -2446,23 +2494,27 @@ void Simulator::VisitDataProcessing3Source(Instruction* instr) {
switch (instr->Mask(DataProcessing3SourceMask)) {
case MADD_w:
case MADD_x:
- result = xreg(instr->Ra()) + (xreg(instr->Rn()) * xreg(instr->Rm()));
+ result = base::AddWithWraparound(
+ xreg(instr->Ra()),
+ base::MulWithWraparound(xreg(instr->Rn()), xreg(instr->Rm())));
break;
case MSUB_w:
case MSUB_x:
- result = xreg(instr->Ra()) - (xreg(instr->Rn()) * xreg(instr->Rm()));
+ result = base::SubWithWraparound(
+ xreg(instr->Ra()),
+ base::MulWithWraparound(xreg(instr->Rn()), xreg(instr->Rm())));
break;
case SMADDL_x:
- result = xreg(instr->Ra()) + (rn_s32 * rm_s32);
+ result = base::AddWithWraparound(xreg(instr->Ra()), (rn_s32 * rm_s32));
break;
case SMSUBL_x:
- result = xreg(instr->Ra()) - (rn_s32 * rm_s32);
+ result = base::SubWithWraparound(xreg(instr->Ra()), (rn_s32 * rm_s32));
break;
case UMADDL_x:
- result = xreg(instr->Ra()) + (rn_u32 * rm_u32);
+ result = static_cast<uint64_t>(xreg(instr->Ra())) + (rn_u32 * rm_u32);
break;
case UMSUBL_x:
- result = xreg(instr->Ra()) - (rn_u32 * rm_u32);
+ result = static_cast<uint64_t>(xreg(instr->Ra())) - (rn_u32 * rm_u32);
break;
case SMULH_x:
DCHECK_EQ(instr->Ra(), kZeroRegCode);
@@ -2488,10 +2540,10 @@ void Simulator::BitfieldHelper(Instruction* instr) {
T diff = S - R;
T mask;
if (diff >= 0) {
- mask = diff < reg_size - 1 ? (static_cast<T>(1) << (diff + 1)) - 1
+ mask = diff < reg_size - 1 ? (static_cast<unsignedT>(1) << (diff + 1)) - 1
: static_cast<T>(-1);
} else {
- uint64_t umask = ((1LL << (S + 1)) - 1);
+ uint64_t umask = ((1ULL << (S + 1)) - 1);
umask = (umask >> R) | (umask << (reg_size - R));
mask = static_cast<T>(umask);
diff += reg_size;
@@ -2522,11 +2574,15 @@ void Simulator::BitfieldHelper(Instruction* instr) {
T dst = inzero ? 0 : reg<T>(instr->Rd());
T src = reg<T>(instr->Rn());
// Rotate source bitfield into place.
- T result = (static_cast<unsignedT>(src) >> R) | (src << (reg_size - R));
+ T result = R == 0 ? src
+ : (static_cast<unsignedT>(src) >> R) |
+ (static_cast<unsignedT>(src) << (reg_size - R));
// Determine the sign extension.
- T topbits_preshift = (static_cast<T>(1) << (reg_size - diff - 1)) - 1;
- T signbits = (extend && ((src >> S) & 1) ? topbits_preshift : 0)
- << (diff + 1);
+ T topbits_preshift = (static_cast<unsignedT>(1) << (reg_size - diff - 1)) - 1;
+ T signbits =
+ diff >= reg_size - 1
+ ? 0
+ : ((extend && ((src >> S) & 1) ? topbits_preshift : 0) << (diff + 1));
// Merge sign extension, dest/zero and bitfield.
result = signbits | (result & mask) | (dst & ~mask);
diff --git a/deps/v8/src/execution/frames.cc b/deps/v8/src/execution/frames.cc
index 3b334739da..04768a365c 100644
--- a/deps/v8/src/execution/frames.cc
+++ b/deps/v8/src/execution/frames.cc
@@ -553,14 +553,6 @@ StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
return WASM_EXIT;
case wasm::WasmCode::kWasmToJsWrapper:
return WASM_TO_JS;
- case wasm::WasmCode::kRuntimeStub:
- // Some stubs, like e.g. {WasmCode::kWasmCompileLazy} build their own
- // specialized frame which already carries a type marker.
- // TODO(mstarzinger): This is only needed for the case where embedded
- // builtins are disabled. It can be removed once all non-embedded
- // builtins are gone.
- if (StackFrame::IsTypeMarker(marker)) break;
- return STUB;
case wasm::WasmCode::kInterpreterEntry:
return WASM_INTERPRETER_ENTRY;
default:
@@ -1079,13 +1071,12 @@ Address StubFrame::GetCallerStackPointer() const {
return fp() + ExitFrameConstants::kCallerSPOffset;
}
-int StubFrame::LookupExceptionHandlerInTable(int* stack_slots) {
+int StubFrame::LookupExceptionHandlerInTable() {
Code code = LookupCode();
DCHECK(code.is_turbofanned());
DCHECK_EQ(code.kind(), Code::BUILTIN);
HandlerTable table(code);
int pc_offset = static_cast<int>(pc() - code.InstructionStart());
- *stack_slots = code.stack_slots();
return table.LookupReturn(pc_offset);
}
@@ -1271,6 +1262,7 @@ void JavaScriptFrame::CollectFunctionAndOffsetForICStats(JSFunction function,
if (maybe_script.IsScript()) {
Script script = Script::cast(maybe_script);
ic_info.line_num = script.GetLineNumber(source_pos) + 1;
+ ic_info.column_num = script.GetColumnNumber(source_pos);
ic_info.script_name = ic_stats->GetOrCacheScriptName(script);
}
}
@@ -1627,7 +1619,7 @@ void OptimizedFrame::Summarize(std::vector<FrameSummary>* frames) const {
}
int OptimizedFrame::LookupExceptionHandlerInTable(
- int* stack_slots, HandlerTable::CatchPrediction* prediction) {
+ int* data, HandlerTable::CatchPrediction* prediction) {
// We cannot perform exception prediction on optimized code. Instead, we need
// to use FrameSummary to find the corresponding code offset in unoptimized
// code to perform prediction there.
@@ -1635,7 +1627,7 @@ int OptimizedFrame::LookupExceptionHandlerInTable(
Code code = LookupCode();
HandlerTable table(code);
int pc_offset = static_cast<int>(pc() - code.InstructionStart());
- if (stack_slots) *stack_slots = code.stack_slots();
+ DCHECK_NULL(data); // Data is not used and will not return a value.
// When the return pc has been replaced by a trampoline there won't be
// a handler for this trampoline. Thus we need to use the return pc that
@@ -1676,8 +1668,8 @@ DeoptimizationData OptimizedFrame::GetDeoptimizationData(
Object OptimizedFrame::receiver() const {
Code code = LookupCode();
if (code.kind() == Code::BUILTIN) {
- Address argc_ptr = fp() + OptimizedBuiltinFrameConstants::kArgCOffset;
- intptr_t argc = *reinterpret_cast<intptr_t*>(argc_ptr);
+ intptr_t argc = static_cast<int>(
+ Memory<intptr_t>(fp() + OptimizedBuiltinFrameConstants::kArgCOffset));
intptr_t args_size =
(StandardFrameConstants::kFixedSlotCountAboveFp + argc) *
kSystemPointerSize;
@@ -1950,15 +1942,13 @@ bool WasmCompiledFrame::at_to_number_conversion() const {
return !!pos;
}
-int WasmCompiledFrame::LookupExceptionHandlerInTable(int* stack_slots) {
- DCHECK_NOT_NULL(stack_slots);
+int WasmCompiledFrame::LookupExceptionHandlerInTable() {
wasm::WasmCode* code =
isolate()->wasm_engine()->code_manager()->LookupCode(pc());
if (!code->IsAnonymous() && code->handler_table_size() > 0) {
HandlerTable table(code->handler_table(), code->handler_table_size(),
HandlerTable::kReturnAddressBasedEncoding);
int pc_offset = static_cast<int>(pc() - code->instruction_start());
- *stack_slots = static_cast<int>(code->stack_slots());
return table.LookupReturn(pc_offset);
}
return -1;
diff --git a/deps/v8/src/execution/frames.h b/deps/v8/src/execution/frames.h
index d1e7a7890d..165ff85464 100644
--- a/deps/v8/src/execution/frames.h
+++ b/deps/v8/src/execution/frames.h
@@ -145,7 +145,12 @@ class StackFrame {
intptr_t type = marker >> kSmiTagSize;
// TODO(petermarshall): There is a bug in the arm simulators that causes
// invalid frame markers.
-#if !(defined(USE_SIMULATOR) && (V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_ARM))
+#if defined(USE_SIMULATOR) && (V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_ARM)
+ if (static_cast<uintptr_t>(type) >= Type::NUMBER_OF_TYPES) {
+ // Appease UBSan.
+ return Type::NUMBER_OF_TYPES;
+ }
+#else
DCHECK_LT(static_cast<uintptr_t>(type), Type::NUMBER_OF_TYPES);
#endif
return static_cast<Type>(type);
@@ -733,7 +738,7 @@ class JavaScriptFrame : public StandardFrame {
// Lookup exception handler for current {pc}, returns -1 if none found. Also
// returns data associated with the handler site specific to the frame type:
- // - OptimizedFrame : Data is the stack slot count of the entire frame.
+ // - OptimizedFrame : Data is not used and will not return a value.
// - InterpretedFrame: Data is the register index holding the context.
virtual int LookupExceptionHandlerInTable(
int* data, HandlerTable::CatchPrediction* prediction);
@@ -783,10 +788,8 @@ class StubFrame : public StandardFrame {
Code unchecked_code() const override;
// Lookup exception handler for current {pc}, returns -1 if none found. Only
- // TurboFan stub frames are supported. Also returns data associated with the
- // handler site:
- // - TurboFan stub: Data is the stack slot count of the entire frame.
- int LookupExceptionHandlerInTable(int* data);
+ // TurboFan stub frames are supported.
+ int LookupExceptionHandlerInTable();
protected:
inline explicit StubFrame(StackFrameIteratorBase* iterator);
@@ -938,9 +941,8 @@ class WasmCompiledFrame : public StandardFrame {
void Print(StringStream* accumulator, PrintMode mode,
int index) const override;
- // Lookup exception handler for current {pc}, returns -1 if none found. Also
- // returns the stack slot count of the entire frame.
- int LookupExceptionHandlerInTable(int* data);
+ // Lookup exception handler for current {pc}, returns -1 if none found.
+ int LookupExceptionHandlerInTable();
// Determine the code for the frame.
Code unchecked_code() const override;
diff --git a/deps/v8/src/execution/isolate-inl.h b/deps/v8/src/execution/isolate-inl.h
index e1b021b921..091b185a30 100644
--- a/deps/v8/src/execution/isolate-inl.h
+++ b/deps/v8/src/execution/isolate-inl.h
@@ -113,61 +113,6 @@ Isolate::ExceptionScope::~ExceptionScope() {
NATIVE_CONTEXT_FIELDS(NATIVE_CONTEXT_FIELD_ACCESSOR)
#undef NATIVE_CONTEXT_FIELD_ACCESSOR
-bool Isolate::IsArrayConstructorIntact() {
- Cell array_constructor_cell =
- Cell::cast(root(RootIndex::kArrayConstructorProtector));
- return array_constructor_cell.value() == Smi::FromInt(kProtectorValid);
-}
-
-bool Isolate::IsTypedArraySpeciesLookupChainIntact() {
- PropertyCell species_cell =
- PropertyCell::cast(root(RootIndex::kTypedArraySpeciesProtector));
- return species_cell.value().IsSmi() &&
- Smi::ToInt(species_cell.value()) == kProtectorValid;
-}
-
-bool Isolate::IsPromiseSpeciesLookupChainIntact() {
- PropertyCell species_cell =
- PropertyCell::cast(root(RootIndex::kPromiseSpeciesProtector));
- return species_cell.value().IsSmi() &&
- Smi::ToInt(species_cell.value()) == kProtectorValid;
-}
-
-bool Isolate::IsStringLengthOverflowIntact() {
- Cell string_length_cell = Cell::cast(root(RootIndex::kStringLengthProtector));
- return string_length_cell.value() == Smi::FromInt(kProtectorValid);
-}
-
-bool Isolate::IsArrayBufferDetachingIntact() {
- PropertyCell buffer_detaching =
- PropertyCell::cast(root(RootIndex::kArrayBufferDetachingProtector));
- return buffer_detaching.value() == Smi::FromInt(kProtectorValid);
-}
-
-bool Isolate::IsArrayIteratorLookupChainIntact() {
- PropertyCell array_iterator_cell =
- PropertyCell::cast(root(RootIndex::kArrayIteratorProtector));
- return array_iterator_cell.value() == Smi::FromInt(kProtectorValid);
-}
-
-bool Isolate::IsMapIteratorLookupChainIntact() {
- PropertyCell map_iterator_cell =
- PropertyCell::cast(root(RootIndex::kMapIteratorProtector));
- return map_iterator_cell.value() == Smi::FromInt(kProtectorValid);
-}
-
-bool Isolate::IsSetIteratorLookupChainIntact() {
- PropertyCell set_iterator_cell =
- PropertyCell::cast(root(RootIndex::kSetIteratorProtector));
- return set_iterator_cell.value() == Smi::FromInt(kProtectorValid);
-}
-
-bool Isolate::IsStringIteratorLookupChainIntact() {
- PropertyCell string_iterator_cell =
- PropertyCell::cast(root(RootIndex::kStringIteratorProtector));
- return string_iterator_cell.value() == Smi::FromInt(kProtectorValid);
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/execution/isolate.cc b/deps/v8/src/execution/isolate.cc
index 3ba39562b1..2b6bb76d8a 100644
--- a/deps/v8/src/execution/isolate.cc
+++ b/deps/v8/src/execution/isolate.cc
@@ -15,7 +15,6 @@
#include "src/api/api-inl.h"
#include "src/ast/ast-value-factory.h"
#include "src/ast/scopes.h"
-#include "src/base/adapters.h"
#include "src/base/hashmap.h"
#include "src/base/platform/platform.h"
#include "src/base/sys-info.h"
@@ -36,6 +35,7 @@
#include "src/execution/isolate-inl.h"
#include "src/execution/messages.h"
#include "src/execution/microtask-queue.h"
+#include "src/execution/protectors-inl.h"
#include "src/execution/runtime-profiler.h"
#include "src/execution/simulator.h"
#include "src/execution/v8threads.h"
@@ -51,6 +51,7 @@
#include "src/logging/counters.h"
#include "src/logging/log.h"
#include "src/numbers/hash-seed-inl.h"
+#include "src/objects/backing-store.h"
#include "src/objects/elements.h"
#include "src/objects/frame-array-inl.h"
#include "src/objects/hash-table-inl.h"
@@ -320,7 +321,9 @@ Isolate::FindOrAllocatePerThreadDataForThisThread() {
base::MutexGuard lock_guard(&thread_data_table_mutex_);
per_thread = thread_data_table_.Lookup(thread_id);
if (per_thread == nullptr) {
- base::OS::AdjustSchedulingParams();
+ if (FLAG_adjust_os_scheduling_parameters) {
+ base::OS::AdjustSchedulingParams();
+ }
per_thread = new PerIsolateThreadData(this, thread_id);
thread_data_table_.Insert(per_thread);
}
@@ -1091,12 +1094,14 @@ Handle<Object> CaptureStackTrace(Isolate* isolate, Handle<Object> caller,
} else {
Handle<JSAsyncGeneratorObject> async_generator_object =
Handle<JSAsyncGeneratorObject>::cast(generator_object);
- Handle<AsyncGeneratorRequest> async_generator_request(
- AsyncGeneratorRequest::cast(async_generator_object->queue()),
- isolate);
- Handle<JSPromise> promise(
- JSPromise::cast(async_generator_request->promise()), isolate);
- CaptureAsyncStackTrace(isolate, promise, &builder);
+ Handle<Object> queue(async_generator_object->queue(), isolate);
+ if (!queue->IsUndefined(isolate)) {
+ Handle<AsyncGeneratorRequest> async_generator_request =
+ Handle<AsyncGeneratorRequest>::cast(queue);
+ Handle<JSPromise> promise(
+ JSPromise::cast(async_generator_request->promise()), isolate);
+ CaptureAsyncStackTrace(isolate, promise, &builder);
+ }
}
}
} else {
@@ -1701,22 +1706,20 @@ Object Isolate::UnwindAndFindHandler() {
// currently being executed.
wasm::WasmCodeRefScope code_ref_scope;
WasmCompiledFrame* wasm_frame = static_cast<WasmCompiledFrame*>(frame);
- int stack_slots = 0; // Will contain stack slot count of frame.
- int offset = wasm_frame->LookupExceptionHandlerInTable(&stack_slots);
+ wasm::WasmCode* wasm_code =
+ wasm_engine()->code_manager()->LookupCode(frame->pc());
+ int offset = wasm_frame->LookupExceptionHandlerInTable();
if (offset < 0) break;
// Compute the stack pointer from the frame pointer. This ensures that
// argument slots on the stack are dropped as returning would.
Address return_sp = frame->fp() +
StandardFrameConstants::kFixedFrameSizeAboveFp -
- stack_slots * kSystemPointerSize;
+ wasm_code->stack_slots() * kSystemPointerSize;
// This is going to be handled by Wasm, so we need to set the TLS flag
// again. It was cleared above assuming the frame would be unwound.
trap_handler::SetThreadInWasm();
- // Gather information from the frame.
- wasm::WasmCode* wasm_code =
- wasm_engine()->code_manager()->LookupCode(frame->pc());
return FoundHandler(Context(), wasm_code->instruction_start(), offset,
wasm_code->constant_pool(), return_sp, frame->fp());
}
@@ -1735,18 +1738,14 @@ Object Isolate::UnwindAndFindHandler() {
// For optimized frames we perform a lookup in the handler table.
if (!catchable_by_js) break;
OptimizedFrame* js_frame = static_cast<OptimizedFrame*>(frame);
- int stack_slots = 0; // Will contain stack slot count of frame.
- int offset =
- js_frame->LookupExceptionHandlerInTable(&stack_slots, nullptr);
+ Code code = frame->LookupCode();
+ int offset = js_frame->LookupExceptionHandlerInTable(nullptr, nullptr);
if (offset < 0) break;
// Compute the stack pointer from the frame pointer. This ensures
// that argument slots on the stack are dropped as returning would.
Address return_sp = frame->fp() +
StandardFrameConstants::kFixedFrameSizeAboveFp -
- stack_slots * kSystemPointerSize;
-
- // Gather information from the frame.
- Code code = frame->LookupCode();
+ code.stack_slots() * kSystemPointerSize;
// TODO(bmeurer): Turbofanned BUILTIN frames appear as OPTIMIZED,
// but do not have a code kind of OPTIMIZED_FUNCTION.
@@ -1767,31 +1766,24 @@ Object Isolate::UnwindAndFindHandler() {
// Some stubs are able to handle exceptions.
if (!catchable_by_js) break;
StubFrame* stub_frame = static_cast<StubFrame*>(frame);
+#ifdef DEBUG
wasm::WasmCodeRefScope code_ref_scope;
- wasm::WasmCode* wasm_code =
- wasm_engine()->code_manager()->LookupCode(frame->pc());
- if (wasm_code != nullptr) {
- // It is safe to skip Wasm runtime stubs as none of them contain local
- // exception handlers.
- CHECK_EQ(wasm::WasmCode::kRuntimeStub, wasm_code->kind());
- CHECK_EQ(0, wasm_code->handler_table_size());
- break;
- }
+ DCHECK_NULL(wasm_engine()->code_manager()->LookupCode(frame->pc()));
+#endif // DEBUG
Code code = stub_frame->LookupCode();
if (!code.IsCode() || code.kind() != Code::BUILTIN ||
!code.has_handler_table() || !code.is_turbofanned()) {
break;
}
- int stack_slots = 0; // Will contain stack slot count of frame.
- int offset = stub_frame->LookupExceptionHandlerInTable(&stack_slots);
+ int offset = stub_frame->LookupExceptionHandlerInTable();
if (offset < 0) break;
// Compute the stack pointer from the frame pointer. This ensures
// that argument slots on the stack are dropped as returning would.
Address return_sp = frame->fp() +
StandardFrameConstants::kFixedFrameSizeAboveFp -
- stack_slots * kSystemPointerSize;
+ code.stack_slots() * kSystemPointerSize;
return FoundHandler(Context(), code.InstructionStart(), offset,
code.constant_pool(), return_sp, frame->fp());
@@ -2063,7 +2055,7 @@ void Isolate::PrintCurrentStackTrace(FILE* out) {
for (int i = 0; i < frames->length(); ++i) {
Handle<StackTraceFrame> frame(StackTraceFrame::cast(frames->get(i)), this);
- SerializeStackTraceFrame(this, frame, builder);
+ SerializeStackTraceFrame(this, frame, &builder);
}
Handle<String> stack_trace = builder.Finish().ToHandleChecked();
@@ -2821,7 +2813,7 @@ Isolate* Isolate::New(IsolateAllocationMode mode) {
// IsolateAllocator allocates the memory for the Isolate object according to
// the given allocation mode.
std::unique_ptr<IsolateAllocator> isolate_allocator =
- base::make_unique<IsolateAllocator>(mode);
+ std::make_unique<IsolateAllocator>(mode);
// Construct Isolate object in the allocated memory.
void* isolate_ptr = isolate_allocator->isolate_memory();
Isolate* isolate = new (isolate_ptr) Isolate(std::move(isolate_allocator));
@@ -2986,7 +2978,7 @@ void Isolate::Deinit() {
optimizing_compile_dispatcher_ = nullptr;
}
- wasm_engine()->memory_tracker()->DeleteSharedMemoryObjectsOnIsolate(this);
+ BackingStore::RemoveSharedWasmMemoryObjects(this);
heap_.mark_compact_collector()->EnsureSweepingCompleted();
heap_.memory_allocator()->unmapper()->EnsureUnmappingCompleted();
@@ -3805,308 +3797,12 @@ bool Isolate::IsInAnyContext(Object object, uint32_t index) {
return false;
}
-bool Isolate::IsNoElementsProtectorIntact(Context context) {
- PropertyCell no_elements_cell = heap()->no_elements_protector();
- bool cell_reports_intact =
- no_elements_cell.value().IsSmi() &&
- Smi::ToInt(no_elements_cell.value()) == kProtectorValid;
-
-#ifdef DEBUG
- Context native_context = context.native_context();
-
- Map root_array_map =
- native_context.GetInitialJSArrayMap(GetInitialFastElementsKind());
- JSObject initial_array_proto = JSObject::cast(
- native_context.get(Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
- JSObject initial_object_proto = JSObject::cast(
- native_context.get(Context::INITIAL_OBJECT_PROTOTYPE_INDEX));
- JSObject initial_string_proto = JSObject::cast(
- native_context.get(Context::INITIAL_STRING_PROTOTYPE_INDEX));
-
- if (root_array_map.is_null() || initial_array_proto == initial_object_proto) {
- // We are in the bootstrapping process, and the entire check sequence
- // shouldn't be performed.
- return cell_reports_intact;
- }
-
- // Check that the array prototype hasn't been altered WRT empty elements.
- if (root_array_map.prototype() != initial_array_proto) {
- DCHECK_EQ(false, cell_reports_intact);
- return cell_reports_intact;
- }
-
- FixedArrayBase elements = initial_array_proto.elements();
- ReadOnlyRoots roots(heap());
- if (elements != roots.empty_fixed_array() &&
- elements != roots.empty_slow_element_dictionary()) {
- DCHECK_EQ(false, cell_reports_intact);
- return cell_reports_intact;
- }
-
- // Check that the Object.prototype hasn't been altered WRT empty elements.
- elements = initial_object_proto.elements();
- if (elements != roots.empty_fixed_array() &&
- elements != roots.empty_slow_element_dictionary()) {
- DCHECK_EQ(false, cell_reports_intact);
- return cell_reports_intact;
- }
-
- // Check that the Array.prototype has the Object.prototype as its
- // [[Prototype]] and that the Object.prototype has a null [[Prototype]].
- PrototypeIterator iter(this, initial_array_proto);
- if (iter.IsAtEnd() || iter.GetCurrent() != initial_object_proto) {
- DCHECK_EQ(false, cell_reports_intact);
- DCHECK(!has_pending_exception());
- return cell_reports_intact;
- }
- iter.Advance();
- if (!iter.IsAtEnd()) {
- DCHECK_EQ(false, cell_reports_intact);
- DCHECK(!has_pending_exception());
- return cell_reports_intact;
- }
- DCHECK(!has_pending_exception());
-
- // Check that the String.prototype hasn't been altered WRT empty elements.
- elements = initial_string_proto.elements();
- if (elements != roots.empty_fixed_array() &&
- elements != roots.empty_slow_element_dictionary()) {
- DCHECK_EQ(false, cell_reports_intact);
- return cell_reports_intact;
- }
-
- // Check that the String.prototype has the Object.prototype
- // as its [[Prototype]] still.
- if (initial_string_proto.map().prototype() != initial_object_proto) {
- DCHECK_EQ(false, cell_reports_intact);
- return cell_reports_intact;
- }
-#endif
-
- return cell_reports_intact;
-}
-
-bool Isolate::IsNoElementsProtectorIntact() {
- return Isolate::IsNoElementsProtectorIntact(context());
-}
-
-bool Isolate::IsIsConcatSpreadableLookupChainIntact() {
- Cell is_concat_spreadable_cell = heap()->is_concat_spreadable_protector();
- bool is_is_concat_spreadable_set =
- Smi::ToInt(is_concat_spreadable_cell.value()) == kProtectorInvalid;
-#ifdef DEBUG
- Map root_array_map =
- raw_native_context().GetInitialJSArrayMap(GetInitialFastElementsKind());
- if (root_array_map.is_null()) {
- // Ignore the value of is_concat_spreadable during bootstrap.
- return !is_is_concat_spreadable_set;
- }
- Handle<Object> array_prototype(array_function()->prototype(), this);
- Handle<Symbol> key = factory()->is_concat_spreadable_symbol();
- Handle<Object> value;
- LookupIterator it(this, array_prototype, key);
- if (it.IsFound() && !JSReceiver::GetDataProperty(&it)->IsUndefined(this)) {
- // TODO(cbruni): Currently we do not revert if we unset the
- // @@isConcatSpreadable property on Array.prototype or Object.prototype
- // hence the reverse implication doesn't hold.
- DCHECK(is_is_concat_spreadable_set);
- return false;
- }
-#endif // DEBUG
-
- return !is_is_concat_spreadable_set;
-}
-
-bool Isolate::IsIsConcatSpreadableLookupChainIntact(JSReceiver receiver) {
- if (!IsIsConcatSpreadableLookupChainIntact()) return false;
- return !receiver.HasProxyInPrototype(this);
-}
-
-bool Isolate::IsPromiseHookProtectorIntact() {
- PropertyCell promise_hook_cell = heap()->promise_hook_protector();
- bool is_promise_hook_protector_intact =
- Smi::ToInt(promise_hook_cell.value()) == kProtectorValid;
- DCHECK_IMPLIES(is_promise_hook_protector_intact,
- !promise_hook_or_async_event_delegate_);
- DCHECK_IMPLIES(is_promise_hook_protector_intact,
- !promise_hook_or_debug_is_active_or_async_event_delegate_);
- return is_promise_hook_protector_intact;
-}
-
-bool Isolate::IsPromiseResolveLookupChainIntact() {
- Cell promise_resolve_cell = heap()->promise_resolve_protector();
- bool is_promise_resolve_protector_intact =
- Smi::ToInt(promise_resolve_cell.value()) == kProtectorValid;
- return is_promise_resolve_protector_intact;
-}
-
-bool Isolate::IsPromiseThenLookupChainIntact() {
- PropertyCell promise_then_cell = heap()->promise_then_protector();
- bool is_promise_then_protector_intact =
- Smi::ToInt(promise_then_cell.value()) == kProtectorValid;
- return is_promise_then_protector_intact;
-}
-
-bool Isolate::IsPromiseThenLookupChainIntact(Handle<JSReceiver> receiver) {
- DisallowHeapAllocation no_gc;
- if (!receiver->IsJSPromise()) return false;
- if (!IsInAnyContext(receiver->map().prototype(),
- Context::PROMISE_PROTOTYPE_INDEX)) {
- return false;
- }
- return IsPromiseThenLookupChainIntact();
-}
-
void Isolate::UpdateNoElementsProtectorOnSetElement(Handle<JSObject> object) {
DisallowHeapAllocation no_gc;
if (!object->map().is_prototype_map()) return;
- if (!IsNoElementsProtectorIntact()) return;
+ if (!Protectors::IsNoElementsIntact(this)) return;
if (!IsArrayOrObjectOrStringPrototype(*object)) return;
- PropertyCell::SetValueWithInvalidation(
- this, "no_elements_protector", factory()->no_elements_protector(),
- handle(Smi::FromInt(kProtectorInvalid), this));
-}
-
-void Isolate::TraceProtectorInvalidation(const char* protector_name) {
- static constexpr char kInvalidateProtectorTracingCategory[] =
- "V8.InvalidateProtector";
- static constexpr char kInvalidateProtectorTracingArg[] = "protector-name";
-
- DCHECK(FLAG_trace_protector_invalidation);
-
- // TODO(jgruber): Remove the PrintF once tracing can output to stdout.
- i::PrintF("Invalidating protector cell %s in isolate %p\n", protector_name,
- this);
- TRACE_EVENT_INSTANT1("v8", kInvalidateProtectorTracingCategory,
- TRACE_EVENT_SCOPE_THREAD, kInvalidateProtectorTracingArg,
- protector_name);
-}
-
-void Isolate::InvalidateIsConcatSpreadableProtector() {
- DCHECK(factory()->is_concat_spreadable_protector()->value().IsSmi());
- DCHECK(IsIsConcatSpreadableLookupChainIntact());
- if (FLAG_trace_protector_invalidation) {
- TraceProtectorInvalidation("is_concat_spreadable_protector");
- }
- factory()->is_concat_spreadable_protector()->set_value(
- Smi::FromInt(kProtectorInvalid));
- DCHECK(!IsIsConcatSpreadableLookupChainIntact());
-}
-
-void Isolate::InvalidateArrayConstructorProtector() {
- DCHECK(factory()->array_constructor_protector()->value().IsSmi());
- DCHECK(IsArrayConstructorIntact());
- if (FLAG_trace_protector_invalidation) {
- TraceProtectorInvalidation("array_constructor_protector");
- }
- factory()->array_constructor_protector()->set_value(
- Smi::FromInt(kProtectorInvalid));
- DCHECK(!IsArrayConstructorIntact());
-}
-
-void Isolate::InvalidateTypedArraySpeciesProtector() {
- DCHECK(factory()->typed_array_species_protector()->value().IsSmi());
- DCHECK(IsTypedArraySpeciesLookupChainIntact());
- PropertyCell::SetValueWithInvalidation(
- this, "typed_array_species_protector",
- factory()->typed_array_species_protector(),
- handle(Smi::FromInt(kProtectorInvalid), this));
- DCHECK(!IsTypedArraySpeciesLookupChainIntact());
-}
-
-void Isolate::InvalidatePromiseSpeciesProtector() {
- DCHECK(factory()->promise_species_protector()->value().IsSmi());
- DCHECK(IsPromiseSpeciesLookupChainIntact());
- PropertyCell::SetValueWithInvalidation(
- this, "promise_species_protector", factory()->promise_species_protector(),
- handle(Smi::FromInt(kProtectorInvalid), this));
- DCHECK(!IsPromiseSpeciesLookupChainIntact());
-}
-
-void Isolate::InvalidateStringLengthOverflowProtector() {
- DCHECK(factory()->string_length_protector()->value().IsSmi());
- DCHECK(IsStringLengthOverflowIntact());
- if (FLAG_trace_protector_invalidation) {
- TraceProtectorInvalidation("string_length_protector");
- }
- factory()->string_length_protector()->set_value(
- Smi::FromInt(kProtectorInvalid));
- DCHECK(!IsStringLengthOverflowIntact());
-}
-
-void Isolate::InvalidateArrayIteratorProtector() {
- DCHECK(factory()->array_iterator_protector()->value().IsSmi());
- DCHECK(IsArrayIteratorLookupChainIntact());
- PropertyCell::SetValueWithInvalidation(
- this, "array_iterator_protector", factory()->array_iterator_protector(),
- handle(Smi::FromInt(kProtectorInvalid), this));
- DCHECK(!IsArrayIteratorLookupChainIntact());
-}
-
-void Isolate::InvalidateMapIteratorProtector() {
- DCHECK(factory()->map_iterator_protector()->value().IsSmi());
- DCHECK(IsMapIteratorLookupChainIntact());
- PropertyCell::SetValueWithInvalidation(
- this, "map_iterator_protector", factory()->map_iterator_protector(),
- handle(Smi::FromInt(kProtectorInvalid), this));
- DCHECK(!IsMapIteratorLookupChainIntact());
-}
-
-void Isolate::InvalidateSetIteratorProtector() {
- DCHECK(factory()->set_iterator_protector()->value().IsSmi());
- DCHECK(IsSetIteratorLookupChainIntact());
- PropertyCell::SetValueWithInvalidation(
- this, "set_iterator_protector", factory()->set_iterator_protector(),
- handle(Smi::FromInt(kProtectorInvalid), this));
- DCHECK(!IsSetIteratorLookupChainIntact());
-}
-
-void Isolate::InvalidateStringIteratorProtector() {
- DCHECK(factory()->string_iterator_protector()->value().IsSmi());
- DCHECK(IsStringIteratorLookupChainIntact());
- PropertyCell::SetValueWithInvalidation(
- this, "string_iterator_protector", factory()->string_iterator_protector(),
- handle(Smi::FromInt(kProtectorInvalid), this));
- DCHECK(!IsStringIteratorLookupChainIntact());
-}
-
-void Isolate::InvalidateArrayBufferDetachingProtector() {
- DCHECK(factory()->array_buffer_detaching_protector()->value().IsSmi());
- DCHECK(IsArrayBufferDetachingIntact());
- PropertyCell::SetValueWithInvalidation(
- this, "array_buffer_detaching_protector",
- factory()->array_buffer_detaching_protector(),
- handle(Smi::FromInt(kProtectorInvalid), this));
- DCHECK(!IsArrayBufferDetachingIntact());
-}
-
-void Isolate::InvalidatePromiseHookProtector() {
- DCHECK(factory()->promise_hook_protector()->value().IsSmi());
- DCHECK(IsPromiseHookProtectorIntact());
- PropertyCell::SetValueWithInvalidation(
- this, "promise_hook_protector", factory()->promise_hook_protector(),
- handle(Smi::FromInt(kProtectorInvalid), this));
- DCHECK(!IsPromiseHookProtectorIntact());
-}
-
-void Isolate::InvalidatePromiseResolveProtector() {
- DCHECK(factory()->promise_resolve_protector()->value().IsSmi());
- DCHECK(IsPromiseResolveLookupChainIntact());
- if (FLAG_trace_protector_invalidation) {
- TraceProtectorInvalidation("promise_resolve_protector");
- }
- factory()->promise_resolve_protector()->set_value(
- Smi::FromInt(kProtectorInvalid));
- DCHECK(!IsPromiseResolveLookupChainIntact());
-}
-
-void Isolate::InvalidatePromiseThenProtector() {
- DCHECK(factory()->promise_then_protector()->value().IsSmi());
- DCHECK(IsPromiseThenLookupChainIntact());
- PropertyCell::SetValueWithInvalidation(
- this, "promise_then_protector", factory()->promise_then_protector(),
- handle(Smi::FromInt(kProtectorInvalid), this));
- DCHECK(!IsPromiseThenLookupChainIntact());
+ Protectors::InvalidateNoElements(this);
}
bool Isolate::IsAnyInitialArrayPrototype(Handle<JSArray> array) {
@@ -4256,9 +3952,9 @@ void Isolate::PromiseHookStateUpdated() {
bool promise_hook_or_debug_is_active_or_async_event_delegate =
promise_hook_or_async_event_delegate || debug()->is_active();
if (promise_hook_or_debug_is_active_or_async_event_delegate &&
- IsPromiseHookProtectorIntact()) {
+ Protectors::IsPromiseHookIntact(this)) {
HandleScope scope(this);
- InvalidatePromiseHookProtector();
+ Protectors::InvalidatePromiseHook(this);
}
promise_hook_or_async_event_delegate_ = promise_hook_or_async_event_delegate;
promise_hook_or_debug_is_active_or_async_event_delegate_ =
@@ -4584,6 +4280,15 @@ void Isolate::AddDetachedContext(Handle<Context> context) {
heap()->set_detached_contexts(*detached_contexts);
}
+void Isolate::AddSharedWasmMemory(Handle<WasmMemoryObject> memory_object) {
+ HandleScope scope(this);
+ Handle<WeakArrayList> shared_wasm_memories =
+ factory()->shared_wasm_memories();
+ shared_wasm_memories = WeakArrayList::AddToEnd(
+ this, shared_wasm_memories, MaybeObjectHandle::Weak(memory_object));
+ heap()->set_shared_wasm_memories(*shared_wasm_memories);
+}
+
void Isolate::CheckDetachedContextsAfterGC() {
HandleScope scope(this);
Handle<WeakArrayList> detached_contexts = factory()->detached_contexts();
diff --git a/deps/v8/src/execution/isolate.h b/deps/v8/src/execution/isolate.h
index 4eadb42438..20aea6066c 100644
--- a/deps/v8/src/execution/isolate.h
+++ b/deps/v8/src/execution/isolate.h
@@ -1163,87 +1163,8 @@ class Isolate final : private HiddenFactory {
#endif // V8_INTL_SUPPORT
- static const int kProtectorValid = 1;
- static const int kProtectorInvalid = 0;
-
- inline bool IsArrayConstructorIntact();
-
- // The version with an explicit context parameter can be used when
- // Isolate::context is not set up, e.g. when calling directly into C++ from
- // CSA.
- bool IsNoElementsProtectorIntact(Context context);
- V8_EXPORT_PRIVATE bool IsNoElementsProtectorIntact();
-
bool IsArrayOrObjectOrStringPrototype(Object object);
- inline bool IsTypedArraySpeciesLookupChainIntact();
-
- // Check that the @@species protector is intact, which guards the lookup of
- // "constructor" on JSPromise instances, whose [[Prototype]] is the initial
- // %PromisePrototype%, and the Symbol.species lookup on the
- // %PromisePrototype%.
- inline bool IsPromiseSpeciesLookupChainIntact();
-
- bool IsIsConcatSpreadableLookupChainIntact();
- bool IsIsConcatSpreadableLookupChainIntact(JSReceiver receiver);
- inline bool IsStringLengthOverflowIntact();
- inline bool IsArrayIteratorLookupChainIntact();
-
- // The MapIterator protector protects the original iteration behaviors of
- // Map.prototype.keys(), Map.prototype.values(), and Set.prototype.entries().
- // It does not protect the original iteration behavior of
- // Map.prototype[Symbol.iterator](). The protector is invalidated when:
- // * The 'next' property is set on an object where the property holder is the
- // %MapIteratorPrototype% (e.g. because the object is that very prototype).
- // * The 'Symbol.iterator' property is set on an object where the property
- // holder is the %IteratorPrototype%. Note that this also invalidates the
- // SetIterator protector (see below).
- inline bool IsMapIteratorLookupChainIntact();
-
- // The SetIterator protector protects the original iteration behavior of
- // Set.prototype.keys(), Set.prototype.values(), Set.prototype.entries(),
- // and Set.prototype[Symbol.iterator](). The protector is invalidated when:
- // * The 'next' property is set on an object where the property holder is the
- // %SetIteratorPrototype% (e.g. because the object is that very prototype).
- // * The 'Symbol.iterator' property is set on an object where the property
- // holder is the %SetPrototype% OR %IteratorPrototype%. This means that
- // setting Symbol.iterator on a MapIterator object can also invalidate the
- // SetIterator protector, and vice versa, setting Symbol.iterator on a
- // SetIterator object can also invalidate the MapIterator. This is an over-
- // approximation for the sake of simplicity.
- inline bool IsSetIteratorLookupChainIntact();
-
- // The StringIteratorProtector protects the original string iteration behavior
- // for primitive strings. As long as the StringIteratorProtector is valid,
- // iterating over a primitive string is guaranteed to be unobservable from
- // user code and can thus be cut short. More specifically, the protector gets
- // invalidated as soon as either String.prototype[Symbol.iterator] or
- // String.prototype[Symbol.iterator]().next is modified. This guarantee does
- // not apply to string objects (as opposed to primitives), since they could
- // define their own Symbol.iterator.
- // String.prototype itself does not need to be protected, since it is
- // non-configurable and non-writable.
- inline bool IsStringIteratorLookupChainIntact();
-
- // Make sure we do check for detached array buffers.
- inline bool IsArrayBufferDetachingIntact();
-
- // Disable promise optimizations if promise (debug) hooks have ever been
- // active, because those can observe promises.
- bool IsPromiseHookProtectorIntact();
-
- // Make sure a lookup of "resolve" on the %Promise% intrinsic object
- // yeidls the initial Promise.resolve method.
- bool IsPromiseResolveLookupChainIntact();
-
- // Make sure a lookup of "then" on any JSPromise whose [[Prototype]] is the
- // initial %PromisePrototype% yields the initial method. In addition this
- // protector also guards the negative lookup of "then" on the intrinsic
- // %ObjectPrototype%, meaning that such lookups are guaranteed to yield
- // undefined without triggering any side-effects.
- bool IsPromiseThenLookupChainIntact();
- bool IsPromiseThenLookupChainIntact(Handle<JSReceiver> receiver);
-
// On intent to set an element in object, make sure that appropriate
// notifications occur if the set is on the elements of the array or
// object prototype. Also ensure that changes to prototype chain between
@@ -1259,24 +1180,6 @@ class Isolate final : private HiddenFactory {
UpdateNoElementsProtectorOnSetElement(object);
}
- // The `protector_name` C string must be statically allocated.
- void TraceProtectorInvalidation(const char* protector_name);
-
- void InvalidateArrayConstructorProtector();
- void InvalidateTypedArraySpeciesProtector();
- void InvalidateRegExpSpeciesProtector(Handle<NativeContext> native_context);
- void InvalidatePromiseSpeciesProtector();
- void InvalidateIsConcatSpreadableProtector();
- void InvalidateStringLengthOverflowProtector();
- void InvalidateArrayIteratorProtector();
- void InvalidateMapIteratorProtector();
- void InvalidateSetIteratorProtector();
- void InvalidateStringIteratorProtector();
- void InvalidateArrayBufferDetachingProtector();
- V8_EXPORT_PRIVATE void InvalidatePromiseHookProtector();
- void InvalidatePromiseResolveProtector();
- void InvalidatePromiseThenProtector();
-
// Returns true if array is the initial array prototype in any native context.
bool IsAnyInitialArrayPrototype(Handle<JSArray> array);
@@ -1406,6 +1309,8 @@ class Isolate final : private HiddenFactory {
void AddDetachedContext(Handle<Context> context);
void CheckDetachedContextsAfterGC();
+ void AddSharedWasmMemory(Handle<WasmMemoryObject> memory_object);
+
std::vector<Object>* partial_snapshot_cache() {
return &partial_snapshot_cache_;
}
@@ -1513,6 +1418,11 @@ class Isolate final : private HiddenFactory {
bool HasPrepareStackTraceCallback() const;
void SetAddCrashKeyCallback(AddCrashKeyCallback callback);
+ void AddCrashKey(CrashKeyId id, const std::string& value) {
+ if (add_crash_key_callback_) {
+ add_crash_key_callback_(id, value);
+ }
+ }
void SetRAILMode(RAILMode rail_mode);
diff --git a/deps/v8/src/execution/messages.cc b/deps/v8/src/execution/messages.cc
index 63d1e2be1f..96fb94cd4e 100644
--- a/deps/v8/src/execution/messages.cc
+++ b/deps/v8/src/execution/messages.cc
@@ -603,7 +603,7 @@ int WasmStackFrame::GetColumnNumber() { return GetModuleOffset(); }
int WasmStackFrame::GetModuleOffset() const {
const int function_offset =
- wasm_instance_->module_object().GetFunctionOffset(wasm_func_index_);
+ GetWasmFunctionOffset(wasm_instance_->module(), wasm_func_index_);
return function_offset + GetPosition();
}
@@ -631,7 +631,7 @@ Handle<Object> AsmJsWasmStackFrame::GetReceiver() const {
}
Handle<Object> AsmJsWasmStackFrame::GetFunction() const {
- // TODO(clemensh): Return lazily created JSFunction.
+ // TODO(clemensb): Return lazily created JSFunction.
return Null();
}
@@ -894,7 +894,7 @@ MaybeHandle<Object> ErrorUtils::FormatStackTrace(Isolate* isolate,
Handle<StackTraceFrame> frame(StackTraceFrame::cast(elems->get(i)),
isolate);
- SerializeStackTraceFrame(isolate, frame, builder);
+ SerializeStackTraceFrame(isolate, frame, &builder);
if (isolate->has_pending_exception()) {
// CallSite.toString threw. Parts of the current frame might have been
diff --git a/deps/v8/src/execution/protectors.cc b/deps/v8/src/execution/protectors.cc
index 3ac07eede3..b5b4c47a1b 100644
--- a/deps/v8/src/execution/protectors.cc
+++ b/deps/v8/src/execution/protectors.cc
@@ -16,12 +16,30 @@
namespace v8 {
namespace internal {
+namespace {
+void TraceProtectorInvalidation(const char* protector_name) {
+  DCHECK(FLAG_trace_protector_invalidation);
+  static constexpr char kInvalidateProtectorTracingCategory[] =
+      "V8.InvalidateProtector";
+  static constexpr char kInvalidateProtectorTracingArg[] = "protector-name";
+
+  // TODO(jgruber): Remove the PrintF once tracing can output to stdout.
+  i::PrintF("Invalidating protector cell %s\n", protector_name);
+ TRACE_EVENT_INSTANT1("v8", kInvalidateProtectorTracingCategory,
+ TRACE_EVENT_SCOPE_THREAD, kInvalidateProtectorTracingArg,
+ protector_name);
+}
+} // namespace
+
#define INVALIDATE_PROTECTOR_ON_NATIVE_CONTEXT_DEFINITION(name, cell) \
void Protectors::Invalidate##name(Isolate* isolate, \
Handle<NativeContext> native_context) { \
DCHECK_EQ(*native_context, isolate->raw_native_context()); \
DCHECK(native_context->cell().value().IsSmi()); \
DCHECK(Is##name##Intact(native_context)); \
+ if (FLAG_trace_protector_invalidation) { \
+ TraceProtectorInvalidation(#name); \
+ } \
Handle<PropertyCell> species_cell(native_context->cell(), isolate); \
PropertyCell::SetValueWithInvalidation( \
isolate, #cell, species_cell, \
@@ -36,6 +56,9 @@ DECLARED_PROTECTORS_ON_NATIVE_CONTEXT(
void Protectors::Invalidate##name(Isolate* isolate) { \
DCHECK(isolate->factory()->cell()->value().IsSmi()); \
DCHECK(Is##name##Intact(isolate)); \
+ if (FLAG_trace_protector_invalidation) { \
+ TraceProtectorInvalidation(#name); \
+ } \
PropertyCell::SetValueWithInvalidation( \
isolate, #cell, isolate->factory()->cell(), \
handle(Smi::FromInt(kProtectorInvalid), isolate)); \
diff --git a/deps/v8/src/execution/protectors.h b/deps/v8/src/execution/protectors.h
index 5c54613bb1..4601f16cf0 100644
--- a/deps/v8/src/execution/protectors.h
+++ b/deps/v8/src/execution/protectors.h
@@ -18,19 +18,82 @@ class Protectors : public AllStatic {
#define DECLARED_PROTECTORS_ON_NATIVE_CONTEXT(V) \
V(RegExpSpeciesLookupChainProtector, regexp_species_protector)
-#define DECLARED_PROTECTORS_ON_ISOLATE(V) \
- V(ArraySpeciesLookupChain, ArraySpeciesProtector, array_species_protector)
+#define DECLARED_PROTECTORS_ON_ISOLATE(V) \
+ V(ArrayBufferDetaching, ArrayBufferDetachingProtector, \
+ array_buffer_detaching_protector) \
+ V(ArrayConstructor, ArrayConstructorProtector, array_constructor_protector) \
+ V(ArrayIteratorLookupChain, ArrayIteratorProtector, \
+ array_iterator_protector) \
+ V(ArraySpeciesLookupChain, ArraySpeciesProtector, array_species_protector) \
+ V(IsConcatSpreadableLookupChain, IsConcatSpreadableProtector, \
+ is_concat_spreadable_protector) \
+ V(NoElements, NoElementsProtector, no_elements_protector) \
+ \
+ /* The MapIterator protector protects the original iteration behaviors */ \
+ /* of Map.prototype.keys(), Map.prototype.values(), and */ \
+ /* Set.prototype.entries(). It does not protect the original iteration */ \
+ /* behavior of Map.prototype[Symbol.iterator](). */ \
+ /* The protector is invalidated when: */ \
+ /* * The 'next' property is set on an object where the property holder */ \
+ /* is the %MapIteratorPrototype% (e.g. because the object is that very */ \
+ /* prototype). */ \
+ /* * The 'Symbol.iterator' property is set on an object where the */ \
+ /* property holder is the %IteratorPrototype%. Note that this also */ \
+ /* invalidates the SetIterator protector (see below). */ \
+ V(MapIteratorLookupChain, MapIteratorProtector, map_iterator_protector) \
+ V(PromiseHook, PromiseHookProtector, promise_hook_protector) \
+ V(PromiseThenLookupChain, PromiseThenProtector, promise_then_protector) \
+ V(PromiseResolveLookupChain, PromiseResolveProtector, \
+ promise_resolve_protector) \
+ V(PromiseSpeciesLookupChain, PromiseSpeciesProtector, \
+ promise_species_protector) \
+ \
+ /* The SetIterator protector protects the original iteration behavior of */ \
+ /* Set.prototype.keys(), Set.prototype.values(), */ \
+ /* Set.prototype.entries(), and Set.prototype[Symbol.iterator](). The */ \
+ /* protector is invalidated when: */ \
+ /* * The 'next' property is set on an object where the property holder */ \
+ /* is the %SetIteratorPrototype% (e.g. because the object is that very */ \
+ /* prototype). */ \
+ /* * The 'Symbol.iterator' property is set on an object where the */ \
+ /* property holder is the %SetPrototype% OR %IteratorPrototype%. This */ \
+ /* means that setting Symbol.iterator on a MapIterator object can also */ \
+ /* invalidate the SetIterator protector, and vice versa, setting */ \
+ /* Symbol.iterator on a SetIterator object can also invalidate the */ \
+ /* MapIterator. This is an over-approximation for the sake of */ \
+ /* simplicity. */ \
+ V(SetIteratorLookupChain, SetIteratorProtector, set_iterator_protector) \
+ \
+ /* The StringIteratorProtector protects the original string iteration */ \
+ /* behavior for primitive strings. As long as the */ \
+ /* StringIteratorProtector is valid, iterating over a primitive string */ \
+ /* is guaranteed to be unobservable from user code and can thus be cut */ \
+ /* short. More specifically, the protector gets invalidated as soon as */ \
+ /* either String.prototype[Symbol.iterator] or */ \
+ /* String.prototype[Symbol.iterator]().next is modified. This guarantee */ \
+ /* does not apply to string objects (as opposed to primitives), since */ \
+ /* they could define their own Symbol.iterator. */ \
+ /* String.prototype itself does not need to be protected, since it is */ \
+ /* non-configurable and non-writable. */ \
+ V(StringIteratorLookupChain, StringIteratorProtector, \
+ string_iterator_protector) \
+ V(StringLengthOverflowLookupChain, StringLengthProtector, \
+ string_length_protector) \
+ V(TypedArraySpeciesLookupChain, TypedArraySpeciesProtector, \
+ typed_array_species_protector)
+
+#define DECLARE_PROTECTOR_ON_NATIVE_CONTEXT(name, unused_cell) \
+ V8_EXPORT_PRIVATE static inline bool Is##name##Intact( \
+ Handle<NativeContext> native_context); \
+ V8_EXPORT_PRIVATE static void Invalidate##name( \
+ Isolate* isolate, Handle<NativeContext> native_context);
-#define DECLARE_PROTECTOR_ON_NATIVE_CONTEXT(name, unused_cell) \
- static inline bool Is##name##Intact(Handle<NativeContext> native_context); \
- static void Invalidate##name(Isolate* isolate, \
- Handle<NativeContext> native_context);
DECLARED_PROTECTORS_ON_NATIVE_CONTEXT(DECLARE_PROTECTOR_ON_NATIVE_CONTEXT)
#undef DECLARE_PROTECTOR_ON_NATIVE_CONTEXT
#define DECLARE_PROTECTOR_ON_ISOLATE(name, unused_root_index, unused_cell) \
- static inline bool Is##name##Intact(Isolate* isolate); \
- static void Invalidate##name(Isolate* isolate);
+ V8_EXPORT_PRIVATE static inline bool Is##name##Intact(Isolate* isolate); \
+ V8_EXPORT_PRIVATE static void Invalidate##name(Isolate* isolate);
DECLARED_PROTECTORS_ON_ISOLATE(DECLARE_PROTECTOR_ON_ISOLATE)
#undef DECLARE_PROTECTOR_ON_ISOLATE
diff --git a/deps/v8/src/execution/s390/frame-constants-s390.h b/deps/v8/src/execution/s390/frame-constants-s390.h
index a48a78fd42..34ae136aad 100644
--- a/deps/v8/src/execution/s390/frame-constants-s390.h
+++ b/deps/v8/src/execution/s390/frame-constants-s390.h
@@ -14,7 +14,7 @@ namespace internal {
class EntryFrameConstants : public AllStatic {
public:
static constexpr int kCallerFPOffset =
- -(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
+ -(StandardFrameConstants::kFixedFrameSizeFromFp + kSystemPointerSize);
// Stack offsets for arguments passed to JSEntry.
static constexpr int kArgvOffset = 20 * kSystemPointerSize;
};
@@ -25,13 +25,13 @@ class ExitFrameConstants : public TypedFrameConstants {
DEFINE_TYPED_FRAME_SIZES(1);
// The caller fields are below the frame pointer on the stack.
- static constexpr int kCallerFPOffset = 0 * kPointerSize;
+ static constexpr int kCallerFPOffset = 0 * kSystemPointerSize;
// The calling JS function is below FP.
- static constexpr int kCallerPCOffset = 1 * kPointerSize;
+ static constexpr int kCallerPCOffset = 1 * kSystemPointerSize;
// FP-relative displacement of the caller's SP. It points just
// below the saved PC.
- static constexpr int kCallerSPDisplacement = 2 * kPointerSize;
+ static constexpr int kCallerSPDisplacement = 2 * kSystemPointerSize;
};
class WasmCompileLazyFrameConstants : public TypedFrameConstants {
@@ -47,7 +47,7 @@ class WasmCompileLazyFrameConstants : public TypedFrameConstants {
static constexpr int kWasmInstanceOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
static constexpr int kFixedFrameSizeFromFp =
TypedFrameConstants::kFixedFrameSizeFromFp +
- kNumberOfSavedGpParamRegs * kPointerSize +
+ kNumberOfSavedGpParamRegs * kSystemPointerSize +
kNumberOfSavedFpParamRegs * kDoubleSize;
};
@@ -56,13 +56,13 @@ class JavaScriptFrameConstants : public AllStatic {
// FP-relative.
static constexpr int kLocal0Offset =
StandardFrameConstants::kExpressionsOffset;
- static constexpr int kLastParameterOffset = +2 * kPointerSize;
+ static constexpr int kLastParameterOffset = +2 * kSystemPointerSize;
static constexpr int kFunctionOffset =
StandardFrameConstants::kFunctionOffset;
// Caller SP-relative.
- static constexpr int kParam0Offset = -2 * kPointerSize;
- static constexpr int kReceiverOffset = -1 * kPointerSize;
+ static constexpr int kParam0Offset = -2 * kSystemPointerSize;
+ static constexpr int kReceiverOffset = -1 * kSystemPointerSize;
};
} // namespace internal
diff --git a/deps/v8/src/execution/simulator.h b/deps/v8/src/execution/simulator.h
index 9f98f2039b..9c5cae7e97 100644
--- a/deps/v8/src/execution/simulator.h
+++ b/deps/v8/src/execution/simulator.h
@@ -115,15 +115,26 @@ class GeneratedCode {
#ifdef USE_SIMULATOR
// Defined in simulator-base.h.
Return Call(Args... args) {
+#if defined(V8_TARGET_OS_WIN) && !defined(V8_OS_WIN)
+ FATAL("Generated code execution not possible during cross-compilation.");
+#endif // defined(V8_TARGET_OS_WIN) && !defined(V8_OS_WIN)
return Simulator::current(isolate_)->template Call<Return>(
reinterpret_cast<Address>(fn_ptr_), args...);
}
- DISABLE_CFI_ICALL Return CallIrregexp(Args... args) { return Call(args...); }
+ DISABLE_CFI_ICALL Return CallIrregexp(Args... args) {
+#if defined(V8_TARGET_OS_WIN) && !defined(V8_OS_WIN)
+ FATAL("Generated code execution not possible during cross-compilation.");
+#endif // defined(V8_TARGET_OS_WIN) && !defined(V8_OS_WIN)
+ return Call(args...);
+ }
#else
DISABLE_CFI_ICALL Return Call(Args... args) {
// When running without a simulator we call the entry directly.
+#if defined(V8_TARGET_OS_WIN) && !defined(V8_OS_WIN)
+ FATAL("Generated code execution not possible during cross-compilation.");
+#endif // defined(V8_TARGET_OS_WIN) && !defined(V8_OS_WIN)
#if V8_OS_AIX
// AIX ABI requires function descriptors (FD). Artificially create a pseudo
// FD to ensure correct dispatch to generated code. The 'volatile'
@@ -141,6 +152,9 @@ class GeneratedCode {
DISABLE_CFI_ICALL Return CallIrregexp(Args... args) {
// When running without a simulator we call the entry directly.
+#if defined(V8_TARGET_OS_WIN) && !defined(V8_OS_WIN)
+ FATAL("Generated code execution not possible during cross-compilation.");
+#endif // defined(V8_TARGET_OS_WIN) && !defined(V8_OS_WIN)
return fn_ptr_(args...);
}
#endif // USE_SIMULATOR
diff --git a/deps/v8/src/execution/stack-guard.cc b/deps/v8/src/execution/stack-guard.cc
index 1cf4c4605a..d37327f1c3 100644
--- a/deps/v8/src/execution/stack-guard.cc
+++ b/deps/v8/src/execution/stack-guard.cc
@@ -10,6 +10,7 @@
#include "src/execution/runtime-profiler.h"
#include "src/execution/simulator.h"
#include "src/logging/counters.h"
+#include "src/objects/backing-store.h"
#include "src/roots/roots-inl.h"
#include "src/utils/memcopy.h"
#include "src/wasm/wasm-engine.h"
@@ -86,6 +87,8 @@ void StackGuard::PushInterruptsScope(InterruptsScope* scope) {
current->intercepted_flags_ &= ~scope->intercept_mask_;
}
thread_local_.interrupt_flags_ |= restored_flags;
+
+ if (has_pending_interrupts(access)) set_interrupt_limits(access);
}
if (!has_pending_interrupts(access)) reset_limits(access);
// Add scope to the chain.
@@ -271,8 +274,7 @@ Object StackGuard::HandleInterrupts() {
if (TestAndClear(&interrupt_flags, GROW_SHARED_MEMORY)) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"),
"V8.WasmGrowSharedMemory");
- isolate_->wasm_engine()->memory_tracker()->UpdateSharedMemoryInstances(
- isolate_);
+ BackingStore::UpdateSharedWasmMemoryObjects(isolate_);
}
if (TestAndClear(&interrupt_flags, DEOPT_MARKED_ALLOCATION_SITES)) {
@@ -305,8 +307,6 @@ Object StackGuard::HandleInterrupts() {
}
isolate_->counters()->stack_interrupts()->Increment();
- isolate_->counters()->runtime_profiler_ticks()->Increment();
- isolate_->runtime_profiler()->MarkCandidatesForOptimization();
return ReadOnlyRoots(isolate_).undefined_value();
}
diff --git a/deps/v8/src/execution/x64/frame-constants-x64.h b/deps/v8/src/execution/x64/frame-constants-x64.h
index 5af35b1b3b..49d69829f0 100644
--- a/deps/v8/src/execution/x64/frame-constants-x64.h
+++ b/deps/v8/src/execution/x64/frame-constants-x64.h
@@ -13,7 +13,7 @@ namespace internal {
class EntryFrameConstants : public AllStatic {
public:
-#ifdef _WIN64
+#ifdef V8_TARGET_OS_WIN
static constexpr int kCalleeSaveXMMRegisters = 10;
static constexpr int kXMMRegisterSize = 16;
static constexpr int kXMMRegistersBlockSize =