Diffstat (limited to 'deps/v8/src/assembler.cc')
-rw-r--r--  deps/v8/src/assembler.cc  317
1 file changed, 301 insertions, 16 deletions
diff --git a/deps/v8/src/assembler.cc b/deps/v8/src/assembler.cc
index d81d4ae614..8536ca006f 100644
--- a/deps/v8/src/assembler.cc
+++ b/deps/v8/src/assembler.cc
@@ -91,6 +91,7 @@ namespace internal {
struct DoubleConstant BASE_EMBEDDED {
double min_int;
double one_half;
+ double minus_one_half;
double minus_zero;
double zero;
double uint8_max_value;
@@ -103,18 +104,110 @@ static DoubleConstant double_constants;
const char* const RelocInfo::kFillerCommentString = "DEOPTIMIZATION PADDING";
+static bool math_exp_data_initialized = false;
+static Mutex* math_exp_data_mutex = NULL;
+static double* math_exp_constants_array = NULL;
+static double* math_exp_log_table_array = NULL;
+
// -----------------------------------------------------------------------------
// Implementation of AssemblerBase
-AssemblerBase::AssemblerBase(Isolate* isolate)
+AssemblerBase::AssemblerBase(Isolate* isolate, void* buffer, int buffer_size)
: isolate_(isolate),
- jit_cookie_(0) {
+ jit_cookie_(0),
+ enabled_cpu_features_(0),
+ emit_debug_code_(FLAG_debug_code),
+ predictable_code_size_(false) {
if (FLAG_mask_constants_with_cookie && isolate != NULL) {
jit_cookie_ = V8::RandomPrivate(isolate);
}
+
+ if (buffer == NULL) {
+ // Do our own buffer management.
+ if (buffer_size <= kMinimalBufferSize) {
+ buffer_size = kMinimalBufferSize;
+ if (isolate->assembler_spare_buffer() != NULL) {
+ buffer = isolate->assembler_spare_buffer();
+ isolate->set_assembler_spare_buffer(NULL);
+ }
+ }
+ if (buffer == NULL) buffer = NewArray<byte>(buffer_size);
+ own_buffer_ = true;
+ } else {
+ // Use externally provided buffer instead.
+ ASSERT(buffer_size > 0);
+ own_buffer_ = false;
+ }
+ buffer_ = static_cast<byte*>(buffer);
+ buffer_size_ = buffer_size;
+
+ pc_ = buffer_;
+}
+
+
+AssemblerBase::~AssemblerBase() {
+ if (own_buffer_) {
+ if (isolate() != NULL &&
+ isolate()->assembler_spare_buffer() == NULL &&
+ buffer_size_ == kMinimalBufferSize) {
+ isolate()->set_assembler_spare_buffer(buffer_);
+ } else {
+ DeleteArray(buffer_);
+ }
+ }
+}
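
The constructor and destructor above implement a one-slot buffer cache: when an assembler that owns a minimal-size buffer dies, the buffer is parked on the isolate (assembler_spare_buffer) and handed to the next assembler that asks for a small buffer. A minimal stand-alone sketch of the same pattern follows; the class and member names are illustrative, not part of the patch:

    // Sketch only: a single cached spare buffer, reused across owners.
    #include <cstdint>

    class BufferOwner {
     public:
      static const int kMinimalBufferSize = 4 * 1024;  // size assumed for the sketch

      BufferOwner(uint8_t* buffer, int buffer_size) {
        if (buffer == nullptr) {
          // Do our own buffer management, preferring the cached spare buffer.
          if (buffer_size <= kMinimalBufferSize) {
            buffer_size = kMinimalBufferSize;
            if (spare_ != nullptr) { buffer = spare_; spare_ = nullptr; }
          }
          if (buffer == nullptr) buffer = new uint8_t[buffer_size];
          own_buffer_ = true;
        } else {
          own_buffer_ = false;  // caller keeps ownership of an external buffer
        }
        buffer_ = buffer;
        buffer_size_ = buffer_size;
      }

      ~BufferOwner() {
        if (!own_buffer_) return;
        // Park a minimal-size buffer for reuse instead of freeing it.
        if (spare_ == nullptr && buffer_size_ == kMinimalBufferSize) {
          spare_ = buffer_;
        } else {
          delete[] buffer_;
        }
      }

     private:
      static uint8_t* spare_;  // plays the role of isolate->assembler_spare_buffer()
      uint8_t* buffer_;
      int buffer_size_;
      bool own_buffer_;
    };

    uint8_t* BufferOwner::spare_ = nullptr;

This avoids one heap allocation per short-lived assembler at the cost of keeping a single kMinimalBufferSize buffer alive per isolate.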
+
+
+// -----------------------------------------------------------------------------
+// Implementation of PredictableCodeSizeScope
+
+PredictableCodeSizeScope::PredictableCodeSizeScope(AssemblerBase* assembler,
+ int expected_size)
+ : assembler_(assembler),
+ expected_size_(expected_size),
+ start_offset_(assembler->pc_offset()),
+ old_value_(assembler->predictable_code_size()) {
+ assembler_->set_predictable_code_size(true);
+}
+
+
+PredictableCodeSizeScope::~PredictableCodeSizeScope() {
+ // TODO(svenpanne) Remove the 'if' when everything works.
+ if (expected_size_ >= 0) {
+ CHECK_EQ(expected_size_, assembler_->pc_offset() - start_offset_);
+ }
+ assembler_->set_predictable_code_size(old_value_);
+}
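
PredictableCodeSizeScope is an RAII guard: it records pc_offset() on entry, forces predictable code size while alive, and on exit CHECKs that exactly expected_size bytes were emitted (unless expected_size is negative, per the TODO above). A hypothetical usage sketch; masm and kExpectedBytes are assumed names, not from the patch:

    // Hypothetical usage: guard a fixed-size instruction sequence.
    void EmitFixedSizeSequence(AssemblerBase* masm) {
      static const int kExpectedBytes = 12;  // assumed size of the sequence
      PredictableCodeSizeScope scope(masm, kExpectedBytes);
      // ... emit instructions whose total encoding must be kExpectedBytes ...
    }  // destructor CHECKs that pc_offset() advanced by exactly kExpectedBytes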
+
+
+// -----------------------------------------------------------------------------
+// Implementation of CpuFeatureScope
+
+#ifdef DEBUG
+CpuFeatureScope::CpuFeatureScope(AssemblerBase* assembler, CpuFeature f)
+ : assembler_(assembler) {
+ ASSERT(CpuFeatures::IsSafeForSnapshot(f));
+ old_enabled_ = assembler_->enabled_cpu_features();
+ uint64_t mask = static_cast<uint64_t>(1) << f;
+ // TODO(svenpanne) This special case below doesn't belong here!
+#if V8_TARGET_ARCH_ARM
+ // VFP2 and ARMv7 are implied by VFP3.
+ if (f == VFP3) {
+ mask |=
+ static_cast<uint64_t>(1) << VFP2 |
+ static_cast<uint64_t>(1) << ARMv7;
+ }
+#endif
+ assembler_->set_enabled_cpu_features(old_enabled_ | mask);
}
+CpuFeatureScope::~CpuFeatureScope() {
+ assembler_->set_enabled_cpu_features(old_enabled_);
+}
+#endif
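
In DEBUG builds, CpuFeatureScope records which CPU features the surrounding code is allowed to rely on, so the assembler can check emitted instructions against enabled_cpu_features(); release builds presumably compile the scope away. A hypothetical usage sketch (masm and the SSE2 feature check are assumptions for illustration):

    // Hypothetical usage: only emit SSE2 instructions inside the scope.
    if (CpuFeatures::IsSupported(SSE2)) {
      CpuFeatureScope use_sse2(masm, SSE2);
      // ... SSE2 instructions may be emitted here; the destructor restores
      // the previous feature set when the scope ends ...
    }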
+
+
// -----------------------------------------------------------------------------
// Implementation of Label
@@ -313,6 +406,7 @@ void RelocInfoWriter::Write(const RelocInfo* rinfo) {
#ifdef DEBUG
byte* begin_pos = pos_;
#endif
+ ASSERT(rinfo->rmode() < RelocInfo::NUMBER_OF_MODES);
ASSERT(rinfo->pc() - last_pc_ >= 0);
ASSERT(RelocInfo::LAST_STANDARD_NONCOMPACT_ENUM - RelocInfo::LAST_COMPACT_ENUM
<= kMaxStandardNonCompactModes);
@@ -570,6 +664,15 @@ void RelocIterator::next() {
}
}
}
+ if (code_age_sequence_ != NULL) {
+ byte* old_code_age_sequence = code_age_sequence_;
+ code_age_sequence_ = NULL;
+ if (SetMode(RelocInfo::CODE_AGE_SEQUENCE)) {
+ rinfo_.data_ = 0;
+ rinfo_.pc_ = old_code_age_sequence;
+ return;
+ }
+ }
done_ = true;
}
@@ -585,6 +688,12 @@ RelocIterator::RelocIterator(Code* code, int mode_mask) {
mode_mask_ = mode_mask;
last_id_ = 0;
last_position_ = 0;
+ byte* sequence = code->FindCodeAgeSequence();
+ if (sequence != NULL && !Code::IsYoungSequence(sequence)) {
+ code_age_sequence_ = sequence;
+ } else {
+ code_age_sequence_ = NULL;
+ }
if (mode_mask_ == 0) pos_ = end_;
next();
}
@@ -600,6 +709,7 @@ RelocIterator::RelocIterator(const CodeDesc& desc, int mode_mask) {
mode_mask_ = mode_mask;
last_id_ = 0;
last_position_ = 0;
+ code_age_sequence_ = NULL;
if (mode_mask_ == 0) pos_ = end_;
next();
}
@@ -609,11 +719,28 @@ RelocIterator::RelocIterator(const CodeDesc& desc, int mode_mask) {
// Implementation of RelocInfo
+#ifdef DEBUG
+bool RelocInfo::RequiresRelocation(const CodeDesc& desc) {
+  // Ensure there are no code targets or embedded objects present in the
+  // deoptimization entries, as they would require relocation after code
+  // generation.
+ int mode_mask = RelocInfo::kCodeTargetMask |
+ RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
+ RelocInfo::ModeMask(RelocInfo::GLOBAL_PROPERTY_CELL) |
+ RelocInfo::kApplyMask;
+ RelocIterator it(desc, mode_mask);
+ return !it.done();
+}
+#endif
+
+
#ifdef ENABLE_DISASSEMBLER
const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
switch (rmode) {
- case RelocInfo::NONE:
- return "no reloc";
+ case RelocInfo::NONE32:
+ return "no reloc 32";
+ case RelocInfo::NONE64:
+ return "no reloc 64";
case RelocInfo::EMBEDDED_OBJECT:
return "embedded object";
case RelocInfo::CONSTRUCT_CALL:
@@ -652,6 +779,8 @@ const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
UNREACHABLE();
#endif
return "debug break slot";
+ case RelocInfo::CODE_AGE_SEQUENCE:
+ return "code_age_sequence";
case RelocInfo::NUMBER_OF_MODES:
UNREACHABLE();
return "number_of_modes";
@@ -660,7 +789,7 @@ const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
}
-void RelocInfo::Print(FILE* out) {
+void RelocInfo::Print(Isolate* isolate, FILE* out) {
PrintF(out, "%p %s", pc_, RelocModeName(rmode_));
if (IsComment(rmode_)) {
PrintF(out, " (%s)", reinterpret_cast<char*>(data_));
@@ -682,11 +811,11 @@ void RelocInfo::Print(FILE* out) {
}
} else if (IsPosition(rmode_)) {
PrintF(out, " (%" V8_PTR_PREFIX "d)", data());
- } else if (rmode_ == RelocInfo::RUNTIME_ENTRY &&
- Isolate::Current()->deoptimizer_data() != NULL) {
+ } else if (IsRuntimeEntry(rmode_) &&
+ isolate->deoptimizer_data() != NULL) {
    // Deoptimization bailouts are stored as runtime entries.
int id = Deoptimizer::GetDeoptimizationId(
- target_address(), Deoptimizer::EAGER);
+ isolate, target_address(), Deoptimizer::EAGER);
if (id != Deoptimizer::kNotDeoptimizationEntry) {
PrintF(out, " (deoptimization bailout %d)", id);
}
@@ -734,11 +863,15 @@ void RelocInfo::Verify() {
case INTERNAL_REFERENCE:
case CONST_POOL:
case DEBUG_BREAK_SLOT:
- case NONE:
+ case NONE32:
+ case NONE64:
break;
case NUMBER_OF_MODES:
UNREACHABLE();
break;
+ case CODE_AGE_SEQUENCE:
+ ASSERT(Code::IsYoungSequence(pc_) || code_age_stub()->IsCode());
+ break;
}
}
#endif // VERIFY_HEAP
@@ -750,12 +883,77 @@ void RelocInfo::Verify() {
void ExternalReference::SetUp() {
double_constants.min_int = kMinInt;
double_constants.one_half = 0.5;
+ double_constants.minus_one_half = -0.5;
double_constants.minus_zero = -0.0;
double_constants.uint8_max_value = 255;
double_constants.zero = 0.0;
double_constants.canonical_non_hole_nan = OS::nan_value();
double_constants.the_hole_nan = BitCast<double>(kHoleNanInt64);
double_constants.negative_infinity = -V8_INFINITY;
+
+ math_exp_data_mutex = OS::CreateMutex();
+}
+
+
+void ExternalReference::InitializeMathExpData() {
+  // Fast check without the lock; re-checked below after acquiring it.
+ if (math_exp_data_initialized) return;
+
+ math_exp_data_mutex->Lock();
+ if (!math_exp_data_initialized) {
+ // If this is changed, generated code must be adapted too.
+ const int kTableSizeBits = 11;
+ const int kTableSize = 1 << kTableSizeBits;
+ const double kTableSizeDouble = static_cast<double>(kTableSize);
+
+ math_exp_constants_array = new double[9];
+ // Input values smaller than this always return 0.
+ math_exp_constants_array[0] = -708.39641853226408;
+ // Input values larger than this always return +Infinity.
+ math_exp_constants_array[1] = 709.78271289338397;
+ math_exp_constants_array[2] = V8_INFINITY;
+ // The rest is black magic. Do not attempt to understand it. It is
+ // loosely based on the "expd" function published at:
+ // http://herumi.blogspot.com/2011/08/fast-double-precision-exponential.html
+ const double constant3 = (1 << kTableSizeBits) / log(2.0);
+ math_exp_constants_array[3] = constant3;
+ math_exp_constants_array[4] =
+ static_cast<double>(static_cast<int64_t>(3) << 51);
+ math_exp_constants_array[5] = 1 / constant3;
+ math_exp_constants_array[6] = 3.0000000027955394;
+ math_exp_constants_array[7] = 0.16666666685227835;
+ math_exp_constants_array[8] = 1;
+
+ math_exp_log_table_array = new double[kTableSize];
+ for (int i = 0; i < kTableSize; i++) {
+ double value = pow(2, i / kTableSizeDouble);
+
+ uint64_t bits = BitCast<uint64_t, double>(value);
+ bits &= (static_cast<uint64_t>(1) << 52) - 1;
+ double mantissa = BitCast<double, uint64_t>(bits);
+
+ // <just testing>
+ uint64_t doublebits;
+ memcpy(&doublebits, &value, sizeof doublebits);
+ doublebits &= (static_cast<uint64_t>(1) << 52) - 1;
+ double mantissa2;
+ memcpy(&mantissa2, &doublebits, sizeof mantissa2);
+ CHECK_EQ(mantissa, mantissa2);
+ // </just testing>
+
+ math_exp_log_table_array[i] = mantissa;
+ }
+
+ math_exp_data_initialized = true;
+ }
+ math_exp_data_mutex->Unlock();
+}
+
+
+void ExternalReference::TearDownMathExpData() {
+ delete[] math_exp_constants_array;
+ delete[] math_exp_log_table_array;
+ delete math_exp_data_mutex;
}
@@ -874,6 +1072,13 @@ ExternalReference ExternalReference::get_date_field_function(
}
+ExternalReference ExternalReference::get_make_code_young_function(
+ Isolate* isolate) {
+ return ExternalReference(Redirect(
+ isolate, FUNCTION_ADDR(Code::MakeCodeAgeSequenceYoung)));
+}
+
+
ExternalReference ExternalReference::date_cache_stamp(Isolate* isolate) {
return ExternalReference(isolate->date_cache()->stamp_address());
}
@@ -900,6 +1105,20 @@ ExternalReference ExternalReference::compute_output_frames_function(
}
+ExternalReference ExternalReference::log_enter_external_function(
+ Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(Logger::EnterExternal)));
+}
+
+
+ExternalReference ExternalReference::log_leave_external_function(
+ Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(Logger::LeaveExternal)));
+}
+
+
ExternalReference ExternalReference::keyed_lookup_cache_keys(Isolate* isolate) {
return ExternalReference(isolate->keyed_lookup_cache()->keys_address());
}
@@ -969,18 +1188,35 @@ ExternalReference ExternalReference::new_space_allocation_limit_address(
}
-ExternalReference ExternalReference::handle_scope_level_address() {
- return ExternalReference(HandleScope::current_level_address());
+ExternalReference ExternalReference::old_pointer_space_allocation_top_address(
+ Isolate* isolate) {
+ return ExternalReference(
+ isolate->heap()->OldPointerSpaceAllocationTopAddress());
+}
+
+
+ExternalReference ExternalReference::old_pointer_space_allocation_limit_address(
+ Isolate* isolate) {
+ return ExternalReference(
+ isolate->heap()->OldPointerSpaceAllocationLimitAddress());
}
-ExternalReference ExternalReference::handle_scope_next_address() {
- return ExternalReference(HandleScope::current_next_address());
+ExternalReference ExternalReference::handle_scope_level_address(
+ Isolate* isolate) {
+ return ExternalReference(HandleScope::current_level_address(isolate));
}
-ExternalReference ExternalReference::handle_scope_limit_address() {
- return ExternalReference(HandleScope::current_limit_address());
+ExternalReference ExternalReference::handle_scope_next_address(
+ Isolate* isolate) {
+ return ExternalReference(HandleScope::current_next_address(isolate));
+}
+
+
+ExternalReference ExternalReference::handle_scope_limit_address(
+ Isolate* isolate) {
+ return ExternalReference(HandleScope::current_limit_address(isolate));
}
@@ -1018,6 +1254,12 @@ ExternalReference ExternalReference::address_of_one_half() {
}
+ExternalReference ExternalReference::address_of_minus_one_half() {
+ return ExternalReference(
+ reinterpret_cast<void*>(&double_constants.minus_one_half));
+}
+
+
ExternalReference ExternalReference::address_of_minus_zero() {
return ExternalReference(
reinterpret_cast<void*>(&double_constants.minus_zero));
@@ -1186,12 +1428,45 @@ ExternalReference ExternalReference::math_log_double_function(
}
+ExternalReference ExternalReference::math_exp_constants(int constant_index) {
+ ASSERT(math_exp_data_initialized);
+ return ExternalReference(
+ reinterpret_cast<void*>(math_exp_constants_array + constant_index));
+}
+
+
+ExternalReference ExternalReference::math_exp_log_table() {
+ ASSERT(math_exp_data_initialized);
+ return ExternalReference(reinterpret_cast<void*>(math_exp_log_table_array));
+}
+
+
ExternalReference ExternalReference::page_flags(Page* page) {
return ExternalReference(reinterpret_cast<Address>(page) +
MemoryChunk::kFlagsOffset);
}
+ExternalReference ExternalReference::ForDeoptEntry(Address entry) {
+ return ExternalReference(entry);
+}
+
+
+double power_helper(double x, double y) {
+ int y_int = static_cast<int>(y);
+ if (y == y_int) {
+ return power_double_int(x, y_int); // Returns 1 if exponent is 0.
+ }
+ if (y == 0.5) {
+ return (isinf(x)) ? V8_INFINITY : fast_sqrt(x + 0.0); // Convert -0 to +0.
+ }
+ if (y == -0.5) {
+ return (isinf(x)) ? 0 : 1.0 / fast_sqrt(x + 0.0); // Convert -0 to +0.
+ }
+ return power_double_double(x, y);
+}
+
+
// Helper function to compute x^y, where y is known to be an
// integer. Uses binary decomposition to limit the number of
// multiplications; see the discussion in "Hacker's Delight" by Henry
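
The comment above refers to exponentiation by squaring; the body of power_double_int itself lies outside this hunk. A hedged sketch of the technique (not necessarily the exact loop V8 uses):

    // Sketch only: x^y by binary decomposition of the exponent.
    double PowerDoubleIntSketch(double x, int y) {
      double m = (y < 0) ? 1 / x : x;
      unsigned n = (y < 0) ? 0u - static_cast<unsigned>(y)
                           : static_cast<unsigned>(y);
      double p = 1;
      while (n != 0) {
        if (n & 1) p *= m;   // multiply in the current power of x
        m *= m;              // square: x, x^2, x^4, ...
        n >>= 1;
      }
      return p;              // x^0 == 1, matching the note in power_helper()
    }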
@@ -1212,7 +1487,8 @@ double power_double_int(double x, int y) {
double power_double_double(double x, double y) {
-#ifdef __MINGW64_VERSION_MAJOR
+#if defined(__MINGW64_VERSION_MAJOR) && \
+ (!defined(__MINGW64_VERSION_RC) || __MINGW64_VERSION_RC < 1)
// MinGW64 has a custom implementation for pow. This handles certain
// special cases that are different.
if ((x == 0.0 || isinf(x)) && isfinite(y)) {
@@ -1330,6 +1606,10 @@ void PositionsRecorder::RecordPosition(int pos) {
gdbjit_lineinfo_->SetPosition(assembler_->pc_offset(), pos, false);
}
#endif
+ LOG_CODE_EVENT(assembler_->isolate(),
+ CodeLinePosInfoAddPositionEvent(jit_handler_data_,
+ assembler_->pc_offset(),
+ pos));
}
@@ -1342,6 +1622,11 @@ void PositionsRecorder::RecordStatementPosition(int pos) {
gdbjit_lineinfo_->SetPosition(assembler_->pc_offset(), pos, true);
}
#endif
+ LOG_CODE_EVENT(assembler_->isolate(),
+ CodeLinePosInfoAddStatementPositionEvent(
+ jit_handler_data_,
+ assembler_->pc_offset(),
+ pos));
}