diff options
125 files changed, 6615 insertions, 4013 deletions
diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog index f69be973f0..1d91fcded1 100644 --- a/deps/v8/ChangeLog +++ b/deps/v8/ChangeLog @@ -1,3 +1,16 @@ +2011-02-24: Version 3.1.6 + + Fixed a number of crash bugs. + + Added support for Cygwin (issue 64). + + Improved Crankshaft for x64 and ARM. + + Added Crankshaft support for stores to pixel arrays. + + Fixed issue in CPU profiler with Crankshaft. + + 2011-02-16: Version 3.1.5 Change RegExp parsing to disallow /(*)/. diff --git a/deps/v8/SConstruct b/deps/v8/SConstruct index 436581a9ba..84707e9847 100644 --- a/deps/v8/SConstruct +++ b/deps/v8/SConstruct @@ -27,7 +27,6 @@ import platform import re -import subprocess import sys import os from os.path import join, dirname, abspath @@ -143,9 +142,6 @@ LIBRARY_FLAGS = { # Use visibility=default to disable this. 'CXXFLAGS': ['-fvisibility=hidden'] }, - 'strictaliasing:off': { - 'CCFLAGS': ['-fno-strict-aliasing'] - }, 'mode:debug': { 'CCFLAGS': ['-g', '-O0'], 'CPPDEFINES': ['ENABLE_DISASSEMBLER', 'DEBUG'], @@ -306,6 +302,7 @@ V8_EXTRA_FLAGS = { 'gcc': { 'all': { 'WARNINGFLAGS': ['-Wall', + '-Werror', '-W', '-Wno-unused-parameter', '-Wnon-virtual-dtor'] @@ -655,16 +652,8 @@ def Abort(message): sys.exit(1) -def GuessOS(env): - return utils.GuessOS() - - -def GuessArch(env): - return utils.GuessArchitecture() - - -def GuessToolchain(env): - tools = env['TOOLS'] +def GuessToolchain(os): + tools = Environment()['TOOLS'] if 'gcc' in tools: return 'gcc' elif 'msvc' in tools: @@ -673,9 +662,7 @@ def GuessToolchain(env): return None -def GuessVisibility(env): - os = env['os'] - toolchain = env['toolchain']; +def GuessVisibility(os, toolchain): if (os == 'win32' or os == 'cygwin') and toolchain == 'gcc': # MinGW / Cygwin can't do it. 
return 'default' @@ -685,35 +672,27 @@ def GuessVisibility(env): return 'hidden' -def GuessStrictAliasing(env): - # There seems to be a problem with gcc 4.5.x - # see http://code.google.com/p/v8/issues/detail?id=884 - # it can be worked around by disabling strict aliasing - toolchain = env['toolchain']; - if toolchain == 'gcc': - env = Environment(tools=['gcc']) - version = subprocess.Popen([env['CC'], '-dumpversion'], - stdout=subprocess.PIPE).communicate()[0] - if version.find('4.5.') == 0: - return 'off' - return 'default' +OS_GUESS = utils.GuessOS() +TOOLCHAIN_GUESS = GuessToolchain(OS_GUESS) +ARCH_GUESS = utils.GuessArchitecture() +VISIBILITY_GUESS = GuessVisibility(OS_GUESS, TOOLCHAIN_GUESS) SIMPLE_OPTIONS = { 'toolchain': { 'values': ['gcc', 'msvc'], - 'guess': GuessToolchain, - 'help': 'the toolchain to use' + 'default': TOOLCHAIN_GUESS, + 'help': 'the toolchain to use (%s)' % TOOLCHAIN_GUESS }, 'os': { 'values': ['freebsd', 'linux', 'macos', 'win32', 'android', 'openbsd', 'solaris', 'cygwin'], - 'guess': GuessOS, - 'help': 'the os to build for' + 'default': OS_GUESS, + 'help': 'the os to build for (%s)' % OS_GUESS }, 'arch': { 'values':['arm', 'ia32', 'x64', 'mips'], - 'guess': GuessArch, - 'help': 'the architecture to build for' + 'default': ARCH_GUESS, + 'help': 'the architecture to build for (%s)' % ARCH_GUESS }, 'regexp': { 'values': ['native', 'interpreted'], @@ -822,15 +801,8 @@ SIMPLE_OPTIONS = { }, 'visibility': { 'values': ['default', 'hidden'], - 'guess': GuessVisibility, - 'depends': ['os', 'toolchain'], - 'help': 'shared library symbol visibility' - }, - 'strictaliasing': { - 'values': ['default', 'off'], - 'guess': GuessStrictAliasing, - 'depends': ['toolchain'], - 'help': 'assume strict aliasing while optimizing' + 'default': VISIBILITY_GUESS, + 'help': 'shared library symbol visibility (%s)' % VISIBILITY_GUESS }, 'pgo': { 'values': ['off', 'instrument', 'optimize'], @@ -840,26 +812,6 @@ SIMPLE_OPTIONS = { } -def AddOption(result, name, 
option): - if 'guess' in option: - # Option has a guess function - guess = option.get('guess') - guess_env = Environment(options=result) - # Check if all options that the guess function depends on are set - if 'depends' in option: - for dependency in option.get('depends'): - if not dependency in guess_env: - return False - default = guess(guess_env) - else: - # Option has a fixed default - default = option.get('default') - - help = '%s (%s)' % (option.get('help'), ", ".join(option['values'])) - result.Add(name, help, default) - return True - - def GetOptions(): result = Options() result.Add('mode', 'compilation mode (debug, release)', 'release') @@ -867,28 +819,12 @@ def GetOptions(): result.Add('cache', 'directory to use for scons build cache', '') result.Add('env', 'override environment settings (NAME0:value0,NAME1:value1,...)', '') result.Add('importenv', 'import environment settings (NAME0,NAME1,...)', '') - options = SIMPLE_OPTIONS - while len(options): - postpone = {} - for (name, option) in options.iteritems(): - if not AddOption(result, name, option): - postpone[name] = option - options = postpone + for (name, option) in SIMPLE_OPTIONS.iteritems(): + help = '%s (%s)' % (name, ", ".join(option['values'])) + result.Add(name, help, option.get('default')) return result -def GetTools(opts): - env = Environment(options=opts) - os = env['os'] - toolchain = env['toolchain'] - if os == 'win32' and toolchain == 'gcc': - return ['mingw'] - elif os == 'win32' and toolchain == 'msvc': - return ['msvc', 'mslink', 'mslib', 'msvs'] - else: - return ['default'] - - def GetVersionComponents(): MAJOR_VERSION_PATTERN = re.compile(r"#define\s+MAJOR_VERSION\s+(.*)") MINOR_VERSION_PATTERN = re.compile(r"#define\s+MINOR_VERSION\s+(.*)") @@ -969,7 +905,7 @@ def VerifyOptions(env): print env['simulator'] Abort("Option unalignedaccesses only supported for the ARM architecture.") for (name, option) in SIMPLE_OPTIONS.iteritems(): - if (not name in env): + if (not option.get('default')) 
and (name not in ARGUMENTS): message = ("A value for option %s must be specified (%s)." % (name, ", ".join(option['values']))) Abort(message) @@ -1097,7 +1033,7 @@ def ParseEnvOverrides(arg, imports): return overrides -def BuildSpecific(env, mode, env_overrides, tools): +def BuildSpecific(env, mode, env_overrides): options = {'mode': mode} for option in SIMPLE_OPTIONS: options[option] = env[option] @@ -1150,7 +1086,7 @@ def BuildSpecific(env, mode, env_overrides, tools): (object_files, shell_files, mksnapshot) = env.SConscript( join('src', 'SConscript'), build_dir=join('obj', target_id), - exports='context tools', + exports='context', duplicate=False ) @@ -1170,21 +1106,21 @@ def BuildSpecific(env, mode, env_overrides, tools): library = env.SharedLibrary(library_name, object_files, PDB=pdb_name) context.library_targets.append(library) - d8_env = Environment(tools=tools) + d8_env = Environment() d8_env.Replace(**context.flags['d8']) context.ApplyEnvOverrides(d8_env) shell = d8_env.Program('d8' + suffix, object_files + shell_files) context.d8_targets.append(shell) for sample in context.samples: - sample_env = Environment(tools=tools) + sample_env = Environment() sample_env.Replace(**context.flags['sample']) sample_env.Prepend(LIBS=[library_name]) context.ApplyEnvOverrides(sample_env) sample_object = sample_env.SConscript( join('samples', 'SConscript'), build_dir=join('obj', 'sample', sample, target_id), - exports='sample context tools', + exports='sample context', duplicate=False ) sample_name = sample + suffix @@ -1197,7 +1133,7 @@ def BuildSpecific(env, mode, env_overrides, tools): cctest_program = cctest_env.SConscript( join('test', 'cctest', 'SConscript'), build_dir=join('obj', 'test', target_id), - exports='context object_files tools', + exports='context object_files', duplicate=False ) context.cctest_targets.append(cctest_program) @@ -1207,9 +1143,7 @@ def BuildSpecific(env, mode, env_overrides, tools): def Build(): opts = GetOptions() - tools = GetTools(opts) 
- env = Environment(options=opts, tools=tools) - + env = Environment(options=opts) Help(opts.GenerateHelpText(env)) VerifyOptions(env) env_overrides = ParseEnvOverrides(env['env'], env['importenv']) @@ -1223,7 +1157,7 @@ def Build(): d8s = [] modes = SplitList(env['mode']) for mode in modes: - context = BuildSpecific(env.Copy(), mode, env_overrides, tools) + context = BuildSpecific(env.Copy(), mode, env_overrides) libraries += context.library_targets mksnapshots += context.mksnapshot_targets cctests += context.cctest_targets diff --git a/deps/v8/src/SConscript b/deps/v8/src/SConscript index 2e54295e88..34ca91ca65 100755 --- a/deps/v8/src/SConscript +++ b/deps/v8/src/SConscript @@ -31,7 +31,6 @@ root_dir = dirname(File('SConstruct').rfile().abspath) sys.path.append(join(root_dir, 'tools')) import js2c Import('context') -Import('tools') SOURCES = { @@ -154,6 +153,7 @@ SOURCES = { arm/jump-target-arm.cc arm/lithium-arm.cc arm/lithium-codegen-arm.cc + arm/lithium-gap-resolver-arm.cc arm/macro-assembler-arm.cc arm/regexp-macro-assembler-arm.cc arm/register-allocator-arm.cc @@ -305,7 +305,7 @@ def Abort(message): def ConfigureObjectFiles(): - env = Environment(tools=tools) + env = Environment() env.Replace(**context.flags['v8']) context.ApplyEnvOverrides(env) env['BUILDERS']['JS2C'] = Builder(action=js2c.JS2C) diff --git a/deps/v8/src/accessors.cc b/deps/v8/src/accessors.cc index f6d1daf67a..18264254b8 100644 --- a/deps/v8/src/accessors.cc +++ b/deps/v8/src/accessors.cc @@ -446,8 +446,15 @@ MaybeObject* Accessors::FunctionGetPrototype(Object* object, void*) { bool found_it = false; JSFunction* function = FindInPrototypeChain<JSFunction>(object, &found_it); if (!found_it) return Heap::undefined_value(); + while (!function->should_have_prototype()) { + found_it = false; + function = FindInPrototypeChain<JSFunction>(object->GetPrototype(), + &found_it); + // There has to be one because we hit the getter. 
+ ASSERT(found_it); + } + if (!function->has_prototype()) { - if (!function->should_have_prototype()) return Heap::undefined_value(); Object* prototype; { MaybeObject* maybe_prototype = Heap::AllocateFunctionPrototype(function); if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype; @@ -467,6 +474,13 @@ MaybeObject* Accessors::FunctionSetPrototype(JSObject* object, bool found_it = false; JSFunction* function = FindInPrototypeChain<JSFunction>(object, &found_it); if (!found_it) return Heap::undefined_value(); + if (!function->should_have_prototype()) { + // Since we hit this accessor, object will have no prototype property. + return object->SetLocalPropertyIgnoreAttributes(Heap::prototype_symbol(), + value, + NONE); + } + if (function->has_initial_map()) { // If the function has allocated the initial map // replace it with a copy containing the new prototype. diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc index fb9bb488c9..c91d4ba2bc 100644 --- a/deps/v8/src/arm/assembler-arm.cc +++ b/deps/v8/src/arm/assembler-arm.cc @@ -1848,11 +1848,31 @@ void Assembler::vldr(const DwVfpRegister dst, offset = -offset; u = 0; } - ASSERT(offset % 4 == 0); - ASSERT((offset / 4) < 256); + ASSERT(offset >= 0); - emit(cond | u*B23 | 0xD1*B20 | base.code()*B16 | dst.code()*B12 | - 0xB*B8 | ((offset / 4) & 255)); + if ((offset % 4) == 0 && (offset / 4) < 256) { + emit(cond | u*B23 | 0xD1*B20 | base.code()*B16 | dst.code()*B12 | + 0xB*B8 | ((offset / 4) & 255)); + } else { + // Larger offsets must be handled by computing the correct address + // in the ip register. 
+ ASSERT(!base.is(ip)); + if (u == 1) { + add(ip, base, Operand(offset)); + } else { + sub(ip, base, Operand(offset)); + } + emit(cond | 0xD1*B20 | ip.code()*B16 | dst.code()*B12 | 0xB*B8); + } +} + + +void Assembler::vldr(const DwVfpRegister dst, + const MemOperand& operand, + const Condition cond) { + ASSERT(!operand.rm().is_valid()); + ASSERT(operand.am_ == Offset); + vldr(dst, operand.rn(), operand.offset(), cond); } @@ -1870,13 +1890,33 @@ void Assembler::vldr(const SwVfpRegister dst, offset = -offset; u = 0; } - ASSERT(offset % 4 == 0); - ASSERT((offset / 4) < 256); - ASSERT(offset >= 0); int sd, d; dst.split_code(&sd, &d); + ASSERT(offset >= 0); + + if ((offset % 4) == 0 && (offset / 4) < 256) { emit(cond | u*B23 | d*B22 | 0xD1*B20 | base.code()*B16 | sd*B12 | 0xA*B8 | ((offset / 4) & 255)); + } else { + // Larger offsets must be handled by computing the correct address + // in the ip register. + ASSERT(!base.is(ip)); + if (u == 1) { + add(ip, base, Operand(offset)); + } else { + sub(ip, base, Operand(offset)); + } + emit(cond | d*B22 | 0xD1*B20 | ip.code()*B16 | sd*B12 | 0xA*B8); + } +} + + +void Assembler::vldr(const SwVfpRegister dst, + const MemOperand& operand, + const Condition cond) { + ASSERT(!operand.rm().is_valid()); + ASSERT(operand.am_ == Offset); + vldr(dst, operand.rn(), operand.offset(), cond); } @@ -1894,11 +1934,30 @@ void Assembler::vstr(const DwVfpRegister src, offset = -offset; u = 0; } - ASSERT(offset % 4 == 0); - ASSERT((offset / 4) < 256); ASSERT(offset >= 0); - emit(cond | u*B23 | 0xD0*B20 | base.code()*B16 | src.code()*B12 | - 0xB*B8 | ((offset / 4) & 255)); + if ((offset % 4) == 0 && (offset / 4) < 256) { + emit(cond | u*B23 | 0xD0*B20 | base.code()*B16 | src.code()*B12 | + 0xB*B8 | ((offset / 4) & 255)); + } else { + // Larger offsets must be handled by computing the correct address + // in the ip register. 
+ ASSERT(!base.is(ip)); + if (u == 1) { + add(ip, base, Operand(offset)); + } else { + sub(ip, base, Operand(offset)); + } + emit(cond | 0xD0*B20 | ip.code()*B16 | src.code()*B12 | 0xB*B8); + } +} + + +void Assembler::vstr(const DwVfpRegister src, + const MemOperand& operand, + const Condition cond) { + ASSERT(!operand.rm().is_valid()); + ASSERT(operand.am_ == Offset); + vstr(src, operand.rn(), operand.offset(), cond); } @@ -1916,13 +1975,32 @@ void Assembler::vstr(const SwVfpRegister src, offset = -offset; u = 0; } - ASSERT(offset % 4 == 0); - ASSERT((offset / 4) < 256); - ASSERT(offset >= 0); int sd, d; src.split_code(&sd, &d); - emit(cond | u*B23 | d*B22 | 0xD0*B20 | base.code()*B16 | sd*B12 | - 0xA*B8 | ((offset / 4) & 255)); + ASSERT(offset >= 0); + if ((offset % 4) == 0 && (offset / 4) < 256) { + emit(cond | u*B23 | d*B22 | 0xD0*B20 | base.code()*B16 | sd*B12 | + 0xA*B8 | ((offset / 4) & 255)); + } else { + // Larger offsets must be handled by computing the correct address + // in the ip register. + ASSERT(!base.is(ip)); + if (u == 1) { + add(ip, base, Operand(offset)); + } else { + sub(ip, base, Operand(offset)); + } + emit(cond | d*B22 | 0xD0*B20 | ip.code()*B16 | sd*B12 | 0xA*B8); + } +} + + +void Assembler::vstr(const SwVfpRegister src, + const MemOperand& operand, + const Condition cond) { + ASSERT(!operand.rm().is_valid()); + ASSERT(operand.am_ == Offset); + vldr(src, operand.rn(), operand.offset(), cond); } diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h index 3941c84b34..954b9cff33 100644 --- a/deps/v8/src/arm/assembler-arm.h +++ b/deps/v8/src/arm/assembler-arm.h @@ -387,7 +387,7 @@ class Operand BASE_EMBEDDED { // Return true if this is a register operand. INLINE(bool is_reg() const); - // Return true of this operand fits in one instruction so that no + // Return true if this operand fits in one instruction so that no // 2-instruction solution with a load into the ip register is necessary. 
bool is_single_instruction() const; bool must_use_constant_pool() const; @@ -439,7 +439,7 @@ class MemOperand BASE_EMBEDDED { offset_ = offset; } - uint32_t offset() { + uint32_t offset() const { ASSERT(rm_.is(no_reg)); return offset_; } @@ -447,6 +447,10 @@ class MemOperand BASE_EMBEDDED { Register rn() const { return rn_; } Register rm() const { return rm_; } + bool OffsetIsUint12Encodable() const { + return offset_ >= 0 ? is_uint12(offset_) : is_uint12(-offset_); + } + private: Register rn_; // base Register rm_; // register offset @@ -902,22 +906,34 @@ class Assembler : public Malloced { void vldr(const DwVfpRegister dst, const Register base, - int offset, // Offset must be a multiple of 4. + int offset, + const Condition cond = al); + void vldr(const DwVfpRegister dst, + const MemOperand& src, const Condition cond = al); void vldr(const SwVfpRegister dst, const Register base, - int offset, // Offset must be a multiple of 4. + int offset, + const Condition cond = al); + void vldr(const SwVfpRegister dst, + const MemOperand& src, const Condition cond = al); void vstr(const DwVfpRegister src, const Register base, - int offset, // Offset must be a multiple of 4. + int offset, + const Condition cond = al); + void vstr(const DwVfpRegister src, + const MemOperand& dst, const Condition cond = al); void vstr(const SwVfpRegister src, const Register base, - int offset, // Offset must be a multiple of 4. + int offset, + const Condition cond = al); + void vstr(const SwVfpRegister src, + const MemOperand& dst, const Condition cond = al); void vmov(const DwVfpRegister dst, diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc index cc49f7e4e5..87fa87df0c 100644 --- a/deps/v8/src/arm/code-stubs-arm.cc +++ b/deps/v8/src/arm/code-stubs-arm.cc @@ -2661,8 +2661,8 @@ void TypeRecordingBinaryOpStub::GenerateFPOperation(MacroAssembler* masm, // Allocate new heap number for result. 
Register result = r5; - __ AllocateHeapNumber( - result, scratch1, scratch2, heap_number_map, gc_required); + GenerateHeapResultAllocation( + masm, result, heap_number_map, scratch1, scratch2, gc_required); // Load the operands. if (smi_operands) { @@ -2811,8 +2811,14 @@ void TypeRecordingBinaryOpStub::GenerateFPOperation(MacroAssembler* masm, // Allocate new heap number for result. __ bind(&result_not_a_smi); - __ AllocateHeapNumber( - r5, scratch1, scratch2, heap_number_map, gc_required); + Register result = r5; + if (smi_operands) { + __ AllocateHeapNumber( + result, scratch1, scratch2, heap_number_map, gc_required); + } else { + GenerateHeapResultAllocation( + masm, result, heap_number_map, scratch1, scratch2, gc_required); + } // r2: Answer as signed int32. // r5: Heap number to write answer into. @@ -2934,45 +2940,47 @@ void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) { void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) { - Label call_runtime; + Label call_runtime, call_string_add_or_runtime; GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS); - // If all else fails, use the runtime system to get the correct - // result. - __ bind(&call_runtime); + GenerateFPOperation(masm, false, &call_string_add_or_runtime, &call_runtime); - // Try to add strings before calling runtime. + __ bind(&call_string_add_or_runtime); if (op_ == Token::ADD) { GenerateAddStrings(masm); } - GenericBinaryOpStub stub(op_, mode_, r1, r0); - __ TailCallStub(&stub); + __ bind(&call_runtime); + GenerateCallRuntime(masm); } void TypeRecordingBinaryOpStub::GenerateAddStrings(MacroAssembler* masm) { ASSERT(op_ == Token::ADD); + Label left_not_string, call_runtime; Register left = r1; Register right = r0; - Label call_runtime; - // Check if first argument is a string. - __ JumpIfSmi(left, &call_runtime); + // Check if left argument is a string. 
+ __ JumpIfSmi(left, &left_not_string); __ CompareObjectType(left, r2, r2, FIRST_NONSTRING_TYPE); - __ b(ge, &call_runtime); + __ b(ge, &left_not_string); - // First argument is a a string, test second. + StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB); + GenerateRegisterArgsPush(masm); + __ TailCallStub(&string_add_left_stub); + + // Left operand is not a string, test right. + __ bind(&left_not_string); __ JumpIfSmi(right, &call_runtime); __ CompareObjectType(right, r2, r2, FIRST_NONSTRING_TYPE); __ b(ge, &call_runtime); - // First and second argument are strings. - StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB); + StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB); GenerateRegisterArgsPush(masm); - __ TailCallStub(&string_add_stub); + __ TailCallStub(&string_add_right_stub); // At least one argument is not a string. __ bind(&call_runtime); @@ -3706,7 +3714,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) { // The offset was stored in r4 safepoint slot. // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal) - __ ldr(scratch, MacroAssembler::SafepointRegisterSlot(r4)); + __ LoadFromSafepointRegisterSlot(scratch, r4); __ sub(inline_site, lr, scratch); // Get the map location in scratch and patch it. __ GetRelocatedValueLocation(inline_site, scratch); @@ -5438,18 +5446,19 @@ void StringCompareStub::Generate(MacroAssembler* masm) { void StringAddStub::Generate(MacroAssembler* masm) { - Label string_add_runtime; + Label string_add_runtime, call_builtin; + Builtins::JavaScript builtin_id = Builtins::ADD; + // Stack on entry: - // sp[0]: second argument. - // sp[4]: first argument. + // sp[0]: second argument (right). + // sp[4]: first argument (left). // Load the two arguments. __ ldr(r0, MemOperand(sp, 1 * kPointerSize)); // First argument. __ ldr(r1, MemOperand(sp, 0 * kPointerSize)); // Second argument. // Make sure that both arguments are strings if not known in advance. 
- if (string_check_) { - STATIC_ASSERT(kSmiTag == 0); + if (flags_ == NO_STRING_ADD_FLAGS) { __ JumpIfEitherSmi(r0, r1, &string_add_runtime); // Load instance types. __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset)); @@ -5461,13 +5470,27 @@ void StringAddStub::Generate(MacroAssembler* masm) { __ tst(r4, Operand(kIsNotStringMask)); __ tst(r5, Operand(kIsNotStringMask), eq); __ b(ne, &string_add_runtime); + } else { + // Here at least one of the arguments is definitely a string. + // We convert the one that is not known to be a string. + if ((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) == 0) { + ASSERT((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) != 0); + GenerateConvertArgument( + masm, 1 * kPointerSize, r0, r2, r3, r4, r5, &call_builtin); + builtin_id = Builtins::STRING_ADD_RIGHT; + } else if ((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) == 0) { + ASSERT((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) != 0); + GenerateConvertArgument( + masm, 0 * kPointerSize, r1, r2, r3, r4, r5, &call_builtin); + builtin_id = Builtins::STRING_ADD_LEFT; + } } // Both arguments are strings. // r0: first string // r1: second string - // r4: first string instance type (if string_check_) - // r5: second string instance type (if string_check_) + // r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS) + // r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS) { Label strings_not_empty; // Check if either of the strings are empty. In that case return the other. @@ -5495,8 +5518,8 @@ void StringAddStub::Generate(MacroAssembler* masm) { // r1: second string // r2: length of first string // r3: length of second string - // r4: first string instance type (if string_check_) - // r5: second string instance type (if string_check_) + // r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS) + // r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS) // Look at the length of the result of adding the two strings. 
Label string_add_flat_result, longer_than_two; // Adding two lengths can't overflow. @@ -5508,7 +5531,7 @@ void StringAddStub::Generate(MacroAssembler* masm) { __ b(ne, &longer_than_two); // Check that both strings are non-external ascii strings. - if (!string_check_) { + if (flags_ != NO_STRING_ADD_FLAGS) { __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset)); __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset)); __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset)); @@ -5556,7 +5579,7 @@ void StringAddStub::Generate(MacroAssembler* masm) { // If result is not supposed to be flat, allocate a cons string object. // If both strings are ascii the result is an ascii cons string. - if (!string_check_) { + if (flags_ != NO_STRING_ADD_FLAGS) { __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset)); __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset)); __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset)); @@ -5604,11 +5627,11 @@ void StringAddStub::Generate(MacroAssembler* masm) { // r1: second string // r2: length of first string // r3: length of second string - // r4: first string instance type (if string_check_) - // r5: second string instance type (if string_check_) + // r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS) + // r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS) // r6: sum of lengths. __ bind(&string_add_flat_result); - if (!string_check_) { + if (flags_ != NO_STRING_ADD_FLAGS) { __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset)); __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset)); __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset)); @@ -5706,6 +5729,60 @@ void StringAddStub::Generate(MacroAssembler* masm) { // Just jump to runtime to add the two strings. 
__ bind(&string_add_runtime); __ TailCallRuntime(Runtime::kStringAdd, 2, 1); + + if (call_builtin.is_linked()) { + __ bind(&call_builtin); + __ InvokeBuiltin(builtin_id, JUMP_JS); + } +} + + +void StringAddStub::GenerateConvertArgument(MacroAssembler* masm, + int stack_offset, + Register arg, + Register scratch1, + Register scratch2, + Register scratch3, + Register scratch4, + Label* slow) { + // First check if the argument is already a string. + Label not_string, done; + __ JumpIfSmi(arg, ¬_string); + __ CompareObjectType(arg, scratch1, scratch1, FIRST_NONSTRING_TYPE); + __ b(lt, &done); + + // Check the number to string cache. + Label not_cached; + __ bind(¬_string); + // Puts the cached result into scratch1. + NumberToStringStub::GenerateLookupNumberStringCache(masm, + arg, + scratch1, + scratch2, + scratch3, + scratch4, + false, + ¬_cached); + __ mov(arg, scratch1); + __ str(arg, MemOperand(sp, stack_offset)); + __ jmp(&done); + + // Check if the argument is a safe string wrapper. + __ bind(¬_cached); + __ JumpIfSmi(arg, slow); + __ CompareObjectType( + arg, scratch1, scratch2, JS_VALUE_TYPE); // map -> scratch1. + __ b(ne, slow); + __ ldrb(scratch2, FieldMemOperand(scratch1, Map::kBitField2Offset)); + __ and_(scratch2, + scratch2, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf)); + __ cmp(scratch2, + Operand(1 << Map::kStringWrapperSafeForDefaultValueOf)); + __ b(ne, slow); + __ ldr(arg, FieldMemOperand(arg, JSValue::kValueOffset)); + __ str(arg, MemOperand(sp, stack_offset)); + + __ bind(&done); } diff --git a/deps/v8/src/arm/code-stubs-arm.h b/deps/v8/src/arm/code-stubs-arm.h index baaa2f2bda..475fbd70e8 100644 --- a/deps/v8/src/arm/code-stubs-arm.h +++ b/deps/v8/src/arm/code-stubs-arm.h @@ -335,24 +335,36 @@ class TypeRecordingBinaryOpStub: public CodeStub { // Flag that indicates how to generate code for the stub StringAddStub. enum StringAddFlags { NO_STRING_ADD_FLAGS = 0, - NO_STRING_CHECK_IN_STUB = 1 << 0 // Omit string check in stub. 
+ // Omit left string check in stub (left is definitely a string). + NO_STRING_CHECK_LEFT_IN_STUB = 1 << 0, + // Omit right string check in stub (right is definitely a string). + NO_STRING_CHECK_RIGHT_IN_STUB = 1 << 1, + // Omit both string checks in stub. + NO_STRING_CHECK_IN_STUB = + NO_STRING_CHECK_LEFT_IN_STUB | NO_STRING_CHECK_RIGHT_IN_STUB }; class StringAddStub: public CodeStub { public: - explicit StringAddStub(StringAddFlags flags) { - string_check_ = ((flags & NO_STRING_CHECK_IN_STUB) == 0); - } + explicit StringAddStub(StringAddFlags flags) : flags_(flags) {} private: Major MajorKey() { return StringAdd; } - int MinorKey() { return string_check_ ? 0 : 1; } + int MinorKey() { return flags_; } void Generate(MacroAssembler* masm); - // Should the stub check whether arguments are strings? - bool string_check_; + void GenerateConvertArgument(MacroAssembler* masm, + int stack_offset, + Register arg, + Register scratch1, + Register scratch2, + Register scratch3, + Register scratch4, + Label* slow); + + const StringAddFlags flags_; }; diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc index a3921d8efc..3e125a33fc 100644 --- a/deps/v8/src/arm/codegen-arm.cc +++ b/deps/v8/src/arm/codegen-arm.cc @@ -5850,8 +5850,8 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) { } else if (variable != NULL) { // Delete of an unqualified identifier is disallowed in strict mode - // so this code can only be reached in non-strict mode. - ASSERT(strict_mode_flag() == kNonStrictMode); + // but "delete this" is. 
+ ASSERT(strict_mode_flag() == kNonStrictMode || variable->is_this()); Slot* slot = variable->AsSlot(); if (variable->is_global()) { LoadGlobal(); diff --git a/deps/v8/src/arm/cpu-arm.cc b/deps/v8/src/arm/cpu-arm.cc index 507954d9e1..51c84b3354 100644 --- a/deps/v8/src/arm/cpu-arm.cc +++ b/deps/v8/src/arm/cpu-arm.cc @@ -50,6 +50,11 @@ void CPU::Setup() { void CPU::FlushICache(void* start, size_t size) { + // Nothing to do flushing no instructions. + if (size == 0) { + return; + } + #if defined (USE_SIMULATOR) // Not generating ARM instructions for C-code. This means that we are // building an ARM emulator based target. We should notify the simulator diff --git a/deps/v8/src/arm/deoptimizer-arm.cc b/deps/v8/src/arm/deoptimizer-arm.cc index 9af7a8d190..9a5aa902b8 100644 --- a/deps/v8/src/arm/deoptimizer-arm.cc +++ b/deps/v8/src/arm/deoptimizer-arm.cc @@ -429,14 +429,16 @@ void Deoptimizer::DoComputeFrame(TranslationIterator* iterator, fp_value, output_offset, value); } - // The context can be gotten from the function so long as we don't - // optimize functions that need local contexts. + // For the bottommost output frame the context can be gotten from the input + // frame. For all subsequent output frames it can be gotten from the function + // so long as we don't inline functions that need local contexts. output_offset -= kPointerSize; input_offset -= kPointerSize; - value = reinterpret_cast<intptr_t>(function->context()); - // The context for the bottommost output frame should also agree with the - // input frame. 
- ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value); + if (is_bottommost) { + value = input_->GetFrameSlot(input_offset); + } else { + value = reinterpret_cast<intptr_t>(function->context()); + } output_frame->SetFrameSlot(output_offset, value); if (is_topmost) { output_frame->SetRegister(cp.code(), value); diff --git a/deps/v8/src/arm/full-codegen-arm.cc b/deps/v8/src/arm/full-codegen-arm.cc index f04a00e052..fea9a8cfb7 100644 --- a/deps/v8/src/arm/full-codegen-arm.cc +++ b/deps/v8/src/arm/full-codegen-arm.cc @@ -219,46 +219,47 @@ void FullCodeGenerator::Generate(CompilationInfo* info) { Move(dot_arguments_slot, r3, r1, r2); } - { Comment cmnt(masm_, "[ Declarations"); - // For named function expressions, declare the function name as a - // constant. - if (scope()->is_function_scope() && scope()->function() != NULL) { - EmitDeclaration(scope()->function(), Variable::CONST, NULL); - } - // Visit all the explicit declarations unless there is an illegal - // redeclaration. - if (scope()->HasIllegalRedeclaration()) { - scope()->VisitIllegalRedeclaration(this); - } else { - VisitDeclarations(scope()->declarations()); - } - } - if (FLAG_trace) { __ CallRuntime(Runtime::kTraceEnter, 0); } - // Check the stack for overflow or break request. - { Comment cmnt(masm_, "[ Stack check"); - PrepareForBailout(info->function(), NO_REGISTERS); - Label ok; - __ LoadRoot(ip, Heap::kStackLimitRootIndex); - __ cmp(sp, Operand(ip)); - __ b(hs, &ok); - StackCheckStub stub; - __ CallStub(&stub); - __ bind(&ok); - } + // Visit the declarations and body unless there is an illegal + // redeclaration. 
+ if (scope()->HasIllegalRedeclaration()) { + Comment cmnt(masm_, "[ Declarations"); + scope()->VisitIllegalRedeclaration(this); - { Comment cmnt(masm_, "[ Body"); - ASSERT(loop_depth() == 0); - VisitStatements(function()->body()); - ASSERT(loop_depth() == 0); + } else { + { Comment cmnt(masm_, "[ Declarations"); + // For named function expressions, declare the function name as a + // constant. + if (scope()->is_function_scope() && scope()->function() != NULL) { + EmitDeclaration(scope()->function(), Variable::CONST, NULL); + } + VisitDeclarations(scope()->declarations()); + } + + { Comment cmnt(masm_, "[ Stack check"); + PrepareForBailout(info->function(), NO_REGISTERS); + Label ok; + __ LoadRoot(ip, Heap::kStackLimitRootIndex); + __ cmp(sp, Operand(ip)); + __ b(hs, &ok); + StackCheckStub stub; + __ CallStub(&stub); + __ bind(&ok); + } + + { Comment cmnt(masm_, "[ Body"); + ASSERT(loop_depth() == 0); + VisitStatements(function()->body()); + ASSERT(loop_depth() == 0); + } } + // Always emit a 'return undefined' in case control fell off the end of + // the body. { Comment cmnt(masm_, "[ return <undefined>;"); - // Emit a 'return undefined' in case control fell off the end of the - // body. __ LoadRoot(r0, Heap::kUndefinedValueRootIndex); } EmitReturnSequence(); @@ -694,10 +695,11 @@ void FullCodeGenerator::EmitDeclaration(Variable* variable, // We bypass the general EmitSlotSearch because we know more about // this specific context. - // The variable in the decl always resides in the current context. + // The variable in the decl always resides in the current function + // context. ASSERT_EQ(0, scope()->ContextChainLength(variable->scope())); if (FLAG_debug_code) { - // Check if we have the correct context pointer. + // Check that we're not inside a 'with'. 
__ ldr(r1, ContextOperand(cp, Context::FCONTEXT_INDEX)); __ cmp(r1, cp); __ Check(eq, "Unexpected declaration in current context."); @@ -1037,7 +1039,7 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions( Slot* slot, Label* slow) { ASSERT(slot->type() == Slot::CONTEXT); - Register current = cp; + Register context = cp; Register next = r3; Register temp = r4; @@ -1045,22 +1047,25 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions( if (s->num_heap_slots() > 0) { if (s->calls_eval()) { // Check that extension is NULL. - __ ldr(temp, ContextOperand(current, Context::EXTENSION_INDEX)); + __ ldr(temp, ContextOperand(context, Context::EXTENSION_INDEX)); __ tst(temp, temp); __ b(ne, slow); } - __ ldr(next, ContextOperand(current, Context::CLOSURE_INDEX)); + __ ldr(next, ContextOperand(context, Context::CLOSURE_INDEX)); __ ldr(next, FieldMemOperand(next, JSFunction::kContextOffset)); // Walk the rest of the chain without clobbering cp. - current = next; + context = next; } } // Check that last extension is NULL. - __ ldr(temp, ContextOperand(current, Context::EXTENSION_INDEX)); + __ ldr(temp, ContextOperand(context, Context::EXTENSION_INDEX)); __ tst(temp, temp); __ b(ne, slow); - __ ldr(temp, ContextOperand(current, Context::FCONTEXT_INDEX)); - return ContextOperand(temp, slot->index()); + + // This function is used only for loads, not stores, so it's safe to + // return an cp-based operand (the write barrier cannot be allowed to + // destroy the cp register). + return ContextOperand(context, slot->index()); } @@ -2004,34 +2009,60 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, : Builtins::StoreIC_Initialize)); EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT); - } else if (var->mode() != Variable::CONST || op == Token::INIT_CONST) { - // Perform the assignment for non-const variables and for initialization - // of const variables. Const assignments are simply skipped. 
- Label done; + } else if (op == Token::INIT_CONST) { + // Like var declarations, const declarations are hoisted to function + // scope. However, unlike var initializers, const initializers are able + // to drill a hole to that function context, even from inside a 'with' + // context. We thus bypass the normal static scope lookup. + Slot* slot = var->AsSlot(); + Label skip; + switch (slot->type()) { + case Slot::PARAMETER: + // No const parameters. + UNREACHABLE(); + break; + case Slot::LOCAL: + // Detect const reinitialization by checking for the hole value. + __ ldr(r1, MemOperand(fp, SlotOffset(slot))); + __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); + __ cmp(r1, ip); + __ b(ne, &skip); + __ str(result_register(), MemOperand(fp, SlotOffset(slot))); + break; + case Slot::CONTEXT: { + __ ldr(r1, ContextOperand(cp, Context::FCONTEXT_INDEX)); + __ ldr(r2, ContextOperand(r1, slot->index())); + __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); + __ cmp(r2, ip); + __ b(ne, &skip); + __ str(r0, ContextOperand(r1, slot->index())); + int offset = Context::SlotOffset(slot->index()); + __ mov(r3, r0); // Preserve the stored value in r0. + __ RecordWrite(r1, Operand(offset), r3, r2); + break; + } + case Slot::LOOKUP: + __ push(r0); + __ mov(r0, Operand(slot->var()->name())); + __ Push(cp, r0); // Context and name. + __ CallRuntime(Runtime::kInitializeConstContextSlot, 3); + break; + } + __ bind(&skip); + + } else if (var->mode() != Variable::CONST) { + // Perform the assignment for non-const variables. Const assignments + // are simply skipped. Slot* slot = var->AsSlot(); switch (slot->type()) { case Slot::PARAMETER: case Slot::LOCAL: - if (op == Token::INIT_CONST) { - // Detect const reinitialization by checking for the hole value. - __ ldr(r1, MemOperand(fp, SlotOffset(slot))); - __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); - __ cmp(r1, ip); - __ b(ne, &done); - } // Perform the assignment. 
__ str(result_register(), MemOperand(fp, SlotOffset(slot))); break; case Slot::CONTEXT: { MemOperand target = EmitSlotSearch(slot, r1); - if (op == Token::INIT_CONST) { - // Detect const reinitialization by checking for the hole value. - __ ldr(r2, target); - __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); - __ cmp(r2, ip); - __ b(ne, &done); - } // Perform the assignment and issue the write barrier. __ str(result_register(), target); // RecordWrite may destroy all its register arguments. @@ -2042,20 +2073,13 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, } case Slot::LOOKUP: - // Call the runtime for the assignment. The runtime will ignore - // const reinitialization. + // Call the runtime for the assignment. __ push(r0); // Value. __ mov(r0, Operand(slot->var()->name())); __ Push(cp, r0); // Context and name. - if (op == Token::INIT_CONST) { - // The runtime will ignore const redeclaration. - __ CallRuntime(Runtime::kInitializeConstContextSlot, 3); - } else { - __ CallRuntime(Runtime::kStoreContextSlot, 3); - } + __ CallRuntime(Runtime::kStoreContextSlot, 3); break; } - __ bind(&done); } } @@ -3373,8 +3397,8 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) { } } else if (var != NULL) { // Delete of an unqualified identifier is disallowed in strict mode - // so this code can only be reached in non-strict mode. - ASSERT(strict_mode_flag() == kNonStrictMode); + // but "delete this" is. + ASSERT(strict_mode_flag() == kNonStrictMode || var->is_this()); if (var->is_global()) { __ ldr(r2, GlobalObjectOperand()); __ mov(r1, Operand(var->name())); @@ -3414,17 +3438,23 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) { case Token::NOT: { Comment cmnt(masm_, "[ UnaryOperation (NOT)"); - Label materialize_true, materialize_false; - Label* if_true = NULL; - Label* if_false = NULL; - Label* fall_through = NULL; - - // Notice that the labels are swapped. 
- context()->PrepareTest(&materialize_true, &materialize_false, - &if_false, &if_true, &fall_through); - if (context()->IsTest()) ForwardBailoutToChild(expr); - VisitForControl(expr->expression(), if_true, if_false, fall_through); - context()->Plug(if_false, if_true); // Labels swapped. + if (context()->IsEffect()) { + // Unary NOT has no side effects so it's only necessary to visit the + // subexpression. Match the optimizing compiler by not branching. + VisitForEffect(expr->expression()); + } else { + Label materialize_true, materialize_false; + Label* if_true = NULL; + Label* if_false = NULL; + Label* fall_through = NULL; + + // Notice that the labels are swapped. + context()->PrepareTest(&materialize_true, &materialize_false, + &if_false, &if_true, &fall_through); + if (context()->IsTest()) ForwardBailoutToChild(expr); + VisitForControl(expr->expression(), if_true, if_false, fall_through); + context()->Plug(if_false, if_true); // Labels swapped. + } break; } diff --git a/deps/v8/src/arm/lithium-arm.cc b/deps/v8/src/arm/lithium-arm.cc index 903f77bbf0..d3c9fee8e3 100644 --- a/deps/v8/src/arm/lithium-arm.cc +++ b/deps/v8/src/arm/lithium-arm.cc @@ -346,7 +346,7 @@ void LAccessArgumentsAt::PrintDataTo(StringStream* stream) { } -void LStoreNamed::PrintDataTo(StringStream* stream) { +void LStoreNamedField::PrintDataTo(StringStream* stream) { object()->PrintTo(stream); stream->Add("."); stream->Add(*String::cast(*name())->ToCString()); @@ -355,7 +355,25 @@ void LStoreNamed::PrintDataTo(StringStream* stream) { } -void LStoreKeyed::PrintDataTo(StringStream* stream) { +void LStoreNamedGeneric::PrintDataTo(StringStream* stream) { + object()->PrintTo(stream); + stream->Add("."); + stream->Add(*String::cast(*name())->ToCString()); + stream->Add(" <- "); + value()->PrintTo(stream); +} + + +void LStoreKeyedFastElement::PrintDataTo(StringStream* stream) { + object()->PrintTo(stream); + stream->Add("["); + key()->PrintTo(stream); + stream->Add("] <- "); + 
value()->PrintTo(stream); +} + + +void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) { object()->PrintTo(stream); stream->Add("["); key()->PrintTo(stream); @@ -1204,8 +1222,7 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) { case kMathSqrt: return DefineSameAsFirst(result); case kMathRound: - Abort("MathRound LUnaryMathOperation not implemented"); - return NULL; + return AssignEnvironment(DefineAsRegister(result)); case kMathPowHalf: Abort("MathPowHalf LUnaryMathOperation not implemented"); return NULL; @@ -1418,8 +1435,19 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) { LInstruction* LChunkBuilder::DoPower(HPower* instr) { - Abort("LPower instruction not implemented on ARM"); - return NULL; + ASSERT(instr->representation().IsDouble()); + // We call a C function for double power. It can't trigger a GC. + // We need to use fixed result register for the call. + Representation exponent_type = instr->right()->representation(); + ASSERT(instr->left()->representation().IsDouble()); + LOperand* left = UseFixedDouble(instr->left(), d1); + LOperand* right = exponent_type.IsDouble() ? 
+ UseFixedDouble(instr->right(), d2) : + UseFixed(instr->right(), r0); + LPower* result = new LPower(left, right); + return MarkAsCall(DefineFixedDouble(result, d3), + instr, + CAN_DEOPTIMIZE_EAGERLY); } @@ -1709,11 +1737,13 @@ LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) { LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) { - LOperand* context = UseTempRegister(instr->context()); + LOperand* context; LOperand* value; if (instr->NeedsWriteBarrier()) { + context = UseTempRegister(instr->context()); value = UseTempRegister(instr->value()); } else { + context = UseRegister(instr->context()); value = UseRegister(instr->value()); } return new LStoreContextSlot(context, value); @@ -1806,6 +1836,13 @@ LInstruction* LChunkBuilder::DoStoreKeyedFastElement( } +LInstruction* LChunkBuilder::DoStorePixelArrayElement( + HStorePixelArrayElement* instr) { + Abort("DoStorePixelArrayElement not implemented"); + return NULL; +} + + LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) { LOperand* obj = UseFixed(instr->object(), r2); LOperand* key = UseFixed(instr->key(), r1); @@ -1911,8 +1948,10 @@ LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) { LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) { - // There are no real uses of the arguments object (we bail out in all other - // cases). + // There are no real uses of the arguments object. + // arguments.length and element access are supported directly on + // stack arguments, and any real arguments object use causes a bailout. + // So this value is never used. 
return NULL; } diff --git a/deps/v8/src/arm/lithium-arm.h b/deps/v8/src/arm/lithium-arm.h index 57338f16d5..77d6b71a93 100644 --- a/deps/v8/src/arm/lithium-arm.h +++ b/deps/v8/src/arm/lithium-arm.h @@ -42,8 +42,6 @@ class LCodeGen; #define LITHIUM_ALL_INSTRUCTION_LIST(V) \ V(ControlInstruction) \ V(Call) \ - V(StoreKeyed) \ - V(StoreNamed) \ LITHIUM_CONCRETE_INSTRUCTION_LIST(V) @@ -135,6 +133,7 @@ class LCodeGen; V(OuterContext) \ V(Parameter) \ V(PixelArrayLength) \ + V(Power) \ V(PushArgument) \ V(RegExpLiteral) \ V(Return) \ @@ -1058,6 +1057,18 @@ class LAddI: public LTemplateInstruction<1, 2, 0> { }; +class LPower: public LTemplateInstruction<1, 2, 0> { + public: + LPower(LOperand* left, LOperand* right) { + inputs_[0] = left; + inputs_[1] = right; + } + + DECLARE_CONCRETE_INSTRUCTION(Power, "power") + DECLARE_HYDROGEN_ACCESSOR(Power) +}; + + class LArithmeticD: public LTemplateInstruction<1, 2, 0> { public: LArithmeticD(Token::Value op, LOperand* left, LOperand* right) @@ -1510,32 +1521,22 @@ class LSmiUntag: public LTemplateInstruction<1, 1, 0> { }; -class LStoreNamed: public LTemplateInstruction<0, 2, 0> { +class LStoreNamedField: public LTemplateInstruction<0, 2, 0> { public: - LStoreNamed(LOperand* obj, LOperand* val) { + LStoreNamedField(LOperand* obj, LOperand* val) { inputs_[0] = obj; inputs_[1] = val; } - DECLARE_INSTRUCTION(StoreNamed) - DECLARE_HYDROGEN_ACCESSOR(StoreNamed) + DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field") + DECLARE_HYDROGEN_ACCESSOR(StoreNamedField) virtual void PrintDataTo(StringStream* stream); LOperand* object() { return inputs_[0]; } LOperand* value() { return inputs_[1]; } - Handle<Object> name() const { return hydrogen()->name(); } -}; - - -class LStoreNamedField: public LStoreNamed { - public: - LStoreNamedField(LOperand* obj, LOperand* val) - : LStoreNamed(obj, val) { } - - DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field") - DECLARE_HYDROGEN_ACCESSOR(StoreNamedField) + Handle<Object> 
name() const { return hydrogen()->name(); } bool is_in_object() { return hydrogen()->is_in_object(); } int offset() { return hydrogen()->offset(); } bool needs_write_barrier() { return hydrogen()->NeedsWriteBarrier(); } @@ -1543,25 +1544,35 @@ class LStoreNamedField: public LStoreNamed { }; -class LStoreNamedGeneric: public LStoreNamed { +class LStoreNamedGeneric: public LTemplateInstruction<0, 2, 0> { public: - LStoreNamedGeneric(LOperand* obj, LOperand* val) - : LStoreNamed(obj, val) { } + LStoreNamedGeneric(LOperand* obj, LOperand* val) { + inputs_[0] = obj; + inputs_[1] = val; + } DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic") DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric) + + virtual void PrintDataTo(StringStream* stream); + + LOperand* object() { return inputs_[0]; } + LOperand* value() { return inputs_[1]; } + Handle<Object> name() const { return hydrogen()->name(); } }; -class LStoreKeyed: public LTemplateInstruction<0, 3, 0> { +class LStoreKeyedFastElement: public LTemplateInstruction<0, 3, 0> { public: - LStoreKeyed(LOperand* obj, LOperand* key, LOperand* val) { + LStoreKeyedFastElement(LOperand* obj, LOperand* key, LOperand* val) { inputs_[0] = obj; inputs_[1] = key; inputs_[2] = val; } - DECLARE_INSTRUCTION(StoreKeyed) + DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement, + "store-keyed-fast-element") + DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastElement) virtual void PrintDataTo(StringStream* stream); @@ -1571,23 +1582,21 @@ class LStoreKeyed: public LTemplateInstruction<0, 3, 0> { }; -class LStoreKeyedFastElement: public LStoreKeyed { +class LStoreKeyedGeneric: public LTemplateInstruction<0, 3, 0> { public: - LStoreKeyedFastElement(LOperand* obj, LOperand* key, LOperand* val) - : LStoreKeyed(obj, key, val) {} - - DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement, - "store-keyed-fast-element") - DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastElement) -}; + LStoreKeyedGeneric(LOperand* obj, LOperand* key, LOperand* val) { + inputs_[0] = obj; 
+ inputs_[1] = key; + inputs_[2] = val; + } + DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic") -class LStoreKeyedGeneric: public LStoreKeyed { - public: - LStoreKeyedGeneric(LOperand* obj, LOperand* key, LOperand* val) - : LStoreKeyed(obj, key, val) { } + virtual void PrintDataTo(StringStream* stream); - DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic") + LOperand* object() { return inputs_[0]; } + LOperand* key() { return inputs_[1]; } + LOperand* value() { return inputs_[2]; } }; diff --git a/deps/v8/src/arm/lithium-codegen-arm.cc b/deps/v8/src/arm/lithium-codegen-arm.cc index 1bfb3ad943..ca64442270 100644 --- a/deps/v8/src/arm/lithium-codegen-arm.cc +++ b/deps/v8/src/arm/lithium-codegen-arm.cc @@ -26,6 +26,7 @@ // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include "arm/lithium-codegen-arm.h" +#include "arm/lithium-gap-resolver-arm.h" #include "code-stubs.h" #include "stub-cache.h" @@ -54,157 +55,6 @@ class SafepointGenerator : public PostCallGenerator { }; -class LGapNode: public ZoneObject { - public: - explicit LGapNode(LOperand* operand) - : operand_(operand), resolved_(false), visited_id_(-1) { } - - LOperand* operand() const { return operand_; } - bool IsResolved() const { return !IsAssigned() || resolved_; } - void MarkResolved() { - ASSERT(!IsResolved()); - resolved_ = true; - } - int visited_id() const { return visited_id_; } - void set_visited_id(int id) { - ASSERT(id > visited_id_); - visited_id_ = id; - } - - bool IsAssigned() const { return assigned_from_.is_set(); } - LGapNode* assigned_from() const { return assigned_from_.get(); } - void set_assigned_from(LGapNode* n) { assigned_from_.set(n); } - - private: - LOperand* operand_; - SetOncePointer<LGapNode> assigned_from_; - bool resolved_; - int visited_id_; -}; - - -LGapResolver::LGapResolver() - : nodes_(32), - identified_cycles_(4), - result_(16), - next_visited_id_(0) { -} - - -const ZoneList<LMoveOperands>* 
LGapResolver::Resolve( - const ZoneList<LMoveOperands>* moves, - LOperand* marker_operand) { - nodes_.Rewind(0); - identified_cycles_.Rewind(0); - result_.Rewind(0); - next_visited_id_ = 0; - - for (int i = 0; i < moves->length(); ++i) { - LMoveOperands move = moves->at(i); - if (!move.IsRedundant()) RegisterMove(move); - } - - for (int i = 0; i < identified_cycles_.length(); ++i) { - ResolveCycle(identified_cycles_[i], marker_operand); - } - - int unresolved_nodes; - do { - unresolved_nodes = 0; - for (int j = 0; j < nodes_.length(); j++) { - LGapNode* node = nodes_[j]; - if (!node->IsResolved() && node->assigned_from()->IsResolved()) { - AddResultMove(node->assigned_from(), node); - node->MarkResolved(); - } - if (!node->IsResolved()) ++unresolved_nodes; - } - } while (unresolved_nodes > 0); - return &result_; -} - - -void LGapResolver::AddResultMove(LGapNode* from, LGapNode* to) { - AddResultMove(from->operand(), to->operand()); -} - - -void LGapResolver::AddResultMove(LOperand* from, LOperand* to) { - result_.Add(LMoveOperands(from, to)); -} - - -void LGapResolver::ResolveCycle(LGapNode* start, LOperand* marker_operand) { - ZoneList<LOperand*> cycle_operands(8); - cycle_operands.Add(marker_operand); - LGapNode* cur = start; - do { - cur->MarkResolved(); - cycle_operands.Add(cur->operand()); - cur = cur->assigned_from(); - } while (cur != start); - cycle_operands.Add(marker_operand); - - for (int i = cycle_operands.length() - 1; i > 0; --i) { - LOperand* from = cycle_operands[i]; - LOperand* to = cycle_operands[i - 1]; - AddResultMove(from, to); - } -} - - -bool LGapResolver::CanReach(LGapNode* a, LGapNode* b, int visited_id) { - ASSERT(a != b); - LGapNode* cur = a; - while (cur != b && cur->visited_id() != visited_id && cur->IsAssigned()) { - cur->set_visited_id(visited_id); - cur = cur->assigned_from(); - } - - return cur == b; -} - - -bool LGapResolver::CanReach(LGapNode* a, LGapNode* b) { - ASSERT(a != b); - return CanReach(a, b, next_visited_id_++); -} - - 
-void LGapResolver::RegisterMove(LMoveOperands move) { - if (move.source()->IsConstantOperand()) { - // Constant moves should be last in the machine code. Therefore add them - // first to the result set. - AddResultMove(move.source(), move.destination()); - } else { - LGapNode* from = LookupNode(move.source()); - LGapNode* to = LookupNode(move.destination()); - if (to->IsAssigned() && to->assigned_from() == from) { - move.Eliminate(); - return; - } - ASSERT(!to->IsAssigned()); - if (CanReach(from, to)) { - // This introduces a cycle. Save. - identified_cycles_.Add(from); - } - to->set_assigned_from(from); - } -} - - -LGapNode* LGapResolver::LookupNode(LOperand* operand) { - for (int i = 0; i < nodes_.length(); ++i) { - if (nodes_[i]->operand()->Equals(operand)) return nodes_[i]; - } - - // No node found => create a new one. - LGapNode* result = new LGapNode(operand); - nodes_.Add(result); - return result; -} - - #define __ masm()-> bool LCodeGen::GenerateCode() { @@ -294,6 +144,44 @@ bool LCodeGen::GeneratePrologue() { } } + // Possibly allocate a local context. + int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; + if (heap_slots > 0) { + Comment(";;; Allocate local context"); + // Argument to NewContext is the function, which is in r1. + __ push(r1); + if (heap_slots <= FastNewContextStub::kMaximumSlots) { + FastNewContextStub stub(heap_slots); + __ CallStub(&stub); + } else { + __ CallRuntime(Runtime::kNewContext, 1); + } + RecordSafepoint(Safepoint::kNoDeoptimizationIndex); + // Context is returned in both r0 and cp. It replaces the context + // passed to us. It's saved in the stack and kept live in cp. + __ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); + // Copy any necessary parameters into the context. 
+ int num_parameters = scope()->num_parameters(); + for (int i = 0; i < num_parameters; i++) { + Slot* slot = scope()->parameter(i)->AsSlot(); + if (slot != NULL && slot->type() == Slot::CONTEXT) { + int parameter_offset = StandardFrameConstants::kCallerSPOffset + + (num_parameters - 1 - i) * kPointerSize; + // Load parameter from stack. + __ ldr(r0, MemOperand(fp, parameter_offset)); + // Store it in the context. + __ mov(r1, Operand(Context::SlotOffset(slot->index()))); + __ str(r0, MemOperand(cp, r1)); + // Update the write barrier. This clobbers all involved + // registers, so we have to use two more registers to avoid + // clobbering cp. + __ mov(r2, Operand(cp)); + __ RecordWrite(r2, Operand(r1), r3, r0); + } + } + Comment(";;; End allocate local context"); + } + // Trace the call. if (FLAG_trace) { __ CallRuntime(Runtime::kTraceEnter, 0); @@ -464,7 +352,6 @@ Operand LCodeGen::ToOperand(LOperand* op) { MemOperand LCodeGen::ToMemOperand(LOperand* op) const { - // TODO(regis): Revisit. ASSERT(!op->IsRegister()); ASSERT(!op->IsDoubleRegister()); ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot()); @@ -480,6 +367,21 @@ MemOperand LCodeGen::ToMemOperand(LOperand* op) const { } +MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const { + ASSERT(op->IsDoubleStackSlot()); + int index = op->index(); + if (index >= 0) { + // Local or spill slot. Skip the frame pointer, function, context, + // and the first word of the double in the fixed part of the frame. + return MemOperand(fp, -(index + 3) * kPointerSize + kPointerSize); + } else { + // Incoming parameter. Skip the return address and the first word of + // the double. 
+ return MemOperand(fp, -(index - 1) * kPointerSize + kPointerSize); + } +} + + void LCodeGen::WriteTranslation(LEnvironment* environment, Translation* translation) { if (environment == NULL) return; @@ -751,6 +653,12 @@ void LCodeGen::RecordSafepoint(LPointerMap* pointers, } +void LCodeGen::RecordSafepoint(int deoptimization_index) { + LPointerMap empty_pointers(RelocInfo::kNoPosition); + RecordSafepoint(&empty_pointers, deoptimization_index); +} + + void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers, int arguments, int deoptimization_index) { @@ -787,116 +695,7 @@ void LCodeGen::DoLabel(LLabel* label) { void LCodeGen::DoParallelMove(LParallelMove* move) { - // d0 must always be a scratch register. - DoubleRegister dbl_scratch = d0; - LUnallocated marker_operand(LUnallocated::NONE); - - Register core_scratch = scratch0(); - bool destroys_core_scratch = false; - - const ZoneList<LMoveOperands>* moves = - resolver_.Resolve(move->move_operands(), &marker_operand); - for (int i = moves->length() - 1; i >= 0; --i) { - LMoveOperands move = moves->at(i); - LOperand* from = move.source(); - LOperand* to = move.destination(); - ASSERT(!from->IsDoubleRegister() || - !ToDoubleRegister(from).is(dbl_scratch)); - ASSERT(!to->IsDoubleRegister() || !ToDoubleRegister(to).is(dbl_scratch)); - ASSERT(!from->IsRegister() || !ToRegister(from).is(core_scratch)); - ASSERT(!to->IsRegister() || !ToRegister(to).is(core_scratch)); - if (from == &marker_operand) { - if (to->IsRegister()) { - __ mov(ToRegister(to), core_scratch); - ASSERT(destroys_core_scratch); - } else if (to->IsStackSlot()) { - __ str(core_scratch, ToMemOperand(to)); - ASSERT(destroys_core_scratch); - } else if (to->IsDoubleRegister()) { - __ vmov(ToDoubleRegister(to), dbl_scratch); - } else { - ASSERT(to->IsDoubleStackSlot()); - // TODO(regis): Why is vstr not taking a MemOperand? 
- // __ vstr(dbl_scratch, ToMemOperand(to)); - MemOperand to_operand = ToMemOperand(to); - __ vstr(dbl_scratch, to_operand.rn(), to_operand.offset()); - } - } else if (to == &marker_operand) { - if (from->IsRegister() || from->IsConstantOperand()) { - __ mov(core_scratch, ToOperand(from)); - destroys_core_scratch = true; - } else if (from->IsStackSlot()) { - __ ldr(core_scratch, ToMemOperand(from)); - destroys_core_scratch = true; - } else if (from->IsDoubleRegister()) { - __ vmov(dbl_scratch, ToDoubleRegister(from)); - } else { - ASSERT(from->IsDoubleStackSlot()); - // TODO(regis): Why is vldr not taking a MemOperand? - // __ vldr(dbl_scratch, ToMemOperand(from)); - MemOperand from_operand = ToMemOperand(from); - __ vldr(dbl_scratch, from_operand.rn(), from_operand.offset()); - } - } else if (from->IsConstantOperand()) { - if (to->IsRegister()) { - __ mov(ToRegister(to), ToOperand(from)); - } else { - ASSERT(to->IsStackSlot()); - __ mov(ip, ToOperand(from)); - __ str(ip, ToMemOperand(to)); - } - } else if (from->IsRegister()) { - if (to->IsRegister()) { - __ mov(ToRegister(to), ToOperand(from)); - } else { - ASSERT(to->IsStackSlot()); - __ str(ToRegister(from), ToMemOperand(to)); - } - } else if (to->IsRegister()) { - ASSERT(from->IsStackSlot()); - __ ldr(ToRegister(to), ToMemOperand(from)); - } else if (from->IsStackSlot()) { - ASSERT(to->IsStackSlot()); - __ ldr(ip, ToMemOperand(from)); - __ str(ip, ToMemOperand(to)); - } else if (from->IsDoubleRegister()) { - if (to->IsDoubleRegister()) { - __ vmov(ToDoubleRegister(to), ToDoubleRegister(from)); - } else { - ASSERT(to->IsDoubleStackSlot()); - // TODO(regis): Why is vstr not taking a MemOperand? - // __ vstr(dbl_scratch, ToMemOperand(to)); - MemOperand to_operand = ToMemOperand(to); - __ vstr(ToDoubleRegister(from), to_operand.rn(), to_operand.offset()); - } - } else if (to->IsDoubleRegister()) { - ASSERT(from->IsDoubleStackSlot()); - // TODO(regis): Why is vldr not taking a MemOperand? 
- // __ vldr(ToDoubleRegister(to), ToMemOperand(from)); - MemOperand from_operand = ToMemOperand(from); - __ vldr(ToDoubleRegister(to), from_operand.rn(), from_operand.offset()); - } else { - ASSERT(to->IsDoubleStackSlot() && from->IsDoubleStackSlot()); - // TODO(regis): Why is vldr not taking a MemOperand? - // __ vldr(dbl_scratch, ToMemOperand(from)); - MemOperand from_operand = ToMemOperand(from); - __ vldr(dbl_scratch, from_operand.rn(), from_operand.offset()); - // TODO(regis): Why is vstr not taking a MemOperand? - // __ vstr(dbl_scratch, ToMemOperand(to)); - MemOperand to_operand = ToMemOperand(to); - __ vstr(dbl_scratch, to_operand.rn(), to_operand.offset()); - } - } - - if (destroys_core_scratch) { - __ ldr(core_scratch, MemOperand(fp, -kPointerSize)); - } - - LInstruction* next = GetNextInstruction(); - if (next != NULL && next->IsLazyBailout()) { - int pc = masm()->pc_offset(); - safepoints_.SetPcAfterGap(pc); - } + resolver_.Resolve(move); } @@ -987,7 +786,7 @@ void LCodeGen::DoModI(LModI* instr) { DeferredModI(LCodeGen* codegen, LModI* instr) : LDeferredCode(codegen), instr_(instr) { } virtual void Generate() { - codegen()->DoDeferredGenericBinaryStub(instr_, Token::MOD); + codegen()->DoDeferredBinaryOpStub(instr_, Token::MOD); } private: LModI* instr_; @@ -1016,7 +815,7 @@ void LCodeGen::DoModI(LModI* instr) { __ bind(&ok); } - // Try a few common cases before using the generic stub. + // Try a few common cases before using the stub. Label call_stub; const int kUnfolds = 3; // Skip if either side is negative. @@ -1044,7 +843,7 @@ void LCodeGen::DoModI(LModI* instr) { __ and_(result, scratch, Operand(left)); __ bind(&call_stub); - // Call the generic stub. The numbers in r0 and r1 have + // Call the stub. The numbers in r0 and r1 have // to be tagged to Smis. If that is not possible, deoptimize. 
DeferredModI* deferred = new DeferredModI(this, instr); __ TrySmiTag(left, &deoptimize, scratch); @@ -1070,7 +869,7 @@ void LCodeGen::DoDivI(LDivI* instr) { DeferredDivI(LCodeGen* codegen, LDivI* instr) : LDeferredCode(codegen), instr_(instr) { } virtual void Generate() { - codegen()->DoDeferredGenericBinaryStub(instr_, Token::DIV); + codegen()->DoDeferredBinaryOpStub(instr_, Token::DIV); } private: LDivI* instr_; @@ -1123,7 +922,7 @@ void LCodeGen::DoDivI(LDivI* instr) { __ mov(result, Operand(left, ASR, 2), LeaveCC, eq); __ b(eq, &done); - // Call the generic stub. The numbers in r0 and r1 have + // Call the stub. The numbers in r0 and r1 have // to be tagged to Smis. If that is not possible, deoptimize. DeferredDivI* deferred = new DeferredDivI(this, instr); @@ -1145,19 +944,33 @@ void LCodeGen::DoDivI(LDivI* instr) { template<int T> -void LCodeGen::DoDeferredGenericBinaryStub(LTemplateInstruction<1, 2, T>* instr, - Token::Value op) { +void LCodeGen::DoDeferredBinaryOpStub(LTemplateInstruction<1, 2, T>* instr, + Token::Value op) { Register left = ToRegister(instr->InputAt(0)); Register right = ToRegister(instr->InputAt(1)); __ PushSafepointRegistersAndDoubles(); - GenericBinaryOpStub stub(op, OVERWRITE_LEFT, left, right); + // Move left to r1 and right to r0 for the stub call. + if (left.is(r1)) { + __ Move(r0, right); + } else if (left.is(r0) && right.is(r1)) { + __ Swap(r0, r1, r2); + } else if (left.is(r0)) { + ASSERT(!right.is(r1)); + __ mov(r1, r0); + __ mov(r0, right); + } else { + ASSERT(!left.is(r0) && !right.is(r0)); + __ mov(r0, right); + __ mov(r1, left); + } + TypeRecordingBinaryOpStub stub(op, OVERWRITE_LEFT); __ CallStub(&stub); RecordSafepointWithRegistersAndDoubles(instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex); // Overwrite the stored value of r0 with the result of the stub. 
- __ StoreToSafepointRegistersAndDoublesSlot(r0); + __ StoreToSafepointRegistersAndDoublesSlot(r0, r0); __ PopSafepointRegistersAndDoubles(); } @@ -1413,7 +1226,7 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) { __ vmov(r2, r3, right); __ CallCFunction(ExternalReference::double_fp_operation(Token::MOD), 4); // Move the result in the double result register. - __ vmov(ToDoubleRegister(instr->result()), r0, r1); + __ GetCFunctionDoubleResult(ToDoubleRegister(instr->result())); // Restore r0-r3. __ ldm(ia_w, sp, r0.bit() | r1.bit() | r2.bit() | r3.bit()); @@ -1431,10 +1244,7 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) { ASSERT(ToRegister(instr->InputAt(1)).is(r0)); ASSERT(ToRegister(instr->result()).is(r0)); - // TODO(regis): Implement TypeRecordingBinaryOpStub and replace current - // GenericBinaryOpStub: - // TypeRecordingBinaryOpStub stub(instr->op(), NO_OVERWRITE); - GenericBinaryOpStub stub(instr->op(), NO_OVERWRITE, r1, r0); + TypeRecordingBinaryOpStub stub(instr->op(), NO_OVERWRITE); CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); } @@ -2174,7 +1984,7 @@ void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr, __ bind(&before_push_delta); __ BlockConstPoolFor(kAdditionalDelta); __ mov(temp, Operand(delta * kPointerSize)); - __ StoreToSafepointRegisterSlot(temp); + __ StoreToSafepointRegisterSlot(temp, temp); __ Call(stub.GetCode(), RelocInfo::CODE_TARGET); ASSERT_EQ(kAdditionalDelta, masm_->InstructionsGeneratedSince(&before_push_delta)); @@ -2182,7 +1992,7 @@ void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr, instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex); // Put the result value into the result register slot and // restore all registers. 
- __ StoreToSafepointRegisterSlot(result); + __ StoreToSafepointRegisterSlot(result, result); __ PopSafepointRegisters(); } @@ -2302,17 +2112,13 @@ void LCodeGen::DoStoreGlobal(LStoreGlobal* instr) { void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) { Register context = ToRegister(instr->context()); Register result = ToRegister(instr->result()); - __ ldr(result, - MemOperand(context, Context::SlotOffset(Context::FCONTEXT_INDEX))); - __ ldr(result, ContextOperand(result, instr->slot_index())); + __ ldr(result, ContextOperand(context, instr->slot_index())); } void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) { Register context = ToRegister(instr->context()); Register value = ToRegister(instr->value()); - __ ldr(context, - MemOperand(context, Context::SlotOffset(Context::FCONTEXT_INDEX))); __ str(value, ContextOperand(context, instr->slot_index())); if (instr->needs_write_barrier()) { int offset = Context::SlotOffset(instr->slot_index()); @@ -2715,7 +2521,7 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) { // Set the pointer to the new heap number in tmp. if (!tmp1.is(r0)) __ mov(tmp1, Operand(r0)); // Restore input_reg after call to runtime. 
- __ LoadFromSafepointRegisterSlot(input); + __ LoadFromSafepointRegisterSlot(input, input); __ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset)); __ bind(&allocated); @@ -2726,7 +2532,7 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) { __ ldr(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset)); __ str(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset)); - __ str(tmp1, masm()->SafepointRegisterSlot(input)); + __ StoreToSafepointRegisterSlot(tmp1, input); __ PopSafepointRegisters(); __ bind(&done); @@ -2843,6 +2649,30 @@ void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) { } +void LCodeGen::DoMathRound(LUnaryMathOperation* instr) { + DoubleRegister input = ToDoubleRegister(instr->InputAt(0)); + Register result = ToRegister(instr->result()); + Register scratch1 = scratch0(); + Register scratch2 = result; + EmitVFPTruncate(kRoundToNearest, + double_scratch0().low(), + input, + scratch1, + scratch2); + DeoptimizeIf(ne, instr->environment()); + __ vmov(result, double_scratch0().low()); + + // Test for -0. + Label done; + __ cmp(result, Operand(0)); + __ b(ne, &done); + __ vmov(scratch1, input.high()); + __ tst(scratch1, Operand(HeapNumber::kSignMask)); + DeoptimizeIf(ne, instr->environment()); + __ bind(&done); +} + + void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) { DoubleRegister input = ToDoubleRegister(instr->InputAt(0)); ASSERT(ToDoubleRegister(instr->result()).is(input)); @@ -2850,6 +2680,64 @@ void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) { } +void LCodeGen::DoPower(LPower* instr) { + LOperand* left = instr->InputAt(0); + LOperand* right = instr->InputAt(1); + Register scratch = scratch0(); + DoubleRegister result_reg = ToDoubleRegister(instr->result()); + Representation exponent_type = instr->hydrogen()->right()->representation(); + if (exponent_type.IsDouble()) { + // Prepare arguments and call C function. 
+ __ PrepareCallCFunction(4, scratch); + __ vmov(r0, r1, ToDoubleRegister(left)); + __ vmov(r2, r3, ToDoubleRegister(right)); + __ CallCFunction(ExternalReference::power_double_double_function(), 4); + } else if (exponent_type.IsInteger32()) { + ASSERT(ToRegister(right).is(r0)); + // Prepare arguments and call C function. + __ PrepareCallCFunction(4, scratch); + __ mov(r2, ToRegister(right)); + __ vmov(r0, r1, ToDoubleRegister(left)); + __ CallCFunction(ExternalReference::power_double_int_function(), 4); + } else { + ASSERT(exponent_type.IsTagged()); + ASSERT(instr->hydrogen()->left()->representation().IsDouble()); + + Register right_reg = ToRegister(right); + + // Check for smi on the right hand side. + Label non_smi, call; + __ JumpIfNotSmi(right_reg, &non_smi); + + // Untag smi and convert it to a double. + __ SmiUntag(right_reg); + SwVfpRegister single_scratch = double_scratch0().low(); + __ vmov(single_scratch, right_reg); + __ vcvt_f64_s32(result_reg, single_scratch); + __ jmp(&call); + + // Heap number map check. + __ bind(&non_smi); + __ ldr(scratch, FieldMemOperand(right_reg, HeapObject::kMapOffset)); + __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); + __ cmp(scratch, Operand(ip)); + DeoptimizeIf(ne, instr->environment()); + int32_t value_offset = HeapNumber::kValueOffset - kHeapObjectTag; + __ add(scratch, right_reg, Operand(value_offset)); + __ vldr(result_reg, scratch, 0); + + // Prepare arguments and call C function. + __ bind(&call); + __ PrepareCallCFunction(4, scratch); + __ vmov(r0, r1, ToDoubleRegister(left)); + __ vmov(r2, r3, result_reg); + __ CallCFunction(ExternalReference::power_double_double_function(), 4); + } + // Store the result in the result register. 
+ __ GetCFunctionDoubleResult(result_reg); +} + + void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) { switch (instr->op()) { case kMathAbs: @@ -2858,6 +2746,9 @@ void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) { case kMathFloor: DoMathFloor(instr); break; + case kMathRound: + DoMathRound(instr); + break; case kMathSqrt: DoMathSqrt(instr); break; @@ -3157,8 +3048,7 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) { __ AbortIfNotSmi(r0); } __ SmiUntag(r0); - MemOperand result_stack_slot = masm()->SafepointRegisterSlot(result); - __ str(r0, result_stack_slot); + __ StoreToSafepointRegisterSlot(r0, result); __ PopSafepointRegisters(); } @@ -3239,9 +3129,7 @@ void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) { // register is stored, as this register is in the pointer map, but contains an // integer value. __ mov(ip, Operand(0)); - int reg_stack_index = __ SafepointRegisterStackIndex(reg.code()); - __ str(ip, MemOperand(sp, reg_stack_index * kPointerSize)); - + __ StoreToSafepointRegisterSlot(ip, reg); __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); RecordSafepointWithRegisters( instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex); @@ -3252,7 +3140,7 @@ void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) { __ bind(&done); __ sub(ip, reg, Operand(kHeapObjectTag)); __ vstr(dbl_scratch, ip, HeapNumber::kValueOffset); - __ str(reg, MemOperand(sp, reg_stack_index * kPointerSize)); + __ StoreToSafepointRegisterSlot(reg, reg); __ PopSafepointRegisters(); } @@ -3297,8 +3185,7 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) { __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); RecordSafepointWithRegisters( instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex); - int reg_stack_index = __ SafepointRegisterStackIndex(reg.code()); - __ str(r0, MemOperand(sp, reg_stack_index * kPointerSize)); + __ StoreToSafepointRegisterSlot(r0, reg); __ PopSafepointRegisters(); } diff --git 
a/deps/v8/src/arm/lithium-codegen-arm.h b/deps/v8/src/arm/lithium-codegen-arm.h index 732db44517..2d9c6edcb6 100644 --- a/deps/v8/src/arm/lithium-codegen-arm.h +++ b/deps/v8/src/arm/lithium-codegen-arm.h @@ -29,7 +29,7 @@ #define V8_ARM_LITHIUM_CODEGEN_ARM_H_ #include "arm/lithium-arm.h" - +#include "arm/lithium-gap-resolver-arm.h" #include "deoptimizer.h" #include "safepoint-table.h" #include "scopes.h" @@ -39,31 +39,8 @@ namespace internal { // Forward declarations. class LDeferredCode; -class LGapNode; class SafepointGenerator; -class LGapResolver BASE_EMBEDDED { - public: - LGapResolver(); - const ZoneList<LMoveOperands>* Resolve(const ZoneList<LMoveOperands>* moves, - LOperand* marker_operand); - - private: - LGapNode* LookupNode(LOperand* operand); - bool CanReach(LGapNode* a, LGapNode* b, int visited_id); - bool CanReach(LGapNode* a, LGapNode* b); - void RegisterMove(LMoveOperands move); - void AddResultMove(LOperand* from, LOperand* to); - void AddResultMove(LGapNode* from, LGapNode* to); - void ResolveCycle(LGapNode* start, LOperand* marker_operand); - - ZoneList<LGapNode*> nodes_; - ZoneList<LGapNode*> identified_cycles_; - ZoneList<LMoveOperands> result_; - int next_visited_id_; -}; - - class LCodeGen BASE_EMBEDDED { public: LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info) @@ -79,10 +56,35 @@ class LCodeGen BASE_EMBEDDED { scope_(chunk->graph()->info()->scope()), status_(UNUSED), deferred_(8), - osr_pc_offset_(-1) { + osr_pc_offset_(-1), + resolver_(this) { PopulateDeoptimizationLiteralsWithInlinedFunctions(); } + + // Simple accessors. + MacroAssembler* masm() const { return masm_; } + + // Support for converting LOperands to assembler types. + // LOperand must be a register. + Register ToRegister(LOperand* op) const; + + // LOperand is loaded into scratch, unless already a register. + Register EmitLoadRegister(LOperand* op, Register scratch); + + // LOperand must be a double register. 
+ DoubleRegister ToDoubleRegister(LOperand* op) const; + + // LOperand is loaded into dbl_scratch, unless already a double register. + DoubleRegister EmitLoadDoubleRegister(LOperand* op, + SwVfpRegister flt_scratch, + DoubleRegister dbl_scratch); + int ToInteger32(LConstantOperand* op) const; + Operand ToOperand(LOperand* op); + MemOperand ToMemOperand(LOperand* op) const; + // Returns a MemOperand pointing to the high word of a DoubleStackSlot. + MemOperand ToHighMemOperand(LOperand* op) const; + // Try to generate code for the entire chunk, but it may fail if the // chunk contains constructs we cannot handle. Returns true if the // code generation attempt succeeded. @@ -94,8 +96,8 @@ class LCodeGen BASE_EMBEDDED { // Deferred code support. template<int T> - void DoDeferredGenericBinaryStub(LTemplateInstruction<1, 2, T>* instr, - Token::Value op); + void DoDeferredBinaryOpStub(LTemplateInstruction<1, 2, T>* instr, + Token::Value op); void DoDeferredNumberTagD(LNumberTagD* instr); void DoDeferredNumberTagI(LNumberTagI* instr); void DoDeferredTaggedToI(LTaggedToI* instr); @@ -136,7 +138,6 @@ class LCodeGen BASE_EMBEDDED { LChunk* chunk() const { return chunk_; } Scope* scope() const { return scope_; } HGraph* graph() const { return chunk_->graph(); } - MacroAssembler* masm() const { return masm_; } Register scratch0() { return r9; } DwVfpRegister double_scratch0() { return d0; } @@ -202,24 +203,6 @@ class LCodeGen BASE_EMBEDDED { Register ToRegister(int index) const; DoubleRegister ToDoubleRegister(int index) const; - // LOperand must be a register. - Register ToRegister(LOperand* op) const; - - // LOperand is loaded into scratch, unless already a register. - Register EmitLoadRegister(LOperand* op, Register scratch); - - // LOperand must be a double register. - DoubleRegister ToDoubleRegister(LOperand* op) const; - - // LOperand is loaded into dbl_scratch, unless already a double register. 
- DoubleRegister EmitLoadDoubleRegister(LOperand* op, - SwVfpRegister flt_scratch, - DoubleRegister dbl_scratch); - - int ToInteger32(LConstantOperand* op) const; - Operand ToOperand(LOperand* op); - MemOperand ToMemOperand(LOperand* op) const; - // Specific math operations - used from DoUnaryMathOperation. void EmitIntegerMathAbs(LUnaryMathOperation* instr); void DoMathAbs(LUnaryMathOperation* instr); @@ -229,6 +212,7 @@ class LCodeGen BASE_EMBEDDED { Register scratch1, Register scratch2); void DoMathFloor(LUnaryMathOperation* instr); + void DoMathRound(LUnaryMathOperation* instr); void DoMathSqrt(LUnaryMathOperation* instr); // Support for recording safepoint and position information. @@ -237,6 +221,7 @@ class LCodeGen BASE_EMBEDDED { int arguments, int deoptimization_index); void RecordSafepoint(LPointerMap* pointers, int deoptimization_index); + void RecordSafepoint(int deoptimization_index); void RecordSafepointWithRegisters(LPointerMap* pointers, int arguments, int deoptimization_index); diff --git a/deps/v8/src/arm/lithium-gap-resolver-arm.cc b/deps/v8/src/arm/lithium-gap-resolver-arm.cc new file mode 100644 index 0000000000..1a2326b748 --- /dev/null +++ b/deps/v8/src/arm/lithium-gap-resolver-arm.cc @@ -0,0 +1,303 @@ +// Copyright 2011 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. 
nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#include "arm/lithium-gap-resolver-arm.h" +#include "arm/lithium-codegen-arm.h" + +namespace v8 { +namespace internal { + +static const Register kSavedValueRegister = { 9 }; +static const DoubleRegister kSavedDoubleValueRegister = { 0 }; + +LGapResolver::LGapResolver(LCodeGen* owner) + : cgen_(owner), moves_(32), root_index_(0), in_cycle_(false), + saved_destination_(NULL) { } + + +void LGapResolver::Resolve(LParallelMove* parallel_move) { + ASSERT(moves_.is_empty()); + // Build up a worklist of moves. + BuildInitialMoveList(parallel_move); + + for (int i = 0; i < moves_.length(); ++i) { + LMoveOperands move = moves_[i]; + // Skip constants to perform them last. They don't block other moves + // and skipping such moves with register destinations keeps those + // registers free for the whole algorithm. + if (!move.IsEliminated() && !move.source()->IsConstantOperand()) { + root_index_ = i; // Any cycle is found when by reaching this move again. 
+ PerformMove(i); + if (in_cycle_) { + RestoreValue(); + } + } + } + + // Perform the moves with constant sources. + for (int i = 0; i < moves_.length(); ++i) { + if (!moves_[i].IsEliminated()) { + ASSERT(moves_[i].source()->IsConstantOperand()); + EmitMove(i); + } + } + + moves_.Rewind(0); +} + + +void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) { + // Perform a linear sweep of the moves to add them to the initial list of + // moves to perform, ignoring any move that is redundant (the source is + // the same as the destination, the destination is ignored and + // unallocated, or the move was already eliminated). + const ZoneList<LMoveOperands>* moves = parallel_move->move_operands(); + for (int i = 0; i < moves->length(); ++i) { + LMoveOperands move = moves->at(i); + if (!move.IsRedundant()) moves_.Add(move); + } + Verify(); +} + + +void LGapResolver::PerformMove(int index) { + // Each call to this function performs a move and deletes it from the move + // graph. We first recursively perform any move blocking this one. We + // mark a move as "pending" on entry to PerformMove in order to detect + // cycles in the move graph. + + // We can only find a cycle, when doing a depth-first traversal of moves, + // be encountering the starting move again. So by spilling the source of + // the starting move, we break the cycle. All moves are then unblocked, + // and the starting move is completed by writing the spilled value to + // its destination. All other moves from the spilled source have been + // completed prior to breaking the cycle. + // An additional complication is that moves to MemOperands with large + // offsets (more than 1K or 4K) require us to spill this spilled value to + // the stack, to free up the register. + ASSERT(!moves_[index].IsPending()); + ASSERT(!moves_[index].IsRedundant()); + + // Clear this move's destination to indicate a pending move. The actual + // destination is saved in a stack allocated local. 
Multiple moves can + // be pending because this function is recursive. + ASSERT(moves_[index].source() != NULL); // Or else it will look eliminated. + LOperand* destination = moves_[index].destination(); + moves_[index].set_destination(NULL); + + // Perform a depth-first traversal of the move graph to resolve + // dependencies. Any unperformed, unpending move with a source the same + // as this one's destination blocks this one so recursively perform all + // such moves. + for (int i = 0; i < moves_.length(); ++i) { + LMoveOperands other_move = moves_[i]; + if (other_move.Blocks(destination) && !other_move.IsPending()) { + PerformMove(i); + // If there is a blocking, pending move it must be moves_[root_index_] + // and all other moves with the same source as moves_[root_index_] are + // sucessfully executed (because they are cycle-free) by this loop. + } + } + + // We are about to resolve this move and don't need it marked as + // pending, so restore its destination. + moves_[index].set_destination(destination); + + // The move may be blocked on a pending move, which must be the starting move. + // In this case, we have a cycle, and we save the source of this move to + // a scratch register to break it. + LMoveOperands other_move = moves_[root_index_]; + if (other_move.Blocks(destination)) { + ASSERT(other_move.IsPending()); + BreakCycle(index); + return; + } + + // This move is no longer blocked. + EmitMove(index); +} + + +void LGapResolver::Verify() { +#ifdef ENABLE_SLOW_ASSERTS + // No operand should be the destination for more than one move. + for (int i = 0; i < moves_.length(); ++i) { + LOperand* destination = moves_[i].destination(); + for (int j = i + 1; j < moves_.length(); ++j) { + SLOW_ASSERT(!destination->Equals(moves_[j].destination())); + } + } +#endif +} + +#define __ ACCESS_MASM(cgen_->masm()) + +void LGapResolver::BreakCycle(int index) { + // We save in a register the value that should end up in the source of + // moves_[root_index]. 
After performing all moves in the tree rooted + // in that move, we save the value to that source. + ASSERT(moves_[index].destination()->Equals(moves_[root_index_].source())); + ASSERT(!in_cycle_); + in_cycle_ = true; + LOperand* source = moves_[index].source(); + saved_destination_ = moves_[index].destination(); + if (source->IsRegister()) { + __ mov(kSavedValueRegister, cgen_->ToRegister(source)); + } else if (source->IsStackSlot()) { + __ ldr(kSavedValueRegister, cgen_->ToMemOperand(source)); + } else if (source->IsDoubleRegister()) { + __ vmov(kSavedDoubleValueRegister, cgen_->ToDoubleRegister(source)); + } else if (source->IsDoubleStackSlot()) { + __ vldr(kSavedDoubleValueRegister, cgen_->ToMemOperand(source)); + } else { + UNREACHABLE(); + } + // This move will be done by restoring the saved value to the destination. + moves_[index].Eliminate(); +} + + +void LGapResolver::RestoreValue() { + ASSERT(in_cycle_); + ASSERT(saved_destination_ != NULL); + + // Spilled value is in kSavedValueRegister or kSavedDoubleValueRegister. + if (saved_destination_->IsRegister()) { + __ mov(cgen_->ToRegister(saved_destination_), kSavedValueRegister); + } else if (saved_destination_->IsStackSlot()) { + __ str(kSavedValueRegister, cgen_->ToMemOperand(saved_destination_)); + } else if (saved_destination_->IsDoubleRegister()) { + __ vmov(cgen_->ToDoubleRegister(saved_destination_), + kSavedDoubleValueRegister); + } else if (saved_destination_->IsDoubleStackSlot()) { + __ vstr(kSavedDoubleValueRegister, + cgen_->ToMemOperand(saved_destination_)); + } else { + UNREACHABLE(); + } + + in_cycle_ = false; + saved_destination_ = NULL; +} + + +void LGapResolver::EmitMove(int index) { + LOperand* source = moves_[index].source(); + LOperand* destination = moves_[index].destination(); + + // Dispatch on the source and destination operand kinds. Not all + // combinations are possible. 
+ + if (source->IsRegister()) { + Register source_register = cgen_->ToRegister(source); + if (destination->IsRegister()) { + __ mov(cgen_->ToRegister(destination), source_register); + } else { + ASSERT(destination->IsStackSlot()); + __ str(source_register, cgen_->ToMemOperand(destination)); + } + + } else if (source->IsStackSlot()) { + MemOperand source_operand = cgen_->ToMemOperand(source); + if (destination->IsRegister()) { + __ ldr(cgen_->ToRegister(destination), source_operand); + } else { + ASSERT(destination->IsStackSlot()); + MemOperand destination_operand = cgen_->ToMemOperand(destination); + if (in_cycle_) { + if (!destination_operand.OffsetIsUint12Encodable()) { + // ip is overwritten while saving the value to the destination. + // Therefore we can't use ip. It is OK if the read from the source + // destroys ip, since that happens before the value is read. + __ vldr(kSavedDoubleValueRegister.low(), source_operand); + __ vstr(kSavedDoubleValueRegister.low(), destination_operand); + } else { + __ ldr(ip, source_operand); + __ str(ip, destination_operand); + } + } else { + __ ldr(kSavedValueRegister, source_operand); + __ str(kSavedValueRegister, destination_operand); + } + } + + } else if (source->IsConstantOperand()) { + Operand source_operand = cgen_->ToOperand(source); + if (destination->IsRegister()) { + __ mov(cgen_->ToRegister(destination), source_operand); + } else { + ASSERT(destination->IsStackSlot()); + ASSERT(!in_cycle_); // Constant moves happen after all cycles are gone. 
+ MemOperand destination_operand = cgen_->ToMemOperand(destination); + __ mov(kSavedValueRegister, source_operand); + __ str(kSavedValueRegister, cgen_->ToMemOperand(destination)); + } + + } else if (source->IsDoubleRegister()) { + DoubleRegister source_register = cgen_->ToDoubleRegister(source); + if (destination->IsDoubleRegister()) { + __ vmov(cgen_->ToDoubleRegister(destination), source_register); + } else { + ASSERT(destination->IsDoubleStackSlot()); + MemOperand destination_operand = cgen_->ToMemOperand(destination); + __ vstr(source_register, destination_operand); + } + + } else if (source->IsDoubleStackSlot()) { + MemOperand source_operand = cgen_->ToMemOperand(source); + if (destination->IsDoubleRegister()) { + __ vldr(cgen_->ToDoubleRegister(destination), source_operand); + } else { + ASSERT(destination->IsDoubleStackSlot()); + MemOperand destination_operand = cgen_->ToMemOperand(destination); + if (in_cycle_) { + // kSavedDoubleValueRegister was used to break the cycle, + // but kSavedValueRegister is free. + MemOperand source_high_operand = + cgen_->ToHighMemOperand(source); + MemOperand destination_high_operand = + cgen_->ToHighMemOperand(destination); + __ ldr(kSavedValueRegister, source_operand); + __ str(kSavedValueRegister, destination_operand); + __ ldr(kSavedValueRegister, source_high_operand); + __ str(kSavedValueRegister, destination_high_operand); + } else { + __ vldr(kSavedDoubleValueRegister, source_operand); + __ vstr(kSavedDoubleValueRegister, destination_operand); + } + } + } else { + UNREACHABLE(); + } + + moves_[index].Eliminate(); +} + + +#undef __ + +} } // namespace v8::internal diff --git a/deps/v8/src/arm/lithium-gap-resolver-arm.h b/deps/v8/src/arm/lithium-gap-resolver-arm.h new file mode 100644 index 0000000000..334d2920b6 --- /dev/null +++ b/deps/v8/src/arm/lithium-gap-resolver-arm.h @@ -0,0 +1,84 @@ +// Copyright 2011 the V8 project authors. All rights reserved. 
+// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef V8_ARM_LITHIUM_GAP_RESOLVER_ARM_H_ +#define V8_ARM_LITHIUM_GAP_RESOLVER_ARM_H_ + +#include "v8.h" + +#include "lithium.h" + +namespace v8 { +namespace internal { + +class LCodeGen; +class LGapResolver; + +class LGapResolver BASE_EMBEDDED { + public: + + explicit LGapResolver(LCodeGen* owner); + + // Resolve a set of parallel moves, emitting assembler instructions. + void Resolve(LParallelMove* parallel_move); + + private: + // Build the initial list of moves. 
+ void BuildInitialMoveList(LParallelMove* parallel_move); + + // Perform the move at the moves_ index in question (possibly requiring + // other moves to satisfy dependencies). + void PerformMove(int index); + + // If a cycle is found in the series of moves, save the blocking value to + // a scratch register. The cycle must be found by hitting the root of the + // depth-first search. + void BreakCycle(int index); + + // After a cycle has been resolved, restore the value from the scratch + // register to its proper destination. + void RestoreValue(); + + // Emit a move and remove it from the move graph. + void EmitMove(int index); + + // Verify the move list before performing moves. + void Verify(); + + LCodeGen* cgen_; + + // List of moves not yet resolved. + ZoneList<LMoveOperands> moves_; + + int root_index_; + bool in_cycle_; + LOperand* saved_destination_; +}; + +} } // namespace v8::internal + +#endif // V8_ARM_LITHIUM_GAP_RESOLVER_ARM_H_ diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc index eb850cd948..65c92f9e13 100644 --- a/deps/v8/src/arm/macro-assembler-arm.cc +++ b/deps/v8/src/arm/macro-assembler-arm.cc @@ -485,18 +485,19 @@ void MacroAssembler::PopSafepointRegistersAndDoubles() { PopSafepointRegisters(); } -void MacroAssembler::StoreToSafepointRegistersAndDoublesSlot(Register reg) { - str(reg, SafepointRegistersAndDoublesSlot(reg)); +void MacroAssembler::StoreToSafepointRegistersAndDoublesSlot(Register src, + Register dst) { + str(src, SafepointRegistersAndDoublesSlot(dst)); } -void MacroAssembler::StoreToSafepointRegisterSlot(Register reg) { - str(reg, SafepointRegisterSlot(reg)); +void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) { + str(src, SafepointRegisterSlot(dst)); } -void MacroAssembler::LoadFromSafepointRegisterSlot(Register reg) { - ldr(reg, SafepointRegisterSlot(reg)); +void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) { + ldr(dst, 
SafepointRegisterSlot(src)); } @@ -745,6 +746,14 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, } } +void MacroAssembler::GetCFunctionDoubleResult(const DoubleRegister dst) { +#if !defined(USE_ARM_EABI) + UNREACHABLE(); +#else + vmov(dst, r0, r1); +#endif +} + void MacroAssembler::InvokePrologue(const ParameterCount& expected, const ParameterCount& actual, @@ -2154,11 +2163,22 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) { ldr(dst, MemOperand(dst, Context::SlotOffset(Context::CLOSURE_INDEX))); ldr(dst, FieldMemOperand(dst, JSFunction::kContextOffset)); } - // The context may be an intermediate context, not a function context. - ldr(dst, MemOperand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX))); - } else { // Slot is in the current function context. - // The context may be an intermediate context, not a function context. - ldr(dst, MemOperand(cp, Context::SlotOffset(Context::FCONTEXT_INDEX))); + } else { + // Slot is in the current function context. Move it into the + // destination register in case we store into it (the write barrier + // cannot be allowed to destroy the context in esi). + mov(dst, cp); + } + + // We should not have found a 'with' context by walking the context chain + // (i.e., the static scope chain and runtime context chain do not agree). + // A variable occurring in such a scope should have slot type LOOKUP and + // not CONTEXT. 
+ if (FLAG_debug_code) { + ldr(ip, MemOperand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX))); + cmp(dst, ip); + Check(eq, "Yo dawg, I heard you liked function contexts " + "so I put function contexts in all your contexts"); } } diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h index 354662da32..83c59a6f65 100644 --- a/deps/v8/src/arm/macro-assembler-arm.h +++ b/deps/v8/src/arm/macro-assembler-arm.h @@ -240,12 +240,13 @@ class MacroAssembler: public Assembler { void PopSafepointRegisters(); void PushSafepointRegistersAndDoubles(); void PopSafepointRegistersAndDoubles(); - void StoreToSafepointRegisterSlot(Register reg); - void StoreToSafepointRegistersAndDoublesSlot(Register reg); - void LoadFromSafepointRegisterSlot(Register reg); - static int SafepointRegisterStackIndex(int reg_code); - static MemOperand SafepointRegisterSlot(Register reg); - static MemOperand SafepointRegistersAndDoublesSlot(Register reg); + // Store value in register src in the safepoint stack slot for + // register dst. + void StoreToSafepointRegisterSlot(Register src, Register dst); + void StoreToSafepointRegistersAndDoublesSlot(Register src, Register dst); + // Load the value of the src register from its safepoint stack slot + // into register dst. + void LoadFromSafepointRegisterSlot(Register dst, Register src); // Load two consecutive registers with two consecutive memory locations. void Ldrd(Register dst1, @@ -683,6 +684,8 @@ class MacroAssembler: public Assembler { void CallCFunction(ExternalReference function, int num_arguments); void CallCFunction(Register function, int num_arguments); + void GetCFunctionDoubleResult(const DoubleRegister dst); + // Calls an API function. Allocates HandleScope, extracts returned value // from handle and propagates exceptions. Restores context. 
// stack_space - space to be unwound on exit (includes the call js @@ -883,10 +886,19 @@ class MacroAssembler: public Assembler { Register scratch1, Register scratch2); + // Compute memory operands for safepoint stack slots. + static int SafepointRegisterStackIndex(int reg_code); + MemOperand SafepointRegisterSlot(Register reg); + MemOperand SafepointRegistersAndDoublesSlot(Register reg); + bool generating_stub_; bool allow_stub_calls_; // This handle will be patched with the code object on installation. Handle<Object> code_object_; + + // Needs access to SafepointRegisterStackIndex for optimized frame + // traversal. + friend class OptimizedFrame; }; diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/arm/stub-cache-arm.cc index 675fdf49b2..e623ea1914 100644 --- a/deps/v8/src/arm/stub-cache-arm.cc +++ b/deps/v8/src/arm/stub-cache-arm.cc @@ -2332,8 +2332,9 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object, break; case STRING_CHECK: - if (!function->IsBuiltin()) { - // Calling non-builtins with a value as receiver requires boxing. + if (!function->IsBuiltin() && !function_info->strict_mode()) { + // Calling non-strict non-builtins with a value as the receiver + // requires boxing. __ jmp(&miss); } else { // Check that the object is a two-byte string or a symbol. @@ -2348,8 +2349,9 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object, break; case NUMBER_CHECK: { - if (!function->IsBuiltin()) { - // Calling non-builtins with a value as receiver requires boxing. + if (!function->IsBuiltin() && !function_info->strict_mode()) { + // Calling non-strict non-builtins with a value as the receiver + // requires boxing. __ jmp(&miss); } else { Label fast; @@ -2369,8 +2371,9 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object, } case BOOLEAN_CHECK: { - if (!function->IsBuiltin()) { - // Calling non-builtins with a value as receiver requires boxing. 
+ if (!function->IsBuiltin() && !function_info->strict_mode()) { + // Calling non-strict non-builtins with a value as the receiver + // requires boxing. __ jmp(&miss); } else { Label fast; diff --git a/deps/v8/src/assembler.cc b/deps/v8/src/assembler.cc index 42a61c2b8d..a323ecaa44 100644 --- a/deps/v8/src/assembler.cc +++ b/deps/v8/src/assembler.cc @@ -228,6 +228,7 @@ void RelocInfoWriter::Write(const RelocInfo* rinfo) { WriteTaggedPC(pc_delta, kEmbeddedObjectTag); } else if (rmode == RelocInfo::CODE_TARGET) { WriteTaggedPC(pc_delta, kCodeTargetTag); + ASSERT(begin_pos - pos_ <= RelocInfo::kMaxCallSize); } else if (RelocInfo::IsPosition(rmode)) { // Use signed delta-encoding for data. intptr_t data_delta = rinfo->data() - last_data_; @@ -251,6 +252,7 @@ void RelocInfoWriter::Write(const RelocInfo* rinfo) { WriteExtraTaggedPC(pc_delta, kPCJumpTag); WriteExtraTaggedData(rinfo->data() - last_data_, kCommentTag); last_data_ = rinfo->data(); + ASSERT(begin_pos - pos_ == RelocInfo::kRelocCommentSize); } else { // For all other modes we simply use the mode as the extra tag. // None of these modes need a data component. @@ -850,12 +852,14 @@ double power_double_double(double x, double y) { ExternalReference ExternalReference::power_double_double_function() { - return ExternalReference(Redirect(FUNCTION_ADDR(power_double_double))); + return ExternalReference(Redirect(FUNCTION_ADDR(power_double_double), + FP_RETURN_CALL)); } ExternalReference ExternalReference::power_double_int_function() { - return ExternalReference(Redirect(FUNCTION_ADDR(power_double_int))); + return ExternalReference(Redirect(FUNCTION_ADDR(power_double_int), + FP_RETURN_CALL)); } diff --git a/deps/v8/src/assembler.h b/deps/v8/src/assembler.h index 1b71dfc5a1..095859840e 100644 --- a/deps/v8/src/assembler.h +++ b/deps/v8/src/assembler.h @@ -184,6 +184,14 @@ class RelocInfo BASE_EMBEDDED { // we do not normally record relocation info. 
static const char* kFillerCommentString; + // The size of a comment is equal to tree bytes for the extra tagged pc + + // the tag for the data, and kPointerSize for the actual pointer to the + // comment. + static const int kRelocCommentSize = 3 + kPointerSize; + + // The maximum size for a call instruction including pc-jump. + static const int kMaxCallSize = 6; + enum Mode { // Please note the order is important (see IsCodeTarget, IsGCRelocMode). CONSTRUCT_CALL, // code target that is a call to a JavaScript constructor. diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc index ae7b2b9f98..f392cceb3c 100755 --- a/deps/v8/src/compiler.cc +++ b/deps/v8/src/compiler.cc @@ -261,10 +261,8 @@ static bool MakeCrankshaftCode(CompilationInfo* info) { Handle<SharedFunctionInfo> shared = info->shared_info(); shared->EnableDeoptimizationSupport(*unoptimized.code()); // The existing unoptimized code was replaced with the new one. - Compiler::RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG, - Handle<String>(shared->DebugName()), - shared->start_position(), - &unoptimized); + Compiler::RecordFunctionCompilation( + Logger::LAZY_COMPILE_TAG, &unoptimized, shared); } } @@ -273,7 +271,7 @@ static bool MakeCrankshaftCode(CompilationInfo* info) { // optimizable marker in the code object and optimize anyway. This // is safe as long as the unoptimized code has deoptimization // support. 
- ASSERT(FLAG_always_opt || info->shared_info()->code()->optimizable()); + ASSERT(FLAG_always_opt || code->optimizable()); ASSERT(info->shared_info()->has_deoptimization_support()); if (FLAG_trace_hydrogen) { @@ -283,8 +281,7 @@ static bool MakeCrankshaftCode(CompilationInfo* info) { } TypeFeedbackOracle oracle( - Handle<Code>(info->shared_info()->code()), - Handle<Context>(info->closure()->context()->global_context())); + code, Handle<Context>(info->closure()->context()->global_context())); HGraphBuilder builder(&oracle); HPhase phase(HPhase::kTotal); HGraph* graph = builder.CreateGraph(info); @@ -294,9 +291,9 @@ static bool MakeCrankshaftCode(CompilationInfo* info) { } if (graph != NULL && FLAG_build_lithium) { - Handle<Code> code = graph->Compile(); - if (!code.is_null()) { - info->SetCode(code); + Handle<Code> optimized_code = graph->Compile(); + if (!optimized_code.is_null()) { + info->SetCode(optimized_code); FinishOptimization(info->closure(), start); return true; } @@ -415,13 +412,25 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) { return Handle<SharedFunctionInfo>::null(); } + // Allocate function. ASSERT(!info->code().is_null()); + Handle<SharedFunctionInfo> result = + Factory::NewSharedFunctionInfo( + lit->name(), + lit->materialized_literal_count(), + info->code(), + SerializedScopeInfo::Create(info->scope())); + + ASSERT_EQ(RelocInfo::kNoPosition, lit->function_token_position()); + Compiler::SetFunctionInfo(result, lit, true, script); + if (script->name()->IsString()) { PROFILE(CodeCreateEvent( info->is_eval() ? Logger::EVAL_TAG : Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script), *info->code(), + *result, String::cast(script->name()))); GDBJIT(AddCode(Handle<String>(String::cast(script->name())), script, @@ -432,21 +441,11 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) { ? 
Logger::EVAL_TAG : Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script), *info->code(), - "")); + *result, + Heap::empty_string())); GDBJIT(AddCode(Handle<String>(), script, info->code())); } - // Allocate function. - Handle<SharedFunctionInfo> result = - Factory::NewSharedFunctionInfo( - lit->name(), - lit->materialized_literal_count(), - info->code(), - SerializedScopeInfo::Create(info->scope())); - - ASSERT_EQ(RelocInfo::kNoPosition, lit->function_token_position()); - Compiler::SetFunctionInfo(result, lit, true, script); - // Hint to the runtime system used when allocating space for initial // property space by setting the expected number of properties for // the instances of the function. @@ -613,10 +612,7 @@ bool Compiler::CompileLazy(CompilationInfo* info) { ASSERT(!info->code().is_null()); Handle<Code> code = info->code(); Handle<JSFunction> function = info->closure(); - RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG, - Handle<String>(shared->DebugName()), - shared->start_position(), - info); + RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG, info, shared); if (info->IsOptimizing()) { function->ReplaceCode(*code); @@ -724,10 +720,6 @@ Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal, ASSERT(!info.code().is_null()); // Function compilation complete. 
- RecordFunctionCompilation(Logger::FUNCTION_TAG, - literal->debug_name(), - literal->start_position(), - &info); scope_info = SerializedScopeInfo::Create(info.scope()); } @@ -738,6 +730,7 @@ Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal, info.code(), scope_info); SetFunctionInfo(result, literal, false, script); + RecordFunctionCompilation(Logger::FUNCTION_TAG, &info, result); result->set_allows_lazy_compilation(allow_lazy); // Set the expected number of properties for instances and return @@ -776,28 +769,31 @@ void Compiler::SetFunctionInfo(Handle<SharedFunctionInfo> function_info, void Compiler::RecordFunctionCompilation(Logger::LogEventsAndTags tag, - Handle<String> name, - int start_position, - CompilationInfo* info) { + CompilationInfo* info, + Handle<SharedFunctionInfo> shared) { + // SharedFunctionInfo is passed separately, because if CompilationInfo + // was created using Script object, it will not have it. + // Log the code generation. If source information is available include // script name and line number. Check explicitly whether logging is // enabled as finding the line number is not free. 
- if (Logger::is_logging() || - CpuProfiler::is_profiling()) { + if (Logger::is_logging() || CpuProfiler::is_profiling()) { Handle<Script> script = info->script(); Handle<Code> code = info->code(); + if (*code == Builtins::builtin(Builtins::LazyCompile)) return; if (script->name()->IsString()) { - int line_num = GetScriptLineNumber(script, start_position) + 1; + int line_num = GetScriptLineNumber(script, shared->start_position()) + 1; USE(line_num); PROFILE(CodeCreateEvent(Logger::ToNativeByScript(tag, *script), *code, - *name, + *shared, String::cast(script->name()), line_num)); } else { PROFILE(CodeCreateEvent(Logger::ToNativeByScript(tag, *script), *code, - *name)); + *shared, + shared->DebugName())); } } diff --git a/deps/v8/src/compiler.h b/deps/v8/src/compiler.h index 239bea35c2..e0a437ac6d 100644 --- a/deps/v8/src/compiler.h +++ b/deps/v8/src/compiler.h @@ -265,9 +265,8 @@ class Compiler : public AllStatic { #endif static void RecordFunctionCompilation(Logger::LogEventsAndTags tag, - Handle<String> name, - int start_position, - CompilationInfo* info); + CompilationInfo* info, + Handle<SharedFunctionInfo> shared); }; diff --git a/deps/v8/src/cpu-profiler-inl.h b/deps/v8/src/cpu-profiler-inl.h index 5df5893f8a..440dedca6e 100644 --- a/deps/v8/src/cpu-profiler-inl.h +++ b/deps/v8/src/cpu-profiler-inl.h @@ -41,6 +41,9 @@ namespace internal { void CodeCreateEventRecord::UpdateCodeMap(CodeMap* code_map) { code_map->AddCode(start, entry, size); + if (sfi_address != NULL) { + entry->set_shared_id(code_map->GetSFITag(sfi_address)); + } } @@ -54,8 +57,8 @@ void CodeDeleteEventRecord::UpdateCodeMap(CodeMap* code_map) { } -void CodeAliasEventRecord::UpdateCodeMap(CodeMap* code_map) { - code_map->AddAlias(start, entry, code_start); +void SFIMoveEventRecord::UpdateCodeMap(CodeMap* code_map) { + code_map->MoveCode(from, to); } diff --git a/deps/v8/src/cpu-profiler.cc b/deps/v8/src/cpu-profiler.cc index fcf539f3bd..ad04a003b2 100644 --- a/deps/v8/src/cpu-profiler.cc +++ 
b/deps/v8/src/cpu-profiler.cc @@ -53,13 +53,7 @@ ProfilerEventsProcessor::ProfilerEventsProcessor(ProfileGenerator* generator) ticks_buffer_(sizeof(TickSampleEventRecord), kTickSamplesBufferChunkSize, kTickSamplesBufferChunksCount), - enqueue_order_(0), - known_functions_(new HashMap(AddressesMatch)) { -} - - -ProfilerEventsProcessor::~ProfilerEventsProcessor() { - delete known_functions_; + enqueue_order_(0) { } @@ -75,6 +69,7 @@ void ProfilerEventsProcessor::CallbackCreateEvent(Logger::LogEventsAndTags tag, rec->start = start; rec->entry = generator_->NewCodeEntry(tag, prefix, name); rec->size = 1; + rec->sfi_address = NULL; events_buffer_.Enqueue(evt_rec); } @@ -84,7 +79,8 @@ void ProfilerEventsProcessor::CodeCreateEvent(Logger::LogEventsAndTags tag, String* resource_name, int line_number, Address start, - unsigned size) { + unsigned size, + Address sfi_address) { if (FilterOutCodeCreateEvent(tag)) return; CodeEventsContainer evt_rec; CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_; @@ -93,6 +89,7 @@ void ProfilerEventsProcessor::CodeCreateEvent(Logger::LogEventsAndTags tag, rec->start = start; rec->entry = generator_->NewCodeEntry(tag, name, resource_name, line_number); rec->size = size; + rec->sfi_address = sfi_address; events_buffer_.Enqueue(evt_rec); } @@ -109,6 +106,7 @@ void ProfilerEventsProcessor::CodeCreateEvent(Logger::LogEventsAndTags tag, rec->start = start; rec->entry = generator_->NewCodeEntry(tag, name); rec->size = size; + rec->sfi_address = NULL; events_buffer_.Enqueue(evt_rec); } @@ -125,6 +123,7 @@ void ProfilerEventsProcessor::CodeCreateEvent(Logger::LogEventsAndTags tag, rec->start = start; rec->entry = generator_->NewCodeEntry(tag, args_count); rec->size = size; + rec->sfi_address = NULL; events_buffer_.Enqueue(evt_rec); } @@ -150,57 +149,14 @@ void ProfilerEventsProcessor::CodeDeleteEvent(Address from) { } -void ProfilerEventsProcessor::FunctionCreateEvent(Address alias, - Address start, - int security_token_id) { +void 
ProfilerEventsProcessor::SFIMoveEvent(Address from, Address to) { CodeEventsContainer evt_rec; - CodeAliasEventRecord* rec = &evt_rec.CodeAliasEventRecord_; - rec->type = CodeEventRecord::CODE_ALIAS; + SFIMoveEventRecord* rec = &evt_rec.SFIMoveEventRecord_; + rec->type = CodeEventRecord::SFI_MOVE; rec->order = ++enqueue_order_; - rec->start = alias; - rec->entry = generator_->NewCodeEntry(security_token_id); - rec->code_start = start; + rec->from = from; + rec->to = to; events_buffer_.Enqueue(evt_rec); - - known_functions_->Lookup(alias, AddressHash(alias), true); -} - - -void ProfilerEventsProcessor::FunctionMoveEvent(Address from, Address to) { - CodeMoveEvent(from, to); - - if (IsKnownFunction(from)) { - known_functions_->Remove(from, AddressHash(from)); - known_functions_->Lookup(to, AddressHash(to), true); - } -} - - -void ProfilerEventsProcessor::FunctionDeleteEvent(Address from) { - CodeDeleteEvent(from); - - known_functions_->Remove(from, AddressHash(from)); -} - - -bool ProfilerEventsProcessor::IsKnownFunction(Address start) { - HashMap::Entry* entry = - known_functions_->Lookup(start, AddressHash(start), false); - return entry != NULL; -} - - -void ProfilerEventsProcessor::ProcessMovedFunctions() { - for (int i = 0; i < moved_functions_.length(); ++i) { - JSFunction* function = moved_functions_[i]; - CpuProfiler::FunctionCreateEvent(function); - } - moved_functions_.Clear(); -} - - -void ProfilerEventsProcessor::RememberMovedFunction(JSFunction* function) { - moved_functions_.Add(function); } @@ -227,13 +183,12 @@ void ProfilerEventsProcessor::AddCurrentStack() { TickSample* sample = &record.sample; sample->state = Top::current_vm_state(); sample->pc = reinterpret_cast<Address>(sample); // Not NULL. 
+ sample->tos = NULL; sample->frames_count = 0; for (StackTraceFrameIterator it; !it.done() && sample->frames_count < TickSample::kMaxFramesCount; it.Advance()) { - JavaScriptFrame* frame = it.frame(); - sample->stack[sample->frames_count++] = - reinterpret_cast<Address>(frame->function()); + sample->stack[sample->frames_count++] = it.frame()->pc(); } record.order = enqueue_order_; ticks_from_vm_buffer_.Enqueue(record); @@ -393,20 +348,38 @@ void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag, Heap::empty_string(), v8::CpuProfileNode::kNoLineNumberInfo, code->address(), - code->ExecutableSize()); + code->ExecutableSize(), + NULL); } void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag, - Code* code, String* name, - String* source, int line) { + Code* code, + SharedFunctionInfo* shared, + String* name) { singleton_->processor_->CodeCreateEvent( tag, name, + Heap::empty_string(), + v8::CpuProfileNode::kNoLineNumberInfo, + code->address(), + code->ExecutableSize(), + shared->address()); +} + + +void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag, + Code* code, + SharedFunctionInfo* shared, + String* source, int line) { + singleton_->processor_->CodeCreateEvent( + tag, + shared->DebugName(), source, line, code->address(), - code->ExecutableSize()); + code->ExecutableSize(), + shared->address()); } @@ -430,44 +403,8 @@ void CpuProfiler::CodeDeleteEvent(Address from) { } -void CpuProfiler::FunctionCreateEvent(JSFunction* function) { - int security_token_id = TokenEnumerator::kNoSecurityToken; - if (function->unchecked_context()->IsContext()) { - security_token_id = singleton_->token_enumerator_->GetTokenId( - function->context()->global_context()->security_token()); - } - singleton_->processor_->FunctionCreateEvent( - function->address(), - function->shared()->code()->address(), - security_token_id); -} - - -void CpuProfiler::ProcessMovedFunctions() { - singleton_->processor_->ProcessMovedFunctions(); -} - - -void 
CpuProfiler::FunctionCreateEventFromMove(JSFunction* function) { - // This function is called from GC iterators (during Scavenge, - // MC, and MS), so marking bits can be set on objects. That's - // why unchecked accessors are used here. - - // The same function can be reported several times. - if (function->unchecked_code() == Builtins::builtin(Builtins::LazyCompile) - || singleton_->processor_->IsKnownFunction(function->address())) return; - - singleton_->processor_->RememberMovedFunction(function); -} - - -void CpuProfiler::FunctionMoveEvent(Address from, Address to) { - singleton_->processor_->FunctionMoveEvent(from, to); -} - - -void CpuProfiler::FunctionDeleteEvent(Address from) { - singleton_->processor_->FunctionDeleteEvent(from); +void CpuProfiler::SFIMoveEvent(Address from, Address to) { + singleton_->processor_->SFIMoveEvent(from, to); } @@ -539,7 +476,6 @@ void CpuProfiler::StartProcessorIfNotStarted() { FLAG_log_code = saved_log_code_flag; } Logger::LogCompiledFunctions(); - Logger::LogFunctionObjects(); Logger::LogAccessorCallbacks(); } // Enable stack sampling. 
diff --git a/deps/v8/src/cpu-profiler.h b/deps/v8/src/cpu-profiler.h index 10165f67c8..1ebbfebf74 100644 --- a/deps/v8/src/cpu-profiler.h +++ b/deps/v8/src/cpu-profiler.h @@ -50,7 +50,7 @@ class TokenEnumerator; V(CODE_CREATION, CodeCreateEventRecord) \ V(CODE_MOVE, CodeMoveEventRecord) \ V(CODE_DELETE, CodeDeleteEventRecord) \ - V(CODE_ALIAS, CodeAliasEventRecord) + V(SFI_MOVE, SFIMoveEventRecord) class CodeEventRecord { @@ -73,6 +73,7 @@ class CodeCreateEventRecord : public CodeEventRecord { Address start; CodeEntry* entry; unsigned size; + Address sfi_address; INLINE(void UpdateCodeMap(CodeMap* code_map)); }; @@ -95,11 +96,10 @@ class CodeDeleteEventRecord : public CodeEventRecord { }; -class CodeAliasEventRecord : public CodeEventRecord { +class SFIMoveEventRecord : public CodeEventRecord { public: - Address start; - CodeEntry* entry; - Address code_start; + Address from; + Address to; INLINE(void UpdateCodeMap(CodeMap* code_map)); }; @@ -134,7 +134,7 @@ class TickSampleEventRecord BASE_EMBEDDED { class ProfilerEventsProcessor : public Thread { public: explicit ProfilerEventsProcessor(ProfileGenerator* generator); - virtual ~ProfilerEventsProcessor(); + virtual ~ProfilerEventsProcessor() {} // Thread control. 
virtual void Run(); @@ -148,7 +148,8 @@ class ProfilerEventsProcessor : public Thread { void CodeCreateEvent(Logger::LogEventsAndTags tag, String* name, String* resource_name, int line_number, - Address start, unsigned size); + Address start, unsigned size, + Address sfi_address); void CodeCreateEvent(Logger::LogEventsAndTags tag, const char* name, Address start, unsigned size); @@ -157,17 +158,12 @@ class ProfilerEventsProcessor : public Thread { Address start, unsigned size); void CodeMoveEvent(Address from, Address to); void CodeDeleteEvent(Address from); - void FunctionCreateEvent(Address alias, Address start, int security_token_id); - void FunctionMoveEvent(Address from, Address to); - void FunctionDeleteEvent(Address from); + void SFIMoveEvent(Address from, Address to); void RegExpCodeCreateEvent(Logger::LogEventsAndTags tag, const char* prefix, String* name, Address start, unsigned size); // Puts current stack into tick sample events buffer. void AddCurrentStack(); - bool IsKnownFunction(Address start); - void ProcessMovedFunctions(); - void RememberMovedFunction(JSFunction* function); // Tick sample events are filled directly in the buffer of the circular // queue (because the structure is of fixed width, but usually not all @@ -188,13 +184,6 @@ class ProfilerEventsProcessor : public Thread { bool ProcessTicks(unsigned dequeue_order); INLINE(static bool FilterOutCodeCreateEvent(Logger::LogEventsAndTags tag)); - INLINE(static bool AddressesMatch(void* key1, void* key2)) { - return key1 == key2; - } - INLINE(static uint32_t AddressHash(Address addr)) { - return ComputeIntegerHash( - static_cast<uint32_t>(reinterpret_cast<uintptr_t>(addr))); - } ProfileGenerator* generator_; bool running_; @@ -202,10 +191,6 @@ class ProfilerEventsProcessor : public Thread { SamplingCircularQueue ticks_buffer_; UnboundQueue<TickSampleEventRecord> ticks_from_vm_buffer_; unsigned enqueue_order_; - - // Used from the VM thread. 
- HashMap* known_functions_; - List<JSFunction*> moved_functions_; }; } } // namespace v8::internal @@ -251,23 +236,22 @@ class CpuProfiler { static void CodeCreateEvent(Logger::LogEventsAndTags tag, Code* code, String* name); static void CodeCreateEvent(Logger::LogEventsAndTags tag, - Code* code, String* name, + Code* code, + SharedFunctionInfo *shared, + String* name); + static void CodeCreateEvent(Logger::LogEventsAndTags tag, + Code* code, + SharedFunctionInfo *shared, String* source, int line); static void CodeCreateEvent(Logger::LogEventsAndTags tag, Code* code, int args_count); static void CodeMovingGCEvent() {} static void CodeMoveEvent(Address from, Address to); static void CodeDeleteEvent(Address from); - static void FunctionCreateEvent(JSFunction* function); - // Reports function creation in case we had missed it (e.g. - // if it was created from compiled code). - static void FunctionCreateEventFromMove(JSFunction* function); - static void FunctionMoveEvent(Address from, Address to); - static void FunctionDeleteEvent(Address from); static void GetterCallbackEvent(String* name, Address entry_point); static void RegExpCodeCreateEvent(Code* code, String* source); - static void ProcessMovedFunctions(); static void SetterCallbackEvent(String* name, Address entry_point); + static void SFIMoveEvent(Address from, Address to); static INLINE(bool is_profiling()) { return NoBarrier_Load(&is_profiling_); diff --git a/deps/v8/src/execution.cc b/deps/v8/src/execution.cc index f484d8d9b8..de8f0a4661 100644 --- a/deps/v8/src/execution.cc +++ b/deps/v8/src/execution.cc @@ -106,6 +106,11 @@ static Handle<Object> Invoke(bool construct, ASSERT(*has_pending_exception == Top::has_pending_exception()); if (*has_pending_exception) { Top::ReportPendingMessages(); + if (Top::pending_exception() == Failure::OutOfMemoryException()) { + if (!HandleScopeImplementer::instance()->ignore_out_of_memory()) { + V8::FatalProcessOutOfMemory("JS", true); + } + } return Handle<Object>(); } 
else { Top::clear_pending_message(); diff --git a/deps/v8/src/flag-definitions.h b/deps/v8/src/flag-definitions.h index 6a5e2a5756..96f63c530d 100644 --- a/deps/v8/src/flag-definitions.h +++ b/deps/v8/src/flag-definitions.h @@ -120,6 +120,7 @@ DEFINE_bool(time_hydrogen, false, "timing for hydrogen") DEFINE_bool(trace_hydrogen, false, "trace generated hydrogen to file") DEFINE_bool(trace_inlining, false, "trace inlining decisions") DEFINE_bool(trace_alloc, false, "trace register allocator") +DEFINE_bool(trace_all_uses, false, "trace all use positions") DEFINE_bool(trace_range, false, "trace range analysis") DEFINE_bool(trace_gvn, false, "trace global value numbering") DEFINE_bool(trace_representation, false, "trace representation types") @@ -134,7 +135,11 @@ DEFINE_bool(deoptimize_uncommon_cases, true, "deoptimize uncommon cases") DEFINE_bool(polymorphic_inlining, true, "polymorphic inlining") DEFINE_bool(aggressive_loop_invariant_motion, true, "aggressive motion of instructions out of loops") +#ifdef V8_TARGET_ARCH_X64 +DEFINE_bool(use_osr, false, "use on-stack replacement") +#else DEFINE_bool(use_osr, true, "use on-stack replacement") +#endif DEFINE_bool(trace_osr, false, "trace on-stack replacement") DEFINE_int(stress_runs, 0, "number of stress runs") DEFINE_bool(optimize_closures, true, "optimize closures") diff --git a/deps/v8/src/gdb-jit.cc b/deps/v8/src/gdb-jit.cc index 88a9939722..5136deddbf 100644 --- a/deps/v8/src/gdb-jit.cc +++ b/deps/v8/src/gdb-jit.cc @@ -1411,9 +1411,8 @@ static void AddUnwindInfo(CodeDescription *desc) { #ifdef V8_TARGET_ARCH_X64 if (desc->tag() == GDBJITInterface::FUNCTION) { // To avoid propagating unwinding information through - // compilation pipeline we rely on function prologue - // and epilogue being the same for all code objects generated - // by the full code generator. + // compilation pipeline we use an approximation. + // For most use cases this should not affect usability. 
static const int kFramePointerPushOffset = 1; static const int kFramePointerSetOffset = 4; static const int kFramePointerPopOffset = -3; @@ -1427,19 +1426,6 @@ static void AddUnwindInfo(CodeDescription *desc) { uintptr_t frame_pointer_pop_address = desc->CodeEnd() + kFramePointerPopOffset; -#ifdef DEBUG - static const uint8_t kFramePointerPushInstruction = 0x48; // push ebp - static const uint16_t kFramePointerSetInstruction = 0x5756; // mov ebp, esp - static const uint8_t kFramePointerPopInstruction = 0xBE; // pop ebp - - ASSERT(*reinterpret_cast<uint8_t*>(frame_pointer_push_address) == - kFramePointerPushInstruction); - ASSERT(*reinterpret_cast<uint16_t*>(frame_pointer_set_address) == - kFramePointerSetInstruction); - ASSERT(*reinterpret_cast<uint8_t*>(frame_pointer_pop_address) == - kFramePointerPopInstruction); -#endif - desc->SetStackStateStartAddress(CodeDescription::POST_RBP_PUSH, frame_pointer_push_address); desc->SetStackStateStartAddress(CodeDescription::POST_RBP_SET, diff --git a/deps/v8/src/handles.cc b/deps/v8/src/handles.cc index d625d644c7..b48aa507e2 100644 --- a/deps/v8/src/handles.cc +++ b/deps/v8/src/handles.cc @@ -834,49 +834,39 @@ bool CompileLazyShared(Handle<SharedFunctionInfo> shared, } -bool CompileLazy(Handle<JSFunction> function, - ClearExceptionFlag flag) { +static bool CompileLazyFunction(Handle<JSFunction> function, + ClearExceptionFlag flag, + InLoopFlag in_loop_flag) { bool result = true; if (function->shared()->is_compiled()) { function->ReplaceCode(function->shared()->code()); function->shared()->set_code_age(0); } else { CompilationInfo info(function); + if (in_loop_flag == IN_LOOP) info.MarkAsInLoop(); result = CompileLazyHelper(&info, flag); ASSERT(!result || function->is_compiled()); } - if (result && function->is_compiled()) { - PROFILE(FunctionCreateEvent(*function)); - } return result; } +bool CompileLazy(Handle<JSFunction> function, + ClearExceptionFlag flag) { + return CompileLazyFunction(function, flag, NOT_IN_LOOP); +} + 
+ bool CompileLazyInLoop(Handle<JSFunction> function, ClearExceptionFlag flag) { - bool result = true; - if (function->shared()->is_compiled()) { - function->ReplaceCode(function->shared()->code()); - function->shared()->set_code_age(0); - } else { - CompilationInfo info(function); - info.MarkAsInLoop(); - result = CompileLazyHelper(&info, flag); - ASSERT(!result || function->is_compiled()); - } - if (result && function->is_compiled()) { - PROFILE(FunctionCreateEvent(*function)); - } - return result; + return CompileLazyFunction(function, flag, IN_LOOP); } bool CompileOptimized(Handle<JSFunction> function, int osr_ast_id) { CompilationInfo info(function); info.SetOptimizing(osr_ast_id); - bool result = CompileLazyHelper(&info, KEEP_EXCEPTION); - if (result) PROFILE(FunctionCreateEvent(*function)); - return result; + return CompileLazyHelper(&info, KEEP_EXCEPTION); } diff --git a/deps/v8/src/heap.cc b/deps/v8/src/heap.cc index f88ebda53d..1fadec3831 100644 --- a/deps/v8/src/heap.cc +++ b/deps/v8/src/heap.cc @@ -134,7 +134,7 @@ Heap::HeapState Heap::gc_state_ = NOT_IN_GC; int Heap::mc_count_ = 0; int Heap::ms_count_ = 0; -int Heap::gc_count_ = 0; +unsigned int Heap::gc_count_ = 0; GCTracer* Heap::tracer_ = NULL; @@ -515,7 +515,6 @@ bool Heap::CollectGarbage(AllocationSpace space, GarbageCollector collector) { #ifdef ENABLE_LOGGING_AND_PROFILING if (FLAG_log_gc) HeapProfiler::WriteSample(); - if (CpuProfiler::is_profiling()) CpuProfiler::ProcessMovedFunctions(); #endif return next_gc_likely_to_collect_more; @@ -1350,9 +1349,8 @@ class ScavengingVisitor : public StaticVisitorBase { HEAP_PROFILE(ObjectMoveEvent(source->address(), target->address())); #if defined(ENABLE_LOGGING_AND_PROFILING) if (Logger::is_logging() || CpuProfiler::is_profiling()) { - if (target->IsJSFunction()) { - PROFILE(FunctionMoveEvent(source->address(), target->address())); - PROFILE(FunctionCreateEventFromMove(JSFunction::cast(target))); + if (target->IsSharedFunctionInfo()) { + 
PROFILE(SFIMoveEvent(source->address(), target->address())); } } #endif @@ -2924,9 +2922,8 @@ MaybeObject* Heap::AllocateFunctionPrototype(JSFunction* function) { // constructor to the function. Object* result; { MaybeObject* maybe_result = - JSObject::cast(prototype)->SetProperty(constructor_symbol(), - function, - DONT_ENUM); + JSObject::cast(prototype)->SetLocalPropertyIgnoreAttributes( + constructor_symbol(), function, DONT_ENUM); if (!maybe_result->ToObject(&result)) return maybe_result; } return prototype; @@ -3797,9 +3794,9 @@ bool Heap::IdleNotification() { static const int kIdlesBeforeMarkSweep = 7; static const int kIdlesBeforeMarkCompact = 8; static const int kMaxIdleCount = kIdlesBeforeMarkCompact + 1; - static const int kGCsBetweenCleanup = 4; + static const unsigned int kGCsBetweenCleanup = 4; static int number_idle_notifications = 0; - static int last_gc_count = gc_count_; + static unsigned int last_gc_count = gc_count_; bool uncommit = true; bool finished = false; @@ -3808,7 +3805,7 @@ bool Heap::IdleNotification() { // GCs have taken place. This allows another round of cleanup based // on idle notifications if enough work has been carried out to // provoke a number of garbage collections. 
- if (gc_count_ < last_gc_count + kGCsBetweenCleanup) { + if (gc_count_ - last_gc_count < kGCsBetweenCleanup) { number_idle_notifications = Min(number_idle_notifications + 1, kMaxIdleCount); } else { @@ -5182,32 +5179,77 @@ void HeapIterator::reset() { } -#ifdef DEBUG +#if defined(DEBUG) || defined(LIVE_OBJECT_LIST) + +Object* const PathTracer::kAnyGlobalObject = reinterpret_cast<Object*>(NULL); -static bool search_for_any_global; -static Object* search_target; -static bool found_target; -static List<Object*> object_stack(20); +class PathTracer::MarkVisitor: public ObjectVisitor { + public: + explicit MarkVisitor(PathTracer* tracer) : tracer_(tracer) {} + void VisitPointers(Object** start, Object** end) { + // Scan all HeapObject pointers in [start, end) + for (Object** p = start; !tracer_->found() && (p < end); p++) { + if ((*p)->IsHeapObject()) + tracer_->MarkRecursively(p, this); + } + } + private: + PathTracer* tracer_; +}; -// Tags 0, 1, and 3 are used. Use 2 for marking visited HeapObject. 
-static const int kMarkTag = 2; -static void MarkObjectRecursively(Object** p); -class MarkObjectVisitor : public ObjectVisitor { +class PathTracer::UnmarkVisitor: public ObjectVisitor { public: + explicit UnmarkVisitor(PathTracer* tracer) : tracer_(tracer) {} void VisitPointers(Object** start, Object** end) { - // Copy all HeapObject pointers in [start, end) + // Scan all HeapObject pointers in [start, end) for (Object** p = start; p < end; p++) { if ((*p)->IsHeapObject()) - MarkObjectRecursively(p); + tracer_->UnmarkRecursively(p, this); } } + + private: + PathTracer* tracer_; }; -static MarkObjectVisitor mark_visitor; -static void MarkObjectRecursively(Object** p) { +void PathTracer::VisitPointers(Object** start, Object** end) { + bool done = ((what_to_find_ == FIND_FIRST) && found_target_); + // Visit all HeapObject pointers in [start, end) + for (Object** p = start; !done && (p < end); p++) { + if ((*p)->IsHeapObject()) { + TracePathFrom(p); + done = ((what_to_find_ == FIND_FIRST) && found_target_); + } + } +} + + +void PathTracer::Reset() { + found_target_ = false; + object_stack_.Clear(); +} + + +void PathTracer::TracePathFrom(Object** root) { + ASSERT((search_target_ == kAnyGlobalObject) || + search_target_->IsHeapObject()); + found_target_in_trace_ = false; + object_stack_.Clear(); + + MarkVisitor mark_visitor(this); + MarkRecursively(root, &mark_visitor); + + UnmarkVisitor unmark_visitor(this); + UnmarkRecursively(root, &unmark_visitor); + + ProcessResults(); +} + + +void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) { if (!(*p)->IsHeapObject()) return; HeapObject* obj = HeapObject::cast(*p); @@ -5216,14 +5258,17 @@ static void MarkObjectRecursively(Object** p) { if (!map->IsHeapObject()) return; // visited before - if (found_target) return; // stop if target found - object_stack.Add(obj); - if ((search_for_any_global && obj->IsJSGlobalObject()) || - (!search_for_any_global && (obj == search_target))) { - found_target = true; + if 
(found_target_in_trace_) return; // stop if target found + object_stack_.Add(obj); + if (((search_target_ == kAnyGlobalObject) && obj->IsJSGlobalObject()) || + (obj == search_target_)) { + found_target_in_trace_ = true; + found_target_ = true; return; } + bool is_global_context = obj->IsGlobalContext(); + // not visited yet Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map)); @@ -5231,31 +5276,30 @@ static void MarkObjectRecursively(Object** p) { obj->set_map(reinterpret_cast<Map*>(map_addr + kMarkTag)); - MarkObjectRecursively(&map); + // Scan the object body. + if (is_global_context && (visit_mode_ == VISIT_ONLY_STRONG)) { + // This is specialized to scan Context's properly. + Object** start = reinterpret_cast<Object**>(obj->address() + + Context::kHeaderSize); + Object** end = reinterpret_cast<Object**>(obj->address() + + Context::kHeaderSize + Context::FIRST_WEAK_SLOT * kPointerSize); + mark_visitor->VisitPointers(start, end); + } else { + obj->IterateBody(map_p->instance_type(), + obj->SizeFromMap(map_p), + mark_visitor); + } - obj->IterateBody(map_p->instance_type(), obj->SizeFromMap(map_p), - &mark_visitor); + // Scan the map after the body because the body is a lot more interesting + // when doing leak detection. 
+ MarkRecursively(&map, mark_visitor); - if (!found_target) // don't pop if found the target - object_stack.RemoveLast(); + if (!found_target_in_trace_) // don't pop if found the target + object_stack_.RemoveLast(); } -static void UnmarkObjectRecursively(Object** p); -class UnmarkObjectVisitor : public ObjectVisitor { - public: - void VisitPointers(Object** start, Object** end) { - // Copy all HeapObject pointers in [start, end) - for (Object** p = start; p < end; p++) { - if ((*p)->IsHeapObject()) - UnmarkObjectRecursively(p); - } - } -}; - -static UnmarkObjectVisitor unmark_visitor; - -static void UnmarkObjectRecursively(Object** p) { +void PathTracer::UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor) { if (!(*p)->IsHeapObject()) return; HeapObject* obj = HeapObject::cast(*p); @@ -5274,63 +5318,38 @@ static void UnmarkObjectRecursively(Object** p) { obj->set_map(reinterpret_cast<Map*>(map_p)); - UnmarkObjectRecursively(reinterpret_cast<Object**>(&map_p)); + UnmarkRecursively(reinterpret_cast<Object**>(&map_p), unmark_visitor); obj->IterateBody(Map::cast(map_p)->instance_type(), obj->SizeFromMap(Map::cast(map_p)), - &unmark_visitor); + unmark_visitor); } -static void MarkRootObjectRecursively(Object** root) { - if (search_for_any_global) { - ASSERT(search_target == NULL); - } else { - ASSERT(search_target->IsHeapObject()); - } - found_target = false; - object_stack.Clear(); - - MarkObjectRecursively(root); - UnmarkObjectRecursively(root); - - if (found_target) { +void PathTracer::ProcessResults() { + if (found_target_) { PrintF("=====================================\n"); PrintF("==== Path to object ====\n"); PrintF("=====================================\n\n"); - ASSERT(!object_stack.is_empty()); - for (int i = 0; i < object_stack.length(); i++) { + ASSERT(!object_stack_.is_empty()); + for (int i = 0; i < object_stack_.length(); i++) { if (i > 0) PrintF("\n |\n |\n V\n\n"); - Object* obj = object_stack[i]; + Object* obj = object_stack_[i]; obj->Print(); 
} PrintF("=====================================\n"); } } +#endif // DEBUG || LIVE_OBJECT_LIST -// Helper class for visiting HeapObjects recursively. -class MarkRootVisitor: public ObjectVisitor { - public: - void VisitPointers(Object** start, Object** end) { - // Visit all HeapObject pointers in [start, end) - for (Object** p = start; p < end; p++) { - if ((*p)->IsHeapObject()) - MarkRootObjectRecursively(p); - } - } -}; - - +#ifdef DEBUG // Triggers a depth-first traversal of reachable objects from roots // and finds a path to a specific heap object and prints it. void Heap::TracePathToObject(Object* target) { - search_target = target; - search_for_any_global = false; - - MarkRootVisitor root_visitor; - IterateRoots(&root_visitor, VISIT_ONLY_STRONG); + PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL); + IterateRoots(&tracer, VISIT_ONLY_STRONG); } @@ -5338,11 +5357,10 @@ void Heap::TracePathToObject(Object* target) { // and finds a path to any global object and prints it. Useful for // determining the source for leaks of global objects. 
void Heap::TracePathToGlobal() { - search_target = NULL; - search_for_any_global = true; - - MarkRootVisitor root_visitor; - IterateRoots(&root_visitor, VISIT_ONLY_STRONG); + PathTracer tracer(PathTracer::kAnyGlobalObject, + PathTracer::FIND_ALL, + VISIT_ALL); + IterateRoots(&tracer, VISIT_ONLY_STRONG); } #endif diff --git a/deps/v8/src/heap.h b/deps/v8/src/heap.h index f50c3f9ac8..163eb04448 100644 --- a/deps/v8/src/heap.h +++ b/deps/v8/src/heap.h @@ -30,6 +30,8 @@ #include <math.h> +#include "globals.h" +#include "list.h" #include "spaces.h" #include "splay-tree-inl.h" #include "v8-counters.h" @@ -1180,7 +1182,7 @@ class Heap : public AllStatic { static int mc_count_; // how many mark-compact collections happened static int ms_count_; // how many mark-sweep collections happened - static int gc_count_; // how many gc happened + static unsigned int gc_count_; // how many gc happened // Total length of the strings we failed to flatten since the last GC. static int unflattened_strings_length_; @@ -1907,7 +1909,7 @@ class GCTracer BASE_EMBEDDED { void set_collector(GarbageCollector collector) { collector_ = collector; } // Sets the GC count. - void set_gc_count(int count) { gc_count_ = count; } + void set_gc_count(unsigned int count) { gc_count_ = count; } // Sets the full GC count. void set_full_gc_count(int count) { full_gc_count_ = count; } @@ -1950,7 +1952,7 @@ class GCTracer BASE_EMBEDDED { // A count (including this one, eg, the first collection is 1) of the // number of garbage collections. - int gc_count_; + unsigned int gc_count_; // A count (including this one) of the number of full garbage collections. int full_gc_count_; @@ -2152,6 +2154,65 @@ class WeakObjectRetainer { }; +#if defined(DEBUG) || defined(LIVE_OBJECT_LIST) +// Helper class for tracing paths to a search target Object from all roots. +// The TracePathFrom() method can be used to trace paths from a specific +// object to the search target object. 
+class PathTracer : public ObjectVisitor { + public: + enum WhatToFind { + FIND_ALL, // Will find all matches. + FIND_FIRST // Will stop the search after first match. + }; + + // For the WhatToFind arg, if FIND_FIRST is specified, tracing will stop + // after the first match. If FIND_ALL is specified, then tracing will be + // done for all matches. + PathTracer(Object* search_target, + WhatToFind what_to_find, + VisitMode visit_mode) + : search_target_(search_target), + found_target_(false), + found_target_in_trace_(false), + what_to_find_(what_to_find), + visit_mode_(visit_mode), + object_stack_(20), + no_alloc() {} + + virtual void VisitPointers(Object** start, Object** end); + + void Reset(); + void TracePathFrom(Object** root); + + bool found() const { return found_target_; } + + static Object* const kAnyGlobalObject; + + protected: + class MarkVisitor; + class UnmarkVisitor; + + void MarkRecursively(Object** p, MarkVisitor* mark_visitor); + void UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor); + virtual void ProcessResults(); + + // Tags 0, 1, and 3 are used. Use 2 for marking visited HeapObject. + static const int kMarkTag = 2; + + Object* search_target_; + bool found_target_; + bool found_target_in_trace_; + WhatToFind what_to_find_; + VisitMode visit_mode_; + List<Object*> object_stack_; + + AssertNoAllocation no_alloc; // i.e. no gc allowed. 
+ + DISALLOW_IMPLICIT_CONSTRUCTORS(PathTracer); +}; +#endif // DEBUG || LIVE_OBJECT_LIST + + } } // namespace v8::internal #endif // V8_HEAP_H_ diff --git a/deps/v8/src/hydrogen-instructions.cc b/deps/v8/src/hydrogen-instructions.cc index 5accc77f0b..c5a7146eb4 100644 --- a/deps/v8/src/hydrogen-instructions.cc +++ b/deps/v8/src/hydrogen-instructions.cc @@ -57,10 +57,13 @@ const char* Representation::Mnemonic() const { case kTagged: return "t"; case kDouble: return "d"; case kInteger32: return "i"; - default: + case kExternal: return "x"; + case kNumRepresentations: UNREACHABLE(); return NULL; } + UNREACHABLE(); + return NULL; } @@ -221,7 +224,7 @@ HType HType::TypeFromValue(Handle<Object> value) { } -int HValue::LookupOperandIndex(int occurrence_index, HValue* op) const { +int HValue::LookupOperandIndex(int occurrence_index, HValue* op) { for (int i = 0; i < OperandCount(); ++i) { if (OperandAt(i) == op) { if (occurrence_index == 0) return i; @@ -237,7 +240,7 @@ bool HValue::IsDefinedAfter(HBasicBlock* other) const { } -bool HValue::UsesMultipleTimes(HValue* op) const { +bool HValue::UsesMultipleTimes(HValue* op) { bool seen = false; for (int i = 0; i < OperandCount(); ++i) { if (OperandAt(i) == op) { @@ -249,7 +252,7 @@ bool HValue::UsesMultipleTimes(HValue* op) const { } -bool HValue::Equals(HValue* other) const { +bool HValue::Equals(HValue* other) { if (other->opcode() != opcode()) return false; if (!other->representation().Equals(representation())) return false; if (!other->type_.Equals(type_)) return false; @@ -264,7 +267,7 @@ bool HValue::Equals(HValue* other) const { } -intptr_t HValue::Hashcode() const { +intptr_t HValue::Hashcode() { intptr_t result = opcode(); int count = OperandCount(); for (int i = 0; i < count; ++i) { @@ -281,33 +284,6 @@ void HValue::SetOperandAt(int index, HValue* value) { } -void HLoadKeyedGeneric::InternalSetOperandAt(int index, HValue* value) { - if (index < 2) { - operands_[index] = value; - } else { - context_ = value; - } -} - 
- -void HStoreKeyedGeneric::InternalSetOperandAt(int index, HValue* value) { - if (index < 3) { - operands_[index] = value; - } else { - context_ = value; - } -} - - -void HStoreNamedGeneric::InternalSetOperandAt(int index, HValue* value) { - if (index < 2) { - operands_[index] = value; - } else { - context_ = value; - } -} - - void HValue::ReplaceAndDelete(HValue* other) { ReplaceValue(other); Delete(); @@ -438,7 +414,7 @@ void HValue::ComputeInitialRange() { } -void HInstruction::PrintTo(StringStream* stream) const { +void HInstruction::PrintTo(StringStream* stream) { stream->Add("%s", Mnemonic()); if (HasSideEffects()) stream->Add("*"); stream->Add(" "); @@ -561,69 +537,64 @@ void HInstruction::Verify() { #endif -void HCall::PrintDataTo(StringStream* stream) const { - stream->Add("#%d", argument_count()); -} - - -void HUnaryCall::PrintDataTo(StringStream* stream) const { +void HUnaryCall::PrintDataTo(StringStream* stream) { value()->PrintNameTo(stream); stream->Add(" "); - HCall::PrintDataTo(stream); + stream->Add("#%d", argument_count()); } -void HBinaryCall::PrintDataTo(StringStream* stream) const { +void HBinaryCall::PrintDataTo(StringStream* stream) { first()->PrintNameTo(stream); stream->Add(" "); second()->PrintNameTo(stream); stream->Add(" "); - HCall::PrintDataTo(stream); + stream->Add("#%d", argument_count()); } -void HCallConstantFunction::PrintDataTo(StringStream* stream) const { +void HCallConstantFunction::PrintDataTo(StringStream* stream) { if (IsApplyFunction()) { stream->Add("optimized apply "); } else { stream->Add("%o ", function()->shared()->DebugName()); } - HCall::PrintDataTo(stream); + stream->Add("#%d", argument_count()); } -void HCallNamed::PrintDataTo(StringStream* stream) const { +void HCallNamed::PrintDataTo(StringStream* stream) { stream->Add("%o ", *name()); HUnaryCall::PrintDataTo(stream); } -void HCallGlobal::PrintDataTo(StringStream* stream) const { +void HCallGlobal::PrintDataTo(StringStream* stream) { stream->Add("%o ", 
*name()); HUnaryCall::PrintDataTo(stream); } -void HCallKnownGlobal::PrintDataTo(StringStream* stream) const { +void HCallKnownGlobal::PrintDataTo(StringStream* stream) { stream->Add("o ", target()->shared()->DebugName()); - HCall::PrintDataTo(stream); + stream->Add("#%d", argument_count()); } -void HCallRuntime::PrintDataTo(StringStream* stream) const { +void HCallRuntime::PrintDataTo(StringStream* stream) { stream->Add("%o ", *name()); - HCall::PrintDataTo(stream); + stream->Add("#%d", argument_count()); } -void HClassOfTest::PrintDataTo(StringStream* stream) const { +void HClassOfTest::PrintDataTo(StringStream* stream) { stream->Add("class_of_test("); value()->PrintNameTo(stream); stream->Add(", \"%o\")", *class_name()); } -void HAccessArgumentsAt::PrintDataTo(StringStream* stream) const { +void HAccessArgumentsAt::PrintDataTo(StringStream* stream) { arguments()->PrintNameTo(stream); stream->Add("["); index()->PrintNameTo(stream); @@ -632,7 +603,7 @@ void HAccessArgumentsAt::PrintDataTo(StringStream* stream) const { } -void HControlInstruction::PrintDataTo(StringStream* stream) const { +void HControlInstruction::PrintDataTo(StringStream* stream) { if (FirstSuccessor() != NULL) { int first_id = FirstSuccessor()->block_id(); if (SecondSuccessor() == NULL) { @@ -645,13 +616,13 @@ void HControlInstruction::PrintDataTo(StringStream* stream) const { } -void HUnaryControlInstruction::PrintDataTo(StringStream* stream) const { +void HUnaryControlInstruction::PrintDataTo(StringStream* stream) { value()->PrintNameTo(stream); HControlInstruction::PrintDataTo(stream); } -void HCompareMap::PrintDataTo(StringStream* stream) const { +void HCompareMap::PrintDataTo(StringStream* stream) { value()->PrintNameTo(stream); stream->Add(" (%p)", *map()); HControlInstruction::PrintDataTo(stream); @@ -679,19 +650,19 @@ const char* HUnaryMathOperation::OpName() const { } -void HUnaryMathOperation::PrintDataTo(StringStream* stream) const { +void 
HUnaryMathOperation::PrintDataTo(StringStream* stream) { const char* name = OpName(); stream->Add("%s ", name); value()->PrintNameTo(stream); } -void HUnaryOperation::PrintDataTo(StringStream* stream) const { +void HUnaryOperation::PrintDataTo(StringStream* stream) { value()->PrintNameTo(stream); } -void HHasInstanceType::PrintDataTo(StringStream* stream) const { +void HHasInstanceType::PrintDataTo(StringStream* stream) { value()->PrintNameTo(stream); switch (from_) { case FIRST_JS_OBJECT_TYPE: @@ -712,14 +683,14 @@ void HHasInstanceType::PrintDataTo(StringStream* stream) const { } -void HTypeofIs::PrintDataTo(StringStream* stream) const { +void HTypeofIs::PrintDataTo(StringStream* stream) { value()->PrintNameTo(stream); stream->Add(" == "); stream->Add(type_literal_->ToAsciiVector()); } -void HChange::PrintDataTo(StringStream* stream) const { +void HChange::PrintDataTo(StringStream* stream) { HUnaryOperation::PrintDataTo(stream); stream->Add(" %s to %s", from_.Mnemonic(), to_.Mnemonic()); @@ -735,26 +706,26 @@ HCheckInstanceType* HCheckInstanceType::NewIsJSObjectOrJSFunction( } -void HCheckMap::PrintDataTo(StringStream* stream) const { +void HCheckMap::PrintDataTo(StringStream* stream) { value()->PrintNameTo(stream); stream->Add(" %p", *map()); } -void HCheckFunction::PrintDataTo(StringStream* stream) const { +void HCheckFunction::PrintDataTo(StringStream* stream) { value()->PrintNameTo(stream); stream->Add(" %p", *target()); } -void HCallStub::PrintDataTo(StringStream* stream) const { +void HCallStub::PrintDataTo(StringStream* stream) { stream->Add("%s ", CodeStub::MajorName(major_key_, false)); HUnaryCall::PrintDataTo(stream); } -void HInstanceOf::PrintDataTo(StringStream* stream) const { +void HInstanceOf::PrintDataTo(StringStream* stream) { left()->PrintNameTo(stream); stream->Add(" "); right()->PrintNameTo(stream); @@ -899,7 +870,7 @@ Range* HMod::InferRange() { } -void HPhi::PrintTo(StringStream* stream) const { +void HPhi::PrintTo(StringStream* stream) { 
stream->Add("["); for (int i = 0; i < OperandCount(); ++i) { HValue* value = OperandAt(i); @@ -925,7 +896,7 @@ void HPhi::AddInput(HValue* value) { } -HValue* HPhi::GetRedundantReplacement() const { +HValue* HPhi::GetRedundantReplacement() { HValue* candidate = NULL; int count = OperandCount(); int position = 0; @@ -977,7 +948,7 @@ void HPhi::AddIndirectUsesTo(int* dest) { } -void HSimulate::PrintDataTo(StringStream* stream) const { +void HSimulate::PrintDataTo(StringStream* stream) { stream->Add("id=%d ", ast_id()); if (pop_count_ > 0) stream->Add("pop %d", pop_count_); if (values_.length() > 0) { @@ -994,7 +965,7 @@ void HSimulate::PrintDataTo(StringStream* stream) const { } -void HEnterInlined::PrintDataTo(StringStream* stream) const { +void HEnterInlined::PrintDataTo(StringStream* stream) { SmartPointer<char> name = function()->debug_name()->ToCString(); stream->Add("%s, id=%d", *name, function()->id()); } @@ -1035,7 +1006,7 @@ HConstant* HConstant::CopyToTruncatedInt32() const { } -void HConstant::PrintDataTo(StringStream* stream) const { +void HConstant::PrintDataTo(StringStream* stream) { handle()->ShortPrint(stream); } @@ -1045,7 +1016,7 @@ bool HArrayLiteral::IsCopyOnWrite() const { } -void HBinaryOperation::PrintDataTo(StringStream* stream) const { +void HBinaryOperation::PrintDataTo(StringStream* stream) { left()->PrintNameTo(stream); stream->Add(" "); right()->PrintNameTo(stream); @@ -1129,7 +1100,7 @@ Range* HShl::InferRange() { -void HCompare::PrintDataTo(StringStream* stream) const { +void HCompare::PrintDataTo(StringStream* stream) { stream->Add(Token::Name(token())); stream->Add(" "); HBinaryOperation::PrintDataTo(stream); @@ -1148,18 +1119,26 @@ void HCompare::SetInputRepresentation(Representation r) { } -void HParameter::PrintDataTo(StringStream* stream) const { +void HParameter::PrintDataTo(StringStream* stream) { stream->Add("%u", index()); } -void HLoadNamedField::PrintDataTo(StringStream* stream) const { +void 
HLoadNamedField::PrintDataTo(StringStream* stream) { object()->PrintNameTo(stream); stream->Add(" @%d%s", offset(), is_in_object() ? "[in-object]" : ""); } -void HLoadKeyed::PrintDataTo(StringStream* stream) const { +void HLoadKeyedFastElement::PrintDataTo(StringStream* stream) { + object()->PrintNameTo(stream); + stream->Add("["); + key()->PrintNameTo(stream); + stream->Add("]"); +} + + +void HLoadKeyedGeneric::PrintDataTo(StringStream* stream) { object()->PrintNameTo(stream); stream->Add("["); key()->PrintNameTo(stream); @@ -1167,7 +1146,7 @@ void HLoadKeyed::PrintDataTo(StringStream* stream) const { } -void HLoadPixelArrayElement::PrintDataTo(StringStream* stream) const { +void HLoadPixelArrayElement::PrintDataTo(StringStream* stream) { external_pointer()->PrintNameTo(stream); stream->Add("["); key()->PrintNameTo(stream); @@ -1175,7 +1154,7 @@ void HLoadPixelArrayElement::PrintDataTo(StringStream* stream) const { } -void HStoreNamed::PrintDataTo(StringStream* stream) const { +void HStoreNamedGeneric::PrintDataTo(StringStream* stream) { object()->PrintNameTo(stream); stream->Add("."); ASSERT(name()->IsString()); @@ -1185,15 +1164,29 @@ void HStoreNamed::PrintDataTo(StringStream* stream) const { } -void HStoreNamedField::PrintDataTo(StringStream* stream) const { - HStoreNamed::PrintDataTo(stream); +void HStoreNamedField::PrintDataTo(StringStream* stream) { + object()->PrintNameTo(stream); + stream->Add("."); + ASSERT(name()->IsString()); + stream->Add(*String::cast(*name())->ToCString()); + stream->Add(" = "); + value()->PrintNameTo(stream); if (!transition().is_null()) { stream->Add(" (transition map %p)", *transition()); } } -void HStoreKeyed::PrintDataTo(StringStream* stream) const { +void HStoreKeyedFastElement::PrintDataTo(StringStream* stream) { + object()->PrintNameTo(stream); + stream->Add("["); + key()->PrintNameTo(stream); + stream->Add("] = "); + value()->PrintNameTo(stream); +} + + +void HStoreKeyedGeneric::PrintDataTo(StringStream* stream) { 
object()->PrintNameTo(stream); stream->Add("["); key()->PrintNameTo(stream); @@ -1202,25 +1195,34 @@ void HStoreKeyed::PrintDataTo(StringStream* stream) const { } -void HLoadGlobal::PrintDataTo(StringStream* stream) const { +void HStorePixelArrayElement::PrintDataTo(StringStream* stream) { + external_pointer()->PrintNameTo(stream); + stream->Add("["); + key()->PrintNameTo(stream); + stream->Add("] = "); + value()->PrintNameTo(stream); +} + + +void HLoadGlobal::PrintDataTo(StringStream* stream) { stream->Add("[%p]", *cell()); if (check_hole_value()) stream->Add(" (deleteable/read-only)"); } -void HStoreGlobal::PrintDataTo(StringStream* stream) const { +void HStoreGlobal::PrintDataTo(StringStream* stream) { stream->Add("[%p] = ", *cell()); value()->PrintNameTo(stream); } -void HLoadContextSlot::PrintDataTo(StringStream* stream) const { +void HLoadContextSlot::PrintDataTo(StringStream* stream) { value()->PrintNameTo(stream); stream->Add("[%d]", slot_index()); } -void HStoreContextSlot::PrintDataTo(StringStream* stream) const { +void HStoreContextSlot::PrintDataTo(StringStream* stream) { context()->PrintNameTo(stream); stream->Add("[%d] = ", slot_index()); value()->PrintNameTo(stream); @@ -1230,33 +1232,33 @@ void HStoreContextSlot::PrintDataTo(StringStream* stream) const { // Implementation of type inference and type conversions. Calculates // the inferred type of this instruction based on the input operands. -HType HValue::CalculateInferredType() const { +HType HValue::CalculateInferredType() { return type_; } -HType HCheckMap::CalculateInferredType() const { +HType HCheckMap::CalculateInferredType() { return value()->type(); } -HType HCheckFunction::CalculateInferredType() const { +HType HCheckFunction::CalculateInferredType() { return value()->type(); } -HType HCheckNonSmi::CalculateInferredType() const { +HType HCheckNonSmi::CalculateInferredType() { // TODO(kasperl): Is there any way to signal that this isn't a smi? 
return HType::Tagged(); } -HType HCheckSmi::CalculateInferredType() const { +HType HCheckSmi::CalculateInferredType() { return HType::Smi(); } -HType HPhi::CalculateInferredType() const { +HType HPhi::CalculateInferredType() { HType result = HType::Uninitialized(); for (int i = 0; i < OperandCount(); ++i) { HType current = OperandAt(i)->type(); @@ -1266,77 +1268,77 @@ HType HPhi::CalculateInferredType() const { } -HType HConstant::CalculateInferredType() const { +HType HConstant::CalculateInferredType() { return constant_type_; } -HType HCompare::CalculateInferredType() const { +HType HCompare::CalculateInferredType() { return HType::Boolean(); } -HType HCompareJSObjectEq::CalculateInferredType() const { +HType HCompareJSObjectEq::CalculateInferredType() { return HType::Boolean(); } -HType HUnaryPredicate::CalculateInferredType() const { +HType HUnaryPredicate::CalculateInferredType() { return HType::Boolean(); } -HType HBitwiseBinaryOperation::CalculateInferredType() const { +HType HBitwiseBinaryOperation::CalculateInferredType() { return HType::TaggedNumber(); } -HType HArithmeticBinaryOperation::CalculateInferredType() const { +HType HArithmeticBinaryOperation::CalculateInferredType() { return HType::TaggedNumber(); } -HType HAdd::CalculateInferredType() const { +HType HAdd::CalculateInferredType() { return HType::Tagged(); } -HType HBitAnd::CalculateInferredType() const { +HType HBitAnd::CalculateInferredType() { return HType::TaggedNumber(); } -HType HBitXor::CalculateInferredType() const { +HType HBitXor::CalculateInferredType() { return HType::TaggedNumber(); } -HType HBitOr::CalculateInferredType() const { +HType HBitOr::CalculateInferredType() { return HType::TaggedNumber(); } -HType HBitNot::CalculateInferredType() const { +HType HBitNot::CalculateInferredType() { return HType::TaggedNumber(); } -HType HUnaryMathOperation::CalculateInferredType() const { +HType HUnaryMathOperation::CalculateInferredType() { return HType::TaggedNumber(); } -HType 
HShl::CalculateInferredType() const { +HType HShl::CalculateInferredType() { return HType::TaggedNumber(); } -HType HShr::CalculateInferredType() const { +HType HShr::CalculateInferredType() { return HType::TaggedNumber(); } -HType HSar::CalculateInferredType() const { +HType HSar::CalculateInferredType() { return HType::TaggedNumber(); } diff --git a/deps/v8/src/hydrogen-instructions.h b/deps/v8/src/hydrogen-instructions.h index 9f5170ca2b..22916f5034 100644 --- a/deps/v8/src/hydrogen-instructions.h +++ b/deps/v8/src/hydrogen-instructions.h @@ -51,14 +51,9 @@ class LChunkBuilder; V(BinaryCall) \ V(BinaryOperation) \ V(BitwiseBinaryOperation) \ - V(Call) \ V(ControlInstruction) \ V(Instruction) \ - V(LoadKeyed) \ - V(MaterializedLiteral) \ V(Phi) \ - V(StoreKeyed) \ - V(StoreNamed) \ V(UnaryCall) \ V(UnaryControlInstruction) \ V(UnaryOperation) \ @@ -151,6 +146,7 @@ class LChunkBuilder; V(StoreContextSlot) \ V(StoreGlobal) \ V(StoreKeyedFastElement) \ + V(StorePixelArrayElement) \ V(StoreKeyedGeneric) \ V(StoreNamedField) \ V(StoreNamedGeneric) \ @@ -192,14 +188,6 @@ class LChunkBuilder; DECLARE_INSTRUCTION(type) - -template<int kSize> -class HOperandVector : public EmbeddedVector<HValue*, kSize> { - public: - HOperandVector() : EmbeddedVector<HValue*, kSize>(NULL) { } -}; - - class Range: public ZoneObject { public: Range() : lower_(kMinInt), @@ -308,7 +296,7 @@ class Representation { static Representation Double() { return Representation(kDouble); } static Representation External() { return Representation(kExternal); } - bool Equals(const Representation& other) const { + bool Equals(const Representation& other) { return kind_ == other.kind_; } @@ -543,15 +531,12 @@ class HValue: public ZoneObject { bool IsDefinedAfter(HBasicBlock* other) const; // Operands. 
- virtual int OperandCount() const { return 0; } - virtual HValue* OperandAt(int index) const { - UNREACHABLE(); - return NULL; - } + virtual int OperandCount() = 0; + virtual HValue* OperandAt(int index) = 0; void SetOperandAt(int index, HValue* value); - int LookupOperandIndex(int occurrence_index, HValue* op) const; - bool UsesMultipleTimes(HValue* op) const; + int LookupOperandIndex(int occurrence_index, HValue* op); + bool UsesMultipleTimes(HValue* op); void ReplaceAndDelete(HValue* other); void ReplaceValue(HValue* other); @@ -577,10 +562,9 @@ class HValue: public ZoneObject { void ComputeInitialRange(); // Representation helpers. - virtual Representation RequiredInputRepresentation(int index) const { - return Representation::None(); - } - virtual Representation InferredRepresentation() const { + virtual Representation RequiredInputRepresentation(int index) const = 0; + + virtual Representation InferredRepresentation() { return representation(); } @@ -595,11 +579,11 @@ class HValue: public ZoneObject { HYDROGEN_ALL_INSTRUCTION_LIST(DECLARE_DO) #undef DECLARE_DO - bool Equals(HValue* other) const; - virtual intptr_t Hashcode() const; + bool Equals(HValue* other); + virtual intptr_t Hashcode(); // Printing support. - virtual void PrintTo(StringStream* stream) const = 0; + virtual void PrintTo(StringStream* stream) = 0; void PrintNameTo(StringStream* stream); static void PrintTypeTo(HType type, StringStream* stream); @@ -610,7 +594,7 @@ class HValue: public ZoneObject { // it has changed. bool UpdateInferredType(); - virtual HType CalculateInferredType() const; + virtual HType CalculateInferredType(); #ifdef DEBUG virtual void Verify() = 0; @@ -619,14 +603,14 @@ class HValue: public ZoneObject { protected: // This function must be overridden for instructions with flag kUseGVN, to // compare the non-Operand parts of the instruction. 
- virtual bool DataEquals(HValue* other) const { + virtual bool DataEquals(HValue* other) { UNREACHABLE(); return false; } virtual void RepresentationChanged(Representation to) { } virtual Range* InferRange(); virtual void DeleteFromGraph() = 0; - virtual void InternalSetOperandAt(int index, HValue* value) { UNREACHABLE(); } + virtual void InternalSetOperandAt(int index, HValue* value) = 0; void clear_block() { ASSERT(block_ != NULL); block_ = NULL; @@ -668,8 +652,8 @@ class HInstruction: public HValue { HInstruction* next() const { return next_; } HInstruction* previous() const { return previous_; } - void PrintTo(StringStream* stream) const; - virtual void PrintDataTo(StringStream* stream) const {} + virtual void PrintTo(StringStream* stream); + virtual void PrintDataTo(StringStream* stream) { } bool IsLinked() const { return block() != NULL; } void Unlink(); @@ -690,6 +674,8 @@ class HInstruction: public HValue { // instruction. virtual bool IsCheckInstruction() const { return false; } + virtual bool IsCall() { return false; } + DECLARE_INSTRUCTION(Instruction) protected: @@ -716,12 +702,6 @@ class HInstruction: public HValue { }; -class HBlockEntry: public HInstruction { - public: - DECLARE_CONCRETE_INSTRUCTION(BlockEntry, "block_entry") -}; - - class HControlInstruction: public HInstruction { public: HControlInstruction(HBasicBlock* first, HBasicBlock* second) @@ -731,7 +711,7 @@ class HControlInstruction: public HInstruction { HBasicBlock* FirstSuccessor() const { return first_successor_; } HBasicBlock* SecondSuccessor() const { return second_successor_; } - virtual void PrintDataTo(StringStream* stream) const; + virtual void PrintDataTo(StringStream* stream); DECLARE_INSTRUCTION(ControlInstruction) @@ -741,25 +721,101 @@ class HControlInstruction: public HInstruction { }; -class HDeoptimize: public HControlInstruction { +template<int NumElements> +class HOperandContainer { + public: + HOperandContainer() : elems_() { } + + int length() { return NumElements; 
} + HValue*& operator[](int i) { + ASSERT(i < length()); + return elems_[i]; + } + + private: + HValue* elems_[NumElements]; +}; + + +template<> +class HOperandContainer<0> { + public: + int length() { return 0; } + HValue*& operator[](int i) { + UNREACHABLE(); + static HValue* t = 0; + return t; + } +}; + + +template<int V> +class HTemplateInstruction : public HInstruction { + public: + int OperandCount() { return V; } + HValue* OperandAt(int i) { return inputs_[i]; } + + protected: + void InternalSetOperandAt(int i, HValue* value) { inputs_[i] = value; } + + private: + HOperandContainer<V> inputs_; +}; + + +template<int V> +class HTemplateControlInstruction : public HControlInstruction { + public: + HTemplateControlInstruction<V>(HBasicBlock* first, HBasicBlock* second) + : HControlInstruction(first, second) { } + int OperandCount() { return V; } + HValue* OperandAt(int i) { return inputs_[i]; } + + protected: + void InternalSetOperandAt(int i, HValue* value) { inputs_[i] = value; } + + private: + HOperandContainer<V> inputs_; +}; + + +class HBlockEntry: public HTemplateInstruction<0> { + public: + virtual Representation RequiredInputRepresentation(int index) const { + return Representation::None(); + } + + DECLARE_CONCRETE_INSTRUCTION(BlockEntry, "block_entry") +}; + + +class HDeoptimize: public HTemplateControlInstruction<0> { public: - HDeoptimize() : HControlInstruction(NULL, NULL) { } + HDeoptimize() : HTemplateControlInstruction<0>(NULL, NULL) { } + + virtual Representation RequiredInputRepresentation(int index) const { + return Representation::None(); + } DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize") }; -class HGoto: public HControlInstruction { +class HGoto: public HTemplateControlInstruction<0> { public: explicit HGoto(HBasicBlock* target) - : HControlInstruction(target, NULL), include_stack_check_(false) { - } + : HTemplateControlInstruction<0>(target, NULL), + include_stack_check_(false) { } void set_include_stack_check(bool 
include_stack_check) { include_stack_check_ = include_stack_check; } bool include_stack_check() const { return include_stack_check_; } + virtual Representation RequiredInputRepresentation(int index) const { + return Representation::None(); + } + DECLARE_CONCRETE_INSTRUCTION(Goto, "goto") private: @@ -767,34 +823,20 @@ class HGoto: public HControlInstruction { }; -class HUnaryControlInstruction: public HControlInstruction { +class HUnaryControlInstruction: public HTemplateControlInstruction<1> { public: explicit HUnaryControlInstruction(HValue* value, HBasicBlock* true_target, HBasicBlock* false_target) - : HControlInstruction(true_target, false_target) { + : HTemplateControlInstruction<1>(true_target, false_target) { SetOperandAt(0, value); } - virtual Representation RequiredInputRepresentation(int index) const { - return Representation::Tagged(); - } + virtual void PrintDataTo(StringStream* stream); - virtual void PrintDataTo(StringStream* stream) const; - - HValue* value() const { return OperandAt(0); } - virtual int OperandCount() const { return 1; } - virtual HValue* OperandAt(int index) const { return operands_[index]; } + HValue* value() { return OperandAt(0); } DECLARE_INSTRUCTION(UnaryControlInstruction) - - protected: - virtual void InternalSetOperandAt(int index, HValue* value) { - operands_[index] = value; - } - - private: - HOperandVector<1> operands_; }; @@ -826,10 +868,14 @@ class HCompareMap: public HUnaryControlInstruction { ASSERT(!map.is_null()); } - virtual void PrintDataTo(StringStream* stream) const; + virtual void PrintDataTo(StringStream* stream); Handle<Map> map() const { return map_; } + virtual Representation RequiredInputRepresentation(int index) const { + return Representation::Tagged(); + } + DECLARE_CONCRETE_INSTRUCTION(CompareMap, "compare_map") private: @@ -843,38 +889,36 @@ class HReturn: public HUnaryControlInstruction { : HUnaryControlInstruction(value, NULL, NULL) { } + virtual Representation RequiredInputRepresentation(int 
index) const { + return Representation::Tagged(); + } + DECLARE_CONCRETE_INSTRUCTION(Return, "return") }; -class HAbnormalExit: public HControlInstruction { +class HAbnormalExit: public HTemplateControlInstruction<0> { public: - HAbnormalExit() : HControlInstruction(NULL, NULL) { } + HAbnormalExit() : HTemplateControlInstruction<0>(NULL, NULL) { } + + virtual Representation RequiredInputRepresentation(int index) const { + return Representation::None(); + } DECLARE_CONCRETE_INSTRUCTION(AbnormalExit, "abnormal_exit") }; -class HUnaryOperation: public HInstruction { +class HUnaryOperation: public HTemplateInstruction<1> { public: explicit HUnaryOperation(HValue* value) { SetOperandAt(0, value); } - HValue* value() const { return OperandAt(0); } - virtual void PrintDataTo(StringStream* stream) const; - virtual int OperandCount() const { return 1; } - virtual HValue* OperandAt(int index) const { return operands_[index]; } + HValue* value() { return OperandAt(0); } + virtual void PrintDataTo(StringStream* stream); DECLARE_INSTRUCTION(UnaryOperation) - - protected: - virtual void InternalSetOperandAt(int index, HValue* value) { - operands_[index] = value; - } - - private: - HOperandVector<1> operands_; }; @@ -924,13 +968,13 @@ class HChange: public HUnaryOperation { return true; } - virtual void PrintDataTo(StringStream* stream) const; + virtual void PrintDataTo(StringStream* stream); DECLARE_CONCRETE_INSTRUCTION(Change, CanTruncateToInt32() ? 
"truncate" : "change") protected: - virtual bool DataEquals(HValue* other) const { + virtual bool DataEquals(HValue* other) { if (!other->IsChange()) return false; HChange* change = HChange::cast(other); return value() == change->value() @@ -954,7 +998,7 @@ class HSimulate: public HInstruction { assigned_indexes_(2) {} virtual ~HSimulate() {} - virtual void PrintDataTo(StringStream* stream) const; + virtual void PrintDataTo(StringStream* stream); bool HasAstId() const { return ast_id_ != AstNode::kNoNumber; } int ast_id() const { return ast_id_; } @@ -979,8 +1023,12 @@ class HSimulate: public HInstruction { void AddPushedValue(HValue* value) { AddValue(kNoIndex, value); } - virtual int OperandCount() const { return values_.length(); } - virtual HValue* OperandAt(int index) const { return values_[index]; } + virtual int OperandCount() { return values_.length(); } + virtual HValue* OperandAt(int index) { return values_[index]; } + + virtual Representation RequiredInputRepresentation(int index) const { + return Representation::None(); + } DECLARE_CONCRETE_INSTRUCTION(Simulate, "simulate") @@ -1011,25 +1059,33 @@ class HSimulate: public HInstruction { }; -class HStackCheck: public HInstruction { +class HStackCheck: public HTemplateInstruction<0> { public: HStackCheck() { } + virtual Representation RequiredInputRepresentation(int index) const { + return Representation::None(); + } + DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack_check") }; -class HEnterInlined: public HInstruction { +class HEnterInlined: public HTemplateInstruction<0> { public: HEnterInlined(Handle<JSFunction> closure, FunctionLiteral* function) : closure_(closure), function_(function) { } - virtual void PrintDataTo(StringStream* stream) const; + virtual void PrintDataTo(StringStream* stream); Handle<JSFunction> closure() const { return closure_; } FunctionLiteral* function() const { return function_; } + virtual Representation RequiredInputRepresentation(int index) const { + return 
Representation::None(); + } + DECLARE_CONCRETE_INSTRUCTION(EnterInlined, "enter_inlined") private: @@ -1038,39 +1094,49 @@ class HEnterInlined: public HInstruction { }; -class HLeaveInlined: public HInstruction { +class HLeaveInlined: public HTemplateInstruction<0> { public: HLeaveInlined() {} + virtual Representation RequiredInputRepresentation(int index) const { + return Representation::None(); + } + DECLARE_CONCRETE_INSTRUCTION(LeaveInlined, "leave_inlined") }; class HPushArgument: public HUnaryOperation { public: - explicit HPushArgument(HValue* value) : HUnaryOperation(value) { } + explicit HPushArgument(HValue* value) : HUnaryOperation(value) { + set_representation(Representation::Tagged()); + } virtual Representation RequiredInputRepresentation(int index) const { return Representation::Tagged(); } - HValue* argument() const { return OperandAt(0); } + HValue* argument() { return OperandAt(0); } DECLARE_CONCRETE_INSTRUCTION(PushArgument, "push_argument") }; -class HContext: public HInstruction { +class HContext: public HTemplateInstruction<0> { public: HContext() { set_representation(Representation::Tagged()); SetFlag(kUseGVN); } + virtual Representation RequiredInputRepresentation(int index) const { + return Representation::None(); + } + DECLARE_CONCRETE_INSTRUCTION(Context, "context"); protected: - virtual bool DataEquals(HValue* other) const { return true; } + virtual bool DataEquals(HValue* other) { return true; } }; @@ -1083,8 +1149,12 @@ class HOuterContext: public HUnaryOperation { DECLARE_CONCRETE_INSTRUCTION(OuterContext, "outer_context"); + virtual Representation RequiredInputRepresentation(int index) const { + return Representation::Tagged(); + } + protected: - virtual bool DataEquals(HValue* other) const { return true; } + virtual bool DataEquals(HValue* other) { return true; } }; @@ -1097,8 +1167,12 @@ class HGlobalObject: public HUnaryOperation { DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global_object") + virtual Representation 
RequiredInputRepresentation(int index) const { + return Representation::Tagged(); + } + protected: - virtual bool DataEquals(HValue* other) const { return true; } + virtual bool DataEquals(HValue* other) { return true; } }; @@ -1112,94 +1186,79 @@ class HGlobalReceiver: public HUnaryOperation { DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver, "global_receiver") + virtual Representation RequiredInputRepresentation(int index) const { + return Representation::Tagged(); + } + protected: - virtual bool DataEquals(HValue* other) const { return true; } + virtual bool DataEquals(HValue* other) { return true; } }; -class HCall: public HInstruction { +template <int V> +class HCall: public HTemplateInstruction<V> { public: // The argument count includes the receiver. - explicit HCall(int argument_count) : argument_count_(argument_count) { - set_representation(Representation::Tagged()); - SetAllSideEffects(); + explicit HCall<V>(int argument_count) : argument_count_(argument_count) { + this->set_representation(Representation::Tagged()); + this->SetAllSideEffects(); } - virtual HType CalculateInferredType() const { return HType::Tagged(); } + virtual HType CalculateInferredType() { return HType::Tagged(); } virtual int argument_count() const { return argument_count_; } - virtual void PrintDataTo(StringStream* stream) const; - - DECLARE_INSTRUCTION(Call) + virtual bool IsCall() { return true; } private: int argument_count_; }; -class HUnaryCall: public HCall { +class HUnaryCall: public HCall<1> { public: HUnaryCall(HValue* value, int argument_count) - : HCall(argument_count), value_(NULL) { + : HCall<1>(argument_count) { SetOperandAt(0, value); } - virtual void PrintDataTo(StringStream* stream) const; - - HValue* value() const { return value_; } - - virtual int OperandCount() const { return 1; } - virtual HValue* OperandAt(int index) const { - ASSERT(index == 0); - return value_; + virtual Representation RequiredInputRepresentation(int index) const { + return 
Representation::Tagged(); } - DECLARE_INSTRUCTION(UnaryCall) + virtual void PrintDataTo(StringStream* stream); - protected: - virtual void InternalSetOperandAt(int index, HValue* value) { - ASSERT(index == 0); - value_ = value; - } + HValue* value() { return OperandAt(0); } - private: - HValue* value_; + DECLARE_INSTRUCTION(UnaryCall) }; -class HBinaryCall: public HCall { +class HBinaryCall: public HCall<2> { public: HBinaryCall(HValue* first, HValue* second, int argument_count) - : HCall(argument_count) { + : HCall<2>(argument_count) { SetOperandAt(0, first); SetOperandAt(1, second); } - virtual void PrintDataTo(StringStream* stream) const; + virtual void PrintDataTo(StringStream* stream); - HValue* first() const { return operands_[0]; } - HValue* second() const { return operands_[1]; } + virtual Representation RequiredInputRepresentation(int index) const { + return Representation::Tagged(); + } - virtual int OperandCount() const { return 2; } - virtual HValue* OperandAt(int index) const { return operands_[index]; } + HValue* first() { return OperandAt(0); } + HValue* second() { return OperandAt(1); } DECLARE_INSTRUCTION(BinaryCall) - - protected: - virtual void InternalSetOperandAt(int index, HValue* value) { - operands_[index] = value; - } - - private: - HOperandVector<2> operands_; }; -class HCallConstantFunction: public HCall { +class HCallConstantFunction: public HCall<0> { public: HCallConstantFunction(Handle<JSFunction> function, int argument_count) - : HCall(argument_count), function_(function) { } + : HCall<0>(argument_count), function_(function) { } Handle<JSFunction> function() const { return function_; } @@ -1207,7 +1266,11 @@ class HCallConstantFunction: public HCall { return function_->code() == Builtins::builtin(Builtins::FunctionApply); } - virtual void PrintDataTo(StringStream* stream) const; + virtual void PrintDataTo(StringStream* stream); + + virtual Representation RequiredInputRepresentation(int index) const { + return Representation::None(); 
+ } DECLARE_CONCRETE_INSTRUCTION(CallConstantFunction, "call_constant_function") @@ -1226,8 +1289,8 @@ class HCallKeyed: public HBinaryCall { return Representation::Tagged(); } - HValue* context() const { return first(); } - HValue* key() const { return second(); } + HValue* context() { return first(); } + HValue* key() { return second(); } DECLARE_CONCRETE_INSTRUCTION(CallKeyed, "call_keyed") }; @@ -1239,13 +1302,17 @@ class HCallNamed: public HUnaryCall { : HUnaryCall(context, argument_count), name_(name) { } - virtual void PrintDataTo(StringStream* stream) const; + virtual void PrintDataTo(StringStream* stream); - HValue* context() const { return value(); } + HValue* context() { return value(); } Handle<String> name() const { return name_; } DECLARE_CONCRETE_INSTRUCTION(CallNamed, "call_named") + virtual Representation RequiredInputRepresentation(int index) const { + return Representation::Tagged(); + } + private: Handle<String> name_; }; @@ -1257,7 +1324,11 @@ class HCallFunction: public HUnaryCall { : HUnaryCall(context, argument_count) { } - HValue* context() const { return value(); } + HValue* context() { return value(); } + + virtual Representation RequiredInputRepresentation(int index) const { + return Representation::Tagged(); + } DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call_function") }; @@ -1269,11 +1340,15 @@ class HCallGlobal: public HUnaryCall { : HUnaryCall(context, argument_count), name_(name) { } - virtual void PrintDataTo(StringStream* stream) const; + virtual void PrintDataTo(StringStream* stream); - HValue* context() const { return value(); } + HValue* context() { return value(); } Handle<String> name() const { return name_; } + virtual Representation RequiredInputRepresentation(int index) const { + return Representation::Tagged(); + } + DECLARE_CONCRETE_INSTRUCTION(CallGlobal, "call_global") private: @@ -1281,15 +1356,19 @@ class HCallGlobal: public HUnaryCall { }; -class HCallKnownGlobal: public HCall { +class HCallKnownGlobal: public 
HCall<0> { public: HCallKnownGlobal(Handle<JSFunction> target, int argument_count) - : HCall(argument_count), target_(target) { } + : HCall<0>(argument_count), target_(target) { } - virtual void PrintDataTo(StringStream* stream) const; + virtual void PrintDataTo(StringStream* stream); Handle<JSFunction> target() const { return target_; } + virtual Representation RequiredInputRepresentation(int index) const { + return Representation::None(); + } + DECLARE_CONCRETE_INSTRUCTION(CallKnownGlobal, "call_known_global") private: @@ -1307,24 +1386,28 @@ class HCallNew: public HBinaryCall { return Representation::Tagged(); } - HValue* context() const { return first(); } - HValue* constructor() const { return second(); } + HValue* context() { return first(); } + HValue* constructor() { return second(); } DECLARE_CONCRETE_INSTRUCTION(CallNew, "call_new") }; -class HCallRuntime: public HCall { +class HCallRuntime: public HCall<0> { public: HCallRuntime(Handle<String> name, Runtime::Function* c_function, int argument_count) - : HCall(argument_count), c_function_(c_function), name_(name) { } - virtual void PrintDataTo(StringStream* stream) const; + : HCall<0>(argument_count), c_function_(c_function), name_(name) { } + virtual void PrintDataTo(StringStream* stream); Runtime::Function* function() const { return c_function_; } Handle<String> name() const { return name_; } + virtual Representation RequiredInputRepresentation(int index) const { + return Representation::None(); + } + DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call_runtime") private: @@ -1351,7 +1434,7 @@ class HJSArrayLength: public HUnaryOperation { DECLARE_CONCRETE_INSTRUCTION(JSArrayLength, "js_array_length") protected: - virtual bool DataEquals(HValue* other) const { return true; } + virtual bool DataEquals(HValue* other) { return true; } }; @@ -1370,7 +1453,7 @@ class HFixedArrayLength: public HUnaryOperation { DECLARE_CONCRETE_INSTRUCTION(FixedArrayLength, "fixed_array_length") protected: - virtual bool 
DataEquals(HValue* other) const { return true; } + virtual bool DataEquals(HValue* other) { return true; } }; @@ -1391,7 +1474,7 @@ class HPixelArrayLength: public HUnaryOperation { DECLARE_CONCRETE_INSTRUCTION(PixelArrayLength, "pixel_array_length") protected: - virtual bool DataEquals(HValue* other) const { return true; } + virtual bool DataEquals(HValue* other) { return true; } }; @@ -1406,12 +1489,12 @@ class HBitNot: public HUnaryOperation { virtual Representation RequiredInputRepresentation(int index) const { return Representation::Integer32(); } - virtual HType CalculateInferredType() const; + virtual HType CalculateInferredType(); DECLARE_CONCRETE_INSTRUCTION(BitNot, "bit_not") protected: - virtual bool DataEquals(HValue* other) const { return true; } + virtual bool DataEquals(HValue* other) { return true; } }; @@ -1442,9 +1525,9 @@ class HUnaryMathOperation: public HUnaryOperation { SetFlag(kUseGVN); } - virtual void PrintDataTo(StringStream* stream) const; + virtual void PrintDataTo(StringStream* stream); - virtual HType CalculateInferredType() const; + virtual HType CalculateInferredType(); virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited); @@ -1459,11 +1542,10 @@ class HUnaryMathOperation: public HUnaryOperation { case kMathSin: case kMathCos: return Representation::Double(); - break; case kMathAbs: return representation(); - break; default: + UNREACHABLE(); return Representation::None(); } } @@ -1484,7 +1566,7 @@ class HUnaryMathOperation: public HUnaryOperation { DECLARE_CONCRETE_INSTRUCTION(UnaryMathOperation, "unary_math_operation") protected: - virtual bool DataEquals(HValue* other) const { + virtual bool DataEquals(HValue* other) { HUnaryMathOperation* b = HUnaryMathOperation::cast(other); return op_ == b->op(); } @@ -1509,7 +1591,7 @@ class HLoadElements: public HUnaryOperation { DECLARE_CONCRETE_INSTRUCTION(LoadElements, "load-elements") protected: - virtual bool DataEquals(HValue* other) const { return true; } + virtual bool 
DataEquals(HValue* other) { return true; } }; @@ -1533,7 +1615,7 @@ class HLoadPixelArrayExternalPointer: public HUnaryOperation { "load-pixel-array-external-pointer") protected: - virtual bool DataEquals(HValue* other) const { return true; } + virtual bool DataEquals(HValue* other) { return true; } }; @@ -1551,8 +1633,8 @@ class HCheckMap: public HUnaryOperation { virtual Representation RequiredInputRepresentation(int index) const { return Representation::Tagged(); } - virtual void PrintDataTo(StringStream* stream) const; - virtual HType CalculateInferredType() const; + virtual void PrintDataTo(StringStream* stream); + virtual HType CalculateInferredType(); #ifdef DEBUG virtual void Verify(); @@ -1563,7 +1645,7 @@ class HCheckMap: public HUnaryOperation { DECLARE_CONCRETE_INSTRUCTION(CheckMap, "check_map") protected: - virtual bool DataEquals(HValue* other) const { + virtual bool DataEquals(HValue* other) { HCheckMap* b = HCheckMap::cast(other); return map_.is_identical_to(b->map()); } @@ -1586,8 +1668,8 @@ class HCheckFunction: public HUnaryOperation { virtual Representation RequiredInputRepresentation(int index) const { return Representation::Tagged(); } - virtual void PrintDataTo(StringStream* stream) const; - virtual HType CalculateInferredType() const; + virtual void PrintDataTo(StringStream* stream); + virtual HType CalculateInferredType(); #ifdef DEBUG virtual void Verify(); @@ -1598,7 +1680,7 @@ class HCheckFunction: public HUnaryOperation { DECLARE_CONCRETE_INSTRUCTION(CheckFunction, "check_function") protected: - virtual bool DataEquals(HValue* other) const { + virtual bool DataEquals(HValue* other) { HCheckFunction* b = HCheckFunction::cast(other); return target_.is_identical_to(b->target()); } @@ -1646,7 +1728,7 @@ class HCheckInstanceType: public HUnaryOperation { // TODO(ager): It could be nice to allow the ommision of instance // type checks if we have already performed an instance type check // with a larger range. 
- virtual bool DataEquals(HValue* other) const { + virtual bool DataEquals(HValue* other) { HCheckInstanceType* b = HCheckInstanceType::cast(other); return (first_ == b->first()) && (last_ == b->last()); } @@ -1670,7 +1752,7 @@ class HCheckNonSmi: public HUnaryOperation { return Representation::Tagged(); } - virtual HType CalculateInferredType() const; + virtual HType CalculateInferredType(); #ifdef DEBUG virtual void Verify(); @@ -1679,11 +1761,11 @@ class HCheckNonSmi: public HUnaryOperation { DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check_non_smi") protected: - virtual bool DataEquals(HValue* other) const { return true; } + virtual bool DataEquals(HValue* other) { return true; } }; -class HCheckPrototypeMaps: public HInstruction { +class HCheckPrototypeMaps: public HTemplateInstruction<0> { public: HCheckPrototypeMaps(Handle<JSObject> prototype, Handle<JSObject> holder) : prototype_(prototype), holder_(holder) { @@ -1702,7 +1784,11 @@ class HCheckPrototypeMaps: public HInstruction { DECLARE_CONCRETE_INSTRUCTION(CheckPrototypeMaps, "check_prototype_maps") - virtual intptr_t Hashcode() const { + virtual Representation RequiredInputRepresentation(int index) const { + return Representation::None(); + } + + virtual intptr_t Hashcode() { ASSERT(!Heap::IsAllocationAllowed()); intptr_t hash = reinterpret_cast<intptr_t>(*prototype()); hash = 17 * hash + reinterpret_cast<intptr_t>(*holder()); @@ -1710,7 +1796,7 @@ class HCheckPrototypeMaps: public HInstruction { } protected: - virtual bool DataEquals(HValue* other) const { + virtual bool DataEquals(HValue* other) { HCheckPrototypeMaps* b = HCheckPrototypeMaps::cast(other); return prototype_.is_identical_to(b->prototype()) && holder_.is_identical_to(b->holder()); @@ -1734,7 +1820,7 @@ class HCheckSmi: public HUnaryOperation { virtual Representation RequiredInputRepresentation(int index) const { return Representation::Tagged(); } - virtual HType CalculateInferredType() const; + virtual HType CalculateInferredType(); 
#ifdef DEBUG virtual void Verify(); @@ -1743,7 +1829,7 @@ class HCheckSmi: public HUnaryOperation { DECLARE_CONCRETE_INSTRUCTION(CheckSmi, "check_smi") protected: - virtual bool DataEquals(HValue* other) const { return true; } + virtual bool DataEquals(HValue* other) { return true; } }; @@ -1762,7 +1848,7 @@ class HPhi: public HValue { SetFlag(kFlexibleRepresentation); } - virtual Representation InferredRepresentation() const { + virtual Representation InferredRepresentation() { bool double_occurred = false; bool int32_occurred = false; for (int i = 0; i < OperandCount(); ++i) { @@ -1781,10 +1867,10 @@ class HPhi: public HValue { virtual Representation RequiredInputRepresentation(int index) const { return representation(); } - virtual HType CalculateInferredType() const; - virtual int OperandCount() const { return inputs_.length(); } - virtual HValue* OperandAt(int index) const { return inputs_[index]; } - HValue* GetRedundantReplacement() const; + virtual HType CalculateInferredType(); + virtual int OperandCount() { return inputs_.length(); } + virtual HValue* OperandAt(int index) { return inputs_[index]; } + HValue* GetRedundantReplacement(); void AddInput(HValue* value); bool IsReceiver() { return merged_index_ == 0; } @@ -1793,7 +1879,7 @@ class HPhi: public HValue { virtual const char* Mnemonic() const { return "phi"; } - virtual void PrintTo(StringStream* stream) const; + virtual void PrintTo(StringStream* stream); #ifdef DEBUG virtual void Verify(); @@ -1841,18 +1927,22 @@ class HPhi: public HValue { }; -class HArgumentsObject: public HInstruction { +class HArgumentsObject: public HTemplateInstruction<0> { public: HArgumentsObject() { set_representation(Representation::Tagged()); SetFlag(kIsArguments); } + virtual Representation RequiredInputRepresentation(int index) const { + return Representation::None(); + } + DECLARE_CONCRETE_INSTRUCTION(ArgumentsObject, "arguments-object") }; -class HConstant: public HInstruction { +class HConstant: public 
HTemplateInstruction<0> { public: HConstant(Handle<Object> handle, Representation r); @@ -1860,9 +1950,13 @@ class HConstant: public HInstruction { bool InOldSpace() const { return !Heap::InNewSpace(*handle_); } + virtual Representation RequiredInputRepresentation(int index) const { + return Representation::None(); + } + virtual bool EmitAtUses() const { return !representation().IsDouble(); } - virtual void PrintDataTo(StringStream* stream) const; - virtual HType CalculateInferredType() const; + virtual void PrintDataTo(StringStream* stream); + virtual HType CalculateInferredType(); bool IsInteger() const { return handle_->IsSmi(); } HConstant* CopyToRepresentation(Representation r) const; HConstant* CopyToTruncatedInt32() const; @@ -1878,7 +1972,7 @@ class HConstant: public HInstruction { } bool HasStringValue() const { return handle_->IsString(); } - virtual intptr_t Hashcode() const { + virtual intptr_t Hashcode() { ASSERT(!Heap::allow_allocation(false)); return reinterpret_cast<intptr_t>(*handle()); } @@ -1892,7 +1986,7 @@ class HConstant: public HInstruction { protected: virtual Range* InferRange(); - virtual bool DataEquals(HValue* other) const { + virtual bool DataEquals(HValue* other) { HConstant* other_constant = HConstant::cast(other); return handle().is_identical_to(other_constant->handle()); } @@ -1911,7 +2005,7 @@ class HConstant: public HInstruction { }; -class HBinaryOperation: public HInstruction { +class HBinaryOperation: public HTemplateInstruction<2> { public: HBinaryOperation(HValue* left, HValue* right) { ASSERT(left != NULL && right != NULL); @@ -1919,38 +2013,29 @@ class HBinaryOperation: public HInstruction { SetOperandAt(1, right); } - HValue* left() const { return OperandAt(0); } - HValue* right() const { return OperandAt(1); } + HValue* left() { return OperandAt(0); } + HValue* right() { return OperandAt(1); } // TODO(kasperl): Move these helpers to the IA-32 Lithium // instruction sequence builder. 
- HValue* LeastConstantOperand() const { + HValue* LeastConstantOperand() { if (IsCommutative() && left()->IsConstant()) return right(); return left(); } - HValue* MostConstantOperand() const { + HValue* MostConstantOperand() { if (IsCommutative() && left()->IsConstant()) return left(); return right(); } virtual bool IsCommutative() const { return false; } - virtual void PrintDataTo(StringStream* stream) const; - virtual int OperandCount() const { return operands_.length(); } - virtual HValue* OperandAt(int index) const { return operands_[index]; } + virtual void PrintDataTo(StringStream* stream); DECLARE_INSTRUCTION(BinaryOperation) - - protected: - virtual void InternalSetOperandAt(int index, HValue* value) { - operands_[index] = value; - } - - HOperandVector<2> operands_; }; -class HApplyArguments: public HInstruction { +class HApplyArguments: public HTemplateInstruction<4> { public: HApplyArguments(HValue* function, HValue* receiver, @@ -1971,27 +2056,16 @@ class HApplyArguments: public HInstruction { : Representation::Tagged(); } - HValue* function() const { return OperandAt(0); } - HValue* receiver() const { return OperandAt(1); } - HValue* length() const { return OperandAt(2); } - HValue* elements() const { return OperandAt(3); } - - virtual int OperandCount() const { return operands_.length(); } - virtual HValue* OperandAt(int index) const { return operands_[index]; } + HValue* function() { return OperandAt(0); } + HValue* receiver() { return OperandAt(1); } + HValue* length() { return OperandAt(2); } + HValue* elements() { return OperandAt(3); } DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply_arguments") - - protected: - virtual void InternalSetOperandAt(int index, HValue* value) { - operands_[index] = value; - } - - private: - HOperandVector<4> operands_; }; -class HArgumentsElements: public HInstruction { +class HArgumentsElements: public HTemplateInstruction<0> { public: HArgumentsElements() { // The value produced by this instruction is a pointer 
into the stack @@ -2002,8 +2076,12 @@ class HArgumentsElements: public HInstruction { DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments_elements") + virtual Representation RequiredInputRepresentation(int index) const { + return Representation::None(); + } + protected: - virtual bool DataEquals(HValue* other) const { return true; } + virtual bool DataEquals(HValue* other) { return true; } }; @@ -2014,14 +2092,18 @@ class HArgumentsLength: public HUnaryOperation { SetFlag(kUseGVN); } + virtual Representation RequiredInputRepresentation(int index) const { + return Representation::Tagged(); + } + DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments_length") protected: - virtual bool DataEquals(HValue* other) const { return true; } + virtual bool DataEquals(HValue* other) { return true; } }; -class HAccessArgumentsAt: public HInstruction { +class HAccessArgumentsAt: public HTemplateInstruction<3> { public: HAccessArgumentsAt(HValue* arguments, HValue* length, HValue* index) { set_representation(Representation::Tagged()); @@ -2031,7 +2113,7 @@ class HAccessArgumentsAt: public HInstruction { SetOperandAt(2, index); } - virtual void PrintDataTo(StringStream* stream) const; + virtual void PrintDataTo(StringStream* stream); virtual Representation RequiredInputRepresentation(int index) const { // The arguments elements is considered tagged. 
@@ -2040,24 +2122,13 @@ class HAccessArgumentsAt: public HInstruction { : Representation::Integer32(); } - HValue* arguments() const { return operands_[0]; } - HValue* length() const { return operands_[1]; } - HValue* index() const { return operands_[2]; } - - virtual int OperandCount() const { return operands_.length(); } - virtual HValue* OperandAt(int index) const { return operands_[index]; } + HValue* arguments() { return OperandAt(0); } + HValue* length() { return OperandAt(1); } + HValue* index() { return OperandAt(2); } DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access_arguments_at") - protected: - virtual void InternalSetOperandAt(int index, HValue* value) { - operands_[index] = value; - } - - virtual bool DataEquals(HValue* other) const { return true; } - - private: - HOperandVector<3> operands_; + virtual bool DataEquals(HValue* other) { return true; } }; @@ -2078,13 +2149,13 @@ class HBoundsCheck: public HBinaryOperation { virtual void Verify(); #endif - HValue* index() const { return left(); } - HValue* length() const { return right(); } + HValue* index() { return left(); } + HValue* length() { return right(); } DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds_check") protected: - virtual bool DataEquals(HValue* other) const { return true; } + virtual bool DataEquals(HValue* other) { return true; } }; @@ -2110,7 +2181,7 @@ class HBitwiseBinaryOperation: public HBinaryOperation { } } - HType CalculateInferredType() const; + virtual HType CalculateInferredType(); DECLARE_INSTRUCTION(BitwiseBinaryOperation) }; @@ -2132,11 +2203,11 @@ class HArithmeticBinaryOperation: public HBinaryOperation { } } - virtual HType CalculateInferredType() const; + virtual HType CalculateInferredType(); virtual Representation RequiredInputRepresentation(int index) const { return representation(); } - virtual Representation InferredRepresentation() const { + virtual Representation InferredRepresentation() { if (left()->representation().Equals(right()->representation())) 
{ return left()->representation(); } @@ -2169,18 +2240,18 @@ class HCompare: public HBinaryOperation { return input_representation_; } Token::Value token() const { return token_; } - virtual void PrintDataTo(StringStream* stream) const; + virtual void PrintDataTo(StringStream* stream); - virtual HType CalculateInferredType() const; + virtual HType CalculateInferredType(); - virtual intptr_t Hashcode() const { + virtual intptr_t Hashcode() { return HValue::Hashcode() * 7 + token_; } DECLARE_CONCRETE_INSTRUCTION(Compare, "compare") protected: - virtual bool DataEquals(HValue* other) const { + virtual bool DataEquals(HValue* other) { HCompare* comp = HCompare::cast(other); return token_ == comp->token(); } @@ -2206,12 +2277,12 @@ class HCompareJSObjectEq: public HBinaryOperation { virtual Representation RequiredInputRepresentation(int index) const { return Representation::Tagged(); } - virtual HType CalculateInferredType() const; + virtual HType CalculateInferredType(); DECLARE_CONCRETE_INSTRUCTION(CompareJSObjectEq, "compare-js-object-eq") protected: - virtual bool DataEquals(HValue* other) const { return true; } + virtual bool DataEquals(HValue* other) { return true; } }; @@ -2229,7 +2300,7 @@ class HUnaryPredicate: public HUnaryOperation { virtual Representation RequiredInputRepresentation(int index) const { return Representation::Tagged(); } - virtual HType CalculateInferredType() const; + virtual HType CalculateInferredType(); }; @@ -2243,7 +2314,7 @@ class HIsNull: public HUnaryPredicate { DECLARE_CONCRETE_INSTRUCTION(IsNull, "is_null") protected: - virtual bool DataEquals(HValue* other) const { + virtual bool DataEquals(HValue* other) { HIsNull* b = HIsNull::cast(other); return is_strict_ == b->is_strict(); } @@ -2260,7 +2331,7 @@ class HIsObject: public HUnaryPredicate { DECLARE_CONCRETE_INSTRUCTION(IsObject, "is_object") protected: - virtual bool DataEquals(HValue* other) const { return true; } + virtual bool DataEquals(HValue* other) { return true; } }; @@ 
-2271,11 +2342,11 @@ class HIsSmi: public HUnaryPredicate { DECLARE_CONCRETE_INSTRUCTION(IsSmi, "is_smi") protected: - virtual bool DataEquals(HValue* other) const { return true; } + virtual bool DataEquals(HValue* other) { return true; } }; -class HIsConstructCall: public HInstruction { +class HIsConstructCall: public HTemplateInstruction<0> { public: HIsConstructCall() { set_representation(Representation::Tagged()); @@ -2286,10 +2357,14 @@ class HIsConstructCall: public HInstruction { return !HasSideEffects() && (uses()->length() <= 1); } + virtual Representation RequiredInputRepresentation(int index) const { + return Representation::None(); + } + DECLARE_CONCRETE_INSTRUCTION(IsConstructCall, "is_construct_call") protected: - virtual bool DataEquals(HValue* other) const { return true; } + virtual bool DataEquals(HValue* other) { return true; } }; @@ -2305,12 +2380,12 @@ class HHasInstanceType: public HUnaryPredicate { InstanceType from() { return from_; } InstanceType to() { return to_; } - virtual void PrintDataTo(StringStream* stream) const; + virtual void PrintDataTo(StringStream* stream); DECLARE_CONCRETE_INSTRUCTION(HasInstanceType, "has_instance_type") protected: - virtual bool DataEquals(HValue* other) const { + virtual bool DataEquals(HValue* other) { HHasInstanceType* b = HHasInstanceType::cast(other); return (from_ == b->from()) && (to_ == b->to()); } @@ -2328,7 +2403,7 @@ class HHasCachedArrayIndex: public HUnaryPredicate { DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndex, "has_cached_array_index") protected: - virtual bool DataEquals(HValue* other) const { return true; } + virtual bool DataEquals(HValue* other) { return true; } }; @@ -2339,7 +2414,7 @@ class HGetCachedArrayIndex: public HUnaryPredicate { DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex, "get_cached_array_index") protected: - virtual bool DataEquals(HValue* other) const { return true; } + virtual bool DataEquals(HValue* other) { return true; } }; @@ -2350,12 +2425,12 @@ class 
HClassOfTest: public HUnaryPredicate { DECLARE_CONCRETE_INSTRUCTION(ClassOfTest, "class_of_test") - virtual void PrintDataTo(StringStream* stream) const; + virtual void PrintDataTo(StringStream* stream); Handle<String> class_name() const { return class_name_; } protected: - virtual bool DataEquals(HValue* other) const { + virtual bool DataEquals(HValue* other) { HClassOfTest* b = HClassOfTest::cast(other); return class_name_.is_identical_to(b->class_name_); } @@ -2371,12 +2446,12 @@ class HTypeofIs: public HUnaryPredicate { : HUnaryPredicate(value), type_literal_(type_literal) { } Handle<String> type_literal() { return type_literal_; } - virtual void PrintDataTo(StringStream* stream) const; + virtual void PrintDataTo(StringStream* stream); DECLARE_CONCRETE_INSTRUCTION(TypeofIs, "typeof_is") protected: - virtual bool DataEquals(HValue* other) const { + virtual bool DataEquals(HValue* other) { HTypeofIs* b = HTypeofIs::cast(other); return type_literal_.is_identical_to(b->type_literal_); } @@ -2386,7 +2461,7 @@ class HTypeofIs: public HUnaryPredicate { }; -class HInstanceOf: public HInstruction { +class HInstanceOf: public HTemplateInstruction<3> { public: HInstanceOf(HValue* context, HValue* left, HValue* right) { SetOperandAt(0, context); @@ -2396,9 +2471,9 @@ class HInstanceOf: public HInstruction { SetAllSideEffects(); } - HValue* context() const { return operands_[0]; } - HValue* left() const { return operands_[1]; } - HValue* right() const { return operands_[2]; } + HValue* context() { return OperandAt(0); } + HValue* left() { return OperandAt(1); } + HValue* right() { return OperandAt(2); } virtual bool EmitAtUses() const { return !HasSideEffects() && (uses()->length() <= 1); @@ -2408,20 +2483,9 @@ class HInstanceOf: public HInstruction { return Representation::Tagged(); } - virtual void PrintDataTo(StringStream* stream) const; - - virtual int OperandCount() const { return 3; } - virtual HValue* OperandAt(int index) const { return operands_[index]; } + virtual 
void PrintDataTo(StringStream* stream); DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance_of") - - protected: - virtual void InternalSetOperandAt(int index, HValue* value) { - operands_[index] = value; - } - - private: - HOperandVector<3> operands_; }; @@ -2462,7 +2526,7 @@ class HPower: public HBinaryOperation { DECLARE_CONCRETE_INSTRUCTION(Power, "power") protected: - virtual bool DataEquals(HValue* other) const { return true; } + virtual bool DataEquals(HValue* other) { return true; } }; @@ -2480,12 +2544,12 @@ class HAdd: public HArithmeticBinaryOperation { virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited); - virtual HType CalculateInferredType() const; + virtual HType CalculateInferredType(); DECLARE_CONCRETE_INSTRUCTION(Add, "add") protected: - virtual bool DataEquals(HValue* other) const { return true; } + virtual bool DataEquals(HValue* other) { return true; } virtual Range* InferRange(); }; @@ -2502,7 +2566,7 @@ class HSub: public HArithmeticBinaryOperation { DECLARE_CONCRETE_INSTRUCTION(Sub, "sub") protected: - virtual bool DataEquals(HValue* other) const { return true; } + virtual bool DataEquals(HValue* other) { return true; } virtual Range* InferRange(); }; @@ -2524,7 +2588,7 @@ class HMul: public HArithmeticBinaryOperation { DECLARE_CONCRETE_INSTRUCTION(Mul, "mul") protected: - virtual bool DataEquals(HValue* other) const { return true; } + virtual bool DataEquals(HValue* other) { return true; } virtual Range* InferRange(); }; @@ -2541,7 +2605,7 @@ class HMod: public HArithmeticBinaryOperation { DECLARE_CONCRETE_INSTRUCTION(Mod, "mod") protected: - virtual bool DataEquals(HValue* other) const { return true; } + virtual bool DataEquals(HValue* other) { return true; } virtual Range* InferRange(); }; @@ -2559,7 +2623,7 @@ class HDiv: public HArithmeticBinaryOperation { DECLARE_CONCRETE_INSTRUCTION(Div, "div") protected: - virtual bool DataEquals(HValue* other) const { return true; } + virtual bool DataEquals(HValue* other) { return 
true; } virtual Range* InferRange(); }; @@ -2571,12 +2635,12 @@ class HBitAnd: public HBitwiseBinaryOperation { : HBitwiseBinaryOperation(left, right) { } virtual bool IsCommutative() const { return true; } - virtual HType CalculateInferredType() const; + virtual HType CalculateInferredType(); DECLARE_CONCRETE_INSTRUCTION(BitAnd, "bit_and") protected: - virtual bool DataEquals(HValue* other) const { return true; } + virtual bool DataEquals(HValue* other) { return true; } virtual Range* InferRange(); }; @@ -2588,12 +2652,12 @@ class HBitXor: public HBitwiseBinaryOperation { : HBitwiseBinaryOperation(left, right) { } virtual bool IsCommutative() const { return true; } - virtual HType CalculateInferredType() const; + virtual HType CalculateInferredType(); DECLARE_CONCRETE_INSTRUCTION(BitXor, "bit_xor") protected: - virtual bool DataEquals(HValue* other) const { return true; } + virtual bool DataEquals(HValue* other) { return true; } }; @@ -2603,12 +2667,12 @@ class HBitOr: public HBitwiseBinaryOperation { : HBitwiseBinaryOperation(left, right) { } virtual bool IsCommutative() const { return true; } - virtual HType CalculateInferredType() const; + virtual HType CalculateInferredType(); DECLARE_CONCRETE_INSTRUCTION(BitOr, "bit_or") protected: - virtual bool DataEquals(HValue* other) const { return true; } + virtual bool DataEquals(HValue* other) { return true; } virtual Range* InferRange(); }; @@ -2620,12 +2684,12 @@ class HShl: public HBitwiseBinaryOperation { : HBitwiseBinaryOperation(left, right) { } virtual Range* InferRange(); - virtual HType CalculateInferredType() const; + virtual HType CalculateInferredType(); DECLARE_CONCRETE_INSTRUCTION(Shl, "shl") protected: - virtual bool DataEquals(HValue* other) const { return true; } + virtual bool DataEquals(HValue* other) { return true; } }; @@ -2634,12 +2698,12 @@ class HShr: public HBitwiseBinaryOperation { HShr(HValue* left, HValue* right) : HBitwiseBinaryOperation(left, right) { } - virtual HType 
CalculateInferredType() const; + virtual HType CalculateInferredType(); DECLARE_CONCRETE_INSTRUCTION(Shr, "shr") protected: - virtual bool DataEquals(HValue* other) const { return true; } + virtual bool DataEquals(HValue* other) { return true; } }; @@ -2649,16 +2713,16 @@ class HSar: public HBitwiseBinaryOperation { : HBitwiseBinaryOperation(left, right) { } virtual Range* InferRange(); - virtual HType CalculateInferredType() const; + virtual HType CalculateInferredType(); DECLARE_CONCRETE_INSTRUCTION(Sar, "sar") protected: - virtual bool DataEquals(HValue* other) const { return true; } + virtual bool DataEquals(HValue* other) { return true; } }; -class HOsrEntry: public HInstruction { +class HOsrEntry: public HTemplateInstruction<0> { public: explicit HOsrEntry(int ast_id) : ast_id_(ast_id) { SetFlag(kChangesOsrEntries); @@ -2666,6 +2730,10 @@ class HOsrEntry: public HInstruction { int ast_id() const { return ast_id_; } + virtual Representation RequiredInputRepresentation(int index) const { + return Representation::None(); + } + DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr_entry") private: @@ -2673,7 +2741,7 @@ class HOsrEntry: public HInstruction { }; -class HParameter: public HInstruction { +class HParameter: public HTemplateInstruction<0> { public: explicit HParameter(unsigned index) : index_(index) { set_representation(Representation::Tagged()); @@ -2681,7 +2749,11 @@ class HParameter: public HInstruction { unsigned index() const { return index_; } - virtual void PrintDataTo(StringStream* stream) const; + virtual void PrintDataTo(StringStream* stream); + + virtual Representation RequiredInputRepresentation(int index) const { + return Representation::None(); + } DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter") @@ -2700,7 +2772,7 @@ class HCallStub: public HUnaryCall { CodeStub::Major major_key() { return major_key_; } - HValue* context() const { return value(); } + HValue* context() { return value(); } void set_transcendental_type(TranscendentalCache::Type 
transcendental_type) { transcendental_type_ = transcendental_type; @@ -2709,7 +2781,11 @@ class HCallStub: public HUnaryCall { return transcendental_type_; } - virtual void PrintDataTo(StringStream* stream) const; + virtual void PrintDataTo(StringStream* stream); + + virtual Representation RequiredInputRepresentation(int index) const { + return Representation::Tagged(); + } DECLARE_CONCRETE_INSTRUCTION(CallStub, "call_stub") @@ -2719,15 +2795,19 @@ class HCallStub: public HUnaryCall { }; -class HUnknownOSRValue: public HInstruction { +class HUnknownOSRValue: public HTemplateInstruction<0> { public: HUnknownOSRValue() { set_representation(Representation::Tagged()); } + virtual Representation RequiredInputRepresentation(int index) const { + return Representation::None(); + } + DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown_osr_value") }; -class HLoadGlobal: public HInstruction { +class HLoadGlobal: public HTemplateInstruction<0> { public: HLoadGlobal(Handle<JSGlobalPropertyCell> cell, bool check_hole_value) : cell_(cell), check_hole_value_(check_hole_value) { @@ -2739,20 +2819,21 @@ class HLoadGlobal: public HInstruction { Handle<JSGlobalPropertyCell> cell() const { return cell_; } bool check_hole_value() const { return check_hole_value_; } - virtual Representation RequiredInputRepresentation(int index) const { - return Representation::Tagged(); - } - virtual void PrintDataTo(StringStream* stream) const; + virtual void PrintDataTo(StringStream* stream); - virtual intptr_t Hashcode() const { + virtual intptr_t Hashcode() { ASSERT(!Heap::allow_allocation(false)); return reinterpret_cast<intptr_t>(*cell_); } + virtual Representation RequiredInputRepresentation(int index) const { + return Representation::None(); + } + DECLARE_CONCRETE_INSTRUCTION(LoadGlobal, "load_global") protected: - virtual bool DataEquals(HValue* other) const { + virtual bool DataEquals(HValue* other) { HLoadGlobal* b = HLoadGlobal::cast(other); return cell_.is_identical_to(b->cell()); } @@ 
-2780,7 +2861,7 @@ class HStoreGlobal: public HUnaryOperation { virtual Representation RequiredInputRepresentation(int index) const { return Representation::Tagged(); } - virtual void PrintDataTo(StringStream* stream) const; + virtual void PrintDataTo(StringStream* stream); DECLARE_CONCRETE_INSTRUCTION(StoreGlobal, "store_global") @@ -2805,12 +2886,12 @@ class HLoadContextSlot: public HUnaryOperation { return Representation::Tagged(); } - virtual void PrintDataTo(StringStream* stream) const; + virtual void PrintDataTo(StringStream* stream); DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load_context_slot") protected: - virtual bool DataEquals(HValue* other) const { + virtual bool DataEquals(HValue* other) { HLoadContextSlot* b = HLoadContextSlot::cast(other); return (slot_index() == b->slot_index()); } @@ -2833,11 +2914,11 @@ class HStoreContextSlot: public HBinaryOperation { SetFlag(kChangesContextSlots); } - HValue* context() const { return OperandAt(0); } - HValue* value() const { return OperandAt(1); } + HValue* context() { return OperandAt(0); } + HValue* value() { return OperandAt(1); } int slot_index() const { return slot_index_; } - bool NeedsWriteBarrier() const { + bool NeedsWriteBarrier() { return StoringValueNeedsWriteBarrier(value()); } @@ -2845,7 +2926,7 @@ class HStoreContextSlot: public HBinaryOperation { return Representation::Tagged(); } - virtual void PrintDataTo(StringStream* stream) const; + virtual void PrintDataTo(StringStream* stream); DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store_context_slot") @@ -2869,19 +2950,19 @@ class HLoadNamedField: public HUnaryOperation { } } - HValue* object() const { return OperandAt(0); } + HValue* object() { return OperandAt(0); } bool is_in_object() const { return is_in_object_; } int offset() const { return offset_; } virtual Representation RequiredInputRepresentation(int index) const { return Representation::Tagged(); } - virtual void PrintDataTo(StringStream* stream) const; + virtual void 
PrintDataTo(StringStream* stream); DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load_named_field") protected: - virtual bool DataEquals(HValue* other) const { + virtual bool DataEquals(HValue* other) { HLoadNamedField* b = HLoadNamedField::cast(other); return is_in_object_ == b->is_in_object_ && offset_ == b->offset_; } @@ -2900,8 +2981,8 @@ class HLoadNamedGeneric: public HBinaryOperation { SetAllSideEffects(); } - HValue* context() const { return OperandAt(0); } - HValue* object() const { return OperandAt(1); } + HValue* context() { return OperandAt(0); } + HValue* object() { return OperandAt(1); } Handle<Object> name() const { return name_; } virtual Representation RequiredInputRepresentation(int index) const { @@ -2924,7 +3005,7 @@ class HLoadFunctionPrototype: public HUnaryOperation { SetFlag(kDependsOnCalls); } - HValue* function() const { return OperandAt(0); } + HValue* function() { return OperandAt(0); } virtual Representation RequiredInputRepresentation(int index) const { return Representation::Tagged(); @@ -2933,46 +3014,34 @@ class HLoadFunctionPrototype: public HUnaryOperation { DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype, "load_function_prototype") protected: - virtual bool DataEquals(HValue* other) const { return true; } + virtual bool DataEquals(HValue* other) { return true; } }; -class HLoadKeyed: public HBinaryOperation { +class HLoadKeyedFastElement: public HBinaryOperation { public: - HLoadKeyed(HValue* obj, HValue* key) : HBinaryOperation(obj, key) { + HLoadKeyedFastElement(HValue* obj, HValue* key) : HBinaryOperation(obj, key) { set_representation(Representation::Tagged()); - } - - virtual void PrintDataTo(StringStream* stream) const; - - virtual Representation RequiredInputRepresentation(int index) const { - return Representation::Tagged(); - } - HValue* object() const { return OperandAt(0); } - HValue* key() const { return OperandAt(1); } - - DECLARE_INSTRUCTION(LoadKeyed) -}; - - -class HLoadKeyedFastElement: public HLoadKeyed { - 
public: - HLoadKeyedFastElement(HValue* obj, HValue* key) : HLoadKeyed(obj, key) { SetFlag(kDependsOnArrayElements); SetFlag(kUseGVN); } + HValue* object() { return OperandAt(0); } + HValue* key() { return OperandAt(1); } + virtual Representation RequiredInputRepresentation(int index) const { // The key is supposed to be Integer32. return (index == 1) ? Representation::Integer32() : Representation::Tagged(); } + virtual void PrintDataTo(StringStream* stream); + DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastElement, "load_keyed_fast_element") protected: - virtual bool DataEquals(HValue* other) const { return true; } + virtual bool DataEquals(HValue* other) { return true; } }; @@ -2987,7 +3056,7 @@ class HLoadPixelArrayElement: public HBinaryOperation { SetFlag(kUseGVN); } - virtual void PrintDataTo(StringStream* stream) const; + virtual void PrintDataTo(StringStream* stream); virtual Representation RequiredInputRepresentation(int index) const { // The key is supposed to be Integer32, but the base pointer @@ -2996,76 +3065,50 @@ class HLoadPixelArrayElement: public HBinaryOperation { : Representation::External(); } - HValue* external_pointer() const { return OperandAt(0); } - HValue* key() const { return OperandAt(1); } + HValue* external_pointer() { return OperandAt(0); } + HValue* key() { return OperandAt(1); } DECLARE_CONCRETE_INSTRUCTION(LoadPixelArrayElement, "load_pixel_array_element") protected: - virtual bool DataEquals(HValue* other) const { return true; } + virtual bool DataEquals(HValue* other) { return true; } }; -class HLoadKeyedGeneric: public HLoadKeyed { +class HLoadKeyedGeneric: public HTemplateInstruction<3> { public: - HLoadKeyedGeneric(HContext* context, HValue* obj, HValue* key) - : HLoadKeyed(obj, key), context_(NULL) { + HLoadKeyedGeneric(HContext* context, HValue* obj, HValue* key) { + set_representation(Representation::Tagged()); + SetOperandAt(0, obj); + SetOperandAt(1, key); SetOperandAt(2, context); SetAllSideEffects(); } - HValue* context() 
const { return context_; } - HValue* object() const { return operands_[0]; } - HValue* key() const { return operands_[1]; } - - virtual int OperandCount() const { return 3; } - virtual HValue* OperandAt(int index) const { - return (index < 2) ? operands_[index] : context_; - } - - DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load_keyed_generic") - - protected: - virtual void InternalSetOperandAt(int index, HValue* value); - - private: - HValue* context_; -}; - + HValue* object() { return OperandAt(0); } + HValue* key() { return OperandAt(1); } + HValue* context() { return OperandAt(2); } -class HStoreNamed: public HBinaryOperation { - public: - HStoreNamed(HValue* obj, Handle<String> name, HValue* val) - : HBinaryOperation(obj, val), name_(name) { - } + virtual void PrintDataTo(StringStream* stream); virtual Representation RequiredInputRepresentation(int index) const { return Representation::Tagged(); } - virtual void PrintDataTo(StringStream* stream) const; - - HValue* object() const { return OperandAt(0); } - Handle<String> name() const { return name_; } - HValue* value() const { return OperandAt(1); } - void set_value(HValue* value) { SetOperandAt(1, value); } - - DECLARE_INSTRUCTION(StoreNamed) - - private: - Handle<String> name_; + DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load_keyed_generic") }; -class HStoreNamedField: public HStoreNamed { +class HStoreNamedField: public HBinaryOperation { public: HStoreNamedField(HValue* obj, Handle<String> name, HValue* val, bool in_object, int offset) - : HStoreNamed(obj, name, val), + : HBinaryOperation(obj, val), + name_(name), is_in_object_(in_object), offset_(offset) { if (is_in_object_) { @@ -3080,137 +3123,143 @@ class HStoreNamedField: public HStoreNamed { virtual Representation RequiredInputRepresentation(int index) const { return Representation::Tagged(); } - virtual void PrintDataTo(StringStream* stream) const; + virtual void PrintDataTo(StringStream* stream); + + HValue* object() { return OperandAt(0); 
} + HValue* value() { return OperandAt(1); } + Handle<String> name() const { return name_; } bool is_in_object() const { return is_in_object_; } int offset() const { return offset_; } Handle<Map> transition() const { return transition_; } void set_transition(Handle<Map> map) { transition_ = map; } - bool NeedsWriteBarrier() const { + bool NeedsWriteBarrier() { return StoringValueNeedsWriteBarrier(value()); } private: + Handle<String> name_; bool is_in_object_; int offset_; Handle<Map> transition_; }; -class HStoreNamedGeneric: public HStoreNamed { +class HStoreNamedGeneric: public HTemplateInstruction<3> { public: HStoreNamedGeneric(HValue* context, HValue* object, Handle<String> name, HValue* value) - : HStoreNamed(object, name, value), context_(NULL) { + : name_(name) { + SetOperandAt(0, object); + SetOperandAt(1, value); SetOperandAt(2, context); SetAllSideEffects(); } - HValue* context() const { return context_; } - HValue* object() const { return operands_[0]; } - HValue* value() const { return operands_[1]; } + HValue* object() { return OperandAt(0); } + HValue* value() { return OperandAt(1); } + HValue* context() { return OperandAt(2); } + Handle<String> name() { return name_; } - virtual int OperandCount() const { return 3; } + virtual void PrintDataTo(StringStream* stream); - virtual HValue* OperandAt(int index) const { - return (index < 2) ? 
operands_[index] : context_; + virtual Representation RequiredInputRepresentation(int index) const { + return Representation::Tagged(); } DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store_named_generic") - protected: - virtual void InternalSetOperandAt(int index, HValue* value); - private: - HValue* context_; + Handle<String> name_; }; -class HStoreKeyed: public HInstruction { +class HStoreKeyedFastElement: public HTemplateInstruction<3> { public: - HStoreKeyed(HValue* obj, HValue* key, HValue* val) { + HStoreKeyedFastElement(HValue* obj, HValue* key, HValue* val) { SetOperandAt(0, obj); SetOperandAt(1, key); SetOperandAt(2, val); + SetFlag(kChangesArrayElements); } - virtual void PrintDataTo(StringStream* stream) const; - virtual int OperandCount() const { return operands_.length(); } - virtual HValue* OperandAt(int index) const { return operands_[index]; } - virtual Representation RequiredInputRepresentation(int index) const { - return Representation::Tagged(); + // The key is supposed to be Integer32. + return (index == 1) ? 
Representation::Integer32() + : Representation::Tagged(); } - HValue* object() const { return OperandAt(0); } - HValue* key() const { return OperandAt(1); } - HValue* value() const { return OperandAt(2); } + HValue* object() { return OperandAt(0); } + HValue* key() { return OperandAt(1); } + HValue* value() { return OperandAt(2); } - bool NeedsWriteBarrier() const { + bool NeedsWriteBarrier() { return StoringValueNeedsWriteBarrier(value()); } - DECLARE_INSTRUCTION(StoreKeyed) - - protected: - virtual void InternalSetOperandAt(int index, HValue* value) { - operands_[index] = value; - } + virtual void PrintDataTo(StringStream* stream); - HOperandVector<3> operands_; + DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement, + "store_keyed_fast_element") }; -class HStoreKeyedFastElement: public HStoreKeyed { +class HStorePixelArrayElement: public HTemplateInstruction<3> { public: - HStoreKeyedFastElement(HValue* obj, HValue* key, HValue* val) - : HStoreKeyed(obj, key, val) { - SetFlag(kChangesArrayElements); + HStorePixelArrayElement(HValue* external_elements, HValue* key, HValue* val) { + SetFlag(kChangesPixelArrayElements); + SetOperandAt(0, external_elements); + SetOperandAt(1, key); + SetOperandAt(2, val); } + virtual void PrintDataTo(StringStream* stream); + virtual Representation RequiredInputRepresentation(int index) const { - // The key is supposed to be Integer32. - return (index == 1) ? 
Representation::Integer32() - : Representation::Tagged(); + if (index == 0) { + return Representation::External(); + } else { + return Representation::Integer32(); + } } - DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement, - "store_keyed_fast_element") + HValue* external_pointer() { return OperandAt(0); } + HValue* key() { return OperandAt(1); } + HValue* value() { return OperandAt(2); } + + DECLARE_CONCRETE_INSTRUCTION(StorePixelArrayElement, + "store_pixel_array_element") }; -class HStoreKeyedGeneric: public HStoreKeyed { +class HStoreKeyedGeneric: public HTemplateInstruction<4> { public: HStoreKeyedGeneric(HValue* context, HValue* object, HValue* key, - HValue* value) - : HStoreKeyed(object, key, value), context_(NULL) { + HValue* value) { + SetOperandAt(0, object); + SetOperandAt(1, key); + SetOperandAt(2, value); SetOperandAt(3, context); SetAllSideEffects(); } - HValue* context() const { return context_; } - HValue* object() const { return operands_[0]; } - HValue* key() const { return operands_[1]; } - HValue* value() const { return operands_[2]; } + HValue* object() { return OperandAt(0); } + HValue* key() { return OperandAt(1); } + HValue* value() { return OperandAt(2); } + HValue* context() { return OperandAt(3); } - virtual int OperandCount() const { return 4; } - - virtual HValue* OperandAt(int index) const { - return (index < 3) ? 
operands_[index] : context_; + virtual Representation RequiredInputRepresentation(int index) const { + return Representation::Tagged(); } - DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store_keyed_generic") - - protected: - virtual void InternalSetOperandAt(int index, HValue* value); + virtual void PrintDataTo(StringStream* stream); - private: - HValue* context_; + DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store_keyed_generic") }; @@ -3228,13 +3277,13 @@ class HStringCharCodeAt: public HBinaryOperation { : Representation::Tagged(); } - HValue* string() const { return OperandAt(0); } - HValue* index() const { return OperandAt(1); } + HValue* string() { return OperandAt(0); } + HValue* index() { return OperandAt(1); } DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string_char_code_at") protected: - virtual bool DataEquals(HValue* other) const { return true; } + virtual bool DataEquals(HValue* other) { return true; } virtual Range* InferRange() { return new Range(0, String::kMaxUC16CharCode); @@ -3253,7 +3302,7 @@ class HStringLength: public HUnaryOperation { return Representation::Tagged(); } - virtual HType CalculateInferredType() const { + virtual HType CalculateInferredType() { STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue); return HType::Smi(); } @@ -3261,7 +3310,7 @@ class HStringLength: public HUnaryOperation { DECLARE_CONCRETE_INSTRUCTION(StringLength, "string_length") protected: - virtual bool DataEquals(HValue* other) const { return true; } + virtual bool DataEquals(HValue* other) { return true; } virtual Range* InferRange() { return new Range(0, String::kMaxLength); @@ -3269,31 +3318,30 @@ class HStringLength: public HUnaryOperation { }; -class HMaterializedLiteral: public HInstruction { +template <int V> +class HMaterializedLiteral: public HTemplateInstruction<V> { public: - HMaterializedLiteral(int index, int depth) + HMaterializedLiteral<V>(int index, int depth) : literal_index_(index), depth_(depth) { - 
set_representation(Representation::Tagged()); + this->set_representation(Representation::Tagged()); } int literal_index() const { return literal_index_; } int depth() const { return depth_; } - DECLARE_INSTRUCTION(MaterializedLiteral) - private: int literal_index_; int depth_; }; -class HArrayLiteral: public HMaterializedLiteral { +class HArrayLiteral: public HMaterializedLiteral<0> { public: HArrayLiteral(Handle<FixedArray> constant_elements, int length, int literal_index, int depth) - : HMaterializedLiteral(literal_index, depth), + : HMaterializedLiteral<0>(literal_index, depth), length_(length), constant_elements_(constant_elements) {} @@ -3302,6 +3350,10 @@ class HArrayLiteral: public HMaterializedLiteral { bool IsCopyOnWrite() const; + virtual Representation RequiredInputRepresentation(int index) const { + return Representation::None(); + } + DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral, "array_literal") private: @@ -3310,55 +3362,53 @@ class HArrayLiteral: public HMaterializedLiteral { }; -class HObjectLiteral: public HMaterializedLiteral { +class HObjectLiteral: public HMaterializedLiteral<1> { public: HObjectLiteral(HValue* context, Handle<FixedArray> constant_properties, bool fast_elements, int literal_index, int depth) - : HMaterializedLiteral(literal_index, depth), - context_(NULL), + : HMaterializedLiteral<1>(literal_index, depth), constant_properties_(constant_properties), fast_elements_(fast_elements) { SetOperandAt(0, context); } - HValue* context() const { return context_; } + HValue* context() { return OperandAt(0); } Handle<FixedArray> constant_properties() const { return constant_properties_; } bool fast_elements() const { return fast_elements_; } - virtual int OperandCount() const { return 1; } - virtual HValue* OperandAt(int index) const { return context_; } + virtual Representation RequiredInputRepresentation(int index) const { + return Representation::Tagged(); + } DECLARE_CONCRETE_INSTRUCTION(ObjectLiteral, "object_literal") - protected: - 
virtual void InternalSetOperandAt(int index, HValue* value) { - context_ = value; - } - private: - HValue* context_; Handle<FixedArray> constant_properties_; bool fast_elements_; }; -class HRegExpLiteral: public HMaterializedLiteral { +class HRegExpLiteral: public HMaterializedLiteral<0> { public: HRegExpLiteral(Handle<String> pattern, Handle<String> flags, int literal_index) - : HMaterializedLiteral(literal_index, 0), + : HMaterializedLiteral<0>(literal_index, 0), pattern_(pattern), flags_(flags) { } Handle<String> pattern() { return pattern_; } Handle<String> flags() { return flags_; } + virtual Representation RequiredInputRepresentation(int index) const { + return Representation::None(); + } + DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp_literal") private: @@ -3367,13 +3417,17 @@ class HRegExpLiteral: public HMaterializedLiteral { }; -class HFunctionLiteral: public HInstruction { +class HFunctionLiteral: public HTemplateInstruction<0> { public: HFunctionLiteral(Handle<SharedFunctionInfo> shared, bool pretenure) : shared_info_(shared), pretenure_(pretenure) { set_representation(Representation::Tagged()); } + virtual Representation RequiredInputRepresentation(int index) const { + return Representation::None(); + } + DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral, "function_literal") Handle<SharedFunctionInfo> shared_info() const { return shared_info_; } @@ -3405,6 +3459,10 @@ class HValueOf: public HUnaryOperation { set_representation(Representation::Tagged()); } + virtual Representation RequiredInputRepresentation(int index) const { + return Representation::Tagged(); + } + DECLARE_CONCRETE_INSTRUCTION(ValueOf, "value_of") }; @@ -3423,8 +3481,8 @@ class HDeleteProperty: public HBinaryOperation { DECLARE_CONCRETE_INSTRUCTION(DeleteProperty, "delete_property") - HValue* object() const { return left(); } - HValue* key() const { return right(); } + HValue* object() { return left(); } + HValue* key() { return right(); } }; #undef DECLARE_INSTRUCTION diff --git 
a/deps/v8/src/hydrogen.cc b/deps/v8/src/hydrogen.cc index 3ebd580adc..9e40a50c71 100644 --- a/deps/v8/src/hydrogen.cc +++ b/deps/v8/src/hydrogen.cc @@ -482,84 +482,30 @@ HConstant* HGraph::GetConstantFalse() { } -void HSubgraph::AppendOptional(HSubgraph* graph, - bool on_true_branch, - HValue* value) { - ASSERT(HasExit() && graph->HasExit()); - HBasicBlock* other_block = graph_->CreateBasicBlock(); - HBasicBlock* join_block = graph_->CreateBasicBlock(); - - HTest* test = on_true_branch - ? new HTest(value, graph->entry_block(), other_block) - : new HTest(value, other_block, graph->entry_block()); - exit_block_->Finish(test); - other_block->Goto(join_block); - graph->exit_block()->Goto(join_block); - exit_block_ = join_block; -} - - -void HSubgraph::AppendJoin(HSubgraph* then_graph, - HSubgraph* else_graph, - AstNode* node) { - if (then_graph->HasExit() && else_graph->HasExit()) { - // We need to merge, create new merge block. +void HSubgraph::AppendJoin(HBasicBlock* first, + HBasicBlock* second, + int join_id) { + if (first == NULL) { + exit_block_ = second; + } else if (second == NULL) { + exit_block_ = first; + } else { HBasicBlock* join_block = graph_->CreateBasicBlock(); - then_graph->exit_block()->Goto(join_block); - else_graph->exit_block()->Goto(join_block); - join_block->SetJoinId(node->id()); + first->Goto(join_block); + second->Goto(join_block); + join_block->SetJoinId(join_id); exit_block_ = join_block; - } else if (then_graph->HasExit()) { - exit_block_ = then_graph->exit_block_; - } else if (else_graph->HasExit()) { - exit_block_ = else_graph->exit_block_; - } else { - exit_block_ = NULL; } } -void HSubgraph::ResolveContinue(IterationStatement* statement) { - HBasicBlock* continue_block = BundleContinue(statement); +void HSubgraph::ResolveContinue(IterationStatement* statement, + HBasicBlock* continue_block) { if (continue_block != NULL) { - exit_block_ = JoinBlocks(exit_block(), - continue_block, - statement->ContinueId()); - } -} - - -HBasicBlock* 
HSubgraph::BundleBreak(BreakableStatement* statement) { - return BundleBreakContinue(statement, false, statement->ExitId()); -} - - -HBasicBlock* HSubgraph::BundleContinue(IterationStatement* statement) { - return BundleBreakContinue(statement, true, statement->ContinueId()); -} - - -HBasicBlock* HSubgraph::BundleBreakContinue(BreakableStatement* statement, - bool is_continue, - int join_id) { - HBasicBlock* result = NULL; - const ZoneList<BreakContinueInfo*>* infos = break_continue_info(); - for (int i = 0; i < infos->length(); ++i) { - BreakContinueInfo* info = infos->at(i); - if (info->is_continue() == is_continue && - info->target() == statement && - !info->IsResolved()) { - if (result == NULL) { - result = graph_->CreateBasicBlock(); - } - info->block()->Goto(result); - info->Resolve(); - } + continue_block->SetJoinId(statement->ContinueId()); } - - if (result != NULL) result->SetJoinId(join_id); - - return result; + exit_block_ = + JoinBlocks(exit_block(), continue_block, statement->ContinueId()); } @@ -574,83 +520,93 @@ HBasicBlock* HSubgraph::JoinBlocks(HBasicBlock* a, HBasicBlock* b, int id) { } -void HSubgraph::AppendEndless(HSubgraph* body, IterationStatement* statement) { - ConnectExitTo(body->entry_block()); - body->ResolveContinue(statement); - body->ConnectExitTo(body->entry_block(), true); - exit_block_ = body->BundleBreak(statement); - body->entry_block()->PostProcessLoopHeader(statement); +void HSubgraph::AppendEndless(IterationStatement* statement, + HBasicBlock* body_entry, + HBasicBlock* body_exit, + HBasicBlock* break_block) { + if (exit_block() != NULL) { + exit_block()->Goto(body_entry, false); + } + if (body_exit != NULL) { + body_exit->Goto(body_entry, true); + } + if (break_block != NULL) break_block->SetJoinId(statement->ExitId()); + exit_block_ = break_block; + body_entry->PostProcessLoopHeader(statement); } -void HSubgraph::AppendDoWhile(HSubgraph* body, - IterationStatement* statement, - HSubgraph* go_back, - HSubgraph* exit) { - 
ConnectExitTo(body->entry_block()); - go_back->ConnectExitTo(body->entry_block(), true); - - HBasicBlock* break_block = body->BundleBreak(statement); +void HSubgraph::AppendDoWhile(IterationStatement* statement, + HBasicBlock* body_entry, + HBasicBlock* go_back, + HBasicBlock* exit_block, + HBasicBlock* break_block) { + if (this->exit_block() != NULL) { + this->exit_block()->Goto(body_entry, false); + } + if (go_back != NULL) { + go_back->Goto(body_entry, true); + } + if (break_block != NULL) break_block->SetJoinId(statement->ExitId()); exit_block_ = - JoinBlocks(exit->exit_block(), break_block, statement->ExitId()); - body->entry_block()->PostProcessLoopHeader(statement); + JoinBlocks(exit_block, break_block, statement->ExitId()); + body_entry->PostProcessLoopHeader(statement); } -void HSubgraph::AppendWhile(HSubgraph* condition, - HSubgraph* body, - IterationStatement* statement, - HSubgraph* continue_subgraph, - HSubgraph* exit) { - ConnectExitTo(condition->entry_block()); +void HSubgraph::AppendWhile(IterationStatement* statement, + HBasicBlock* condition_entry, + HBasicBlock* exit_block, + HBasicBlock* body_exit, + HBasicBlock* break_block, + HBasicBlock* loop_entry, + HBasicBlock* loop_exit) { + if (this->exit_block() != NULL) { + this->exit_block()->Goto(condition_entry, false); + } - HBasicBlock* break_block = body->BundleBreak(statement); + if (break_block != NULL) break_block->SetJoinId(statement->ExitId()); exit_block_ = - JoinBlocks(exit->exit_block(), break_block, statement->ExitId()); - - if (continue_subgraph != NULL) { - body->ConnectExitTo(continue_subgraph->entry_block(), true); - continue_subgraph->entry_block()->SetJoinId(statement->EntryId()); - exit_block_ = JoinBlocks(exit_block_, - continue_subgraph->exit_block(), - statement->ExitId()); + JoinBlocks(exit_block, break_block, statement->ExitId()); + + if (loop_entry != NULL) { + if (body_exit != NULL) { + body_exit->Goto(loop_entry, true); + } + loop_entry->SetJoinId(statement->EntryId()); + 
exit_block_ = JoinBlocks(exit_block_, loop_exit, statement->ExitId()); } else { - body->ConnectExitTo(condition->entry_block(), true); + if (body_exit != NULL) { + body_exit->Goto(condition_entry, true); + } } - condition->entry_block()->PostProcessLoopHeader(statement); + condition_entry->PostProcessLoopHeader(statement); } -void HSubgraph::Append(HSubgraph* next, BreakableStatement* stmt) { - exit_block_->Goto(next->entry_block()); - exit_block_ = next->exit_block_; +void HSubgraph::Append(BreakableStatement* stmt, + HBasicBlock* entry_block, + HBasicBlock* exit_block, + HBasicBlock* break_block) { + exit_block_->Goto(entry_block); + exit_block_ = exit_block; if (stmt != NULL) { - next->entry_block()->SetJoinId(stmt->EntryId()); - HBasicBlock* break_block = next->BundleBreak(stmt); - exit_block_ = JoinBlocks(exit_block(), break_block, stmt->ExitId()); + entry_block->SetJoinId(stmt->EntryId()); + if (break_block != NULL) break_block->SetJoinId(stmt->EntryId()); + exit_block_ = JoinBlocks(exit_block, break_block, stmt->ExitId()); } } void HSubgraph::FinishExit(HControlInstruction* instruction) { - ASSERT(HasExit()); + ASSERT(exit_block() != NULL); exit_block_->Finish(instruction); exit_block_->ClearEnvironment(); exit_block_ = NULL; } -void HSubgraph::FinishBreakContinue(BreakableStatement* target, - bool is_continue) { - ASSERT(!exit_block_->IsFinished()); - BreakContinueInfo* info = new BreakContinueInfo(target, exit_block_, - is_continue); - break_continue_info_.Add(info); - exit_block_ = NULL; -} - - HGraph::HGraph(CompilationInfo* info) : HSubgraph(this), next_block_id_(0), @@ -1992,14 +1948,14 @@ AstContext::~AstContext() { EffectContext::~EffectContext() { ASSERT(owner()->HasStackOverflow() || - !owner()->subgraph()->HasExit() || + owner()->current_block() == NULL || owner()->environment()->length() == original_length_); } ValueContext::~ValueContext() { ASSERT(owner()->HasStackOverflow() || - !owner()->subgraph()->HasExit() || + owner()->current_block() == 
NULL || owner()->environment()->length() == original_length_ + 1); } @@ -2057,7 +2013,7 @@ void TestContext::BuildBranch(HValue* value) { HBasicBlock* empty_true = builder->graph()->CreateBasicBlock(); HBasicBlock* empty_false = builder->graph()->CreateBasicBlock(); HTest* test = new HTest(value, empty_true, empty_false); - builder->CurrentBlock()->Finish(test); + builder->current_block()->Finish(test); HValue* const no_return_value = NULL; HBasicBlock* true_target = if_true(); @@ -2073,7 +2029,7 @@ void TestContext::BuildBranch(HValue* value) { } else { empty_false->Goto(false_target); } - builder->subgraph()->set_exit_block(NULL); + builder->set_current_block(NULL); } @@ -2130,7 +2086,6 @@ class HGraphBuilder::SubgraphScope BASE_EMBEDDED { } ~SubgraphScope() { - old_subgraph_->AddBreakContinueInfo(subgraph_); builder_->current_subgraph_ = old_subgraph_; } @@ -2173,14 +2128,22 @@ void HGraphBuilder::VisitForControl(Expression* expr, void HGraphBuilder::VisitArgument(Expression* expr) { - VisitForValue(expr); + VISIT_FOR_VALUE(expr); + Push(AddInstruction(new HPushArgument(Pop()))); } void HGraphBuilder::VisitArgumentList(ZoneList<Expression*>* arguments) { for (int i = 0; i < arguments->length(); i++) { VisitArgument(arguments->at(i)); - if (HasStackOverflow() || !current_subgraph_->HasExit()) return; + if (HasStackOverflow() || current_block() == NULL) return; + } +} + + +void HGraphBuilder::VisitExpressions(ZoneList<Expression*>* exprs) { + for (int i = 0; i < exprs->length(); ++i) { + VISIT_FOR_VALUE(exprs->at(i)); } } @@ -2204,10 +2167,13 @@ HGraph* HGraphBuilder::CreateGraph(CompilationInfo* info) { HSubgraph* body = CreateGotoSubgraph(environment()); AddToSubgraph(body, stmts); if (HasStackOverflow()) return NULL; - current_subgraph_->Append(body, NULL); + current_subgraph_->Append(NULL, + body->entry_block(), + body->exit_block(), + NULL); body->entry_block()->SetJoinId(info->function()->id()); - if (graph_->HasExit()) { + if (graph()->exit_block() != NULL) 
{ graph_->FinishExit(new HReturn(graph_->GetConstantUndefined())); } } @@ -2268,21 +2234,21 @@ void HGraphBuilder::AddToSubgraph(HSubgraph* graph, HInstruction* HGraphBuilder::AddInstruction(HInstruction* instr) { - ASSERT(current_subgraph_->HasExit()); - current_subgraph_->exit_block()->AddInstruction(instr); + ASSERT(current_block() != NULL); + current_block()->AddInstruction(instr); return instr; } void HGraphBuilder::AddSimulate(int id) { - ASSERT(current_subgraph_->HasExit()); - current_subgraph_->exit_block()->AddSimulate(id); + ASSERT(current_block() != NULL); + current_block()->AddSimulate(id); } void HGraphBuilder::AddPhi(HPhi* instr) { - ASSERT(current_subgraph_->HasExit()); - current_subgraph_->exit_block()->AddPhi(instr); + ASSERT(current_block() != NULL); + current_block()->AddPhi(instr); } @@ -2292,7 +2258,8 @@ void HGraphBuilder::PushAndAdd(HInstruction* instr) { } -void HGraphBuilder::PreProcessCall(HCall* call) { +template <int V> +HInstruction* HGraphBuilder::PreProcessCall(HCall<V>* call) { int count = call->argument_count(); ZoneList<HValue*> arguments(count); for (int i = 0; i < count; ++i) { @@ -2302,6 +2269,7 @@ void HGraphBuilder::PreProcessCall(HCall* call) { while (!arguments.is_empty()) { AddInstruction(new HPushArgument(arguments.RemoveLast())); } + return call; } @@ -2309,9 +2277,6 @@ void HGraphBuilder::SetupScope(Scope* scope) { // We don't yet handle the function name for named function expressions. if (scope->function() != NULL) BAILOUT("named function expression"); - // We can't handle heap-allocated locals. - if (scope->num_heap_slots() > 0) BAILOUT("heap allocated locals"); - HConstant* undefined_constant = new HConstant(Factory::undefined_value(), Representation::Tagged()); AddInstruction(undefined_constant); @@ -2333,6 +2298,10 @@ void HGraphBuilder::SetupScope(Scope* scope) { // Handle the arguments and arguments shadow variables specially (they do // not have declarations). 
if (scope->arguments() != NULL) { + if (!scope->arguments()->IsStackAllocated() || + !scope->arguments_shadow()->IsStackAllocated()) { + BAILOUT("context-allocated arguments"); + } HArgumentsObject* object = new HArgumentsObject; AddInstruction(object); graph()->SetArgumentsObject(object); @@ -2345,7 +2314,7 @@ void HGraphBuilder::SetupScope(Scope* scope) { void HGraphBuilder::VisitStatements(ZoneList<Statement*>* statements) { for (int i = 0; i < statements->length(); i++) { Visit(statements->at(i)); - if (HasStackOverflow() || !current_subgraph_->HasExit()) break; + if (HasStackOverflow() || current_block() == NULL) break; } } @@ -2406,8 +2375,14 @@ HSubgraph* HGraphBuilder::CreateLoopHeaderSubgraph(HEnvironment* env) { void HGraphBuilder::VisitBlock(Block* stmt) { if (stmt->labels() != NULL) { HSubgraph* block_graph = CreateGotoSubgraph(environment()); - ADD_TO_SUBGRAPH(block_graph, stmt->statements()); - current_subgraph_->Append(block_graph, stmt); + BreakAndContinueInfo break_info(stmt); + { BreakAndContinueScope push(&break_info, this); + ADD_TO_SUBGRAPH(block_graph, stmt->statements()); + } + subgraph()->Append(stmt, + block_graph->entry_block(), + block_graph->exit_block(), + break_info.break_block()); } else { VisitStatements(stmt->statements()); } @@ -2443,18 +2418,55 @@ void HGraphBuilder::VisitIfStatement(IfStatement* stmt) { else_graph->entry_block()->SetJoinId(stmt->ElseId()); ADD_TO_SUBGRAPH(else_graph, stmt->else_statement()); - current_subgraph_->AppendJoin(then_graph, else_graph, stmt); + current_subgraph_->AppendJoin(then_graph->exit_block(), + else_graph->exit_block(), + stmt->id()); + } +} + + +HBasicBlock* HGraphBuilder::BreakAndContinueScope::Get( + BreakableStatement* stmt, + BreakType type) { + BreakAndContinueScope* current = this; + while (current != NULL && current->info()->target() != stmt) { + current = current->next(); + } + ASSERT(current != NULL); // Always found (unless stack is malformed). 
+ HBasicBlock* block = NULL; + switch (type) { + case BREAK: + block = current->info()->break_block(); + if (block == NULL) { + block = current->owner()->graph()->CreateBasicBlock(); + current->info()->set_break_block(block); + } + break; + + case CONTINUE: + block = current->info()->continue_block(); + if (block == NULL) { + block = current->owner()->graph()->CreateBasicBlock(); + current->info()->set_continue_block(block); + } + break; } + + return block; } void HGraphBuilder::VisitContinueStatement(ContinueStatement* stmt) { - current_subgraph_->FinishBreakContinue(stmt->target(), true); + HBasicBlock* continue_block = break_scope()->Get(stmt->target(), CONTINUE); + current_block()->Goto(continue_block); + set_current_block(NULL); } void HGraphBuilder::VisitBreakStatement(BreakStatement* stmt) { - current_subgraph_->FinishBreakContinue(stmt->target(), false); + HBasicBlock* break_block = break_scope()->Get(stmt->target(), BREAK); + current_block()->Goto(break_block); + set_current_block(NULL); } @@ -2483,9 +2495,9 @@ void HGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) { VISIT_FOR_VALUE(stmt->expression()); return_value = environment()->Pop(); } - subgraph()->exit_block()->AddLeaveInlined(return_value, + current_block()->AddLeaveInlined(return_value, function_return_); - subgraph()->set_exit_block(NULL); + set_current_block(NULL); } } } @@ -2506,7 +2518,7 @@ HCompare* HGraphBuilder::BuildSwitchCompare(HSubgraph* subgraph, CaseClause* clause) { AddToSubgraph(subgraph, clause->label()); if (HasStackOverflow()) return NULL; - HValue* clause_value = subgraph->environment()->Pop(); + HValue* clause_value = subgraph->exit_block()->last_environment()->Pop(); HCompare* compare = new HCompare(switch_value, clause_value, Token::EQ_STRICT); @@ -2587,7 +2599,7 @@ void HGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) { // last_false_block is the (empty) false-block of the last comparison. 
If // there are no comparisons at all (a single default clause), it is just // the last block of the current subgraph. - HBasicBlock* last_false_block = current_subgraph_->exit_block(); + HBasicBlock* last_false_block = current_block(); if (prev_graph != current_subgraph_) { last_false_block = graph()->CreateBasicBlock(); HBasicBlock* empty = graph()->CreateBasicBlock(); @@ -2630,17 +2642,20 @@ void HGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) { } // Check for fall-through from previous statement block. - if (previous_subgraph != NULL && previous_subgraph->HasExit()) { + if (previous_subgraph != NULL && previous_subgraph->exit_block() != NULL) { if (subgraph == NULL) subgraph = CreateEmptySubgraph(); previous_subgraph->exit_block()-> Finish(new HGoto(subgraph->entry_block())); } if (subgraph != NULL) { - ADD_TO_SUBGRAPH(subgraph, clause->statements()); - HBasicBlock* break_block = subgraph->BundleBreak(stmt); - if (break_block != NULL) { - break_block->Finish(new HGoto(single_exit_block)); + BreakAndContinueInfo break_info(stmt); + { BreakAndContinueScope push(&break_info, this); + ADD_TO_SUBGRAPH(subgraph, clause->statements()); + } + if (break_info.break_block() != NULL) { + break_info.break_block()->SetJoinId(stmt->ExitId()); + break_info.break_block()->Finish(new HGoto(single_exit_block)); } } @@ -2649,7 +2664,7 @@ void HGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) { // If the last statement block has a fall-through, connect it to the // single exit block. 
- if (previous_subgraph != NULL && previous_subgraph->HasExit()) { + if (previous_subgraph != NULL && previous_subgraph->exit_block() != NULL) { previous_subgraph->exit_block()->Finish(new HGoto(single_exit_block)); } @@ -2659,9 +2674,9 @@ void HGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) { } if (single_exit_block->HasPredecessor()) { - current_subgraph_->set_exit_block(single_exit_block); + set_current_block(single_exit_block); } else { - current_subgraph_->set_exit_block(NULL); + set_current_block(NULL); } } @@ -2703,15 +2718,21 @@ void HSubgraph::PreProcessOsrEntry(IterationStatement* statement) { void HGraphBuilder::VisitDoWhileStatement(DoWhileStatement* stmt) { - ASSERT(subgraph()->HasExit()); + ASSERT(current_block() != NULL); subgraph()->PreProcessOsrEntry(stmt); HSubgraph* body_graph = CreateLoopHeaderSubgraph(environment()); - ADD_TO_SUBGRAPH(body_graph, stmt->body()); - body_graph->ResolveContinue(stmt); + BreakAndContinueInfo break_info(stmt); + { BreakAndContinueScope push(&break_info, this); + ADD_TO_SUBGRAPH(body_graph, stmt->body()); + } + body_graph->ResolveContinue(stmt, break_info.continue_block()); - if (!body_graph->HasExit() || stmt->cond()->ToBooleanIsTrue()) { - current_subgraph_->AppendEndless(body_graph, stmt); + if (body_graph->exit_block() == NULL || stmt->cond()->ToBooleanIsTrue()) { + subgraph()->AppendEndless(stmt, + body_graph->entry_block(), + body_graph->exit_block(), + break_info.break_block()); } else { HSubgraph* go_back = CreateEmptySubgraph(); HSubgraph* exit = CreateEmptySubgraph(); @@ -2723,18 +2744,17 @@ void HGraphBuilder::VisitDoWhileStatement(DoWhileStatement* stmt) { go_back->entry_block()->SetJoinId(stmt->BackEdgeId()); exit->entry_block()->SetJoinId(stmt->ExitId()); } - current_subgraph_->AppendDoWhile(body_graph, stmt, go_back, exit); + subgraph()->AppendDoWhile(stmt, + body_graph->entry_block(), + go_back->exit_block(), + exit->exit_block(), + break_info.break_block()); } } -bool 
HGraphBuilder::ShouldPeel(HSubgraph* cond, HSubgraph* body) { - return FLAG_use_peeling; -} - - void HGraphBuilder::VisitWhileStatement(WhileStatement* stmt) { - ASSERT(subgraph()->HasExit()); + ASSERT(current_block() != NULL); subgraph()->PreProcessOsrEntry(stmt); HSubgraph* cond_graph = NULL; @@ -2744,7 +2764,6 @@ void HGraphBuilder::VisitWhileStatement(WhileStatement* stmt) { // If the condition is constant true, do not generate a condition subgraph. if (stmt->cond()->ToBooleanIsTrue()) { body_graph = CreateLoopHeaderSubgraph(environment()); - ADD_TO_SUBGRAPH(body_graph, stmt->body()); } else { cond_graph = CreateLoopHeaderSubgraph(environment()); body_graph = CreateEmptySubgraph(); @@ -2757,36 +2776,54 @@ void HGraphBuilder::VisitWhileStatement(WhileStatement* stmt) { body_graph->entry_block()->SetJoinId(stmt->BodyId()); exit_graph->entry_block()->SetJoinId(stmt->ExitId()); } - ADD_TO_SUBGRAPH(body_graph, stmt->body()); } - body_graph->ResolveContinue(stmt); + BreakAndContinueInfo break_info(stmt); + { BreakAndContinueScope push(&break_info, this); + ADD_TO_SUBGRAPH(body_graph, stmt->body()); + } + body_graph->ResolveContinue(stmt, break_info.continue_block()); if (cond_graph != NULL) { - AppendPeeledWhile(stmt, cond_graph, body_graph, exit_graph); + AppendPeeledWhile(stmt, + cond_graph->entry_block(), + exit_graph->exit_block(), + body_graph->exit_block(), + break_info.break_block()); } else { // TODO(fschneider): Implement peeling for endless loops as well. 
- current_subgraph_->AppendEndless(body_graph, stmt); + subgraph()->AppendEndless(stmt, + body_graph->entry_block(), + body_graph->exit_block(), + break_info.break_block()); } } void HGraphBuilder::AppendPeeledWhile(IterationStatement* stmt, - HSubgraph* cond_graph, - HSubgraph* body_graph, - HSubgraph* exit_graph) { - HSubgraph* loop = NULL; - if (body_graph->HasExit() && stmt != peeled_statement_ && - ShouldPeel(cond_graph, body_graph)) { + HBasicBlock* condition_entry, + HBasicBlock* exit_block, + HBasicBlock* body_exit, + HBasicBlock* break_block) { + HBasicBlock* loop_entry = NULL; + HBasicBlock* loop_exit = NULL; + if (FLAG_use_peeling && body_exit != NULL && stmt != peeled_statement_) { // Save the last peeled iteration statement to prevent infinite recursion. IterationStatement* outer_peeled_statement = peeled_statement_; peeled_statement_ = stmt; - loop = CreateGotoSubgraph(body_graph->environment()); + HSubgraph* loop = CreateGotoSubgraph(body_exit->last_environment()); ADD_TO_SUBGRAPH(loop, stmt); peeled_statement_ = outer_peeled_statement; + loop_entry = loop->entry_block(); + loop_exit = loop->exit_block(); } - current_subgraph_->AppendWhile(cond_graph, body_graph, stmt, loop, - exit_graph); + subgraph()->AppendWhile(stmt, + condition_entry, + exit_block, + body_exit, + break_block, + loop_entry, + loop_exit); } @@ -2796,7 +2833,7 @@ void HGraphBuilder::VisitForStatement(ForStatement* stmt) { Visit(stmt->init()); CHECK_BAILOUT; } - ASSERT(subgraph()->HasExit()); + ASSERT(current_block() != NULL); subgraph()->PreProcessOsrEntry(stmt); HSubgraph* cond_graph = NULL; @@ -2817,22 +2854,36 @@ void HGraphBuilder::VisitForStatement(ForStatement* stmt) { } else { body_graph = CreateLoopHeaderSubgraph(environment()); } - ADD_TO_SUBGRAPH(body_graph, stmt->body()); + BreakAndContinueInfo break_info(stmt); + { BreakAndContinueScope push(&break_info, this); + ADD_TO_SUBGRAPH(body_graph, stmt->body()); + } HSubgraph* next_graph = NULL; - 
body_graph->ResolveContinue(stmt); + body_graph->ResolveContinue(stmt, break_info.continue_block()); - if (stmt->next() != NULL && body_graph->HasExit()) { - next_graph = CreateGotoSubgraph(body_graph->environment()); + if (stmt->next() != NULL && body_graph->exit_block() != NULL) { + next_graph = + CreateGotoSubgraph(body_graph->exit_block()->last_environment()); ADD_TO_SUBGRAPH(next_graph, stmt->next()); - body_graph->Append(next_graph, NULL); + body_graph->Append(NULL, + next_graph->entry_block(), + next_graph->exit_block(), + NULL); next_graph->entry_block()->SetJoinId(stmt->ContinueId()); } if (cond_graph != NULL) { - AppendPeeledWhile(stmt, cond_graph, body_graph, exit_graph); + AppendPeeledWhile(stmt, + cond_graph->entry_block(), + exit_graph->exit_block(), + body_graph->exit_block(), + break_info.break_block()); } else { - current_subgraph_->AppendEndless(body_graph, stmt); + subgraph()->AppendEndless(stmt, + body_graph->entry_block(), + body_graph->exit_block(), + break_info.break_block()); } } @@ -2886,7 +2937,9 @@ void HGraphBuilder::VisitConditional(Conditional* expr) { else_graph->entry_block()->SetJoinId(expr->ElseId()); ADD_TO_SUBGRAPH(else_graph, expr->else_expression()); - current_subgraph_->AppendJoin(then_graph, else_graph, expr); + current_subgraph_->AppendJoin(then_graph->exit_block(), + else_graph->exit_block(), + expr->id()); ast_context()->ReturnValue(Pop()); } @@ -3099,9 +3152,9 @@ HBasicBlock* HGraphBuilder::BuildTypeSwitch(HValue* receiver, maps->at(i), if_true->entry_block(), if_false->entry_block()); - subgraph()->exit_block()->Finish(compare); + current_block()->Finish(compare); - if (if_true->HasExit()) { + if (if_true->exit_block() != NULL) { // In an effect context the value of the type switch is not needed. // There is no need to merge it at the join block only to discard it. 
if (ast_context()->IsEffect()) { @@ -3110,15 +3163,15 @@ HBasicBlock* HGraphBuilder::BuildTypeSwitch(HValue* receiver, if_true->exit_block()->Goto(join_block); } - subgraph()->set_exit_block(if_false->exit_block()); + set_current_block(if_false->exit_block()); } // Connect the default if necessary. - if (subgraph()->HasExit()) { + if (current_block() != NULL) { if (ast_context()->IsEffect()) { environment()->Drop(1); } - subgraph()->exit_block()->Goto(join_block); + current_block()->Goto(join_block); } if (join_block->predecessors()->is_empty()) return NULL; @@ -3275,10 +3328,10 @@ void HGraphBuilder::HandlePolymorphicStoreNamedField(Assignment* expr, HBasicBlock* new_exit_block = BuildTypeSwitch(object, &maps, &subgraphs, default_graph, expr->id()); - subgraph()->set_exit_block(new_exit_block); + set_current_block(new_exit_block); // In an effect context, we did not materialized the value in the // predecessor environments so there's no need to handle it here. - if (subgraph()->HasExit() && !ast_context()->IsEffect()) { + if (current_block() != NULL && !ast_context()->IsEffect()) { ast_context()->ReturnValue(Pop()); } } @@ -3326,12 +3379,20 @@ void HGraphBuilder::HandlePropertyAssignment(Assignment* expr) { HValue* key = Pop(); HValue* object = Pop(); - bool is_fast_elements = expr->IsMonomorphic() && - expr->GetMonomorphicReceiverType()->has_fast_elements(); - - instr = is_fast_elements - ? BuildStoreKeyedFastElement(object, key, value, expr) - : BuildStoreKeyedGeneric(object, key, value); + if (expr->IsMonomorphic()) { + Handle<Map> receiver_type(expr->GetMonomorphicReceiverType()); + // An object has either fast elements or pixel array elements, but never + // both. Pixel array maps that are assigned to pixel array elements are + // always created with the fast elements flag cleared. 
+ if (receiver_type->has_pixel_array_elements()) { + instr = BuildStoreKeyedPixelArrayElement(object, key, value, expr); + } else if (receiver_type->has_fast_elements()) { + instr = BuildStoreKeyedFastElement(object, key, value, expr); + } + } + if (instr == NULL) { + instr = BuildStoreKeyedGeneric(object, key, value); + } } Push(value); @@ -3601,10 +3662,10 @@ void HGraphBuilder::HandlePolymorphicLoadNamedField(Property* expr, HBasicBlock* new_exit_block = BuildTypeSwitch(object, &maps, &subgraphs, default_graph, expr->id()); - subgraph()->set_exit_block(new_exit_block); + set_current_block(new_exit_block); // In an effect context, we did not materialized the value in the // predecessor environments so there's no need to handle it here. - if (subgraph()->HasExit() && !ast_context()->IsEffect()) { + if (current_block() != NULL && !ast_context()->IsEffect()) { ast_context()->ReturnValue(Pop()); } } @@ -3711,7 +3772,8 @@ HInstruction* HGraphBuilder::BuildLoadKeyedPixelArrayElement(HValue* object, AddInstruction(new HCheckMap(object, map)); HLoadElements* elements = new HLoadElements(object); AddInstruction(elements); - HInstruction* length = AddInstruction(new HPixelArrayLength(elements)); + HInstruction* length = new HPixelArrayLength(elements); + AddInstruction(length); AddInstruction(new HBoundsCheck(key, length)); HLoadPixelArrayExternalPointer* external_elements = new HLoadPixelArrayExternalPointer(elements); @@ -3754,6 +3816,28 @@ HInstruction* HGraphBuilder::BuildStoreKeyedFastElement(HValue* object, } +HInstruction* HGraphBuilder::BuildStoreKeyedPixelArrayElement( + HValue* object, + HValue* key, + HValue* val, + Expression* expr) { + ASSERT(expr->IsMonomorphic()); + AddInstruction(new HCheckNonSmi(object)); + Handle<Map> map = expr->GetMonomorphicReceiverType(); + ASSERT(!map->has_fast_elements()); + ASSERT(map->has_pixel_array_elements()); + AddInstruction(new HCheckMap(object, map)); + HLoadElements* elements = new HLoadElements(object); + 
AddInstruction(elements); + HInstruction* length = AddInstruction(new HPixelArrayLength(elements)); + AddInstruction(new HBoundsCheck(key, length)); + HLoadPixelArrayExternalPointer* external_elements = + new HLoadPixelArrayExternalPointer(elements); + AddInstruction(external_elements); + return new HStorePixelArrayElement(external_elements, key, val); +} + + bool HGraphBuilder::TryArgumentsAccess(Property* expr) { VariableProxy* proxy = expr->obj()->AsVariableProxy(); if (proxy == NULL) return false; @@ -3899,7 +3983,8 @@ void HGraphBuilder::HandlePolymorphicCallNamed(Call* expr, // Check for bailout, as trying to inline might fail due to bailout // during hydrogen processing. CHECK_BAILOUT; - HCall* call = new HCallConstantFunction(expr->target(), argument_count); + HCallConstantFunction* call = + new HCallConstantFunction(expr->target(), argument_count); call->set_position(expr->position()); PreProcessCall(call); PushAndAdd(call); @@ -3916,7 +4001,7 @@ void HGraphBuilder::HandlePolymorphicCallNamed(Call* expr, if (maps.length() == 0) { HContext* context = new HContext; AddInstruction(context); - HCall* call = new HCallNamed(context, name, argument_count); + HCallNamed* call = new HCallNamed(context, name, argument_count); call->set_position(expr->position()); PreProcessCall(call); ast_context()->ReturnInstruction(call, expr->id()); @@ -3929,7 +4014,7 @@ void HGraphBuilder::HandlePolymorphicCallNamed(Call* expr, } else { HContext* context = new HContext; AddInstruction(context); - HCall* call = new HCallNamed(context, name, argument_count); + HCallNamed* call = new HCallNamed(context, name, argument_count); call->set_position(expr->position()); PreProcessCall(call); PushAndAdd(call); @@ -3938,7 +4023,7 @@ void HGraphBuilder::HandlePolymorphicCallNamed(Call* expr, HBasicBlock* new_exit_block = BuildTypeSwitch(receiver, &maps, &subgraphs, default_graph, expr->id()); - subgraph()->set_exit_block(new_exit_block); + set_current_block(new_exit_block); // In an effect 
context, we did not materialized the value in the // predecessor environments so there's no need to handle it here. if (new_exit_block != NULL && !ast_context()->IsEffect()) { @@ -4009,6 +4094,7 @@ bool HGraphBuilder::TryInline(Call* expr) { } return false; } + if (inner_info.scope()->num_heap_slots() > 0) return false; FunctionLiteral* function = inner_info.function(); // Count the number of AST nodes added by inlining this call. @@ -4048,10 +4134,7 @@ bool HGraphBuilder::TryInline(Call* expr) { if (!FullCodeGenerator::MakeCode(&inner_info)) return false; shared->EnableDeoptimizationSupport(*inner_info.code()); Compiler::RecordFunctionCompilation( - Logger::FUNCTION_TAG, - Handle<String>(shared->DebugName()), - shared->start_position(), - &inner_info); + Logger::FUNCTION_TAG, &inner_info, shared); } // Save the pending call context and type feedback oracle. Set up new ones @@ -4107,7 +4190,7 @@ bool HGraphBuilder::TryInline(Call* expr) { if (FLAG_trace_inlining) TraceInline(target, true); - if (body->HasExit()) { + if (body->exit_block() != NULL) { // Add a return of undefined if control can fall off the body. In a // test context, undefined is false. HValue* return_value = graph()->GetConstantUndefined(); @@ -4136,7 +4219,7 @@ bool HGraphBuilder::TryInline(Call* expr) { AddSimulate(expr->ReturnId()); // Jump to the function entry (without re-recording the environment). - subgraph()->exit_block()->Finish(new HGoto(body->entry_block())); + current_block()->Finish(new HGoto(body->entry_block())); // Fix up the function exits. if (test_context != NULL) { @@ -4166,11 +4249,11 @@ bool HGraphBuilder::TryInline(Call* expr) { // TODO(kmillikin): Come up with a better way to handle this. It is too // subtle. NULL here indicates that the enclosing context has no control // flow to handle. 
- subgraph()->set_exit_block(NULL); + set_current_block(NULL); } else { function_return_->SetJoinId(expr->id()); - subgraph()->set_exit_block(function_return_); + set_current_block(function_return_); } call_context_ = saved_call_context; @@ -4332,14 +4415,13 @@ static bool HasCustomCallGenerator(Handle<JSFunction> function) { void HGraphBuilder::VisitCall(Call* expr) { Expression* callee = expr->expression(); int argument_count = expr->arguments()->length() + 1; // Plus receiver. - HCall* call = NULL; + HInstruction* call = NULL; Property* prop = callee->AsProperty(); if (prop != NULL) { if (!prop->key()->IsPropertyName()) { // Keyed function call. - VisitArgument(prop->obj()); - CHECK_BAILOUT; + VISIT_FOR_VALUE(prop->obj()); VISIT_FOR_VALUE(prop->key()); // Push receiver and key like the non-optimized code generator expects it. @@ -4348,14 +4430,13 @@ void HGraphBuilder::VisitCall(Call* expr) { Push(key); Push(receiver); - VisitArgumentList(expr->arguments()); + VisitExpressions(expr->arguments()); CHECK_BAILOUT; HContext* context = new HContext; AddInstruction(context); - call = new HCallKeyed(context, key, argument_count); + call = PreProcessCall(new HCallKeyed(context, key, argument_count)); call->set_position(expr->position()); - PreProcessCall(call); Drop(1); // Key. ast_context()->ReturnInstruction(call, expr->id()); return; @@ -4367,9 +4448,8 @@ void HGraphBuilder::VisitCall(Call* expr) { if (TryCallApply(expr)) return; CHECK_BAILOUT; - VisitArgument(prop->obj()); - CHECK_BAILOUT; - VisitArgumentList(expr->arguments()); + VISIT_FOR_VALUE(prop->obj()); + VisitExpressions(expr->arguments()); CHECK_BAILOUT; Handle<String> name = prop->key()->AsLiteral()->AsPropertyName(); @@ -4396,12 +4476,12 @@ void HGraphBuilder::VisitCall(Call* expr) { // IC when a primitive receiver check is required. 
HContext* context = new HContext; AddInstruction(context); - call = new HCallNamed(context, name, argument_count); + call = PreProcessCall(new HCallNamed(context, name, argument_count)); } else { AddCheckConstantFunction(expr, receiver, receiver_map, true); if (TryInline(expr)) { - if (subgraph()->HasExit()) { + if (current_block() != NULL) { HValue* return_value = Pop(); // If we inlined a function in a test context then we need to emit // a simulate here to shadow the ones at the end of the @@ -4416,7 +4496,8 @@ void HGraphBuilder::VisitCall(Call* expr) { // Check for bailout, as the TryInline call in the if condition above // might return false due to bailout during hydrogen processing. CHECK_BAILOUT; - call = new HCallConstantFunction(expr->target(), argument_count); + call = PreProcessCall(new HCallConstantFunction(expr->target(), + argument_count)); } } } else if (types != NULL && types->length() > 1) { @@ -4427,7 +4508,7 @@ void HGraphBuilder::VisitCall(Call* expr) { } else { HContext* context = new HContext; AddInstruction(context); - call = new HCallNamed(context, name, argument_count); + call = PreProcessCall(new HCallNamed(context, name, argument_count)); } } else { @@ -4436,8 +4517,7 @@ void HGraphBuilder::VisitCall(Call* expr) { if (!global_call) { ++argument_count; - VisitArgument(expr->expression()); - CHECK_BAILOUT; + VISIT_FOR_VALUE(expr->expression()); } if (global_call) { @@ -4456,7 +4536,7 @@ void HGraphBuilder::VisitCall(Call* expr) { HGlobalObject* global_object = new HGlobalObject(context); AddInstruction(context); PushAndAdd(global_object); - VisitArgumentList(expr->arguments()); + VisitExpressions(expr->arguments()); CHECK_BAILOUT; VISIT_FOR_VALUE(expr->expression()); @@ -4473,7 +4553,7 @@ void HGraphBuilder::VisitCall(Call* expr) { environment()->SetExpressionStackAt(receiver_index, global_receiver); if (TryInline(expr)) { - if (subgraph()->HasExit()) { + if (current_block() != NULL) { HValue* return_value = Pop(); // If we inlined a 
function in a test context then we need to // emit a simulate here to shadow the ones at the end of the @@ -4489,15 +4569,18 @@ void HGraphBuilder::VisitCall(Call* expr) { // during hydrogen processing. CHECK_BAILOUT; - call = new HCallKnownGlobal(expr->target(), argument_count); + call = PreProcessCall(new HCallKnownGlobal(expr->target(), + argument_count)); } else { HContext* context = new HContext; AddInstruction(context); PushAndAdd(new HGlobalObject(context)); - VisitArgumentList(expr->arguments()); + VisitExpressions(expr->arguments()); CHECK_BAILOUT; - call = new HCallGlobal(context, var->name(), argument_count); + call = PreProcessCall(new HCallGlobal(context, + var->name(), + argument_count)); } } else { @@ -4506,15 +4589,14 @@ void HGraphBuilder::VisitCall(Call* expr) { AddInstruction(context); AddInstruction(global_object); PushAndAdd(new HGlobalReceiver(global_object)); - VisitArgumentList(expr->arguments()); + VisitExpressions(expr->arguments()); CHECK_BAILOUT; - call = new HCallFunction(context, argument_count); + call = PreProcessCall(new HCallFunction(context, argument_count)); } } call->set_position(expr->position()); - PreProcessCall(call); ast_context()->ReturnInstruction(call, expr->id()); } @@ -4522,9 +4604,8 @@ void HGraphBuilder::VisitCall(Call* expr) { void HGraphBuilder::VisitCallNew(CallNew* expr) { // The constructor function is also used as the receiver argument to the // JS construct call builtin. - VisitArgument(expr->expression()); - CHECK_BAILOUT; - VisitArgumentList(expr->arguments()); + VISIT_FOR_VALUE(expr->expression()); + VisitExpressions(expr->arguments()); CHECK_BAILOUT; HContext* context = new HContext; @@ -4534,7 +4615,7 @@ void HGraphBuilder::VisitCallNew(CallNew* expr) { // to the construct call. int arg_count = expr->arguments()->length() + 1; // Plus constructor. 
HValue* constructor = environment()->ExpressionStackAt(arg_count - 1); - HCall* call = new HCallNew(context, constructor, arg_count); + HCallNew* call = new HCallNew(context, constructor, arg_count); call->set_position(expr->position()); PreProcessCall(call); ast_context()->ReturnInstruction(call, expr->id()); @@ -4557,25 +4638,15 @@ const HGraphBuilder::InlineFunctionGenerator void HGraphBuilder::VisitCallRuntime(CallRuntime* expr) { - Handle<String> name = expr->name(); - if (name->IsEqualTo(CStrVector("_Log"))) { - ast_context()->ReturnValue(graph()->GetConstantUndefined()); - return; - } - - Runtime::Function* function = expr->function(); if (expr->is_jsruntime()) { BAILOUT("call to a JavaScript runtime function"); } - ASSERT(function != NULL); - VisitArgumentList(expr->arguments()); - CHECK_BAILOUT; - - int argument_count = expr->arguments()->length(); + Runtime::Function* function = expr->function(); + ASSERT(function != NULL); if (function->intrinsic_type == Runtime::INLINE) { - ASSERT(name->length() > 0); - ASSERT(name->Get(0) == '_'); + ASSERT(expr->name()->length() > 0); + ASSERT(expr->name()->Get(0) == '_'); // Call to an inline function. int lookup_index = static_cast<int>(function->function_id) - static_cast<int>(Runtime::kFirstInlineFunction); @@ -4585,12 +4656,17 @@ void HGraphBuilder::VisitCallRuntime(CallRuntime* expr) { InlineFunctionGenerator generator = kInlineFunctionGenerators[lookup_index]; // Call the inline code generator using the pointer-to-member. 
- (this->*generator)(argument_count, expr->id()); + (this->*generator)(expr); } else { ASSERT(function->intrinsic_type == Runtime::RUNTIME); - HCall* call = new HCallRuntime(name, expr->function(), argument_count); + VisitArgumentList(expr->arguments()); + CHECK_BAILOUT; + + Handle<String> name = expr->name(); + int argument_count = expr->arguments()->length(); + HCallRuntime* call = new HCallRuntime(name, function, argument_count); call->set_position(RelocInfo::kNoPosition); - PreProcessCall(call); + Drop(argument_count); ast_context()->ReturnInstruction(call, expr->id()); } } @@ -4640,21 +4716,29 @@ void HGraphBuilder::VisitUnaryOperation(UnaryOperation* expr) { VisitForControl(expr->expression(), context->if_false(), context->if_true()); - } else { + } else if (ast_context()->IsValue()) { HSubgraph* true_graph = CreateEmptySubgraph(); HSubgraph* false_graph = CreateEmptySubgraph(); VISIT_FOR_CONTROL(expr->expression(), false_graph->entry_block(), true_graph->entry_block()); true_graph->entry_block()->SetJoinId(expr->expression()->id()); - true_graph->environment()->Push(graph_->GetConstantTrue()); + true_graph->exit_block()->last_environment()->Push( + graph_->GetConstantTrue()); false_graph->entry_block()->SetJoinId(expr->expression()->id()); - false_graph->environment()->Push(graph_->GetConstantFalse()); + false_graph->exit_block()->last_environment()->Push( + graph_->GetConstantFalse()); - current_subgraph_->AppendJoin(true_graph, false_graph, expr); + current_subgraph_->AppendJoin(true_graph->exit_block(), + false_graph->exit_block(), + expr->id()); ast_context()->ReturnValue(Pop()); + } else { + ASSERT(ast_context()->IsEffect()); + VISIT_FOR_EFFECT(expr->expression()); } + } else if (op == Token::BIT_NOT || op == Token::SUB) { VISIT_FOR_VALUE(expr->expression()); HValue* value = Pop(); @@ -4943,12 +5027,12 @@ void HGraphBuilder::VisitBinaryOperation(BinaryOperation* expr) { // Translate right subexpression by visiting it in the same AST // context as the 
entire expression. - subgraph()->set_exit_block(eval_right); + set_current_block(eval_right); Visit(expr->right()); - } else { + } else if (ast_context()->IsValue()) { VISIT_FOR_VALUE(expr->left()); - ASSERT(current_subgraph_->HasExit()); + ASSERT(current_block() != NULL); HValue* left = Top(); HEnvironment* environment_copy = environment()->Copy(); @@ -4956,9 +5040,51 @@ void HGraphBuilder::VisitBinaryOperation(BinaryOperation* expr) { HSubgraph* right_subgraph; right_subgraph = CreateBranchSubgraph(environment_copy); ADD_TO_SUBGRAPH(right_subgraph, expr->right()); - current_subgraph_->AppendOptional(right_subgraph, is_logical_and, left); - current_subgraph_->exit_block()->SetJoinId(expr->id()); + + ASSERT(current_block() != NULL && + right_subgraph->exit_block() != NULL); + // We need an extra block to maintain edge-split form. + HBasicBlock* empty_block = graph()->CreateBasicBlock(); + HBasicBlock* join_block = graph()->CreateBasicBlock(); + + HTest* test = is_logical_and + ? new HTest(left, right_subgraph->entry_block(), empty_block) + : new HTest(left, empty_block, right_subgraph->entry_block()); + current_block()->Finish(test); + empty_block->Goto(join_block); + right_subgraph->exit_block()->Goto(join_block); + join_block->SetJoinId(expr->id()); + set_current_block(join_block); ast_context()->ReturnValue(Pop()); + } else { + ASSERT(ast_context()->IsEffect()); + // In an effect context, we don't need the value of the left + // subexpression, only its control flow and side effects. We need an + // extra block to maintain edge-split form. + HBasicBlock* empty_block = graph()->CreateBasicBlock(); + HBasicBlock* right_block = graph()->CreateBasicBlock(); + HBasicBlock* join_block = graph()->CreateBasicBlock(); + if (is_logical_and) { + VISIT_FOR_CONTROL(expr->left(), right_block, empty_block); + } else { + VISIT_FOR_CONTROL(expr->left(), empty_block, right_block); + } + // TODO(kmillikin): Find a way to fix this. 
It's ugly that there are + // actually two empty blocks (one here and one inserted by + // TestContext::BuildBranch, and that they both have an HSimulate + // though the second one is not a merge node, and that we really have + // no good AST ID to put on that first HSimulate. + empty_block->SetJoinId(expr->id()); + right_block->SetJoinId(expr->RightId()); + set_current_block(right_block); + VISIT_FOR_EFFECT(expr->right()); + + empty_block->Goto(join_block); + current_block()->Goto(join_block); + join_block->SetJoinId(expr->id()); + set_current_block(join_block); + // We did not materialize any value in the predecessor environments, + // so there is no need to handle it here. } } else { @@ -5135,343 +5261,361 @@ void HGraphBuilder::VisitDeclaration(Declaration* decl) { // Generators for inline runtime functions. // Support for types. -void HGraphBuilder::GenerateIsSmi(int argument_count, int ast_id) { - ASSERT(argument_count == 1); +void HGraphBuilder::GenerateIsSmi(CallRuntime* call) { + ASSERT(call->arguments()->length() == 1); + VISIT_FOR_VALUE(call->arguments()->at(0)); HValue* value = Pop(); HIsSmi* result = new HIsSmi(value); - ast_context()->ReturnInstruction(result, ast_id); + ast_context()->ReturnInstruction(result, call->id()); } -void HGraphBuilder::GenerateIsSpecObject(int argument_count, int ast_id) { - ASSERT(argument_count == 1); +void HGraphBuilder::GenerateIsSpecObject(CallRuntime* call) { + ASSERT(call->arguments()->length() == 1); + VISIT_FOR_VALUE(call->arguments()->at(0)); HValue* value = Pop(); HHasInstanceType* result = new HHasInstanceType(value, FIRST_JS_OBJECT_TYPE, LAST_TYPE); - ast_context()->ReturnInstruction(result, ast_id); + ast_context()->ReturnInstruction(result, call->id()); } -void HGraphBuilder::GenerateIsFunction(int argument_count, int ast_id) { - ASSERT(argument_count == 1); +void HGraphBuilder::GenerateIsFunction(CallRuntime* call) { + ASSERT(call->arguments()->length() == 1); + VISIT_FOR_VALUE(call->arguments()->at(0)); 
HValue* value = Pop(); HHasInstanceType* result = new HHasInstanceType(value, JS_FUNCTION_TYPE); - ast_context()->ReturnInstruction(result, ast_id); + ast_context()->ReturnInstruction(result, call->id()); } -void HGraphBuilder::GenerateHasCachedArrayIndex(int argument_count, - int ast_id) { - ASSERT(argument_count == 1); +void HGraphBuilder::GenerateHasCachedArrayIndex(CallRuntime* call) { + ASSERT(call->arguments()->length() == 1); + VISIT_FOR_VALUE(call->arguments()->at(0)); HValue* value = Pop(); HHasCachedArrayIndex* result = new HHasCachedArrayIndex(value); - ast_context()->ReturnInstruction(result, ast_id); + ast_context()->ReturnInstruction(result, call->id()); } -void HGraphBuilder::GenerateIsArray(int argument_count, int ast_id) { - ASSERT(argument_count == 1); +void HGraphBuilder::GenerateIsArray(CallRuntime* call) { + ASSERT(call->arguments()->length() == 1); + VISIT_FOR_VALUE(call->arguments()->at(0)); HValue* value = Pop(); HHasInstanceType* result = new HHasInstanceType(value, JS_ARRAY_TYPE); - ast_context()->ReturnInstruction(result, ast_id); + ast_context()->ReturnInstruction(result, call->id()); } -void HGraphBuilder::GenerateIsRegExp(int argument_count, int ast_id) { - ASSERT(argument_count == 1); +void HGraphBuilder::GenerateIsRegExp(CallRuntime* call) { + ASSERT(call->arguments()->length() == 1); + VISIT_FOR_VALUE(call->arguments()->at(0)); HValue* value = Pop(); HHasInstanceType* result = new HHasInstanceType(value, JS_REGEXP_TYPE); - ast_context()->ReturnInstruction(result, ast_id); + ast_context()->ReturnInstruction(result, call->id()); } -void HGraphBuilder::GenerateIsObject(int argument_count, int ast_id) { - ASSERT(argument_count == 1); - +void HGraphBuilder::GenerateIsObject(CallRuntime* call) { + ASSERT(call->arguments()->length() == 1); + VISIT_FOR_VALUE(call->arguments()->at(0)); HValue* value = Pop(); HIsObject* test = new HIsObject(value); - ast_context()->ReturnInstruction(test, ast_id); + ast_context()->ReturnInstruction(test, 
call->id()); } -void HGraphBuilder::GenerateIsNonNegativeSmi(int argument_count, - int ast_id) { +void HGraphBuilder::GenerateIsNonNegativeSmi(CallRuntime* call) { BAILOUT("inlined runtime function: IsNonNegativeSmi"); } -void HGraphBuilder::GenerateIsUndetectableObject(int argument_count, - int ast_id) { +void HGraphBuilder::GenerateIsUndetectableObject(CallRuntime* call) { BAILOUT("inlined runtime function: IsUndetectableObject"); } void HGraphBuilder::GenerateIsStringWrapperSafeForDefaultValueOf( - int argument_count, - int ast_id) { + CallRuntime* call) { BAILOUT("inlined runtime function: IsStringWrapperSafeForDefaultValueOf"); } // Support for construct call checks. -void HGraphBuilder::GenerateIsConstructCall(int argument_count, int ast_id) { - ASSERT(argument_count == 0); - ast_context()->ReturnInstruction(new HIsConstructCall, ast_id); +void HGraphBuilder::GenerateIsConstructCall(CallRuntime* call) { + ASSERT(call->arguments()->length() == 0); + ast_context()->ReturnInstruction(new HIsConstructCall, call->id()); } // Support for arguments.length and arguments[?]. 
-void HGraphBuilder::GenerateArgumentsLength(int argument_count, int ast_id) { - ASSERT(argument_count == 0); +void HGraphBuilder::GenerateArgumentsLength(CallRuntime* call) { + ASSERT(call->arguments()->length() == 0); HInstruction* elements = AddInstruction(new HArgumentsElements); HArgumentsLength* result = new HArgumentsLength(elements); - ast_context()->ReturnInstruction(result, ast_id); + ast_context()->ReturnInstruction(result, call->id()); } -void HGraphBuilder::GenerateArguments(int argument_count, int ast_id) { - ASSERT(argument_count == 1); +void HGraphBuilder::GenerateArguments(CallRuntime* call) { + ASSERT(call->arguments()->length() == 1); + VISIT_FOR_VALUE(call->arguments()->at(0)); HValue* index = Pop(); HInstruction* elements = AddInstruction(new HArgumentsElements); HInstruction* length = AddInstruction(new HArgumentsLength(elements)); HAccessArgumentsAt* result = new HAccessArgumentsAt(elements, length, index); - ast_context()->ReturnInstruction(result, ast_id); + ast_context()->ReturnInstruction(result, call->id()); } // Support for accessing the class and value fields of an object. -void HGraphBuilder::GenerateClassOf(int argument_count, int ast_id) { +void HGraphBuilder::GenerateClassOf(CallRuntime* call) { // The special form detected by IsClassOfTest is detected before we get here // and does not cause a bailout. 
BAILOUT("inlined runtime function: ClassOf"); } -void HGraphBuilder::GenerateValueOf(int argument_count, int ast_id) { - ASSERT(argument_count == 1); +void HGraphBuilder::GenerateValueOf(CallRuntime* call) { + ASSERT(call->arguments()->length() == 1); + VISIT_FOR_VALUE(call->arguments()->at(0)); HValue* value = Pop(); HValueOf* result = new HValueOf(value); - ast_context()->ReturnInstruction(result, ast_id); + ast_context()->ReturnInstruction(result, call->id()); } -void HGraphBuilder::GenerateSetValueOf(int argument_count, int ast_id) { +void HGraphBuilder::GenerateSetValueOf(CallRuntime* call) { BAILOUT("inlined runtime function: SetValueOf"); } // Fast support for charCodeAt(n). -void HGraphBuilder::GenerateStringCharCodeAt(int argument_count, int ast_id) { - ASSERT(argument_count == 2); +void HGraphBuilder::GenerateStringCharCodeAt(CallRuntime* call) { + ASSERT(call->arguments()->length() == 2); + VISIT_FOR_VALUE(call->arguments()->at(0)); + VISIT_FOR_VALUE(call->arguments()->at(1)); HValue* index = Pop(); HValue* string = Pop(); HStringCharCodeAt* result = BuildStringCharCodeAt(string, index); - ast_context()->ReturnInstruction(result, ast_id); + ast_context()->ReturnInstruction(result, call->id()); } // Fast support for string.charAt(n) and string[n]. -void HGraphBuilder::GenerateStringCharFromCode(int argument_count, - int ast_id) { +void HGraphBuilder::GenerateStringCharFromCode(CallRuntime* call) { BAILOUT("inlined runtime function: StringCharFromCode"); } // Fast support for string.charAt(n) and string[n]. 
-void HGraphBuilder::GenerateStringCharAt(int argument_count, int ast_id) { - ASSERT_EQ(2, argument_count); +void HGraphBuilder::GenerateStringCharAt(CallRuntime* call) { + ASSERT_EQ(2, call->arguments()->length()); + VisitArgumentList(call->arguments()); + CHECK_BAILOUT; HContext* context = new HContext; AddInstruction(context); - HCallStub* result = - new HCallStub(context, CodeStub::StringCharAt, argument_count); - PreProcessCall(result); - ast_context()->ReturnInstruction(result, ast_id); + HCallStub* result = new HCallStub(context, CodeStub::StringCharAt, 2); + Drop(2); + ast_context()->ReturnInstruction(result, call->id()); } // Fast support for object equality testing. -void HGraphBuilder::GenerateObjectEquals(int argument_count, int ast_id) { - ASSERT(argument_count == 2); +void HGraphBuilder::GenerateObjectEquals(CallRuntime* call) { + ASSERT(call->arguments()->length() == 2); + VISIT_FOR_VALUE(call->arguments()->at(0)); + VISIT_FOR_VALUE(call->arguments()->at(1)); HValue* right = Pop(); HValue* left = Pop(); HCompareJSObjectEq* result = new HCompareJSObjectEq(left, right); - ast_context()->ReturnInstruction(result, ast_id); + ast_context()->ReturnInstruction(result, call->id()); } -void HGraphBuilder::GenerateLog(int argument_count, int ast_id) { - UNREACHABLE(); // We caught this in VisitCallRuntime. +void HGraphBuilder::GenerateLog(CallRuntime* call) { + // %_Log is ignored in optimized code. + ast_context()->ReturnValue(graph()->GetConstantUndefined()); } // Fast support for Math.random(). -void HGraphBuilder::GenerateRandomHeapNumber(int argument_count, int ast_id) { +void HGraphBuilder::GenerateRandomHeapNumber(CallRuntime* call) { BAILOUT("inlined runtime function: RandomHeapNumber"); } // Fast support for StringAdd. 
-void HGraphBuilder::GenerateStringAdd(int argument_count, int ast_id) { - ASSERT_EQ(2, argument_count); +void HGraphBuilder::GenerateStringAdd(CallRuntime* call) { + ASSERT_EQ(2, call->arguments()->length()); + VisitArgumentList(call->arguments()); + CHECK_BAILOUT; HContext* context = new HContext; AddInstruction(context); - HCallStub* result = - new HCallStub(context, CodeStub::StringAdd, argument_count); - PreProcessCall(result); - ast_context()->ReturnInstruction(result, ast_id); + HCallStub* result = new HCallStub(context, CodeStub::StringAdd, 2); + Drop(2); + ast_context()->ReturnInstruction(result, call->id()); } // Fast support for SubString. -void HGraphBuilder::GenerateSubString(int argument_count, int ast_id) { - ASSERT_EQ(3, argument_count); +void HGraphBuilder::GenerateSubString(CallRuntime* call) { + ASSERT_EQ(3, call->arguments()->length()); + VisitArgumentList(call->arguments()); + CHECK_BAILOUT; HContext* context = new HContext; AddInstruction(context); - HCallStub* result = - new HCallStub(context, CodeStub::SubString, argument_count); - PreProcessCall(result); - ast_context()->ReturnInstruction(result, ast_id); + HCallStub* result = new HCallStub(context, CodeStub::SubString, 3); + Drop(3); + ast_context()->ReturnInstruction(result, call->id()); } // Fast support for StringCompare. 
-void HGraphBuilder::GenerateStringCompare(int argument_count, int ast_id) { - ASSERT_EQ(2, argument_count); +void HGraphBuilder::GenerateStringCompare(CallRuntime* call) { + ASSERT_EQ(2, call->arguments()->length()); + VisitArgumentList(call->arguments()); + CHECK_BAILOUT; HContext* context = new HContext; AddInstruction(context); - HCallStub* result = - new HCallStub(context, CodeStub::StringCompare, argument_count); - PreProcessCall(result); - ast_context()->ReturnInstruction(result, ast_id); + HCallStub* result = new HCallStub(context, CodeStub::StringCompare, 2); + Drop(2); + ast_context()->ReturnInstruction(result, call->id()); } // Support for direct calls from JavaScript to native RegExp code. -void HGraphBuilder::GenerateRegExpExec(int argument_count, int ast_id) { - ASSERT_EQ(4, argument_count); +void HGraphBuilder::GenerateRegExpExec(CallRuntime* call) { + ASSERT_EQ(4, call->arguments()->length()); + VisitArgumentList(call->arguments()); + CHECK_BAILOUT; HContext* context = new HContext; AddInstruction(context); - HCallStub* result = - new HCallStub(context, CodeStub::RegExpExec, argument_count); - PreProcessCall(result); - ast_context()->ReturnInstruction(result, ast_id); + HCallStub* result = new HCallStub(context, CodeStub::RegExpExec, 4); + Drop(4); + ast_context()->ReturnInstruction(result, call->id()); } // Construct a RegExp exec result with two in-object properties. 
-void HGraphBuilder::GenerateRegExpConstructResult(int argument_count, - int ast_id) { - ASSERT_EQ(3, argument_count); +void HGraphBuilder::GenerateRegExpConstructResult(CallRuntime* call) { + ASSERT_EQ(3, call->arguments()->length()); + VisitArgumentList(call->arguments()); + CHECK_BAILOUT; HContext* context = new HContext; AddInstruction(context); HCallStub* result = - new HCallStub(context, CodeStub::RegExpConstructResult, argument_count); - PreProcessCall(result); - ast_context()->ReturnInstruction(result, ast_id); + new HCallStub(context, CodeStub::RegExpConstructResult, 3); + Drop(3); + ast_context()->ReturnInstruction(result, call->id()); } // Support for fast native caches. -void HGraphBuilder::GenerateGetFromCache(int argument_count, int ast_id) { +void HGraphBuilder::GenerateGetFromCache(CallRuntime* call) { BAILOUT("inlined runtime function: GetFromCache"); } // Fast support for number to string. -void HGraphBuilder::GenerateNumberToString(int argument_count, int ast_id) { - ASSERT_EQ(1, argument_count); +void HGraphBuilder::GenerateNumberToString(CallRuntime* call) { + ASSERT_EQ(1, call->arguments()->length()); + VisitArgumentList(call->arguments()); + CHECK_BAILOUT; HContext* context = new HContext; AddInstruction(context); - HCallStub* result = - new HCallStub(context, CodeStub::NumberToString, argument_count); - PreProcessCall(result); - ast_context()->ReturnInstruction(result, ast_id); + HCallStub* result = new HCallStub(context, CodeStub::NumberToString, 1); + Drop(1); + ast_context()->ReturnInstruction(result, call->id()); } // Fast swapping of elements. Takes three expressions, the object and two // indices. This should only be used if the indices are known to be // non-negative and within bounds of the elements array at the call site. 
-void HGraphBuilder::GenerateSwapElements(int argument_count, int ast_id) { +void HGraphBuilder::GenerateSwapElements(CallRuntime* call) { BAILOUT("inlined runtime function: SwapElements"); } // Fast call for custom callbacks. -void HGraphBuilder::GenerateCallFunction(int argument_count, int ast_id) { +void HGraphBuilder::GenerateCallFunction(CallRuntime* call) { BAILOUT("inlined runtime function: CallFunction"); } // Fast call to math functions. -void HGraphBuilder::GenerateMathPow(int argument_count, int ast_id) { - ASSERT_EQ(2, argument_count); +void HGraphBuilder::GenerateMathPow(CallRuntime* call) { + ASSERT_EQ(2, call->arguments()->length()); + VISIT_FOR_VALUE(call->arguments()->at(0)); + VISIT_FOR_VALUE(call->arguments()->at(1)); HValue* right = Pop(); HValue* left = Pop(); HPower* result = new HPower(left, right); - ast_context()->ReturnInstruction(result, ast_id); + ast_context()->ReturnInstruction(result, call->id()); } -void HGraphBuilder::GenerateMathSin(int argument_count, int ast_id) { - ASSERT_EQ(1, argument_count); +void HGraphBuilder::GenerateMathSin(CallRuntime* call) { + ASSERT_EQ(1, call->arguments()->length()); + VisitArgumentList(call->arguments()); + CHECK_BAILOUT; HContext* context = new HContext; AddInstruction(context); - HCallStub* result = - new HCallStub(context, CodeStub::TranscendentalCache, argument_count); + HCallStub* result = new HCallStub(context, CodeStub::TranscendentalCache, 1); result->set_transcendental_type(TranscendentalCache::SIN); - PreProcessCall(result); - ast_context()->ReturnInstruction(result, ast_id); + Drop(1); + ast_context()->ReturnInstruction(result, call->id()); } -void HGraphBuilder::GenerateMathCos(int argument_count, int ast_id) { - ASSERT_EQ(1, argument_count); +void HGraphBuilder::GenerateMathCos(CallRuntime* call) { + ASSERT_EQ(1, call->arguments()->length()); + VisitArgumentList(call->arguments()); + CHECK_BAILOUT; HContext* context = new HContext; AddInstruction(context); - HCallStub* result = - new 
HCallStub(context, CodeStub::TranscendentalCache, argument_count); + HCallStub* result = new HCallStub(context, CodeStub::TranscendentalCache, 1); result->set_transcendental_type(TranscendentalCache::COS); - PreProcessCall(result); - ast_context()->ReturnInstruction(result, ast_id); + Drop(1); + ast_context()->ReturnInstruction(result, call->id()); } -void HGraphBuilder::GenerateMathLog(int argument_count, int ast_id) { - ASSERT_EQ(1, argument_count); +void HGraphBuilder::GenerateMathLog(CallRuntime* call) { + ASSERT_EQ(1, call->arguments()->length()); + VisitArgumentList(call->arguments()); + CHECK_BAILOUT; HContext* context = new HContext; AddInstruction(context); - HCallStub* result = - new HCallStub(context, CodeStub::TranscendentalCache, argument_count); + HCallStub* result = new HCallStub(context, CodeStub::TranscendentalCache, 1); result->set_transcendental_type(TranscendentalCache::LOG); - PreProcessCall(result); - ast_context()->ReturnInstruction(result, ast_id); + Drop(1); + ast_context()->ReturnInstruction(result, call->id()); } -void HGraphBuilder::GenerateMathSqrt(int argument_count, int ast_id) { +void HGraphBuilder::GenerateMathSqrt(CallRuntime* call) { BAILOUT("inlined runtime function: MathSqrt"); } // Check whether two RegExps are equivalent -void HGraphBuilder::GenerateIsRegExpEquivalent(int argument_count, - int ast_id) { +void HGraphBuilder::GenerateIsRegExpEquivalent(CallRuntime* call) { BAILOUT("inlined runtime function: IsRegExpEquivalent"); } -void HGraphBuilder::GenerateGetCachedArrayIndex(int argument_count, - int ast_id) { - ASSERT(argument_count == 1); +void HGraphBuilder::GenerateGetCachedArrayIndex(CallRuntime* call) { + ASSERT(call->arguments()->length() == 1); + VISIT_FOR_VALUE(call->arguments()->at(0)); HValue* value = Pop(); HGetCachedArrayIndex* result = new HGetCachedArrayIndex(value); - ast_context()->ReturnInstruction(result, ast_id); + ast_context()->ReturnInstruction(result, call->id()); } -void 
HGraphBuilder::GenerateFastAsciiArrayJoin(int argument_count, - int ast_id) { +void HGraphBuilder::GenerateFastAsciiArrayJoin(CallRuntime* call) { BAILOUT("inlined runtime function: FastAsciiArrayJoin"); } @@ -5872,7 +6016,7 @@ void HTracer::TraceLiveRange(LiveRange* range, const char* type) { if (op != NULL && op->IsUnallocated()) hint_index = op->VirtualRegister(); trace_.Add(" %d %d", parent_index, hint_index); UseInterval* cur_interval = range->first_interval(); - while (cur_interval != NULL) { + while (cur_interval != NULL && range->Covers(cur_interval->start())) { trace_.Add(" [%d, %d[", cur_interval->start().Value(), cur_interval->end().Value()); @@ -5881,7 +6025,7 @@ void HTracer::TraceLiveRange(LiveRange* range, const char* type) { UsePosition* current_pos = range->first_pos(); while (current_pos != NULL) { - if (current_pos->RegisterIsBeneficial()) { + if (current_pos->RegisterIsBeneficial() || FLAG_trace_all_uses) { trace_.Add(" %d M", current_pos->pos().Value()); } current_pos = current_pos->next(); diff --git a/deps/v8/src/hydrogen.h b/deps/v8/src/hydrogen.h index c911b6c1fc..e8c0b0630d 100644 --- a/deps/v8/src/hydrogen.h +++ b/deps/v8/src/hydrogen.h @@ -196,94 +196,54 @@ class HSubgraph: public ZoneObject { explicit HSubgraph(HGraph* graph) : graph_(graph), entry_block_(NULL), - exit_block_(NULL), - break_continue_info_(4) { + exit_block_(NULL) { } HGraph* graph() const { return graph_; } - HEnvironment* environment() const { - ASSERT(HasExit()); - return exit_block_->last_environment(); + HBasicBlock* entry_block() const { return entry_block_; } + HBasicBlock* exit_block() const { return exit_block_; } + void set_exit_block(HBasicBlock* block) { + exit_block_ = block; } - bool HasExit() const { return exit_block_ != NULL; } - void PreProcessOsrEntry(IterationStatement* statement); - void AppendOptional(HSubgraph* graph, - bool on_true_branch, - HValue* boolean_value); - void AppendJoin(HSubgraph* then_graph, HSubgraph* else_graph, AstNode* node); - 
void AppendWhile(HSubgraph* condition, - HSubgraph* body, - IterationStatement* statement, - HSubgraph* continue_subgraph, - HSubgraph* exit); - void AppendDoWhile(HSubgraph* body, - IterationStatement* statement, - HSubgraph* go_back, - HSubgraph* exit); - void AppendEndless(HSubgraph* body, IterationStatement* statement); - void Append(HSubgraph* next, BreakableStatement* statement); - void ResolveContinue(IterationStatement* statement); - HBasicBlock* BundleBreak(BreakableStatement* statement); - HBasicBlock* BundleContinue(IterationStatement* statement); - HBasicBlock* BundleBreakContinue(BreakableStatement* statement, - bool is_continue, - int join_id); + void AppendJoin(HBasicBlock* first, HBasicBlock* second, int join_id); + void AppendWhile(IterationStatement* statement, + HBasicBlock* condition_entry, + HBasicBlock* exit_block, + HBasicBlock* body_exit, + HBasicBlock* break_block, + HBasicBlock* loop_entry, + HBasicBlock* loop_exit); + void AppendDoWhile(IterationStatement* statement, + HBasicBlock* body_entry, + HBasicBlock* go_back, + HBasicBlock* exit_block, + HBasicBlock* break_block); + void AppendEndless(IterationStatement* statement, + HBasicBlock* body_entry, + HBasicBlock* body_exit, + HBasicBlock* break_block); + void Append(BreakableStatement* stmt, + HBasicBlock* entry_block, + HBasicBlock* exit_block, + HBasicBlock* break_block); + void ResolveContinue(IterationStatement* statement, + HBasicBlock* continue_block); HBasicBlock* JoinBlocks(HBasicBlock* a, HBasicBlock* b, int id); void FinishExit(HControlInstruction* instruction); - void FinishBreakContinue(BreakableStatement* target, bool is_continue); void Initialize(HBasicBlock* block) { ASSERT(entry_block_ == NULL); entry_block_ = block; exit_block_ = block; } - HBasicBlock* entry_block() const { return entry_block_; } - HBasicBlock* exit_block() const { return exit_block_; } - void set_exit_block(HBasicBlock* block) { - exit_block_ = block; - } - - void ConnectExitTo(HBasicBlock* other, bool 
include_stack_check = false) { - if (HasExit()) { - exit_block()->Goto(other, include_stack_check); - } - } - - void AddBreakContinueInfo(HSubgraph* other) { - break_continue_info_.AddAll(other->break_continue_info_); - } protected: - class BreakContinueInfo: public ZoneObject { - public: - BreakContinueInfo(BreakableStatement* target, HBasicBlock* block, - bool is_continue) - : target_(target), block_(block), continue_(is_continue) {} - BreakableStatement* target() const { return target_; } - HBasicBlock* block() const { return block_; } - bool is_continue() const { return continue_; } - bool IsResolved() const { return block_ == NULL; } - void Resolve() { block_ = NULL; } - - private: - BreakableStatement* target_; - HBasicBlock* block_; - bool continue_; - }; - - const ZoneList<BreakContinueInfo*>* break_continue_info() const { - return &break_continue_info_; - } - HGraph* graph_; // The graph this is a subgraph of. HBasicBlock* entry_block_; HBasicBlock* exit_block_; - - private: - ZoneList<BreakContinueInfo*> break_continue_info_; }; @@ -621,6 +581,53 @@ class TestContext: public AstContext { class HGraphBuilder: public AstVisitor { public: + enum BreakType { BREAK, CONTINUE }; + + // A class encapsulating (lazily-allocated) break and continue blocks for + // a breakable statement. Separated from BreakAndContinueScope so that it + // can have a separate lifetime. 
+ class BreakAndContinueInfo BASE_EMBEDDED { + public: + explicit BreakAndContinueInfo(BreakableStatement* target) + : target_(target), break_block_(NULL), continue_block_(NULL) { + } + + BreakableStatement* target() { return target_; } + HBasicBlock* break_block() { return break_block_; } + void set_break_block(HBasicBlock* block) { break_block_ = block; } + HBasicBlock* continue_block() { return continue_block_; } + void set_continue_block(HBasicBlock* block) { continue_block_ = block; } + + private: + BreakableStatement* target_; + HBasicBlock* break_block_; + HBasicBlock* continue_block_; + }; + + // A helper class to maintain a stack of current BreakAndContinueInfo + // structures mirroring BreakableStatement nesting. + class BreakAndContinueScope BASE_EMBEDDED { + public: + BreakAndContinueScope(BreakAndContinueInfo* info, HGraphBuilder* owner) + : info_(info), owner_(owner), next_(owner->break_scope()) { + owner->set_break_scope(this); + } + + ~BreakAndContinueScope() { owner_->set_break_scope(next_); } + + BreakAndContinueInfo* info() { return info_; } + HGraphBuilder* owner() { return owner_; } + BreakAndContinueScope* next() { return next_; } + + // Search the break stack for a break or continue target. + HBasicBlock* Get(BreakableStatement* stmt, BreakType type); + + private: + BreakAndContinueInfo* info_; + HGraphBuilder* owner_; + BreakAndContinueScope* next_; + }; + explicit HGraphBuilder(TypeFeedbackOracle* oracle) : oracle_(oracle), graph_(NULL), @@ -629,16 +636,25 @@ class HGraphBuilder: public AstVisitor { ast_context_(NULL), call_context_(NULL), function_return_(NULL), - inlined_count_(0) { } + inlined_count_(0), + break_scope_(NULL) { + } HGraph* CreateGraph(CompilationInfo* info); // Simple accessors. 
HGraph* graph() const { return graph_; } HSubgraph* subgraph() const { return current_subgraph_; } + BreakAndContinueScope* break_scope() const { return break_scope_; } + void set_break_scope(BreakAndContinueScope* head) { break_scope_ = head; } - HEnvironment* environment() const { return subgraph()->environment(); } - HBasicBlock* CurrentBlock() const { return subgraph()->exit_block(); } + HBasicBlock* current_block() const { return subgraph()->exit_block(); } + void set_current_block(HBasicBlock* block) { + subgraph()->set_exit_block(block); + } + HEnvironment* environment() const { + return current_block()->last_environment(); + } // Adding instructions. HInstruction* AddInstruction(HInstruction* instr); @@ -650,8 +666,7 @@ class HGraphBuilder: public AstVisitor { private: // Type of a member function that generates inline code for a native function. - typedef void (HGraphBuilder::*InlineFunctionGenerator)(int argument_count, - int ast_id); + typedef void (HGraphBuilder::*InlineFunctionGenerator)(CallRuntime* call); // Forward declarations for inner scope classes. class SubgraphScope; @@ -675,7 +690,7 @@ class HGraphBuilder: public AstVisitor { // Generators for inline runtime functions. 
#define INLINE_FUNCTION_GENERATOR_DECLARATION(Name, argc, ressize) \ - void Generate##Name(int argument_count, int ast_id); + void Generate##Name(CallRuntime* call); INLINE_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_DECLARATION) INLINE_RUNTIME_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_DECLARATION) @@ -684,9 +699,10 @@ class HGraphBuilder: public AstVisitor { void Bailout(const char* reason); void AppendPeeledWhile(IterationStatement* stmt, - HSubgraph* cond_graph, - HSubgraph* body_graph, - HSubgraph* exit_graph); + HBasicBlock* condition_entry, + HBasicBlock* exit_block, + HBasicBlock* body_exit, + HBasicBlock* break_block); void AddToSubgraph(HSubgraph* graph, ZoneList<Statement*>* stmts); void AddToSubgraph(HSubgraph* graph, Statement* stmt); @@ -702,17 +718,21 @@ class HGraphBuilder: public AstVisitor { HBasicBlock* true_block, HBasicBlock* false_block); - // Visit an argument subexpression. + // Visit an argument subexpression and emit a push to the outgoing + // arguments. void VisitArgument(Expression* expr); void VisitArgumentList(ZoneList<Expression*>* arguments); + // Visit a list of expressions from left to right, each in a value context. + void VisitExpressions(ZoneList<Expression*>* exprs); + void AddPhi(HPhi* phi); void PushAndAdd(HInstruction* instr); // Remove the arguments from the bailout environment and emit instructions // to push them as outgoing parameters. 
- void PreProcessCall(HCall* call); + template <int V> HInstruction* PreProcessCall(HCall<V>* call); void AssumeRepresentation(HValue* value, Representation r); static Representation ToRepresentation(TypeInfo info); @@ -724,8 +744,6 @@ class HGraphBuilder: public AstVisitor { AST_NODE_LIST(DECLARE_VISIT) #undef DECLARE_VISIT - bool ShouldPeel(HSubgraph* cond, HSubgraph* body); - HBasicBlock* CreateBasicBlock(HEnvironment* env); HSubgraph* CreateEmptySubgraph(); HSubgraph* CreateGotoSubgraph(HEnvironment* env); @@ -816,6 +834,11 @@ class HGraphBuilder: public AstVisitor { HValue* val, Expression* expr); + HInstruction* BuildStoreKeyedPixelArrayElement(HValue* object, + HValue* key, + HValue* val, + Expression* expr); + HCompare* BuildSwitchCompare(HSubgraph* subgraph, HValue* switch_value, CaseClause* clause); @@ -853,6 +876,8 @@ class HGraphBuilder: public AstVisitor { int inlined_count_; + BreakAndContinueScope* break_scope_; + friend class AstContext; // Pushes and pops the AST context stack. DISALLOW_COPY_AND_ASSIGN(HGraphBuilder); diff --git a/deps/v8/src/ia32/assembler-ia32.h b/deps/v8/src/ia32/assembler-ia32.h index 568b4d84fe..b60157c752 100644 --- a/deps/v8/src/ia32/assembler-ia32.h +++ b/deps/v8/src/ia32/assembler-ia32.h @@ -183,13 +183,6 @@ const XMMRegister xmm7 = { 7 }; typedef XMMRegister DoubleRegister; -// Index of register used in pusha/popa. -// Order of pushed registers: EAX, ECX, EDX, EBX, ESP, EBP, ESI, and EDI -inline int EspIndexForPushAll(Register reg) { - return Register::kNumRegisters - 1 - reg.code(); -} - - enum Condition { // any value < 0 is considered no_condition no_condition = -1, @@ -980,6 +973,10 @@ class Assembler : public Malloced { PositionsRecorder* positions_recorder() { return &positions_recorder_; } + int relocation_writer_size() { + return (buffer_ + buffer_size_) - reloc_info_writer.pos(); + } + // Avoid overflows for displacements etc. 
static const int kMaximalBufferSize = 512*MB; static const int kMinimalBufferSize = 4*KB; diff --git a/deps/v8/src/ia32/code-stubs-ia32.cc b/deps/v8/src/ia32/code-stubs-ia32.cc index 6331a6e2ea..cb05c38247 100644 --- a/deps/v8/src/ia32/code-stubs-ia32.cc +++ b/deps/v8/src/ia32/code-stubs-ia32.cc @@ -2385,14 +2385,14 @@ void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) { void TypeRecordingBinaryOpStub::GenerateAddStrings(MacroAssembler* masm) { - NearLabel call_runtime; + ASSERT(op_ == Token::ADD); + NearLabel left_not_string, call_runtime; // Registers containing left and right operands respectively. Register left = edx; Register right = eax; // Test if left operand is a string. - NearLabel left_not_string; __ test(left, Immediate(kSmiTagMask)); __ j(zero, &left_not_string); __ CmpObjectType(left, FIRST_NONSTRING_TYPE, ecx); diff --git a/deps/v8/src/ia32/codegen-ia32.cc b/deps/v8/src/ia32/codegen-ia32.cc index 02e29191dc..ae544dc63b 100644 --- a/deps/v8/src/ia32/codegen-ia32.cc +++ b/deps/v8/src/ia32/codegen-ia32.cc @@ -8234,8 +8234,8 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) { Variable* variable = node->expression()->AsVariableProxy()->AsVariable(); if (variable != NULL) { // Delete of an unqualified identifier is disallowed in strict mode - // so this code can only be reached in non-strict mode. - ASSERT(strict_mode_flag() == kNonStrictMode); + // but "delete this" is. 
+ ASSERT(strict_mode_flag() == kNonStrictMode || variable->is_this()); Slot* slot = variable->AsSlot(); if (variable->is_global()) { LoadGlobal(); @@ -8244,7 +8244,6 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) { Result answer = frame_->InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, 3); frame_->Push(&answer); - return; } else if (slot != NULL && slot->type() == Slot::LOOKUP) { // Call the runtime to delete from the context holding the named @@ -8255,13 +8254,11 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) { frame_->EmitPush(Immediate(variable->name())); Result answer = frame_->CallRuntime(Runtime::kDeleteContextSlot, 2); frame_->Push(&answer); - return; + } else { + // Default: Result of deleting non-global, not dynamically + // introduced variables is false. + frame_->Push(Factory::false_value()); } - - // Default: Result of deleting non-global, not dynamically - // introduced variables is false. - frame_->Push(Factory::false_value()); - } else { // Default: Result of deleting expressions is true. Load(node->expression()); // may have side-effects diff --git a/deps/v8/src/ia32/deoptimizer-ia32.cc b/deps/v8/src/ia32/deoptimizer-ia32.cc index 322993ee61..5f4d94449a 100644 --- a/deps/v8/src/ia32/deoptimizer-ia32.cc +++ b/deps/v8/src/ia32/deoptimizer-ia32.cc @@ -431,14 +431,16 @@ void Deoptimizer::DoComputeFrame(TranslationIterator* iterator, fp_value, output_offset, value); } - // The context can be gotten from the function so long as we don't - // optimize functions that need local contexts. + // For the bottommost output frame the context can be gotten from the input + // frame. For all subsequent output frames it can be gotten from the function + // so long as we don't inline functions that need local contexts. output_offset -= kPointerSize; input_offset -= kPointerSize; - value = reinterpret_cast<uint32_t>(function->context()); - // The context for the bottommost output frame should also agree with the - // input frame. 
- ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value); + if (is_bottommost) { + value = input_->GetFrameSlot(input_offset); + } else { + value = reinterpret_cast<uint32_t>(function->context()); + } output_frame->SetFrameSlot(output_offset, value); if (is_topmost) output_frame->SetRegister(esi.code(), value); if (FLAG_trace_deopt) { diff --git a/deps/v8/src/ia32/full-codegen-ia32.cc b/deps/v8/src/ia32/full-codegen-ia32.cc index a5c94c6bf5..3cdca4c621 100644 --- a/deps/v8/src/ia32/full-codegen-ia32.cc +++ b/deps/v8/src/ia32/full-codegen-ia32.cc @@ -3743,8 +3743,8 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) { } } else if (var != NULL) { // Delete of an unqualified identifier is disallowed in strict mode - // so this code can only be reached in non-strict mode. - ASSERT(strict_mode_flag() == kNonStrictMode); + // but "delete this" is. + ASSERT(strict_mode_flag() == kNonStrictMode || var->is_this()); if (var->is_global()) { __ push(GlobalObjectOperand()); __ push(Immediate(var->name())); @@ -3782,17 +3782,22 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) { case Token::NOT: { Comment cmnt(masm_, "[ UnaryOperation (NOT)"); - - Label materialize_true, materialize_false; - Label* if_true = NULL; - Label* if_false = NULL; - Label* fall_through = NULL; - // Notice that the labels are swapped. - context()->PrepareTest(&materialize_true, &materialize_false, - &if_false, &if_true, &fall_through); - if (context()->IsTest()) ForwardBailoutToChild(expr); - VisitForControl(expr->expression(), if_true, if_false, fall_through); - context()->Plug(if_false, if_true); // Labels swapped. + if (context()->IsEffect()) { + // Unary NOT has no side effects so it's only necessary to visit the + // subexpression. Match the optimizing compiler by not branching. 
+ VisitForEffect(expr->expression()); + } else { + Label materialize_true, materialize_false; + Label* if_true = NULL; + Label* if_false = NULL; + Label* fall_through = NULL; + // Notice that the labels are swapped. + context()->PrepareTest(&materialize_true, &materialize_false, + &if_false, &if_true, &fall_through); + if (context()->IsTest()) ForwardBailoutToChild(expr); + VisitForControl(expr->expression(), if_true, if_false, fall_through); + context()->Plug(if_false, if_true); // Labels swapped. + } break; } diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.cc b/deps/v8/src/ia32/lithium-codegen-ia32.cc index a59b1a5bad..d61ebdc0f6 100644 --- a/deps/v8/src/ia32/lithium-codegen-ia32.cc +++ b/deps/v8/src/ia32/lithium-codegen-ia32.cc @@ -55,7 +55,7 @@ class SafepointGenerator : public PostCallGenerator { // Ensure that we have enough space in the reloc info to patch // this with calls when doing deoptimization. if (ensure_reloc_space_) { - codegen_->masm()->RecordComment(RelocInfo::kFillerCommentString, true); + codegen_->EnsureRelocSpaceForDeoptimization(); } codegen_->RecordSafepoint(pointers_, deoptimization_index_); } @@ -78,6 +78,7 @@ bool LCodeGen::GenerateCode() { return GeneratePrologue() && GenerateBody() && GenerateDeferredCode() && + GenerateRelocPadding() && GenerateSafepointTable(); } @@ -122,6 +123,16 @@ void LCodeGen::Comment(const char* format, ...) { } +bool LCodeGen::GenerateRelocPadding() { + int reloc_size = masm()->relocation_writer_size(); + while (reloc_size < deoptimization_reloc_size.min_size) { + __ RecordComment(RelocInfo::kFillerCommentString, true); + reloc_size += RelocInfo::kRelocCommentSize; + } + return !is_aborted(); +} + + bool LCodeGen::GeneratePrologue() { ASSERT(is_generating()); @@ -163,6 +174,45 @@ bool LCodeGen::GeneratePrologue() { } } + // Possibly allocate a local context. 
+  int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; +  if (heap_slots > 0) { +    Comment(";;; Allocate local context"); +    // Argument to NewContext is the function, which is still in edi. +    __ push(edi); +    if (heap_slots <= FastNewContextStub::kMaximumSlots) { +      FastNewContextStub stub(heap_slots); +      __ CallStub(&stub); +    } else { +      __ CallRuntime(Runtime::kNewContext, 1); +    } +    RecordSafepoint(Safepoint::kNoDeoptimizationIndex); +    // Context is returned in both eax and esi.  It replaces the context +    // passed to us.  It's saved in the stack and kept live in esi. +    __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi); + +    // Copy parameters into context if necessary. +    int num_parameters = scope()->num_parameters(); +    for (int i = 0; i < num_parameters; i++) { +      Slot* slot = scope()->parameter(i)->AsSlot(); +      if (slot != NULL && slot->type() == Slot::CONTEXT) { +        int parameter_offset = StandardFrameConstants::kCallerSPOffset + +            (num_parameters - 1 - i) * kPointerSize; +        // Load parameter from stack. +        __ mov(eax, Operand(ebp, parameter_offset)); +        // Store it in the context. +        int context_offset = Context::SlotOffset(slot->index()); +        __ mov(Operand(esi, context_offset), eax); +        // Update the write barrier. This clobbers all involved +        // registers, so we have to use a third register to avoid +        // clobbering esi. +        __ mov(ecx, esi); +        __ RecordWrite(ecx, context_offset, eax, ebx); +      } +    } +    Comment(";;; End allocate local context"); +  } +   // Trace the call.   if (FLAG_trace) {     // We have not executed any compiled code yet, so esi still holds the @@ -335,6 +385,22 @@ void LCodeGen::WriteTranslation(LEnvironment* environment, }   +void LCodeGen::EnsureRelocSpaceForDeoptimization() { +  // Since we patch the reloc info with RUNTIME_ENTRY calls, every patch +  // site will take up 2 bytes + any pc-jumps. +  // We are conservative and always reserve 6 bytes in the case where a +  // simple pc-jump is not enough.
+ uint32_t pc_delta = + masm()->pc_offset() - deoptimization_reloc_size.last_pc_offset; + if (is_uintn(pc_delta, 6)) { + deoptimization_reloc_size.min_size += 2; + } else { + deoptimization_reloc_size.min_size += 6; + } + deoptimization_reloc_size.last_pc_offset = masm()->pc_offset(); +} + + void LCodeGen::AddToTranslation(Translation* translation, LOperand* op, bool is_tagged) { @@ -382,10 +448,13 @@ void LCodeGen::CallCode(Handle<Code> code, ASSERT(instr != NULL); LPointerMap* pointers = instr->pointer_map(); RecordPosition(pointers->position()); + if (!adjusted) { __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); } __ call(code, mode); + + EnsureRelocSpaceForDeoptimization(); RegisterLazyDeoptimization(instr); // Signal that we don't inline smi code before these stubs in the @@ -595,6 +664,12 @@ void LCodeGen::RecordSafepoint(LPointerMap* pointers, } +void LCodeGen::RecordSafepoint(int deoptimization_index) { + LPointerMap empty_pointers(RelocInfo::kNoPosition); + RecordSafepoint(&empty_pointers, deoptimization_index); +} + + void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers, int arguments, int deoptimization_index) { @@ -1836,7 +1911,7 @@ void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr, Label before_push_delta; __ bind(&before_push_delta); __ mov(temp, Immediate(delta)); - __ mov(Operand(esp, EspIndexForPushAll(temp) * kPointerSize), temp); + __ StoreToSafepointRegisterSlot(temp, temp); __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); __ call(stub.GetCode(), RelocInfo::CODE_TARGET); ASSERT_EQ(kAdditionalDelta, @@ -1844,8 +1919,7 @@ void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr, RecordSafepointWithRegisters( instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex); // Put the result value into the eax slot and restore all registers. 
- __ mov(Operand(esp, EspIndexForPushAll(eax) * kPointerSize), eax); - + __ StoreToSafepointRegisterSlot(eax, eax); __ PopSafepointRegisters(); } @@ -2100,13 +2174,13 @@ void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) { void LCodeGen::DoLoadPixelArrayElement(LLoadPixelArrayElement* instr) { - Register external_elements = ToRegister(instr->external_pointer()); + Register external_pointer = ToRegister(instr->external_pointer()); Register key = ToRegister(instr->key()); Register result = ToRegister(instr->result()); - ASSERT(result.is(external_elements)); + ASSERT(result.is(external_pointer)); // Load the result. - __ movzx_b(result, Operand(external_elements, key, times_1, 0)); + __ movzx_b(result, Operand(external_pointer, key, times_1, 0)); } @@ -2301,11 +2375,8 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function, if (*function == *graph()->info()->closure()) { __ CallSelf(); } else { - // This is an indirect call and will not be recorded in the reloc info. - // Add a comment to the reloc info in case we need to patch this during - // deoptimization. - __ RecordComment(RelocInfo::kFillerCommentString, true); __ call(FieldOperand(edi, JSFunction::kCodeEntryOffset)); + EnsureRelocSpaceForDeoptimization(); } // Setup deoptimization. @@ -2360,7 +2431,7 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) { if (!tmp.is(eax)) __ mov(tmp, eax); // Restore input_reg after call to runtime. 
- __ mov(input_reg, Operand(esp, EspIndexForPushAll(input_reg) * kPointerSize)); + __ LoadFromSafepointRegisterSlot(input_reg, input_reg); __ bind(&allocated); __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kExponentOffset)); @@ -2368,7 +2439,7 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) { __ mov(FieldOperand(tmp, HeapNumber::kExponentOffset), tmp2); __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kMantissaOffset)); __ mov(FieldOperand(tmp, HeapNumber::kMantissaOffset), tmp2); - __ mov(Operand(esp, EspIndexForPushAll(input_reg) * kPointerSize), tmp); + __ StoreToSafepointRegisterSlot(input_reg, tmp); __ bind(&done); __ PopSafepointRegisters(); @@ -2493,11 +2564,6 @@ void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) { XMMRegister xmm_scratch = xmm0; XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0)); ASSERT(ToDoubleRegister(instr->result()).is(input_reg)); - ExternalReference negative_infinity = - ExternalReference::address_of_negative_infinity(); - __ movdbl(xmm_scratch, Operand::StaticVariable(negative_infinity)); - __ ucomisd(xmm_scratch, input_reg); - DeoptimizeIf(equal, instr->environment()); __ xorpd(xmm_scratch, xmm_scratch); __ addsd(input_reg, xmm_scratch); // Convert -0 to +0. __ sqrtsd(input_reg, input_reg); @@ -2731,6 +2797,25 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) { } +void LCodeGen::DoStorePixelArrayElement(LStorePixelArrayElement* instr) { + Register external_pointer = ToRegister(instr->external_pointer()); + Register key = ToRegister(instr->key()); + Register value = ToRegister(instr->value()); + ASSERT(ToRegister(instr->TempAt(0)).is(eax)); + + __ mov(eax, value); + { // Clamp the value to [0..255]. + NearLabel done; + __ test(eax, Immediate(0xFFFFFF00)); + __ j(zero, &done); + __ setcc(negative, eax); // 1 if negative, 0 if positive. + __ dec_b(eax); // 0 if negative, 255 if positive. 
+ __ bind(&done); + } + __ mov_b(Operand(external_pointer, key, times_1, 0), eax); +} + + void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) { Register value = ToRegister(instr->value()); Register elements = ToRegister(instr->object()); @@ -2840,19 +2925,20 @@ void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) { __ test(result, Immediate(kStringRepresentationMask)); __ j(not_zero, deferred->entry()); - // Check for 1-byte or 2-byte string. + // Check for ASCII or two-byte string. __ bind(&flat_string); STATIC_ASSERT(kAsciiStringTag != 0); __ test(result, Immediate(kStringEncodingMask)); __ j(not_zero, &ascii_string); - // 2-byte string. - // Load the 2-byte character code into the result register. + // Two-byte string. + // Load the two-byte character code into the result register. STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1); if (instr->index()->IsConstantOperand()) { __ movzx_w(result, FieldOperand(string, - SeqTwoByteString::kHeaderSize + 2 * const_index)); + SeqTwoByteString::kHeaderSize + + (kUC16Size * const_index))); } else { __ movzx_w(result, FieldOperand(string, index, @@ -2908,7 +2994,7 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) { __ AbortIfNotSmi(eax); } __ SmiUntag(eax); - __ mov(Operand(esp, EspIndexForPushAll(result) * kPointerSize), eax); + __ StoreToSafepointRegisterSlot(result, eax); __ PopSafepointRegisters(); } @@ -2976,7 +3062,7 @@ void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) { // TODO(3095996): Put a valid pointer value in the stack slot where the result // register is stored, as this register is in the pointer map, but contains an // integer value. 
- __ mov(Operand(esp, EspIndexForPushAll(reg) * kPointerSize), Immediate(0)); + __ StoreToSafepointRegisterSlot(reg, Immediate(0)); __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); @@ -2988,7 +3074,7 @@ void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) { // number. __ bind(&done); __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), xmm0); - __ mov(Operand(esp, EspIndexForPushAll(reg) * kPointerSize), reg); + __ StoreToSafepointRegisterSlot(reg, reg); __ PopSafepointRegisters(); } @@ -3030,7 +3116,7 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) { __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); RecordSafepointWithRegisters( instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex); - __ mov(Operand(esp, EspIndexForPushAll(reg) * kPointerSize), eax); + __ StoreToSafepointRegisterSlot(reg, eax); __ PopSafepointRegisters(); } diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.h b/deps/v8/src/ia32/lithium-codegen-ia32.h index 977fbcda7c..5ba4bc4352 100644 --- a/deps/v8/src/ia32/lithium-codegen-ia32.h +++ b/deps/v8/src/ia32/lithium-codegen-ia32.h @@ -60,6 +60,7 @@ class LCodeGen BASE_EMBEDDED { status_(UNUSED), deferred_(8), osr_pc_offset_(-1), + deoptimization_reloc_size(), resolver_(this) { PopulateDeoptimizationLiteralsWithInlinedFunctions(); } @@ -102,6 +103,8 @@ class LCodeGen BASE_EMBEDDED { // Emit frame translation commands for an environment. void WriteTranslation(LEnvironment* environment, Translation* translation); + void EnsureRelocSpaceForDeoptimization(); + // Declare methods that deal with the individual node types. #define DECLARE_DO(type) void Do##type(L##type* node); LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO) @@ -151,6 +154,9 @@ class LCodeGen BASE_EMBEDDED { bool GeneratePrologue(); bool GenerateBody(); bool GenerateDeferredCode(); + // Pad the reloc info to ensure that we have enough space to patch during + // deoptimization. 
+ bool GenerateRelocPadding(); bool GenerateSafepointTable(); void CallCode(Handle<Code> code, RelocInfo::Mode mode, LInstruction* instr, @@ -204,6 +210,7 @@ class LCodeGen BASE_EMBEDDED { int arguments, int deoptimization_index); void RecordSafepoint(LPointerMap* pointers, int deoptimization_index); + void RecordSafepoint(int deoptimization_index); void RecordSafepointWithRegisters(LPointerMap* pointers, int arguments, int deoptimization_index); @@ -251,6 +258,13 @@ class LCodeGen BASE_EMBEDDED { ZoneList<LDeferredCode*> deferred_; int osr_pc_offset_; + struct DeoptimizationRelocSize { + int min_size; + int last_pc_offset; + }; + + DeoptimizationRelocSize deoptimization_reloc_size; + // Builder that keeps track of safepoints in the code. The table // itself is emitted at the end of the generated code. SafepointTableBuilder safepoints_; diff --git a/deps/v8/src/ia32/lithium-ia32.cc b/deps/v8/src/ia32/lithium-ia32.cc index a57e8c928c..221a7aa154 100644 --- a/deps/v8/src/ia32/lithium-ia32.cc +++ b/deps/v8/src/ia32/lithium-ia32.cc @@ -404,7 +404,7 @@ void LChunk::MarkEmptyBlocks() { } -void LStoreNamed::PrintDataTo(StringStream* stream) { +void LStoreNamedField::PrintDataTo(StringStream* stream) { object()->PrintTo(stream); stream->Add("."); stream->Add(*String::cast(*name())->ToCString()); @@ -413,7 +413,25 @@ void LStoreNamed::PrintDataTo(StringStream* stream) { } -void LStoreKeyed::PrintDataTo(StringStream* stream) { +void LStoreNamedGeneric::PrintDataTo(StringStream* stream) { + object()->PrintTo(stream); + stream->Add("."); + stream->Add(*String::cast(*name())->ToCString()); + stream->Add(" <- "); + value()->PrintTo(stream); +} + + +void LStoreKeyedFastElement::PrintDataTo(StringStream* stream) { + object()->PrintTo(stream); + stream->Add("["); + key()->PrintTo(stream); + stream->Add("] <- "); + value()->PrintTo(stream); +} + + +void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) { object()->PrintTo(stream); stream->Add("["); key()->PrintTo(stream); @@ 
-1223,7 +1241,7 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) { case kMathSqrt: return DefineSameAsFirst(result); case kMathPowHalf: - return AssignEnvironment(DefineSameAsFirst(result)); + return DefineSameAsFirst(result); default: UNREACHABLE(); return NULL; @@ -1840,6 +1858,23 @@ LInstruction* LChunkBuilder::DoStoreKeyedFastElement( } +LInstruction* LChunkBuilder::DoStorePixelArrayElement( + HStorePixelArrayElement* instr) { + ASSERT(instr->value()->representation().IsInteger32()); + ASSERT(instr->external_pointer()->representation().IsExternal()); + ASSERT(instr->key()->representation().IsInteger32()); + + LOperand* external_pointer = UseRegister(instr->external_pointer()); + LOperand* val = UseRegister(instr->value()); + LOperand* key = UseRegister(instr->key()); + // The generated code requires that the clamped value is in a byte + // register. eax is an arbitrary choice to satisfy this requirement. + LOperand* clamped = FixedTemp(eax); + + return new LStorePixelArrayElement(external_pointer, key, val, clamped); +} + + LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) { LOperand* context = UseFixed(instr->context(), esi); LOperand* object = UseFixed(instr->object(), edx); @@ -1923,8 +1958,8 @@ LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) { LInstruction* LChunkBuilder::DoDeleteProperty(HDeleteProperty* instr) { - LDeleteProperty* result = new LDeleteProperty(Use(instr->object()), - UseOrConstant(instr->key())); + LDeleteProperty* result = + new LDeleteProperty(Use(instr->object()), UseOrConstant(instr->key())); return MarkAsCall(DefineFixed(result, eax), instr); } @@ -1957,8 +1992,10 @@ LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) { LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) { - // There are no real uses of the arguments object (we bail out in all other - // cases). + // There are no real uses of the arguments object. 
+ // arguments.length and element access are supported directly on + // stack arguments, and any real arguments object use causes a bailout. + // So this value is never used. return NULL; } diff --git a/deps/v8/src/ia32/lithium-ia32.h b/deps/v8/src/ia32/lithium-ia32.h index f1b9ffc997..ad0b0ca059 100644 --- a/deps/v8/src/ia32/lithium-ia32.h +++ b/deps/v8/src/ia32/lithium-ia32.h @@ -42,8 +42,6 @@ class LCodeGen; #define LITHIUM_ALL_INSTRUCTION_LIST(V) \ V(ControlInstruction) \ V(Call) \ - V(StoreKeyed) \ - V(StoreNamed) \ LITHIUM_CONCRETE_INSTRUCTION_LIST(V) @@ -150,6 +148,7 @@ class LCodeGen; V(StoreKeyedGeneric) \ V(StoreNamedField) \ V(StoreNamedGeneric) \ + V(StorePixelArrayElement) \ V(StringCharCodeAt) \ V(StringLength) \ V(SubI) \ @@ -1580,34 +1579,23 @@ class LSmiUntag: public LTemplateInstruction<1, 1, 0> { }; -class LStoreNamed: public LTemplateInstruction<0, 2, 1> { +class LStoreNamedField: public LTemplateInstruction<0, 2, 1> { public: - LStoreNamed(LOperand* obj, LOperand* val) { + LStoreNamedField(LOperand* obj, LOperand* val, LOperand* temp) { inputs_[0] = obj; inputs_[1] = val; + temps_[0] = temp; } - DECLARE_INSTRUCTION(StoreNamed) - DECLARE_HYDROGEN_ACCESSOR(StoreNamed) + DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field") + DECLARE_HYDROGEN_ACCESSOR(StoreNamedField) virtual void PrintDataTo(StringStream* stream); LOperand* object() { return inputs_[0]; } LOperand* value() { return inputs_[1]; } - Handle<Object> name() const { return hydrogen()->name(); } -}; - - -class LStoreNamedField: public LStoreNamed { - public: - LStoreNamedField(LOperand* obj, LOperand* val, LOperand* temp) - : LStoreNamed(obj, val) { - temps_[0] = temp; - } - - DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field") - DECLARE_HYDROGEN_ACCESSOR(StoreNamedField) + Handle<Object> name() const { return hydrogen()->name(); } bool is_in_object() { return hydrogen()->is_in_object(); } int offset() { return hydrogen()->offset(); } bool 
needs_write_barrier() { return hydrogen()->NeedsWriteBarrier(); } @@ -1626,6 +1614,8 @@ class LStoreNamedGeneric: public LTemplateInstruction<0, 3, 0> { DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic") DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric) + virtual void PrintDataTo(StringStream* stream); + LOperand* context() { return inputs_[0]; } LOperand* object() { return inputs_[1]; } LOperand* value() { return inputs_[2]; } @@ -1633,15 +1623,17 @@ class LStoreNamedGeneric: public LTemplateInstruction<0, 3, 0> { }; -class LStoreKeyed: public LTemplateInstruction<0, 3, 0> { +class LStoreKeyedFastElement: public LTemplateInstruction<0, 3, 0> { public: - LStoreKeyed(LOperand* obj, LOperand* key, LOperand* val) { + LStoreKeyedFastElement(LOperand* obj, LOperand* key, LOperand* val) { inputs_[0] = obj; inputs_[1] = key; inputs_[2] = val; } - DECLARE_INSTRUCTION(StoreKeyed) + DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement, + "store-keyed-fast-element") + DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastElement) virtual void PrintDataTo(StringStream* stream); @@ -1651,14 +1643,25 @@ class LStoreKeyed: public LTemplateInstruction<0, 3, 0> { }; -class LStoreKeyedFastElement: public LStoreKeyed { +class LStorePixelArrayElement: public LTemplateInstruction<0, 3, 1> { public: - LStoreKeyedFastElement(LOperand* obj, LOperand* key, LOperand* val) - : LStoreKeyed(obj, key, val) {} + LStorePixelArrayElement(LOperand* external_pointer, + LOperand* key, + LOperand* val, + LOperand* clamped) { + inputs_[0] = external_pointer; + inputs_[1] = key; + inputs_[2] = val; + temps_[0] = clamped; + } - DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement, - "store-keyed-fast-element") - DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastElement) + DECLARE_CONCRETE_INSTRUCTION(StorePixelArrayElement, + "store-pixel-array-element") + DECLARE_HYDROGEN_ACCESSOR(StorePixelArrayElement) + + LOperand* external_pointer() { return inputs_[0]; } + LOperand* key() { return inputs_[1]; } + LOperand* 
value() { return inputs_[2]; } }; @@ -1676,6 +1679,8 @@ class LStoreKeyedGeneric: public LTemplateInstruction<0, 4, 0> { DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic") + virtual void PrintDataTo(StringStream* stream); + LOperand* context() { return inputs_[0]; } LOperand* object() { return inputs_[1]; } LOperand* key() { return inputs_[2]; } diff --git a/deps/v8/src/ia32/macro-assembler-ia32.cc b/deps/v8/src/ia32/macro-assembler-ia32.cc index 7f93b843d4..91b6651fe0 100644 --- a/deps/v8/src/ia32/macro-assembler-ia32.cc +++ b/deps/v8/src/ia32/macro-assembler-ia32.cc @@ -1654,6 +1654,28 @@ void MacroAssembler::LoadGlobalFunctionInitialMap(Register function, } +// Store the value in register src in the safepoint register stack +// slot for register dst. +void MacroAssembler::StoreToSafepointRegisterSlot(Register dst, Register src) { + mov(SafepointRegisterSlot(dst), src); +} + + +void MacroAssembler::StoreToSafepointRegisterSlot(Register dst, Immediate src) { + mov(SafepointRegisterSlot(dst), src); +} + + +void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) { + mov(dst, SafepointRegisterSlot(src)); +} + + +Operand MacroAssembler::SafepointRegisterSlot(Register reg) { + return Operand(esp, SafepointRegisterStackIndex(reg.code()) * kPointerSize); +} + + int MacroAssembler::SafepointRegisterStackIndex(int reg_code) { // The registers are pushed starting with the lowest encoding, // which means that lowest encodings are furthest away from diff --git a/deps/v8/src/ia32/macro-assembler-ia32.h b/deps/v8/src/ia32/macro-assembler-ia32.h index 16361ad239..62bb0f3634 100644 --- a/deps/v8/src/ia32/macro-assembler-ia32.h +++ b/deps/v8/src/ia32/macro-assembler-ia32.h @@ -143,7 +143,11 @@ class MacroAssembler: public Assembler { // Push and pop the registers that can hold pointers. 
void PushSafepointRegisters() { pushad(); } void PopSafepointRegisters() { popad(); } - static int SafepointRegisterStackIndex(int reg_code); + // Store the value in register/immediate src in the safepoint + // register stack slot for register dst. + void StoreToSafepointRegisterSlot(Register dst, Register src); + void StoreToSafepointRegisterSlot(Register dst, Immediate src); + void LoadFromSafepointRegisterSlot(Register dst, Register src); // --------------------------------------------------------------------------- // JavaScript invokes @@ -667,6 +671,15 @@ class MacroAssembler: public Assembler { MUST_USE_RESULT MaybeObject* PopHandleScopeHelper(Register saved, Register scratch, bool gc_allowed); + + + // Compute memory operands for safepoint stack slots. + Operand SafepointRegisterSlot(Register reg); + static int SafepointRegisterStackIndex(int reg_code); + + // Needs access to SafepointRegisterStackIndex for optimized frame + // traversal. + friend class OptimizedFrame; }; diff --git a/deps/v8/src/ia32/stub-cache-ia32.cc b/deps/v8/src/ia32/stub-cache-ia32.cc index fdb22acea0..51cc46a08e 100644 --- a/deps/v8/src/ia32/stub-cache-ia32.cc +++ b/deps/v8/src/ia32/stub-cache-ia32.cc @@ -2204,8 +2204,9 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object, break; case STRING_CHECK: - if (!function->IsBuiltin()) { - // Calling non-builtins with a value as receiver requires boxing. + if (!function->IsBuiltin() && !function_info->strict_mode()) { + // Calling non-strict non-builtins with a value as the receiver + // requires boxing. __ jmp(&miss); } else { // Check that the object is a string or a symbol. @@ -2220,8 +2221,9 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object, break; case NUMBER_CHECK: { - if (!function->IsBuiltin()) { - // Calling non-builtins with a value as receiver requires boxing. 
+ if (!function->IsBuiltin() && !function_info->strict_mode()) { + // Calling non-strict non-builtins with a value as the receiver + // requires boxing. __ jmp(&miss); } else { Label fast; @@ -2241,8 +2243,9 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object, } case BOOLEAN_CHECK: { - if (!function->IsBuiltin()) { - // Calling non-builtins with a value as receiver requires boxing. + if (!function->IsBuiltin() && !function_info->strict_mode()) { + // Calling non-strict non-builtins with a value as the receiver + // requires boxing. __ jmp(&miss); } else { Label fast; @@ -2586,8 +2589,8 @@ MaybeObject* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object, // Compute the cell operand to use. Operand cell_operand = Operand::Cell(Handle<JSGlobalPropertyCell>(cell)); if (Serializer::enabled()) { - __ mov(ecx, Immediate(Handle<JSGlobalPropertyCell>(cell))); - cell_operand = FieldOperand(ecx, JSGlobalPropertyCell::kValueOffset); + __ mov(ebx, Immediate(Handle<JSGlobalPropertyCell>(cell))); + cell_operand = FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset); } // Check that the value in the cell is not the hole. If it is, this diff --git a/deps/v8/src/ic.cc b/deps/v8/src/ic.cc index 968b45d0b6..7482830ae0 100644 --- a/deps/v8/src/ic.cc +++ b/deps/v8/src/ic.cc @@ -435,16 +435,25 @@ Object* CallICBase::TryCallAsFunction(Object* object) { } -void CallICBase::ReceiverToObject(Handle<Object> object) { - HandleScope scope; - Handle<Object> receiver(object); +void CallICBase::ReceiverToObjectIfRequired(Handle<Object> callee, + Handle<Object> object) { + if (callee->IsJSFunction()) { + Handle<JSFunction> function = Handle<JSFunction>::cast(callee); + if (function->shared()->strict_mode() || function->IsBuiltin()) { + // Do not wrap receiver for strict mode functions or for builtins. + return; + } + } - // Change the receiver to the result of calling ToObject on it. 
- const int argc = this->target()->arguments_count(); - StackFrameLocator locator; - JavaScriptFrame* frame = locator.FindJavaScriptFrame(0); - int index = frame->ComputeExpressionsCount() - (argc + 1); - frame->SetExpression(index, *Factory::ToObject(object)); + // And only wrap string, number or boolean. + if (object->IsString() || object->IsNumber() || object->IsBoolean()) { + // Change the receiver to the result of calling ToObject on it. + const int argc = this->target()->arguments_count(); + StackFrameLocator locator; + JavaScriptFrame* frame = locator.FindJavaScriptFrame(0); + int index = frame->ComputeExpressionsCount() - (argc + 1); + frame->SetExpression(index, *Factory::ToObject(object)); + } } @@ -458,10 +467,6 @@ MaybeObject* CallICBase::LoadFunction(State state, return TypeError("non_object_property_call", object, name); } - if (object->IsString() || object->IsNumber() || object->IsBoolean()) { - ReceiverToObject(object); - } - // Check if the name is trivially convertible to an index and get // the element if so. uint32_t index; @@ -505,6 +510,7 @@ MaybeObject* CallICBase::LoadFunction(State state, object->GetProperty(*object, &lookup, *name, &attr); if (!maybe_result->ToObject(&result)) return maybe_result; } + if (lookup.type() == INTERCEPTOR) { // If the object does not have the requested property, check which // exception we need to throw. @@ -516,31 +522,37 @@ MaybeObject* CallICBase::LoadFunction(State state, } } - ASSERT(result != Heap::the_hole_value()); + ASSERT(!result->IsTheHole()); - if (result->IsJSFunction()) { + HandleScope scope; + // Wrap result in a handle because ReceiverToObjectIfRequired may allocate + // new object and cause GC. + Handle<Object> result_handle(result); + // Make receiver an object if the callee requires it. Strict mode or builtin + // functions do not wrap the receiver, non-strict functions and objects + // called as functions do. 
+ ReceiverToObjectIfRequired(result_handle, object); + + if (result_handle->IsJSFunction()) { #ifdef ENABLE_DEBUGGER_SUPPORT // Handle stepping into a function if step into is active. if (Debug::StepInActive()) { // Protect the result in a handle as the debugger can allocate and might // cause GC. - HandleScope scope; - Handle<JSFunction> function(JSFunction::cast(result)); + Handle<JSFunction> function(JSFunction::cast(*result_handle)); Debug::HandleStepIn(function, object, fp(), false); return *function; } #endif - return result; + return *result_handle; } // Try to find a suitable function delegate for the object at hand. - result = TryCallAsFunction(result); - MaybeObject* answer = result; - if (!result->IsJSFunction()) { - answer = TypeError("property_not_function", object, name); - } - return answer; + result_handle = Handle<Object>(TryCallAsFunction(*result_handle)); + if (result_handle->IsJSFunction()) return *result_handle; + + return TypeError("property_not_function", object, name); } @@ -565,8 +577,8 @@ bool CallICBase::TryUpdateExtraICState(LookupResult* lookup, case kStringCharAt: if (object->IsString()) { String* string = String::cast(*object); - // Check that there's the right wrapper in the receiver slot. - ASSERT(string == JSValue::cast(args[0])->value()); + // Check there's the right string value or wrapper in the receiver slot. + ASSERT(string == args[0] || string == JSValue::cast(args[0])->value()); // If we're in the default (fastest) state and the index is // out of bounds, update the state to record this fact. 
if (*extra_ic_state == DEFAULT_STRING_STUB && @@ -775,10 +787,6 @@ MaybeObject* KeyedCallIC::LoadFunction(State state, return TypeError("non_object_property_call", object, key); } - if (object->IsString() || object->IsNumber() || object->IsBoolean()) { - ReceiverToObject(object); - } - if (FLAG_use_ic && state != MEGAMORPHIC && !object->IsAccessCheckNeeded()) { int argc = target()->arguments_count(); InLoopFlag in_loop = target()->ic_in_loop(); @@ -793,17 +801,20 @@ MaybeObject* KeyedCallIC::LoadFunction(State state, #endif } } - Object* result; - { MaybeObject* maybe_result = Runtime::GetObjectProperty(object, key); - if (!maybe_result->ToObject(&result)) return maybe_result; - } - if (result->IsJSFunction()) return result; - result = TryCallAsFunction(result); - MaybeObject* answer = result; - if (!result->IsJSFunction()) { - answer = TypeError("property_not_function", object, key); - } - return answer; + + HandleScope scope; + Handle<Object> result = GetProperty(object, key); + + // Make receiver an object if the callee requires it. Strict mode or builtin + // functions do not wrap the receiver, non-strict functions and objects + // called as functions do. + ReceiverToObjectIfRequired(result, object); + + if (result->IsJSFunction()) return *result; + result = Handle<Object>(TryCallAsFunction(*result)); + if (result->IsJSFunction()) return *result; + + return TypeError("property_not_function", object, key); } diff --git a/deps/v8/src/ic.h b/deps/v8/src/ic.h index 3b10d064f6..96838c7338 100644 --- a/deps/v8/src/ic.h +++ b/deps/v8/src/ic.h @@ -224,7 +224,7 @@ class CallICBase: public IC { // Otherwise, it returns the undefined value. 
Object* TryCallAsFunction(Object* object); - void ReceiverToObject(Handle<Object> object); + void ReceiverToObjectIfRequired(Handle<Object> callee, Handle<Object> object); static void Clear(Address address, Code* target); friend class IC; diff --git a/deps/v8/src/lithium-allocator.cc b/deps/v8/src/lithium-allocator.cc index 9f5f1b97d8..a13a189779 100644 --- a/deps/v8/src/lithium-allocator.cc +++ b/deps/v8/src/lithium-allocator.cc @@ -478,11 +478,6 @@ void LiveRange::ConvertOperands() { } -UsePosition* LiveRange::AddUsePosition(LifetimePosition pos) { - return AddUsePosition(pos, CreateAssignedOperand()); -} - - bool LiveRange::CanCover(LifetimePosition position) const { if (IsEmpty()) return false; return Start().Value() <= position.Value() && @@ -1098,6 +1093,21 @@ void LAllocator::ResolveControlFlow(LiveRange* range, } else { ASSERT(pred->end()->SecondSuccessor() == NULL); gap = GetLastGap(pred); + + // We are going to insert a move before the branch instruction. + // Some branch instructions (e.g. loops' back edges) + // can potentially cause a GC so they have a pointer map. + // By insterting a move we essentially create a copy of a + // value which is invisible to PopulatePointerMaps(), because we store + // it into a location different from the operand of a live range + // covering a branch instruction. + // Thus we need to manually record a pointer. + if (HasTaggedValue(range->id())) { + LInstruction* branch = InstructionAt(pred->last_instruction_index()); + if (branch->HasPointerMap()) { + branch->pointer_map()->RecordPointer(cur_op); + } + } } gap->GetOrCreateParallelMove(LGap::START)->AddMove(pred_op, cur_op); } diff --git a/deps/v8/src/lithium-allocator.h b/deps/v8/src/lithium-allocator.h index 914a5b6868..d53ea78718 100644 --- a/deps/v8/src/lithium-allocator.h +++ b/deps/v8/src/lithium-allocator.h @@ -286,7 +286,6 @@ class LiveRange: public ZoneObject { LiveRange* TopLevel() { return (parent_ == NULL) ? 
this : parent_; } LiveRange* next() const { return next_; } bool IsChild() const { return parent() != NULL; } - bool IsParent() const { return parent() == NULL; } int id() const { return id_; } bool IsFixed() const { return id_ < 0; } bool IsEmpty() const { return first_interval() == NULL; } @@ -360,7 +359,6 @@ class LiveRange: public ZoneObject { void EnsureInterval(LifetimePosition start, LifetimePosition end); void AddUseInterval(LifetimePosition start, LifetimePosition end); UsePosition* AddUsePosition(LifetimePosition pos, LOperand* operand); - UsePosition* AddUsePosition(LifetimePosition pos); // Shorten the most recently added interval by setting a new start. void ShortenTo(LifetimePosition start); diff --git a/deps/v8/src/lithium.h b/deps/v8/src/lithium.h index a2f9df0fdf..d85a87c12e 100644 --- a/deps/v8/src/lithium.h +++ b/deps/v8/src/lithium.h @@ -536,10 +536,12 @@ class ShallowIterator BASE_EMBEDDED { inline LEnvironment* env() { return env_; } private: + inline bool ShouldSkip(LOperand* op) { + return op == NULL || op->IsConstantOperand() || op->IsArgument(); + } + inline int AdvanceToNext(int start) { - while (start < limit_ && - (env_->values()->at(start) == NULL || - env_->values()->at(start)->IsConstantOperand())) { + while (start < limit_ && ShouldSkip(env_->values()->at(start))) { start++; } return start; diff --git a/deps/v8/src/log-utils.cc b/deps/v8/src/log-utils.cc index c7b75679ea..9a498ec0ff 100644 --- a/deps/v8/src/log-utils.cc +++ b/deps/v8/src/log-utils.cc @@ -300,6 +300,8 @@ void LogMessageBuilder::AppendDetailed(String* str, bool show_impl_info) { Append("\\,"); } else if (c == '\\') { Append("\\\\"); + } else if (c == '\"') { + Append("\"\""); } else { Append("%lc", c); } diff --git a/deps/v8/src/log.cc b/deps/v8/src/log.cc index 6eb3c9b0ba..16aeadb0b0 100644 --- a/deps/v8/src/log.cc +++ b/deps/v8/src/log.cc @@ -147,7 +147,7 @@ bool Profiler::paused_ = false; // StackTracer implementation // void StackTracer::Trace(TickSample* sample) 
{ - sample->function = NULL; + sample->tos = NULL; sample->frames_count = 0; // Avoid collecting traces while doing GC. @@ -159,15 +159,9 @@ void StackTracer::Trace(TickSample* sample) { return; } - const Address function_address = - sample->fp + JavaScriptFrameConstants::kFunctionOffset; - if (SafeStackFrameIterator::IsWithinBounds(sample->sp, js_entry_sp, - function_address)) { - Object* object = Memory::Object_at(function_address); - if (object->IsHeapObject()) { - sample->function = HeapObject::cast(object)->address(); - } - } + // Sample potential return address value for frameless invocation of + // stubs (we'll figure out later, if this value makes sense). + sample->tos = Memory::Address_at(sample->sp); int i = 0; const Address callback = Top::external_callback(); @@ -181,10 +175,7 @@ void StackTracer::Trace(TickSample* sample) { SafeStackTraceFrameIterator it(sample->fp, sample->sp, sample->sp, js_entry_sp); while (!it.done() && i < TickSample::kMaxFramesCount) { - Object* object = it.frame()->function_slot_object(); - if (object->IsHeapObject()) { - sample->stack[i++] = HeapObject::cast(object)->address(); - } + sample->stack[i++] = it.frame()->pc(); it.Advance(); } sample->frames_count = i; @@ -710,17 +701,6 @@ void Logger::SetterCallbackEvent(String* name, Address entry_point) { } -#ifdef ENABLE_LOGGING_AND_PROFILING -static const char* ComputeMarker(Code* code) { - switch (code->kind()) { - case Code::FUNCTION: return code->optimizable() ? 
"~" : ""; - case Code::OPTIMIZED_FUNCTION: return "*"; - default: return ""; - } -} -#endif - - void Logger::CodeCreateEvent(LogEventsAndTags tag, Code* code, const char* comment) { @@ -731,7 +711,7 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag, kLogEventsNames[CODE_CREATION_EVENT], kLogEventsNames[tag]); msg.AppendAddress(code->address()); - msg.Append(",%d,\"%s", code->ExecutableSize(), ComputeMarker(code)); + msg.Append(",%d,\"", code->ExecutableSize()); for (const char* p = comment; *p != '\0'; p++) { if (*p == '"') { msg.Append('\\'); @@ -746,9 +726,40 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag, } -void Logger::CodeCreateEvent(LogEventsAndTags tag, Code* code, String* name) { +void Logger::CodeCreateEvent(LogEventsAndTags tag, + Code* code, + String* name) { +#ifdef ENABLE_LOGGING_AND_PROFILING + if (name != NULL) { + SmartPointer<char> str = + name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL); + CodeCreateEvent(tag, code, *str); + } else { + CodeCreateEvent(tag, code, ""); + } +#endif +} + + +#ifdef ENABLE_LOGGING_AND_PROFILING +// ComputeMarker must only be used when SharedFunctionInfo is known. +static const char* ComputeMarker(Code* code) { + switch (code->kind()) { + case Code::FUNCTION: return code->optimizable() ? 
"~" : ""; + case Code::OPTIMIZED_FUNCTION: return "*"; + default: return ""; + } +} +#endif + + +void Logger::CodeCreateEvent(LogEventsAndTags tag, + Code* code, + SharedFunctionInfo* shared, + String* name) { #ifdef ENABLE_LOGGING_AND_PROFILING if (!Log::IsEnabled() || !FLAG_log_code) return; + if (code == Builtins::builtin(Builtins::LazyCompile)) return; LogMessageBuilder msg; SmartPointer<char> str = name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL); @@ -756,7 +767,9 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag, Code* code, String* name) { kLogEventsNames[CODE_CREATION_EVENT], kLogEventsNames[tag]); msg.AppendAddress(code->address()); - msg.Append(",%d,\"%s%s\"", code->ExecutableSize(), ComputeMarker(code), *str); + msg.Append(",%d,\"%s\",", code->ExecutableSize(), *str); + msg.AppendAddress(shared->address()); + msg.Append(",%s", ComputeMarker(code)); LowLevelCodeCreateEvent(code, &msg); msg.Append('\n'); msg.WriteToLogFile(); @@ -764,26 +777,31 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag, Code* code, String* name) { } +// Although, it is possible to extract source and line from +// the SharedFunctionInfo object, we left it to caller +// to leave logging functions free from heap allocations. 
void Logger::CodeCreateEvent(LogEventsAndTags tag, - Code* code, String* name, + Code* code, + SharedFunctionInfo* shared, String* source, int line) { #ifdef ENABLE_LOGGING_AND_PROFILING if (!Log::IsEnabled() || !FLAG_log_code) return; LogMessageBuilder msg; - SmartPointer<char> str = - name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL); + SmartPointer<char> name = + shared->DebugName()->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL); SmartPointer<char> sourcestr = source->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL); msg.Append("%s,%s,", kLogEventsNames[CODE_CREATION_EVENT], kLogEventsNames[tag]); msg.AppendAddress(code->address()); - msg.Append(",%d,\"%s%s %s:%d\"", + msg.Append(",%d,\"%s %s:%d\",", code->ExecutableSize(), - ComputeMarker(code), - *str, + *name, *sourcestr, line); + msg.AppendAddress(shared->address()); + msg.Append(",%s", ComputeMarker(code)); LowLevelCodeCreateEvent(code, &msg); msg.Append('\n'); msg.WriteToLogFile(); @@ -863,42 +881,9 @@ void Logger::SnapshotPositionEvent(Address addr, int pos) { } -void Logger::FunctionCreateEvent(JSFunction* function) { -#ifdef ENABLE_LOGGING_AND_PROFILING - // This function can be called from GC iterators (during Scavenge, - // MC, and MS), so marking bits can be set on objects. That's - // why unchecked accessors are used here. 
- if (!Log::IsEnabled() || !FLAG_log_code) return; - LogMessageBuilder msg; - msg.Append("%s,", kLogEventsNames[FUNCTION_CREATION_EVENT]); - msg.AppendAddress(function->address()); - msg.Append(','); - msg.AppendAddress(function->unchecked_code()->address()); - msg.Append('\n'); - msg.WriteToLogFile(); -#endif -} - - -void Logger::FunctionCreateEventFromMove(JSFunction* function) { -#ifdef ENABLE_LOGGING_AND_PROFILING - if (function->unchecked_code() != Builtins::builtin(Builtins::LazyCompile)) { - FunctionCreateEvent(function); - } -#endif -} - - -void Logger::FunctionMoveEvent(Address from, Address to) { +void Logger::SFIMoveEvent(Address from, Address to) { #ifdef ENABLE_LOGGING_AND_PROFILING - MoveEventInternal(FUNCTION_MOVE_EVENT, from, to); -#endif -} - - -void Logger::FunctionDeleteEvent(Address from) { -#ifdef ENABLE_LOGGING_AND_PROFILING - DeleteEventInternal(FUNCTION_DELETE_EVENT, from); + MoveEventInternal(SFI_MOVE_EVENT, from, to); #endif } @@ -1118,7 +1103,7 @@ void Logger::TickEvent(TickSample* sample, bool overflow) { msg.Append(','); msg.AppendAddress(sample->sp); msg.Append(','); - msg.AppendAddress(sample->function); + msg.AppendAddress(sample->tos); msg.Append(",%d", static_cast<int>(sample->state)); if (overflow) { msg.Append(",overflow"); @@ -1187,7 +1172,6 @@ void Logger::ResumeProfiler(int flags, int tag) { LOG(UncheckedStringEvent("profiler", "resume")); FLAG_log_code = true; LogCompiledFunctions(); - LogFunctionObjects(); LogAccessorCallbacks(); if (!FLAG_sliding_state_window && !ticker_->IsActive()) { ticker_->Start(); @@ -1388,10 +1372,9 @@ void Logger::LogCompiledFunctions() { // During iteration, there can be heap allocation due to // GetScriptLineNumber call. for (int i = 0; i < compiled_funcs_count; ++i) { + if (*code_objects[i] == Builtins::builtin(Builtins::LazyCompile)) continue; Handle<SharedFunctionInfo> shared = sfis[i]; - Handle<String> name(String::cast(shared->name())); - Handle<String> func_name(name->length() > 0 ? 
- *name : shared->inferred_name()); + Handle<String> func_name(shared->DebugName()); if (shared->script()->IsScript()) { Handle<Script> script(Script::cast(shared->script())); if (script->name()->IsString()) { @@ -1400,18 +1383,18 @@ void Logger::LogCompiledFunctions() { if (line_num > 0) { PROFILE(CodeCreateEvent( Logger::ToNativeByScript(Logger::LAZY_COMPILE_TAG, *script), - *code_objects[i], *func_name, + *code_objects[i], *shared, *script_name, line_num + 1)); } else { // Can't distinguish eval and script here, so always use Script. PROFILE(CodeCreateEvent( Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script), - *code_objects[i], *script_name)); + *code_objects[i], *shared, *script_name)); } } else { PROFILE(CodeCreateEvent( Logger::ToNativeByScript(Logger::LAZY_COMPILE_TAG, *script), - *code_objects[i], *func_name)); + *code_objects[i], *shared, *func_name)); } } else if (shared->IsApiFunction()) { // API function. @@ -1425,24 +1408,12 @@ void Logger::LogCompiledFunctions() { } } else { PROFILE(CodeCreateEvent( - Logger::LAZY_COMPILE_TAG, *code_objects[i], *func_name)); + Logger::LAZY_COMPILE_TAG, *code_objects[i], *shared, *func_name)); } } } -void Logger::LogFunctionObjects() { - AssertNoAllocation no_alloc; - HeapIterator iterator; - for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) { - if (!obj->IsJSFunction()) continue; - JSFunction* jsf = JSFunction::cast(obj); - if (!jsf->is_compiled()) continue; - PROFILE(FunctionCreateEvent(jsf)); - } -} - - void Logger::LogAccessorCallbacks() { AssertNoAllocation no_alloc; HeapIterator iterator; diff --git a/deps/v8/src/log.h b/deps/v8/src/log.h index 771709c8a1..a808cd1d4c 100644 --- a/deps/v8/src/log.h +++ b/deps/v8/src/log.h @@ -91,9 +91,7 @@ class LogMessageBuilder; V(CODE_MOVE_EVENT, "code-move") \ V(CODE_DELETE_EVENT, "code-delete") \ V(CODE_MOVING_GC, "code-moving-gc") \ - V(FUNCTION_CREATION_EVENT, "function-creation") \ - V(FUNCTION_MOVE_EVENT, "function-move") \ - 
V(FUNCTION_DELETE_EVENT, "function-delete") \ + V(SFI_MOVE_EVENT, "sfi-move") \ V(SNAPSHOT_POSITION_EVENT, "snapshot-pos") \ V(TICK_EVENT, "tick") \ V(REPEAT_META_EVENT, "repeat") \ @@ -205,8 +203,15 @@ class Logger { // Emits a code create event. static void CodeCreateEvent(LogEventsAndTags tag, Code* code, const char* source); - static void CodeCreateEvent(LogEventsAndTags tag, Code* code, String* name); - static void CodeCreateEvent(LogEventsAndTags tag, Code* code, String* name, + static void CodeCreateEvent(LogEventsAndTags tag, + Code* code, String* name); + static void CodeCreateEvent(LogEventsAndTags tag, + Code* code, + SharedFunctionInfo* shared, + String* name); + static void CodeCreateEvent(LogEventsAndTags tag, + Code* code, + SharedFunctionInfo* shared, String* source, int line); static void CodeCreateEvent(LogEventsAndTags tag, Code* code, int args_count); static void CodeMovingGCEvent(); @@ -216,13 +221,8 @@ class Logger { static void CodeMoveEvent(Address from, Address to); // Emits a code delete event. static void CodeDeleteEvent(Address from); - // Emits a function object create event. - static void FunctionCreateEvent(JSFunction* function); - static void FunctionCreateEventFromMove(JSFunction* function); - // Emits a function move event. - static void FunctionMoveEvent(Address from, Address to); - // Emits a function delete event. - static void FunctionDeleteEvent(Address from); + + static void SFIMoveEvent(Address from, Address to); static void SnapshotPositionEvent(Address addr, int pos); @@ -273,8 +273,6 @@ class Logger { // Logs all compiled functions found in the heap. static void LogCompiledFunctions(); - // Logs all compiled JSFunction objects found in the heap. - static void LogFunctionObjects(); // Logs all accessor callbacks found in the heap. static void LogAccessorCallbacks(); // Used for logging stubs found in the snapshot. 
diff --git a/deps/v8/src/mark-compact.cc b/deps/v8/src/mark-compact.cc index 5c649d17d5..a3b769a8bd 100644 --- a/deps/v8/src/mark-compact.cc +++ b/deps/v8/src/mark-compact.cc @@ -2819,9 +2819,8 @@ int MarkCompactCollector::RelocateOldNonCodeObject(HeapObject* obj, ASSERT(!HeapObject::FromAddress(new_addr)->IsCode()); HeapObject* copied_to = HeapObject::FromAddress(new_addr); - if (copied_to->IsJSFunction()) { - PROFILE(FunctionMoveEvent(old_addr, new_addr)); - PROFILE(FunctionCreateEventFromMove(JSFunction::cast(copied_to))); + if (copied_to->IsSharedFunctionInfo()) { + PROFILE(SFIMoveEvent(old_addr, new_addr)); } HEAP_PROFILE(ObjectMoveEvent(old_addr, new_addr)); @@ -2912,9 +2911,8 @@ int MarkCompactCollector::RelocateNewObject(HeapObject* obj) { #endif HeapObject* copied_to = HeapObject::FromAddress(new_addr); - if (copied_to->IsJSFunction()) { - PROFILE(FunctionMoveEvent(old_addr, new_addr)); - PROFILE(FunctionCreateEventFromMove(JSFunction::cast(copied_to))); + if (copied_to->IsSharedFunctionInfo()) { + PROFILE(SFIMoveEvent(old_addr, new_addr)); } HEAP_PROFILE(ObjectMoveEvent(old_addr, new_addr)); @@ -2931,8 +2929,6 @@ void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj) { #ifdef ENABLE_LOGGING_AND_PROFILING if (obj->IsCode()) { PROFILE(CodeDeleteEvent(obj->address())); - } else if (obj->IsJSFunction()) { - PROFILE(FunctionDeleteEvent(obj->address())); } #endif } diff --git a/deps/v8/src/objects.cc b/deps/v8/src/objects.cc index e0232d5873..2d100529ea 100644 --- a/deps/v8/src/objects.cc +++ b/deps/v8/src/objects.cc @@ -2813,6 +2813,12 @@ bool JSObject::ReferencesObject(Object* obj) { MaybeObject* JSObject::PreventExtensions() { + if (IsAccessCheckNeeded() && + !Top::MayNamedAccess(this, Heap::undefined_value(), v8::ACCESS_KEYS)) { + Top::ReportFailedAccessCheck(this, v8::ACCESS_KEYS); + return Heap::false_value(); + } + if (IsJSGlobalProxy()) { Object* proto = GetPrototype(); if (proto->IsNull()) return this; @@ -6660,7 +6666,6 @@ bool 
JSObject::HasElementPostInterceptor(JSObject* receiver, uint32_t index) { break; } case PIXEL_ELEMENTS: { - // TODO(iposva): Add testcase. PixelArray* pixels = PixelArray::cast(elements()); if (index < static_cast<uint32_t>(pixels->length())) { return true; @@ -6674,7 +6679,6 @@ bool JSObject::HasElementPostInterceptor(JSObject* receiver, uint32_t index) { case EXTERNAL_INT_ELEMENTS: case EXTERNAL_UNSIGNED_INT_ELEMENTS: case EXTERNAL_FLOAT_ELEMENTS: { - // TODO(kbr): Add testcase. ExternalArray* array = ExternalArray::cast(elements()); if (index < static_cast<uint32_t>(array->length())) { return true; @@ -7265,11 +7269,7 @@ MaybeObject* JSObject::GetElementPostInterceptor(Object* receiver, } break; } - case PIXEL_ELEMENTS: { - // TODO(iposva): Add testcase and implement. - UNIMPLEMENTED(); - break; - } + case PIXEL_ELEMENTS: case EXTERNAL_BYTE_ELEMENTS: case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: case EXTERNAL_SHORT_ELEMENTS: @@ -7277,8 +7277,8 @@ MaybeObject* JSObject::GetElementPostInterceptor(Object* receiver, case EXTERNAL_INT_ELEMENTS: case EXTERNAL_UNSIGNED_INT_ELEMENTS: case EXTERNAL_FLOAT_ELEMENTS: { - // TODO(kbr): Add testcase and implement. 
- UNIMPLEMENTED(); + MaybeObject* value = GetExternalElement(index); + if (!value->ToObjectUnchecked()->IsUndefined()) return value; break; } case DICTIONARY_ELEMENTS: { @@ -7366,6 +7366,46 @@ MaybeObject* JSObject::GetElementWithReceiver(Object* receiver, } break; } + case PIXEL_ELEMENTS: + case EXTERNAL_BYTE_ELEMENTS: + case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: + case EXTERNAL_SHORT_ELEMENTS: + case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: + case EXTERNAL_INT_ELEMENTS: + case EXTERNAL_UNSIGNED_INT_ELEMENTS: + case EXTERNAL_FLOAT_ELEMENTS: { + MaybeObject* value = GetExternalElement(index); + if (!value->ToObjectUnchecked()->IsUndefined()) return value; + break; + } + case DICTIONARY_ELEMENTS: { + NumberDictionary* dictionary = element_dictionary(); + int entry = dictionary->FindEntry(index); + if (entry != NumberDictionary::kNotFound) { + Object* element = dictionary->ValueAt(entry); + PropertyDetails details = dictionary->DetailsAt(entry); + if (details.type() == CALLBACKS) { + return GetElementWithCallback(receiver, + element, + index, + this); + } + return element; + } + break; + } + } + + Object* pt = GetPrototype(); + if (pt == Heap::null_value()) return Heap::undefined_value(); + return pt->GetElementWithReceiver(receiver, index); +} + + +MaybeObject* JSObject::GetExternalElement(uint32_t index) { + // Get element works for both JSObject and JSArray since + // JSArray::length cannot change. 
+ switch (GetElementsKind()) { case PIXEL_ELEMENTS: { PixelArray* pixels = PixelArray::cast(elements()); if (index < static_cast<uint32_t>(pixels->length())) { @@ -7433,27 +7473,12 @@ MaybeObject* JSObject::GetElementWithReceiver(Object* receiver, } break; } - case DICTIONARY_ELEMENTS: { - NumberDictionary* dictionary = element_dictionary(); - int entry = dictionary->FindEntry(index); - if (entry != NumberDictionary::kNotFound) { - Object* element = dictionary->ValueAt(entry); - PropertyDetails details = dictionary->DetailsAt(entry); - if (details.type() == CALLBACKS) { - return GetElementWithCallback(receiver, - element, - index, - this); - } - return element; - } + case FAST_ELEMENTS: + case DICTIONARY_ELEMENTS: + UNREACHABLE(); break; - } } - - Object* pt = GetPrototype(); - if (pt == Heap::null_value()) return Heap::undefined_value(); - return pt->GetElementWithReceiver(receiver, index); + return Heap::undefined_value(); } diff --git a/deps/v8/src/objects.h b/deps/v8/src/objects.h index d6349e66eb..fbfc5fdc25 100644 --- a/deps/v8/src/objects.h +++ b/deps/v8/src/objects.h @@ -1549,6 +1549,11 @@ class JSObject: public HeapObject { MaybeObject* GetElementWithReceiver(Object* receiver, uint32_t index); MaybeObject* GetElementWithInterceptor(Object* receiver, uint32_t index); + // Get external element value at index if there is one and undefined + // otherwise. Can return a failure if allocation of a heap number + // failed. + MaybeObject* GetExternalElement(uint32_t index); + MUST_USE_RESULT MaybeObject* SetFastElementsCapacityAndLength(int capacity, int length); MUST_USE_RESULT MaybeObject* SetSlowElements(Object* length); diff --git a/deps/v8/src/platform-cygwin.cc b/deps/v8/src/platform-cygwin.cc index f5fc5be070..a7cc5256f5 100644 --- a/deps/v8/src/platform-cygwin.cc +++ b/deps/v8/src/platform-cygwin.cc @@ -1,4 +1,4 @@ -// Copyright 2006-2008 the V8 project authors. All rights reserved. +// Copyright 2006-2011 the V8 project authors. All rights reserved. 
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -71,7 +71,7 @@ void OS::Setup() { uint64_t OS::CpuFeaturesImpliedByPlatform() { - return 0; // Nothing special about cygwin + return 0; // Nothing special about Cygwin. } @@ -209,7 +209,7 @@ class PosixMemoryMappedFile : public OS::MemoryMappedFile { OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) { - FILE* file = fopen(name, "w+"); + FILE* file = fopen(name, "r+"); if (file == NULL) return NULL; fseek(file, 0, SEEK_END); @@ -304,12 +304,12 @@ void OS::LogSharedLibraryAddresses() { void OS::SignalCodeMovingGC() { - // Nothing to do on Cygwin + // Nothing to do on Cygwin. } int OS::StackWalk(Vector<OS::StackFrame> frames) { - // Not supported on Cygwin + // Not supported on Cygwin. return 0; } @@ -443,17 +443,36 @@ void Thread::Join() { } +static inline Thread::LocalStorageKey PthreadKeyToLocalKey( + pthread_key_t pthread_key) { + // We need to cast pthread_key_t to Thread::LocalStorageKey in two steps + // because pthread_key_t is a pointer type on Cygwin. This will probably not + // work on 64-bit platforms, but Cygwin doesn't support 64-bit anyway. 
+ STATIC_ASSERT(sizeof(Thread::LocalStorageKey) == sizeof(pthread_key_t)); + intptr_t ptr_key = reinterpret_cast<intptr_t>(pthread_key); + return static_cast<Thread::LocalStorageKey>(ptr_key); +} + + +static inline pthread_key_t LocalKeyToPthreadKey( + Thread::LocalStorageKey local_key) { + STATIC_ASSERT(sizeof(Thread::LocalStorageKey) == sizeof(pthread_key_t)); + intptr_t ptr_key = static_cast<intptr_t>(local_key); + return reinterpret_cast<pthread_key_t>(ptr_key); +} + + Thread::LocalStorageKey Thread::CreateThreadLocalKey() { pthread_key_t key; int result = pthread_key_create(&key, NULL); USE(result); ASSERT(result == 0); - return static_cast<LocalStorageKey>(key); + return PthreadKeyToLocalKey(key); } void Thread::DeleteThreadLocalKey(LocalStorageKey key) { - pthread_key_t pthread_key = static_cast<pthread_key_t>(key); + pthread_key_t pthread_key = LocalKeyToPthreadKey(key); int result = pthread_key_delete(pthread_key); USE(result); ASSERT(result == 0); @@ -461,13 +480,13 @@ void Thread::DeleteThreadLocalKey(LocalStorageKey key) { void* Thread::GetThreadLocal(LocalStorageKey key) { - pthread_key_t pthread_key = static_cast<pthread_key_t>(key); + pthread_key_t pthread_key = LocalKeyToPthreadKey(key); return pthread_getspecific(pthread_key); } void Thread::SetThreadLocal(LocalStorageKey key, void* value) { - pthread_key_t pthread_key = static_cast<pthread_key_t>(key); + pthread_key_t pthread_key = LocalKeyToPthreadKey(key); pthread_setspecific(pthread_key, value); } @@ -594,7 +613,7 @@ Semaphore* OS::CreateSemaphore(int count) { // ---------------------------------------------------------------------------- // Cygwin profiler support. // -// On cygwin we use the same sampler implementation as on win32 +// On Cygwin we use the same sampler implementation as on win32. class Sampler::PlatformData : public Malloced { public: @@ -698,8 +717,7 @@ void Sampler::Start() { // Start sampler thread. 
DWORD tid; SetActive(true); - data_->sampler_thread_ = CreateThread(NULL, 0, SamplerEntry, data_, 0, - &tid); + data_->sampler_thread_ = CreateThread(NULL, 0, SamplerEntry, data_, 0, &tid); // Set thread to high priority to increase sampling accuracy. SetThreadPriority(data_->sampler_thread_, THREAD_PRIORITY_TIME_CRITICAL); } diff --git a/deps/v8/src/platform-freebsd.cc b/deps/v8/src/platform-freebsd.cc index c18049fec7..21763b5de9 100644 --- a/deps/v8/src/platform-freebsd.cc +++ b/deps/v8/src/platform-freebsd.cc @@ -224,7 +224,7 @@ class PosixMemoryMappedFile : public OS::MemoryMappedFile { OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) { - FILE* file = fopen(name, "w+"); + FILE* file = fopen(name, "r+"); if (file == NULL) return NULL; fseek(file, 0, SEEK_END); diff --git a/deps/v8/src/platform-linux.cc b/deps/v8/src/platform-linux.cc index 761ff7e207..733956aceb 100644 --- a/deps/v8/src/platform-linux.cc +++ b/deps/v8/src/platform-linux.cc @@ -327,7 +327,7 @@ class PosixMemoryMappedFile : public OS::MemoryMappedFile { OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) { - FILE* file = fopen(name, "w+"); + FILE* file = fopen(name, "r+"); if (file == NULL) return NULL; fseek(file, 0, SEEK_END); diff --git a/deps/v8/src/platform-macos.cc b/deps/v8/src/platform-macos.cc index ea35c1b130..35724c3525 100644 --- a/deps/v8/src/platform-macos.cc +++ b/deps/v8/src/platform-macos.cc @@ -205,7 +205,7 @@ class PosixMemoryMappedFile : public OS::MemoryMappedFile { OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) { - FILE* file = fopen(name, "w+"); + FILE* file = fopen(name, "r+"); if (file == NULL) return NULL; fseek(file, 0, SEEK_END); diff --git a/deps/v8/src/platform-openbsd.cc b/deps/v8/src/platform-openbsd.cc index 0002dd7620..e2796294a6 100644 --- a/deps/v8/src/platform-openbsd.cc +++ b/deps/v8/src/platform-openbsd.cc @@ -222,7 +222,7 @@ class PosixMemoryMappedFile : public OS::MemoryMappedFile { OS::MemoryMappedFile* 
OS::MemoryMappedFile::open(const char* name) { - FILE* file = fopen(name, "w+"); + FILE* file = fopen(name, "r+"); if (file == NULL) return NULL; fseek(file, 0, SEEK_END); diff --git a/deps/v8/src/platform-solaris.cc b/deps/v8/src/platform-solaris.cc index 556e26be21..ebe0475f4d 100644 --- a/deps/v8/src/platform-solaris.cc +++ b/deps/v8/src/platform-solaris.cc @@ -235,7 +235,7 @@ class PosixMemoryMappedFile : public OS::MemoryMappedFile { OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) { - FILE* file = fopen(name, "w+"); + FILE* file = fopen(name, "r+"); if (file == NULL) return NULL; fseek(file, 0, SEEK_END); diff --git a/deps/v8/src/platform-win32.cc b/deps/v8/src/platform-win32.cc index b5a85f6689..f24994b5b8 100644 --- a/deps/v8/src/platform-win32.cc +++ b/deps/v8/src/platform-win32.cc @@ -939,7 +939,7 @@ OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) { // Open a physical file HANDLE file = CreateFileA(name, GENERIC_READ | GENERIC_WRITE, FILE_SHARE_READ | FILE_SHARE_WRITE, NULL, OPEN_EXISTING, 0, NULL); - if (file == NULL) return NULL; + if (file == INVALID_HANDLE_VALUE) return NULL; int size = static_cast<int>(GetFileSize(file, NULL)); diff --git a/deps/v8/src/platform.h b/deps/v8/src/platform.h index 8cb1561c5d..88825e6457 100644 --- a/deps/v8/src/platform.h +++ b/deps/v8/src/platform.h @@ -383,14 +383,10 @@ class Thread: public ThreadHandle { // LOCAL_STORAGE_KEY_MIN_VALUE and LOCAL_STORAGE_KEY_MAX_VALUE are specified // to ensure that enumeration type has correct value range (see Issue 830 for // more details). -#ifdef __CYGWIN__ - typedef void* LocalStorageKey; -#else enum LocalStorageKey { LOCAL_STORAGE_KEY_MIN_VALUE = kMinInt, LOCAL_STORAGE_KEY_MAX_VALUE = kMaxInt }; -#endif // Create new thread. Thread(); @@ -571,13 +567,13 @@ class TickSample { pc(NULL), sp(NULL), fp(NULL), - function(NULL), + tos(NULL), frames_count(0) {} StateTag state; // The state of the VM. - Address pc; // Instruction pointer. 
- Address sp; // Stack pointer. - Address fp; // Frame pointer. - Address function; // The last called JS function. + Address pc; // Instruction pointer. + Address sp; // Stack pointer. + Address fp; // Frame pointer. + Address tos; // Top stack value (*sp). static const int kMaxFramesCount = 64; Address stack[kMaxFramesCount]; // Call stack. int frames_count; // Number of captured frames. diff --git a/deps/v8/src/profile-generator-inl.h b/deps/v8/src/profile-generator-inl.h index 3df6af06f6..4bcfa9b1ec 100644 --- a/deps/v8/src/profile-generator-inl.h +++ b/deps/v8/src/profile-generator-inl.h @@ -45,16 +45,6 @@ const char* StringsStorage::GetFunctionName(const char* name) { } -CodeEntry::CodeEntry(int security_token_id) - : tag_(Logger::FUNCTION_TAG), - name_prefix_(kEmptyNamePrefix), - name_(""), - resource_name_(""), - line_number_(0), - security_token_id_(security_token_id) { -} - - CodeEntry::CodeEntry(Logger::LogEventsAndTags tag, const char* name_prefix, const char* name, @@ -66,6 +56,7 @@ CodeEntry::CodeEntry(Logger::LogEventsAndTags tag, name_(name), resource_name_(resource_name), line_number_(line_number), + shared_id_(0), security_token_id_(security_token_id) { } diff --git a/deps/v8/src/profile-generator.cc b/deps/v8/src/profile-generator.cc index 06ee333b90..261b3d6ff0 100644 --- a/deps/v8/src/profile-generator.cc +++ b/deps/v8/src/profile-generator.cc @@ -156,13 +156,18 @@ void CodeEntry::CopyData(const CodeEntry& source) { uint32_t CodeEntry::GetCallUid() const { uint32_t hash = ComputeIntegerHash(tag_); - hash ^= ComputeIntegerHash( - static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name_prefix_))); - hash ^= ComputeIntegerHash( - static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name_))); - hash ^= ComputeIntegerHash( - static_cast<uint32_t>(reinterpret_cast<uintptr_t>(resource_name_))); - hash ^= ComputeIntegerHash(line_number_); + if (shared_id_ != 0) { + hash ^= ComputeIntegerHash( + static_cast<uint32_t>(shared_id_)); + } else { + hash ^= 
ComputeIntegerHash( + static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name_prefix_))); + hash ^= ComputeIntegerHash( + static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name_))); + hash ^= ComputeIntegerHash( + static_cast<uint32_t>(reinterpret_cast<uintptr_t>(resource_name_))); + hash ^= ComputeIntegerHash(line_number_); + } return hash; } @@ -170,10 +175,12 @@ uint32_t CodeEntry::GetCallUid() const { bool CodeEntry::IsSameAs(CodeEntry* entry) const { return this == entry || (tag_ == entry->tag_ - && name_prefix_ == entry->name_prefix_ - && name_ == entry->name_ - && resource_name_ == entry->resource_name_ - && line_number_ == entry->line_number_); + && shared_id_ == entry->shared_id_ + && (shared_id_ != 0 + || (name_prefix_ == entry->name_prefix_ + && name_ == entry->name_ + && resource_name_ == entry->resource_name_ + && line_number_ == entry->line_number_))); } @@ -458,23 +465,12 @@ void CpuProfile::Print() { } +CodeEntry* const CodeMap::kSfiCodeEntry = NULL; const CodeMap::CodeTreeConfig::Key CodeMap::CodeTreeConfig::kNoKey = NULL; const CodeMap::CodeTreeConfig::Value CodeMap::CodeTreeConfig::kNoValue = CodeMap::CodeEntryInfo(NULL, 0); -void CodeMap::AddAlias(Address start, CodeEntry* entry, Address code_start) { - CodeTree::Locator locator; - if (tree_.Find(code_start, &locator)) { - const CodeEntryInfo& code_info = locator.value(); - if (tree_.Insert(start, &locator)) { - entry->CopyData(*code_info.entry); - locator.set_value(CodeEntryInfo(entry, code_info.size)); - } - } -} - - CodeEntry* CodeMap::FindEntry(Address addr) { CodeTree::Locator locator; if (tree_.FindGreatestLessThan(addr, &locator)) { @@ -487,6 +483,22 @@ CodeEntry* CodeMap::FindEntry(Address addr) { } +int CodeMap::GetSFITag(Address addr) { + CodeTree::Locator locator; + // For SFI entries, 'size' field is used to store their IDs. 
+ if (tree_.Find(addr, &locator)) { + const CodeEntryInfo& entry = locator.value(); + ASSERT(entry.entry == kSfiCodeEntry); + return entry.size; + } else { + tree_.Insert(addr, &locator); + int tag = next_sfi_tag_++; + locator.set_value(CodeEntryInfo(kSfiCodeEntry, tag)); + return tag; + } +} + + void CodeMap::CodeTreePrinter::Call( const Address& key, const CodeMap::CodeEntryInfo& value) { OS::Print("%p %5d %s\n", key, value.size, value.entry->name()); @@ -715,13 +727,6 @@ CodeEntry* CpuProfilesCollection::NewCodeEntry(Logger::LogEventsAndTags tag, } -CodeEntry* CpuProfilesCollection::NewCodeEntry(int security_token_id) { - CodeEntry* entry = new CodeEntry(security_token_id); - code_entries_.Add(entry); - return entry; -} - - void CpuProfilesCollection::AddPathToCurrentProfiles( const Vector<CodeEntry*>& path) { // As starting / stopping profiles is rare relatively to this @@ -784,19 +789,10 @@ void ProfileGenerator::RecordTickSample(const TickSample& sample) { if (sample.pc != NULL) { *entry++ = code_map_.FindEntry(sample.pc); - if (sample.function != NULL) { - *entry = code_map_.FindEntry(sample.function); + if (sample.tos != NULL) { + *entry = code_map_.FindEntry(sample.tos); if (*entry != NULL && !(*entry)->is_js_function()) { *entry = NULL; - } else { - CodeEntry* pc_entry = *entries.start(); - if (pc_entry == NULL) { - *entry = NULL; - } else if (pc_entry->is_js_function()) { - // Use function entry in favor of pc entry, as function - // entry has security token. - *entries.start() = NULL; - } } entry++; } diff --git a/deps/v8/src/profile-generator.h b/deps/v8/src/profile-generator.h index cacd27eaab..748714dc4d 100644 --- a/deps/v8/src/profile-generator.h +++ b/deps/v8/src/profile-generator.h @@ -88,7 +88,6 @@ class StringsStorage { class CodeEntry { public: - explicit INLINE(CodeEntry(int security_token_id)); // CodeEntry doesn't own name strings, just references them. 
INLINE(CodeEntry(Logger::LogEventsAndTags tag, const char* name_prefix, @@ -103,6 +102,8 @@ class CodeEntry { INLINE(const char* name() const) { return name_; } INLINE(const char* resource_name() const) { return resource_name_; } INLINE(int line_number() const) { return line_number_; } + INLINE(int shared_id() const) { return shared_id_; } + INLINE(void set_shared_id(int shared_id)) { shared_id_ = shared_id; } INLINE(int security_token_id() const) { return security_token_id_; } INLINE(static bool is_js_function_tag(Logger::LogEventsAndTags tag)); @@ -119,6 +120,7 @@ class CodeEntry { const char* name_; const char* resource_name_; int line_number_; + int shared_id_; int security_token_id_; DISALLOW_COPY_AND_ASSIGN(CodeEntry); @@ -234,12 +236,12 @@ class CpuProfile { class CodeMap { public: - CodeMap() { } + CodeMap() : next_sfi_tag_(1) { } INLINE(void AddCode(Address addr, CodeEntry* entry, unsigned size)); INLINE(void MoveCode(Address from, Address to)); INLINE(void DeleteCode(Address addr)); - void AddAlias(Address start, CodeEntry* entry, Address code_start); CodeEntry* FindEntry(Address addr); + int GetSFITag(Address addr); void Print(); @@ -267,7 +269,11 @@ class CodeMap { void Call(const Address& key, const CodeEntryInfo& value); }; + // Fake CodeEntry pointer to distinguish SFI entries. + static CodeEntry* const kSfiCodeEntry; + CodeTree tree_; + int next_sfi_tag_; DISALLOW_COPY_AND_ASSIGN(CodeMap); }; diff --git a/deps/v8/src/runtime.cc b/deps/v8/src/runtime.cc index 48ff69f5d2..5a443efc3d 100644 --- a/deps/v8/src/runtime.cc +++ b/deps/v8/src/runtime.cc @@ -1051,6 +1051,12 @@ static MaybeObject* Runtime_DeclareGlobals(Arguments args) { // Fall-through and introduce the absent property by using // SetProperty. } else { + // For const properties, we treat a callback with this name + // even in the prototype as a conflicting declaration. 
+ if (is_const_property && (lookup.type() == CALLBACKS)) { + return ThrowRedeclarationError("const", name); + } + // Otherwise, we check for locally conflicting declarations. if (is_local && (is_read_only || is_const_property)) { const char* type = (is_read_only) ? "const" : "var"; return ThrowRedeclarationError(type, name); @@ -1076,29 +1082,34 @@ static MaybeObject* Runtime_DeclareGlobals(Arguments args) { ? static_cast<PropertyAttributes>(base | READ_ONLY) : base; - if (lookup.IsProperty()) { - // There's a local property that we need to overwrite because - // we're either declaring a function or there's an interceptor - // that claims the property is absent. - - // Check for conflicting re-declarations. We cannot have - // conflicting types in case of intercepted properties because - // they are absent. - if (lookup.type() != INTERCEPTOR && - (lookup.IsReadOnly() || is_const_property)) { - const char* type = (lookup.IsReadOnly()) ? "const" : "var"; - return ThrowRedeclarationError(type, name); - } - RETURN_IF_EMPTY_HANDLE(SetProperty(global, name, value, attributes)); + // There's a local property that we need to overwrite because + // we're either declaring a function or there's an interceptor + // that claims the property is absent. + // + // Check for conflicting re-declarations. We cannot have + // conflicting types in case of intercepted properties because + // they are absent. + if (lookup.IsProperty() && + (lookup.type() != INTERCEPTOR) && + (lookup.IsReadOnly() || is_const_property)) { + const char* type = (lookup.IsReadOnly()) ? "const" : "var"; + return ThrowRedeclarationError(type, name); + } + + // Safari does not allow the invocation of callback setters for + // function declarations. To mimic this behavior, we do not allow + // the invocation of setters for function values. This makes a + // difference for global functions with the same names as event + // handlers such as "function onload() {}". 
Firefox does call the + // onload setter in those case and Safari does not. We follow + // Safari for compatibility. + if (value->IsJSFunction()) { + RETURN_IF_EMPTY_HANDLE(SetLocalPropertyIgnoreAttributes(global, + name, + value, + attributes)); } else { - // If a property with this name does not already exist on the - // global object add the property locally. We take special - // precautions to always add it as a local property even in case - // of callbacks in the prototype chain (this rules out using - // SetProperty). Also, we must use the handle-based version to - // avoid GC issues. - RETURN_IF_EMPTY_HANDLE( - SetLocalPropertyIgnoreAttributes(global, name, value, attributes)); + RETURN_IF_EMPTY_HANDLE(SetProperty(global, name, value, attributes)); } } @@ -1186,6 +1197,20 @@ static MaybeObject* Runtime_DeclareContextSlot(Arguments args) { ASSERT(!context_ext->HasLocalProperty(*name)); Handle<Object> value(Heap::undefined_value()); if (*initial_value != NULL) value = initial_value; + // Declaring a const context slot is a conflicting declaration if + // there is a callback with that name in a prototype. It is + // allowed to introduce const variables in + // JSContextExtensionObjects. They are treated specially in + // SetProperty and no setters are invoked for those since they are + // not real JSObjects. + if (initial_value->IsTheHole() && + !context_ext->IsJSContextExtensionObject()) { + LookupResult lookup; + context_ext->Lookup(*name, &lookup); + if (lookup.IsProperty() && (lookup.type() == CALLBACKS)) { + return ThrowRedeclarationError("const", name); + } + } RETURN_IF_EMPTY_HANDLE(SetProperty(context_ext, name, value, mode)); } @@ -1212,11 +1237,7 @@ static MaybeObject* Runtime_InitializeVarGlobal(Arguments args) { // there, there is a property with this name in the prototype chain. // We follow Safari and Firefox behavior and only set the property // locally if there is an explicit initialization value that we have - // to assign to the property. 
When adding the property we take - // special precautions to always add it as a local property even in - // case of callbacks in the prototype chain (this rules out using - // SetProperty). We have SetLocalPropertyIgnoreAttributes for - // this. + // to assign to the property. // Note that objects can have hidden prototypes, so we need to traverse // the whole chain of hidden prototypes to do a 'local' lookup. JSObject* real_holder = global; @@ -1277,11 +1298,7 @@ static MaybeObject* Runtime_InitializeVarGlobal(Arguments args) { } global = Top::context()->global(); - if (assign) { - return global->SetLocalPropertyIgnoreAttributes(*name, - args[1], - attributes); - } + if (assign) return global->SetProperty(*name, args[1], attributes); return Heap::undefined_value(); } @@ -3673,6 +3690,8 @@ static MaybeObject* Runtime_DefineOrRedefineDataProperty(Arguments args) { is_element) { // Normalize the elements to enable attributes on the property. if (js_object->IsJSGlobalProxy()) { + // We do not need to do access checks here since these has already + // been performed by the call to GetOwnProperty. Handle<Object> proto(js_object->GetPrototype()); // If proxy is detached, ignore the assignment. Alternatively, // we could throw an exception. @@ -6927,6 +6946,7 @@ static MaybeObject* Runtime_NewObject(Arguments args) { bool first_allocation = !shared->live_objects_may_exist(); Handle<JSObject> result = Factory::NewJSObject(function); + RETURN_IF_EMPTY_HANDLE(result); // Delay setting the stub if inobject slack tracking is in progress. 
if (first_allocation && !shared->IsInobjectSlackTrackingInProgress()) { TrySettingInlineConstructStub(function); diff --git a/deps/v8/src/top.cc b/deps/v8/src/top.cc index 83d7de3afe..78db26a509 100644 --- a/deps/v8/src/top.cc +++ b/deps/v8/src/top.cc @@ -735,9 +735,8 @@ Failure* Top::ReThrow(MaybeObject* exception, MessageLocation* location) { bool can_be_caught_externally = false; ShouldReportException(&can_be_caught_externally, is_catchable_by_javascript(exception)); - if (can_be_caught_externally) { - thread_local_.catcher_ = try_catch_handler(); - } + thread_local_.catcher_ = can_be_caught_externally ? + try_catch_handler() : NULL; // Set the exception being re-thrown. set_pending_exception(exception); @@ -913,9 +912,10 @@ void Top::DoThrow(MaybeObject* exception, } } - if (can_be_caught_externally) { - thread_local_.catcher_ = try_catch_handler(); - } + // Do not forget to clean catcher_ if currently thrown exception cannot + // be caught. If necessary, ReThrow will update the catcher. + thread_local_.catcher_ = can_be_caught_externally ? + try_catch_handler() : NULL; // NOTE: Notifying the debugger or generating the message // may have caused new exceptions. For now, we just ignore diff --git a/deps/v8/src/v8.cc b/deps/v8/src/v8.cc index 0ff7649340..945043da9b 100644 --- a/deps/v8/src/v8.cc +++ b/deps/v8/src/v8.cc @@ -54,7 +54,12 @@ bool V8::Initialize(Deserializer* des) { if (has_been_disposed_ || has_fatal_error_) return false; if (IsRunning()) return true; +#if defined(V8_TARGET_ARCH_ARM) && !defined(USE_ARM_EABI) + use_crankshaft_ = false; +#else use_crankshaft_ = FLAG_crankshaft; +#endif + // Peephole optimization might interfere with deoptimization. 
FLAG_peephole_optimization = !use_crankshaft_; is_running_ = true; diff --git a/deps/v8/src/v8natives.js b/deps/v8/src/v8natives.js index 884b6f414d..91e19c13d8 100644 --- a/deps/v8/src/v8natives.js +++ b/deps/v8/src/v8natives.js @@ -92,7 +92,7 @@ function GlobalIsFinite(number) { // ECMA-262 - 15.1.2.2 function GlobalParseInt(string, radix) { - if (IS_UNDEFINED(radix)) { + if (IS_UNDEFINED(radix) || radix === 10 || radix === 0) { // Some people use parseInt instead of Math.floor. This // optimization makes parseInt on a Smi 12 times faster (60ns // vs 800ns). The following optimization makes parseInt on a @@ -105,7 +105,7 @@ function GlobalParseInt(string, radix) { // Truncate number. return string | 0; } - radix = 0; + if (IS_UNDEFINED(radix)) radix = 0; } else { radix = TO_INT32(radix); if (!(radix == 0 || (2 <= radix && radix <= 36))) diff --git a/deps/v8/src/version.cc b/deps/v8/src/version.cc index 8c233375e7..59824863d2 100644 --- a/deps/v8/src/version.cc +++ b/deps/v8/src/version.cc @@ -34,7 +34,7 @@ // cannot be changed without changing the SCons build script. #define MAJOR_VERSION 3 #define MINOR_VERSION 1 -#define BUILD_NUMBER 5 +#define BUILD_NUMBER 6 #define PATCH_LEVEL 0 #define CANDIDATE_VERSION false diff --git a/deps/v8/src/x64/assembler-x64.cc b/deps/v8/src/x64/assembler-x64.cc index 697f6cd403..ea41a202d5 100644 --- a/deps/v8/src/x64/assembler-x64.cc +++ b/deps/v8/src/x64/assembler-x64.cc @@ -190,13 +190,13 @@ void RelocInfo::PatchCode(byte* instructions, int instruction_count) { // ----------------------------------------------------------------------------- // Register constants. 
-const int Register::registerCodeByAllocationIndex[kNumAllocatableRegisters] = { - // rax, rbx, rdx, rcx, rdi, r8, r9, r11, r14, r12 - 0, 3, 2, 1, 7, 8, 9, 11, 14, 12 +const int Register::kRegisterCodeByAllocationIndex[kNumAllocatableRegisters] = { + // rax, rbx, rdx, rcx, rdi, r8, r9, r11, r14, r12 + 0, 3, 2, 1, 7, 8, 9, 11, 14, 12 }; -const int Register::allocationIndexByRegisterCode[kNumRegisters] = { - 0, 3, 2, 1, -1, -1, -1, 4, 5, 6, -1, 7, 9, -1, 8, -1 +const int Register::kAllocationIndexByRegisterCode[kNumRegisters] = { + 0, 3, 2, 1, -1, -1, -1, 4, 5, 6, -1, 7, 9, -1, 8, -1 }; @@ -3114,8 +3114,8 @@ void Assembler::RecordDebugBreakSlot() { } -void Assembler::RecordComment(const char* msg) { - if (FLAG_code_comments) { +void Assembler::RecordComment(const char* msg, bool force) { + if (FLAG_code_comments || force) { EnsureSpace ensure_space(this); RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg)); } diff --git a/deps/v8/src/x64/assembler-x64.h b/deps/v8/src/x64/assembler-x64.h index 91e7e6cc61..553fbe4220 100644 --- a/deps/v8/src/x64/assembler-x64.h +++ b/deps/v8/src/x64/assembler-x64.h @@ -99,12 +99,12 @@ struct Register { static const int kNumAllocatableRegisters = 10; static int ToAllocationIndex(Register reg) { - return allocationIndexByRegisterCode[reg.code()]; + return kAllocationIndexByRegisterCode[reg.code()]; } static Register FromAllocationIndex(int index) { ASSERT(index >= 0 && index < kNumAllocatableRegisters); - Register result = { registerCodeByAllocationIndex[index] }; + Register result = { kRegisterCodeByAllocationIndex[index] }; return result; } @@ -155,8 +155,8 @@ struct Register { int code_; private: - static const int registerCodeByAllocationIndex[kNumAllocatableRegisters]; - static const int allocationIndexByRegisterCode[kNumRegisters]; + static const int kRegisterCodeByAllocationIndex[kNumAllocatableRegisters]; + static const int kAllocationIndexByRegisterCode[kNumRegisters]; }; const Register rax = { 0 }; @@ -1312,7 
+1312,7 @@ class Assembler : public Malloced { // Record a comment relocation entry that can be used by a disassembler. // Use --code-comments to enable. - void RecordComment(const char* msg); + void RecordComment(const char* msg, bool force = false); // Writes a single word of data in the code stream. // Used for inline tables, e.g., jump-tables. diff --git a/deps/v8/src/x64/code-stubs-x64.cc b/deps/v8/src/x64/code-stubs-x64.cc index 0cfe665ced..240087e177 100644 --- a/deps/v8/src/x64/code-stubs-x64.cc +++ b/deps/v8/src/x64/code-stubs-x64.cc @@ -1336,54 +1336,33 @@ void TypeRecordingBinaryOpStub::GenerateFloatingPointCode( void TypeRecordingBinaryOpStub::GenerateStringAddCode(MacroAssembler* masm) { - GenerateRegisterArgsPush(masm); - // Registers containing left and right operands respectively. - Register lhs = rdx; - Register rhs = rax; - - // Test for string arguments before calling runtime. - Label not_strings, both_strings, not_string1, string1, string1_smi2; - - __ JumpIfNotString(lhs, r8, ¬_string1); - - // First argument is a a string, test second. - __ JumpIfSmi(rhs, &string1_smi2); - __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, r9); - __ j(above_equal, &string1); - - // First and second argument are strings. - StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB); - __ TailCallStub(&string_add_stub); - - __ bind(&string1_smi2); - // First argument is a string, second is a smi. Try to lookup the number - // string for the smi in the number string cache. - NumberToStringStub::GenerateLookupNumberStringCache( - masm, rhs, rbx, rcx, r8, true, &string1); + ASSERT(op_ == Token::ADD); + NearLabel left_not_string, call_runtime; - // Replace second argument on stack and tailcall string add stub to make - // the result. - __ movq(Operand(rsp, 1 * kPointerSize), rbx); - __ TailCallStub(&string_add_stub); + // Registers containing left and right operands respectively. + Register left = rdx; + Register right = rax; - // Only first argument is a string. 
- __ bind(&string1); - __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_FUNCTION); + // Test if left operand is a string. + __ JumpIfSmi(left, &left_not_string); + __ CmpObjectType(left, FIRST_NONSTRING_TYPE, rcx); + __ j(above_equal, &left_not_string); + StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB); + GenerateRegisterArgsPush(masm); + __ TailCallStub(&string_add_left_stub); - // First argument was not a string, test second. - __ bind(¬_string1); - __ JumpIfNotString(rhs, rhs, ¬_strings); + // Left operand is not a string, test right. + __ bind(&left_not_string); + __ JumpIfSmi(right, &call_runtime); + __ CmpObjectType(right, FIRST_NONSTRING_TYPE, rcx); + __ j(above_equal, &call_runtime); - // Only second argument is a string. - __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_FUNCTION); + StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB); + GenerateRegisterArgsPush(masm); + __ TailCallStub(&string_add_right_stub); - __ bind(¬_strings); // Neither argument is a string. - // Pop arguments, because CallRuntimeCode wants to push them again. - __ pop(rcx); - __ pop(rax); - __ pop(rdx); - __ push(rcx); + __ bind(&call_runtime); } @@ -1440,9 +1419,11 @@ void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) { void TypeRecordingBinaryOpStub::GenerateStringStub(MacroAssembler* masm) { + ASSERT(operands_type_ == TRBinaryOpIC::STRING); ASSERT(op_ == Token::ADD); GenerateStringAddCode(masm); - + // Try to add arguments as strings, otherwise, transition to the generic + // TRBinaryOpIC type. GenerateTypeTransition(masm); } @@ -3461,6 +3442,9 @@ void InstanceofStub::Generate(MacroAssembler* masm) { // is and instance of the function and anything else to // indicate that the value is not an instance. + // None of the flags are supported on X64. + ASSERT(flags_ == kNoFlags); + // Get the object - go slow case if it's a smi. 
Label slow; __ movq(rax, Operand(rsp, 2 * kPointerSize)); @@ -3536,10 +3520,11 @@ void InstanceofStub::Generate(MacroAssembler* masm) { } -Register InstanceofStub::left() { return rax; } +// Passing arguments in registers is not supported. +Register InstanceofStub::left() { return no_reg; } -Register InstanceofStub::right() { return rdx; } +Register InstanceofStub::right() { return no_reg; } int CompareStub::MinorKey() { @@ -3798,14 +3783,15 @@ void StringCharAtGenerator::GenerateSlow( void StringAddStub::Generate(MacroAssembler* masm) { - Label string_add_runtime; + Label string_add_runtime, call_builtin; + Builtins::JavaScript builtin_id = Builtins::ADD; // Load the two arguments. - __ movq(rax, Operand(rsp, 2 * kPointerSize)); // First argument. - __ movq(rdx, Operand(rsp, 1 * kPointerSize)); // Second argument. + __ movq(rax, Operand(rsp, 2 * kPointerSize)); // First argument (left). + __ movq(rdx, Operand(rsp, 1 * kPointerSize)); // Second argument (right). // Make sure that both arguments are strings if not known in advance. - if (string_check_) { + if (flags_ == NO_STRING_ADD_FLAGS) { Condition is_smi; is_smi = masm->CheckSmi(rax); __ j(is_smi, &string_add_runtime); @@ -3817,6 +3803,20 @@ void StringAddStub::Generate(MacroAssembler* masm) { __ j(is_smi, &string_add_runtime); __ CmpObjectType(rdx, FIRST_NONSTRING_TYPE, r9); __ j(above_equal, &string_add_runtime); + } else { + // Here at least one of the arguments is definitely a string. + // We convert the one that is not known to be a string. 
+ if ((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) == 0) { + ASSERT((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) != 0); + GenerateConvertArgument(masm, 2 * kPointerSize, rax, rbx, rcx, rdi, + &call_builtin); + builtin_id = Builtins::STRING_ADD_RIGHT; + } else if ((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) == 0) { + ASSERT((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) != 0); + GenerateConvertArgument(masm, 1 * kPointerSize, rdx, rbx, rcx, rdi, + &call_builtin); + builtin_id = Builtins::STRING_ADD_LEFT; + } } // Both arguments are strings. @@ -3844,14 +3844,14 @@ void StringAddStub::Generate(MacroAssembler* masm) { // rbx: length of first string // rcx: length of second string // rdx: second string - // r8: map of first string if string check was performed above - // r9: map of second string if string check was performed above + // r8: map of first string (if flags_ == NO_STRING_ADD_FLAGS) + // r9: map of second string (if flags_ == NO_STRING_ADD_FLAGS) Label string_add_flat_result, longer_than_two; __ bind(&both_not_zero_length); // If arguments where known to be strings, maps are not loaded to r8 and r9 // by the code above. - if (!string_check_) { + if (flags_ != NO_STRING_ADD_FLAGS) { __ movq(r8, FieldOperand(rax, HeapObject::kMapOffset)); __ movq(r9, FieldOperand(rdx, HeapObject::kMapOffset)); } @@ -4037,6 +4037,54 @@ void StringAddStub::Generate(MacroAssembler* masm) { // Just jump to runtime to add the two strings. __ bind(&string_add_runtime); __ TailCallRuntime(Runtime::kStringAdd, 2, 1); + + if (call_builtin.is_linked()) { + __ bind(&call_builtin); + __ InvokeBuiltin(builtin_id, JUMP_FUNCTION); + } +} + + +void StringAddStub::GenerateConvertArgument(MacroAssembler* masm, + int stack_offset, + Register arg, + Register scratch1, + Register scratch2, + Register scratch3, + Label* slow) { + // First check if the argument is already a string. 
+ Label not_string, done; + __ JumpIfSmi(arg, ¬_string); + __ CmpObjectType(arg, FIRST_NONSTRING_TYPE, scratch1); + __ j(below, &done); + + // Check the number to string cache. + Label not_cached; + __ bind(¬_string); + // Puts the cached result into scratch1. + NumberToStringStub::GenerateLookupNumberStringCache(masm, + arg, + scratch1, + scratch2, + scratch3, + false, + ¬_cached); + __ movq(arg, scratch1); + __ movq(Operand(rsp, stack_offset), arg); + __ jmp(&done); + + // Check if the argument is a safe string wrapper. + __ bind(¬_cached); + __ JumpIfSmi(arg, slow); + __ CmpObjectType(arg, JS_VALUE_TYPE, scratch1); // map -> scratch1. + __ j(not_equal, slow); + __ testb(FieldOperand(scratch1, Map::kBitField2Offset), + Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf)); + __ j(zero, slow); + __ movq(arg, FieldOperand(arg, JSValue::kValueOffset)); + __ movq(Operand(rsp, stack_offset), arg); + + __ bind(&done); } diff --git a/deps/v8/src/x64/code-stubs-x64.h b/deps/v8/src/x64/code-stubs-x64.h index 119b699304..1e6fc65140 100644 --- a/deps/v8/src/x64/code-stubs-x64.h +++ b/deps/v8/src/x64/code-stubs-x64.h @@ -360,24 +360,35 @@ class StringHelper : public AllStatic { // Flag that indicates how to generate code for the stub StringAddStub. enum StringAddFlags { NO_STRING_ADD_FLAGS = 0, - NO_STRING_CHECK_IN_STUB = 1 << 0 // Omit string check in stub. + // Omit left string check in stub (left is definitely a string). + NO_STRING_CHECK_LEFT_IN_STUB = 1 << 0, + // Omit right string check in stub (right is definitely a string). + NO_STRING_CHECK_RIGHT_IN_STUB = 1 << 1, + // Omit both string checks in stub. 
+ NO_STRING_CHECK_IN_STUB = + NO_STRING_CHECK_LEFT_IN_STUB | NO_STRING_CHECK_RIGHT_IN_STUB }; class StringAddStub: public CodeStub { public: - explicit StringAddStub(StringAddFlags flags) { - string_check_ = ((flags & NO_STRING_CHECK_IN_STUB) == 0); - } + explicit StringAddStub(StringAddFlags flags) : flags_(flags) {} private: Major MajorKey() { return StringAdd; } - int MinorKey() { return string_check_ ? 0 : 1; } + int MinorKey() { return flags_; } void Generate(MacroAssembler* masm); - // Should the stub check whether arguments are strings? - bool string_check_; + void GenerateConvertArgument(MacroAssembler* masm, + int stack_offset, + Register arg, + Register scratch1, + Register scratch2, + Register scratch3, + Label* slow); + + const StringAddFlags flags_; }; diff --git a/deps/v8/src/x64/codegen-x64.cc b/deps/v8/src/x64/codegen-x64.cc index 150ed664b0..c07bcf9044 100644 --- a/deps/v8/src/x64/codegen-x64.cc +++ b/deps/v8/src/x64/codegen-x64.cc @@ -7239,8 +7239,8 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) { Variable* variable = node->expression()->AsVariableProxy()->AsVariable(); if (variable != NULL) { // Delete of an unqualified identifier is disallowed in strict mode - // so this code can only be reached in non-strict mode. - ASSERT(strict_mode_flag() == kNonStrictMode); + // but "delete this" is. 
+ ASSERT(strict_mode_flag() == kNonStrictMode || variable->is_this()); Slot* slot = variable->AsSlot(); if (variable->is_global()) { LoadGlobal(); @@ -7249,7 +7249,6 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) { Result answer = frame_->InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, 3); frame_->Push(&answer); - return; } else if (slot != NULL && slot->type() == Slot::LOOKUP) { // Call the runtime to delete from the context holding the named @@ -7260,13 +7259,11 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) { frame_->EmitPush(variable->name()); Result answer = frame_->CallRuntime(Runtime::kDeleteContextSlot, 2); frame_->Push(&answer); - return; + } else { + // Default: Result of deleting non-global, not dynamically + // introduced variables is false. + frame_->Push(Factory::false_value()); } - - // Default: Result of deleting non-global, not dynamically - // introduced variables is false. - frame_->Push(Factory::false_value()); - } else { // Default: Result of deleting expressions is true. Load(node->expression()); // may have side-effects diff --git a/deps/v8/src/x64/deoptimizer-x64.cc b/deps/v8/src/x64/deoptimizer-x64.cc index 7d6e6d8522..595dedc474 100644 --- a/deps/v8/src/x64/deoptimizer-x64.cc +++ b/deps/v8/src/x64/deoptimizer-x64.cc @@ -358,14 +358,16 @@ void Deoptimizer::DoComputeFrame(TranslationIterator* iterator, fp_value, output_offset, value); } - // The context can be gotten from the function so long as we don't - // optimize functions that need local contexts. + // For the bottommost output frame the context can be gotten from the input + // frame. For all subsequent output frames it can be gotten from the function + // so long as we don't inline functions that need local contexts. output_offset -= kPointerSize; input_offset -= kPointerSize; - value = reinterpret_cast<intptr_t>(function->context()); - // The context for the bottommost output frame should also agree with the - // input frame. 
- ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value); + if (is_bottommost) { + value = input_->GetFrameSlot(input_offset); + } else { + value = reinterpret_cast<intptr_t>(function->context()); + } output_frame->SetFrameSlot(output_offset, value); if (is_topmost) output_frame->SetRegister(rsi.code(), value); if (FLAG_trace_deopt) { diff --git a/deps/v8/src/x64/full-codegen-x64.cc b/deps/v8/src/x64/full-codegen-x64.cc index a28bcb79ff..8711f42384 100644 --- a/deps/v8/src/x64/full-codegen-x64.cc +++ b/deps/v8/src/x64/full-codegen-x64.cc @@ -3075,8 +3075,8 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) { } } else if (var != NULL) { // Delete of an unqualified identifier is disallowed in strict mode - // so this code can only be reached in non-strict mode. - ASSERT(strict_mode_flag() == kNonStrictMode); + // but "delete this" is. + ASSERT(strict_mode_flag() == kNonStrictMode || var->is_this()); if (var->is_global()) { __ push(GlobalObjectOperand()); __ Push(var->name()); @@ -3114,16 +3114,22 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) { case Token::NOT: { Comment cmnt(masm_, "[ UnaryOperation (NOT)"); - Label materialize_true, materialize_false; - Label* if_true = NULL; - Label* if_false = NULL; - Label* fall_through = NULL; - // Notice that the labels are swapped. - context()->PrepareTest(&materialize_true, &materialize_false, - &if_false, &if_true, &fall_through); - if (context()->IsTest()) ForwardBailoutToChild(expr); - VisitForControl(expr->expression(), if_true, if_false, fall_through); - context()->Plug(if_false, if_true); // Labels swapped. + if (context()->IsEffect()) { + // Unary NOT has no side effects so it's only necessary to visit the + // subexpression. Match the optimizing compiler by not branching. 
+ VisitForEffect(expr->expression()); + } else { + Label materialize_true, materialize_false; + Label* if_true = NULL; + Label* if_false = NULL; + Label* fall_through = NULL; + // Notice that the labels are swapped. + context()->PrepareTest(&materialize_true, &materialize_false, + &if_false, &if_true, &fall_through); + if (context()->IsTest()) ForwardBailoutToChild(expr); + VisitForControl(expr->expression(), if_true, if_false, fall_through); + context()->Plug(if_false, if_true); // Labels swapped. + } break; } diff --git a/deps/v8/src/x64/lithium-codegen-x64.cc b/deps/v8/src/x64/lithium-codegen-x64.cc index e1ebb3eaca..e6904b4dad 100644 --- a/deps/v8/src/x64/lithium-codegen-x64.cc +++ b/deps/v8/src/x64/lithium-codegen-x64.cc @@ -37,6 +37,37 @@ namespace v8 { namespace internal { +// When invoking builtins, we need to record the safepoint in the middle of +// the invoke instruction sequence generated by the macro assembler. +class SafepointGenerator : public PostCallGenerator { + public: + SafepointGenerator(LCodeGen* codegen, + LPointerMap* pointers, + int deoptimization_index, + bool ensure_reloc_space = false) + : codegen_(codegen), + pointers_(pointers), + deoptimization_index_(deoptimization_index), + ensure_reloc_space_(ensure_reloc_space) { } + virtual ~SafepointGenerator() { } + + virtual void Generate() { + // Ensure that we have enough space in the reloc info to patch + // this with calls when doing deoptimization. + if (ensure_reloc_space_) { + codegen_->masm()->RecordComment(RelocInfo::kFillerCommentString, true); + } + codegen_->RecordSafepoint(pointers_, deoptimization_index_); + } + + private: + LCodeGen* codegen_; + LPointerMap* pointers_; + int deoptimization_index_; + bool ensure_reloc_space_; +}; + + #define __ masm()-> bool LCodeGen::GenerateCode() { @@ -132,6 +163,45 @@ bool LCodeGen::GeneratePrologue() { } } + // Possibly allocate a local context. 
+ int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; + if (heap_slots > 0) { + Comment(";;; Allocate local context"); + // Argument to NewContext is the function, which is still in rdi. + __ push(rdi); + if (heap_slots <= FastNewContextStub::kMaximumSlots) { + FastNewContextStub stub(heap_slots); + __ CallStub(&stub); + } else { + __ CallRuntime(Runtime::kNewContext, 1); + } + RecordSafepoint(Safepoint::kNoDeoptimizationIndex); + // Context is returned in both rax and rsi. It replaces the context + // passed to us. It's saved in the stack and kept live in rsi. + __ movq(Operand(rbp, StandardFrameConstants::kContextOffset), rsi); + + // Copy any necessary parameters into the context. + int num_parameters = scope()->num_parameters(); + for (int i = 0; i < num_parameters; i++) { + Slot* slot = scope()->parameter(i)->AsSlot(); + if (slot != NULL && slot->type() == Slot::CONTEXT) { + int parameter_offset = StandardFrameConstants::kCallerSPOffset + + (num_parameters - 1 - i) * kPointerSize; + // Load parameter from stack. + __ movq(rax, Operand(rbp, parameter_offset)); + // Store it in the context. + int context_offset = Context::SlotOffset(slot->index()); + __ movq(Operand(rsi, context_offset), rax); + // Update the write barrier. This clobbers all involved + // registers, so we have use a third register to avoid + // clobbering rsi. + __ movq(rcx, rsi); + __ RecordWrite(rcx, context_offset, rax, rbx); + } + } + Comment(";;; End allocate local context"); + } + // Trace the call. 
if (FLAG_trace) { __ CallRuntime(Runtime::kTraceEnter, 0); @@ -252,8 +322,7 @@ int LCodeGen::ToInteger32(LConstantOperand* op) const { Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const { Handle<Object> literal = chunk_->LookupLiteral(op); - Representation r = chunk_->LookupLiteralRepresentation(op); - ASSERT(r.IsTagged()); + ASSERT(chunk_->LookupLiteralRepresentation(op).IsTagged()); return literal; } @@ -539,6 +608,12 @@ void LCodeGen::RecordSafepoint(LPointerMap* pointers, } +void LCodeGen::RecordSafepoint(int deoptimization_index) { + LPointerMap empty_pointers(RelocInfo::kNoPosition); + RecordSafepoint(&empty_pointers, deoptimization_index); +} + + void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers, int arguments, int deoptimization_index) { @@ -652,7 +727,42 @@ void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) { void LCodeGen::DoModI(LModI* instr) { - Abort("Unimplemented: %s", "DoModI"); + LOperand* right = instr->InputAt(1); + ASSERT(ToRegister(instr->result()).is(rdx)); + ASSERT(ToRegister(instr->InputAt(0)).is(rax)); + ASSERT(!ToRegister(instr->InputAt(1)).is(rax)); + ASSERT(!ToRegister(instr->InputAt(1)).is(rdx)); + + Register right_reg = ToRegister(right); + + // Check for x % 0. + if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) { + __ testl(right_reg, right_reg); + DeoptimizeIf(zero, instr->environment()); + } + + // Sign extend eax to edx. (We are using only the low 32 bits of the values.) + __ cdq(); + + // Check for (0 % -x) that will produce negative zero. + if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { + NearLabel positive_left; + NearLabel done; + __ testl(rax, rax); + __ j(not_sign, &positive_left); + __ idivl(right_reg); + + // Test the remainder for 0, because then the result would be -0. 
+ __ testl(rdx, rdx); + __ j(not_zero, &done); + + DeoptimizeIf(no_condition, instr->environment()); + __ bind(&positive_left); + __ idivl(right_reg); + __ bind(&done); + } else { + __ idivl(right_reg); + } } @@ -929,7 +1039,19 @@ void LCodeGen::DoPixelArrayLength(LPixelArrayLength* instr) { void LCodeGen::DoValueOf(LValueOf* instr) { - Abort("Unimplemented: %s", "DoValueOf"); + Register input = ToRegister(instr->InputAt(0)); + Register result = ToRegister(instr->result()); + ASSERT(input.is(result)); + NearLabel done; + // If the object is a smi return the object. + __ JumpIfSmi(input, &done); + + // If the object is not a value type, return the object. + __ CmpObjectType(input, JS_VALUE_TYPE, kScratchRegister); + __ j(not_equal, &done); + __ movq(result, FieldOperand(input, JSValue::kValueOffset)); + + __ bind(&done); } @@ -1599,7 +1721,18 @@ void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) { void LCodeGen::DoInstanceOf(LInstanceOf* instr) { - Abort("Unimplemented: %s", "DoInstanceOf"); + InstanceofStub stub(InstanceofStub::kNoFlags); + __ push(ToRegister(instr->InputAt(0))); + __ push(ToRegister(instr->InputAt(1))); + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + NearLabel true_value, done; + __ testq(rax, rax); + __ j(zero, &true_value); + __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex); + __ jmp(&done); + __ bind(&true_value); + __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex); + __ bind(&done); } @@ -1607,7 +1740,9 @@ void LCodeGen::DoInstanceOfAndBranch(LInstanceOfAndBranch* instr) { int true_block = chunk_->LookupDestination(instr->true_block_id()); int false_block = chunk_->LookupDestination(instr->false_block_id()); - InstanceofStub stub(InstanceofStub::kArgsInRegisters); + InstanceofStub stub(InstanceofStub::kNoFlags); + __ push(ToRegister(instr->InputAt(0))); + __ push(ToRegister(instr->InputAt(1))); CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); __ testq(rax, rax); 
EmitBranch(true_block, false_block, zero); @@ -1615,13 +1750,65 @@ void LCodeGen::DoInstanceOfAndBranch(LInstanceOfAndBranch* instr) { void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) { - Abort("Unimplemented: %s", "DoInstanceOfKnowGLobal"); + class DeferredInstanceOfKnownGlobal: public LDeferredCode { + public: + DeferredInstanceOfKnownGlobal(LCodeGen* codegen, + LInstanceOfKnownGlobal* instr) + : LDeferredCode(codegen), instr_(instr) { } + virtual void Generate() { + codegen()->DoDeferredLInstanceOfKnownGlobal(instr_); + } + + private: + LInstanceOfKnownGlobal* instr_; + }; + + + DeferredInstanceOfKnownGlobal* deferred; + deferred = new DeferredInstanceOfKnownGlobal(this, instr); + + Label false_result; + Register object = ToRegister(instr->InputAt(0)); + + // A Smi is not an instance of anything. + __ JumpIfSmi(object, &false_result); + + // Null is not an instance of anything. + __ CompareRoot(object, Heap::kNullValueRootIndex); + __ j(equal, &false_result); + + // String values are not instances of anything. 
+ __ JumpIfNotString(object, kScratchRegister, deferred->entry()); + + __ bind(&false_result); + __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex); + + __ bind(deferred->exit()); } -void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr, - Label* map_check) { - Abort("Unimplemented: %s", "DoDeferredLInstanceOfKnownGlobakl"); +void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) { + __ PushSafepointRegisters(); + + InstanceofStub stub(InstanceofStub::kNoFlags); + + __ push(ToRegister(instr->InputAt(0))); + __ Push(instr->function()); + __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset)); + __ Call(stub.GetCode(), RelocInfo::CODE_TARGET); + RecordSafepointWithRegisters( + instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex); + __ movq(kScratchRegister, rax); + __ PopSafepointRegisters(); + __ testq(kScratchRegister, kScratchRegister); + Label load_false; + Label done; + __ j(not_zero, &load_false); + __ LoadRoot(rax, Heap::kTrueValueRootIndex); + __ jmp(&done); + __ bind(&load_false); + __ LoadRoot(rax, Heap::kFalseValueRootIndex); + __ bind(&done); } @@ -1718,7 +1905,20 @@ void LCodeGen::DoStoreGlobal(LStoreGlobal* instr) { void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) { - Abort("Unimplemented: %s", "DoLoadContextSlot"); + Register context = ToRegister(instr->context()); + Register result = ToRegister(instr->result()); + __ movq(result, ContextOperand(context, instr->slot_index())); +} + + +void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) { + Register context = ToRegister(instr->context()); + Register value = ToRegister(instr->value()); + __ movq(ContextOperand(context, instr->slot_index()), value); + if (instr->needs_write_barrier()) { + int offset = Context::SlotOffset(instr->slot_index()); + __ RecordWrite(context, offset, value, kScratchRegister); + } } @@ -1861,7 +2061,11 @@ void LCodeGen::DoLoadPixelArrayElement(LLoadPixelArrayElement* instr) { void 
LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) { - Abort("Unimplemented: %s", "DoLoadKeyedGeneric"); + ASSERT(ToRegister(instr->object()).is(rdx)); + ASSERT(ToRegister(instr->key()).is(rax)); + + Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize)); + CallCode(ic, RelocInfo::CODE_TARGET, instr); } @@ -1915,25 +2119,77 @@ void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) { void LCodeGen::DoApplyArguments(LApplyArguments* instr) { - Abort("Unimplemented: %s", "DoApplyArguments"); + Register receiver = ToRegister(instr->receiver()); + Register function = ToRegister(instr->function()); + Register length = ToRegister(instr->length()); + Register elements = ToRegister(instr->elements()); + ASSERT(receiver.is(rax)); // Used for parameter count. + ASSERT(function.is(rdi)); // Required by InvokeFunction. + ASSERT(ToRegister(instr->result()).is(rax)); + + // If the receiver is null or undefined, we have to pass the global object + // as a receiver. + NearLabel global_object, receiver_ok; + __ CompareRoot(receiver, Heap::kNullValueRootIndex); + __ j(equal, &global_object); + __ CompareRoot(receiver, Heap::kUndefinedValueRootIndex); + __ j(equal, &global_object); + + // The receiver should be a JS object. + Condition is_smi = __ CheckSmi(receiver); + DeoptimizeIf(is_smi, instr->environment()); + __ CmpObjectType(receiver, FIRST_JS_OBJECT_TYPE, kScratchRegister); + DeoptimizeIf(below, instr->environment()); + __ jmp(&receiver_ok); + + __ bind(&global_object); + // TODO(kmillikin): We have a hydrogen value for the global object. See + // if it's better to use it than to explicitly fetch it from the context + // here. + __ movq(receiver, Operand(rbp, StandardFrameConstants::kContextOffset)); + __ movq(receiver, ContextOperand(receiver, Context::GLOBAL_INDEX)); + __ bind(&receiver_ok); + + // Copy the arguments to this function possibly from the + // adaptor frame below it. 
+ const uint32_t kArgumentsLimit = 1 * KB; + __ cmpq(length, Immediate(kArgumentsLimit)); + DeoptimizeIf(above, instr->environment()); + + __ push(receiver); + __ movq(receiver, length); + + // Loop through the arguments pushing them onto the execution + // stack. + NearLabel invoke, loop; + // length is a small non-negative integer, due to the test above. + __ testl(length, length); + __ j(zero, &invoke); + __ bind(&loop); + __ push(Operand(elements, length, times_pointer_size, 1 * kPointerSize)); + __ decl(length); + __ j(not_zero, &loop); + + // Invoke the function. + __ bind(&invoke); + ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment()); + LPointerMap* pointers = instr->pointer_map(); + LEnvironment* env = instr->deoptimization_environment(); + RecordPosition(pointers->position()); + RegisterEnvironmentForDeoptimization(env); + SafepointGenerator safepoint_generator(this, + pointers, + env->deoptimization_index(), + true); + v8::internal::ParameterCount actual(rax); + __ InvokeFunction(function, actual, CALL_FUNCTION, &safepoint_generator); } void LCodeGen::DoPushArgument(LPushArgument* instr) { LOperand* argument = instr->InputAt(0); if (argument->IsConstantOperand()) { - LConstantOperand* const_op = LConstantOperand::cast(argument); - Handle<Object> literal = chunk_->LookupLiteral(const_op); - Representation r = chunk_->LookupLiteralRepresentation(const_op); - if (r.IsInteger32()) { - ASSERT(literal->IsNumber()); - __ push(Immediate(static_cast<int32_t>(literal->Number()))); - } else if (r.IsDouble()) { - Abort("unsupported double immediate"); - } else { - ASSERT(r.IsTagged()); - __ Push(literal); - } + EmitPushConstantOperand(argument); } else if (argument->IsRegister()) { __ push(ToRegister(argument)); } else { @@ -1949,6 +2205,15 @@ void LCodeGen::DoContext(LContext* instr) { } +void LCodeGen::DoOuterContext(LOuterContext* instr) { + Register context = ToRegister(instr->context()); + Register result = ToRegister(instr->result()); + __ 
movq(result, + Operand(context, Context::SlotOffset(Context::CLOSURE_INDEX))); + __ movq(result, FieldOperand(result, JSFunction::kContextOffset)); +} + + void LCodeGen::DoGlobalObject(LGlobalObject* instr) { Register result = ToRegister(instr->result()); __ movq(result, GlobalObjectOperand()); @@ -2016,22 +2281,76 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) { void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) { - Abort("Unimplemented: %s", "DoMathFloor"); + XMMRegister xmm_scratch = xmm0; + Register output_reg = ToRegister(instr->result()); + XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0)); + __ xorpd(xmm_scratch, xmm_scratch); // Zero the register. + __ ucomisd(input_reg, xmm_scratch); + + if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { + DeoptimizeIf(below_equal, instr->environment()); + } else { + DeoptimizeIf(below, instr->environment()); + } + + // Use truncating instruction (OK because input is positive). + __ cvttsd2si(output_reg, input_reg); + + // Overflow is signalled with minint. + __ cmpl(output_reg, Immediate(0x80000000)); + DeoptimizeIf(equal, instr->environment()); } void LCodeGen::DoMathRound(LUnaryMathOperation* instr) { - Abort("Unimplemented: %s", "DoMathRound"); + const XMMRegister xmm_scratch = xmm0; + Register output_reg = ToRegister(instr->result()); + XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0)); + + // xmm_scratch = 0.5 + __ movq(kScratchRegister, V8_INT64_C(0x3FE0000000000000), RelocInfo::NONE); + __ movq(xmm_scratch, kScratchRegister); + + // input = input + 0.5 + __ addsd(input_reg, xmm_scratch); + + // We need to return -0 for the input range [-0.5, 0[, otherwise + // compute Math.floor(value + 0.5). + if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { + __ ucomisd(input_reg, xmm_scratch); + DeoptimizeIf(below_equal, instr->environment()); + } else { + // If we don't need to bailout on -0, we check only bailout + // on negative inputs. 
+ __ xorpd(xmm_scratch, xmm_scratch); // Zero the register. + __ ucomisd(input_reg, xmm_scratch); + DeoptimizeIf(below, instr->environment()); + } + + // Compute Math.floor(value + 0.5). + // Use truncating instruction (OK because input is positive). + __ cvttsd2si(output_reg, input_reg); + + // Overflow is signalled with minint. + __ cmpl(output_reg, Immediate(0x80000000)); + DeoptimizeIf(equal, instr->environment()); } void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) { - Abort("Unimplemented: %s", "DoMathSqrt"); + XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0)); + ASSERT(ToDoubleRegister(instr->result()).is(input_reg)); + __ sqrtsd(input_reg, input_reg); } void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) { - Abort("Unimplemented: %s", "DoMathPowHalf"); + XMMRegister xmm_scratch = xmm0; + XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0)); + ASSERT(ToDoubleRegister(instr->result()).is(input_reg)); + __ xorpd(xmm_scratch, xmm_scratch); + __ addsd(input_reg, xmm_scratch); // Convert -0 to +0. 
+ __ sqrtsd(input_reg, input_reg); } @@ -2056,12 +2375,45 @@ void LCodeGen::DoMathSin(LUnaryMathOperation* instr) { void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) { - Abort("Unimplemented: %s", "DoUnaryMathOperation"); + switch (instr->op()) { + case kMathAbs: + DoMathAbs(instr); + break; + case kMathFloor: + DoMathFloor(instr); + break; + case kMathRound: + DoMathRound(instr); + break; + case kMathSqrt: + DoMathSqrt(instr); + break; + case kMathPowHalf: + DoMathPowHalf(instr); + break; + case kMathCos: + DoMathCos(instr); + break; + case kMathSin: + DoMathSin(instr); + break; + case kMathLog: + DoMathLog(instr); + break; + + default: + UNREACHABLE(); + } } void LCodeGen::DoCallKeyed(LCallKeyed* instr) { - Abort("Unimplemented: %s", "DoCallKeyed"); + ASSERT(ToRegister(instr->key()).is(rcx)); + ASSERT(ToRegister(instr->result()).is(rax)); + + int arity = instr->arity(); + Handle<Code> ic = StubCache::ComputeKeyedCallInitialize(arity, NOT_IN_LOOP); + CallCode(ic, RelocInfo::CODE_TARGET, instr); } @@ -2077,7 +2429,13 @@ void LCodeGen::DoCallNamed(LCallNamed* instr) { void LCodeGen::DoCallFunction(LCallFunction* instr) { - Abort("Unimplemented: %s", "DoCallFunction"); + ASSERT(ToRegister(instr->result()).is(rax)); + + int arity = instr->arity(); + CallFunctionStub stub(arity, NOT_IN_LOOP, RECEIVER_MIGHT_BE_VALUE); + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset)); + __ Drop(1); } @@ -2144,7 +2502,30 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) { void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) { - Abort("Unimplemented: %s", "DoStoreNamedGeneric"); + ASSERT(ToRegister(instr->object()).is(rdx)); + ASSERT(ToRegister(instr->value()).is(rax)); + + __ Move(rcx, instr->hydrogen()->name()); + Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize)); + CallCode(ic, RelocInfo::CODE_TARGET, instr); +} + + +void 
LCodeGen::DoStorePixelArrayElement(LStorePixelArrayElement* instr) { + Register external_pointer = ToRegister(instr->external_pointer()); + Register key = ToRegister(instr->key()); + Register value = ToRegister(instr->value()); + + { // Clamp the value to [0..255]. + NearLabel done; + __ testl(value, Immediate(0xFFFFFF00)); + __ j(zero, &done); + __ setcc(negative, value); // 1 if negative, 0 if positive. + __ decb(value); // 0 if negative, 255 if positive. + __ bind(&done); + } + + __ movb(Operand(external_pointer, key, times_1, 0), value); } @@ -2190,7 +2571,152 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) { void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) { - Abort("Unimplemented: %s", "DoStoreKeyedGeneric"); + ASSERT(ToRegister(instr->object()).is(rdx)); + ASSERT(ToRegister(instr->key()).is(rcx)); + ASSERT(ToRegister(instr->value()).is(rax)); + + Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize)); + CallCode(ic, RelocInfo::CODE_TARGET, instr); +} + + +void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) { + class DeferredStringCharCodeAt: public LDeferredCode { + public: + DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr) + : LDeferredCode(codegen), instr_(instr) { } + virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); } + private: + LStringCharCodeAt* instr_; + }; + + Register string = ToRegister(instr->string()); + Register index = no_reg; + int const_index = -1; + if (instr->index()->IsConstantOperand()) { + const_index = ToInteger32(LConstantOperand::cast(instr->index())); + STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue); + if (!Smi::IsValid(const_index)) { + // Guaranteed to be out of bounds because of the assert above. + // So the bounds check that must dominate this instruction must + // have deoptimized already. + if (FLAG_debug_code) { + __ Abort("StringCharCodeAt: out of bounds index."); + } + // No code needs to be generated. 
+ return; + } + } else { + index = ToRegister(instr->index()); + } + Register result = ToRegister(instr->result()); + + DeferredStringCharCodeAt* deferred = + new DeferredStringCharCodeAt(this, instr); + + NearLabel flat_string, ascii_string, done; + + // Fetch the instance type of the receiver into result register. + __ movq(result, FieldOperand(string, HeapObject::kMapOffset)); + __ movzxbl(result, FieldOperand(result, Map::kInstanceTypeOffset)); + + // We need special handling for non-sequential strings. + STATIC_ASSERT(kSeqStringTag == 0); + __ testb(result, Immediate(kStringRepresentationMask)); + __ j(zero, &flat_string); + + // Handle cons strings and go to deferred code for the rest. + __ testb(result, Immediate(kIsConsStringMask)); + __ j(zero, deferred->entry()); + + // ConsString. + // Check whether the right hand side is the empty string (i.e. if + // this is really a flat string in a cons string). If that is not + // the case we would rather go to the runtime system now to flatten + // the string. + __ CompareRoot(FieldOperand(string, ConsString::kSecondOffset), + Heap::kEmptyStringRootIndex); + __ j(not_equal, deferred->entry()); + // Get the first of the two strings and load its instance type. + __ movq(string, FieldOperand(string, ConsString::kFirstOffset)); + __ movq(result, FieldOperand(string, HeapObject::kMapOffset)); + __ movzxbl(result, FieldOperand(result, Map::kInstanceTypeOffset)); + // If the first cons component is also non-flat, then go to runtime. + STATIC_ASSERT(kSeqStringTag == 0); + __ testb(result, Immediate(kStringRepresentationMask)); + __ j(not_zero, deferred->entry()); + + // Check for ASCII or two-byte string. + __ bind(&flat_string); + STATIC_ASSERT(kAsciiStringTag != 0); + __ testb(result, Immediate(kStringEncodingMask)); + __ j(not_zero, &ascii_string); + + // Two-byte string. + // Load the two-byte character code into the result register. 
+ STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1); + if (instr->index()->IsConstantOperand()) { + __ movzxwl(result, + FieldOperand(string, + SeqTwoByteString::kHeaderSize + + (kUC16Size * const_index))); + } else { + __ movzxwl(result, FieldOperand(string, + index, + times_2, + SeqTwoByteString::kHeaderSize)); + } + __ jmp(&done); + + // ASCII string. + // Load the byte into the result register. + __ bind(&ascii_string); + if (instr->index()->IsConstantOperand()) { + __ movzxbl(result, FieldOperand(string, + SeqAsciiString::kHeaderSize + const_index)); + } else { + __ movzxbl(result, FieldOperand(string, + index, + times_1, + SeqAsciiString::kHeaderSize)); + } + __ bind(&done); + __ bind(deferred->exit()); +} + + +void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) { + Register string = ToRegister(instr->string()); + Register result = ToRegister(instr->result()); + + // TODO(3095996): Get rid of this. For now, we need to make the + // result register contain a valid pointer because it is already + // contained in the register pointer map. + __ Set(result, 0); + + __ PushSafepointRegisters(); + __ push(string); + // Push the index as a smi. This is safe because of the checks in + // DoStringCharCodeAt above. 
+ STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue); + if (instr->index()->IsConstantOperand()) { + int const_index = ToInteger32(LConstantOperand::cast(instr->index())); + __ Push(Smi::FromInt(const_index)); + } else { + Register index = ToRegister(instr->index()); + __ Integer32ToSmi(index, index); + __ push(index); + } + __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset)); + __ CallRuntimeSaveDoubles(Runtime::kStringCharCodeAt); + RecordSafepointWithRegisters( + instr->pointer_map(), 2, Safepoint::kNoDeoptimizationIndex); + if (FLAG_debug_code) { + __ AbortIfNotSmi(rax); + } + __ SmiToInteger32(rax, rax); + __ StoreToSafepointRegisterSlot(result, rax); + __ PopSafepointRegisters(); } @@ -2403,7 +2929,42 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) { void LCodeGen::DoDoubleToI(LDoubleToI* instr) { - Abort("Unimplemented: %s", "DoDoubleToI"); + LOperand* input = instr->InputAt(0); + ASSERT(input->IsDoubleRegister()); + LOperand* result = instr->result(); + ASSERT(result->IsRegister()); + + XMMRegister input_reg = ToDoubleRegister(input); + Register result_reg = ToRegister(result); + + if (instr->truncating()) { + // Performs a truncating conversion of a floating point number as used by + // the JS bitwise operations. + __ cvttsd2siq(result_reg, input_reg); + __ movq(kScratchRegister, V8_INT64_C(0x8000000000000000), RelocInfo::NONE); + __ cmpl(result_reg, kScratchRegister); + DeoptimizeIf(equal, instr->environment()); + } else { + __ cvttsd2si(result_reg, input_reg); + __ cvtlsi2sd(xmm0, result_reg); + __ ucomisd(xmm0, input_reg); + DeoptimizeIf(not_equal, instr->environment()); + DeoptimizeIf(parity_even, instr->environment()); // NaN. + if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { + NearLabel done; + // The integer converted back is equal to the original. We + // only have to test if we got -0 as an input. 
+ __ testl(result_reg, result_reg); + __ j(not_zero, &done); + __ movmskpd(result_reg, input_reg); + // Bit 0 contains the sign of the double in input_reg. + // If input was positive, we are ok and return 0, otherwise + // deoptimize. + __ andl(result_reg, Immediate(1)); + DeoptimizeIf(not_zero, instr->environment()); + __ bind(&done); + } + } } @@ -2552,7 +3113,54 @@ void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) { void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) { - Abort("Unimplemented: %s", "DoRegExpLiteral"); + NearLabel materialized; + // Registers will be used as follows: + // rdi = JS function. + // rcx = literals array. + // rbx = regexp literal. + // rax = regexp literal clone. + __ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset)); + __ movq(rcx, FieldOperand(rdi, JSFunction::kLiteralsOffset)); + int literal_offset = FixedArray::kHeaderSize + + instr->hydrogen()->literal_index() * kPointerSize; + __ movq(rbx, FieldOperand(rcx, literal_offset)); + __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex); + __ j(not_equal, &materialized); + + // Create regexp literal using runtime function + // Result will be in rax. + __ push(rcx); + __ Push(Smi::FromInt(instr->hydrogen()->literal_index())); + __ Push(instr->hydrogen()->pattern()); + __ Push(instr->hydrogen()->flags()); + CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr); + __ movq(rbx, rax); + + __ bind(&materialized); + int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize; + Label allocated, runtime_allocate; + __ AllocateInNewSpace(size, rax, rcx, rdx, &runtime_allocate, TAG_OBJECT); + __ jmp(&allocated); + + __ bind(&runtime_allocate); + __ push(rbx); + __ Push(Smi::FromInt(size)); + CallRuntime(Runtime::kAllocateInNewSpace, 1, instr); + __ pop(rbx); + + __ bind(&allocated); + // Copy the content into the newly allocated memory. + // (Unroll copy loop once for better throughput). 
+ for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) { + __ movq(rdx, FieldOperand(rbx, i)); + __ movq(rcx, FieldOperand(rbx, i + kPointerSize)); + __ movq(FieldOperand(rax, i), rdx); + __ movq(FieldOperand(rax, i + kPointerSize), rcx); + } + if ((size % (2 * kPointerSize)) != 0) { + __ movq(rdx, FieldOperand(rbx, size - kPointerSize)); + __ movq(FieldOperand(rax, size - kPointerSize), rdx); + } } @@ -2575,60 +3183,56 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) { void LCodeGen::DoTypeof(LTypeof* instr) { - Abort("Unimplemented: %s", "DoTypeof"); + LOperand* input = instr->InputAt(0); + if (input->IsConstantOperand()) { + __ Push(ToHandle(LConstantOperand::cast(input))); + } else if (input->IsRegister()) { + __ push(ToRegister(input)); + } else { + ASSERT(input->IsStackSlot()); + __ push(ToOperand(input)); + } + CallRuntime(Runtime::kTypeof, 1, instr); } void LCodeGen::DoTypeofIs(LTypeofIs* instr) { - Abort("Unimplemented: %s", "DoTypeofIs"); -} - - -void LCodeGen::DoIsConstructCall(LIsConstructCall* instr) { + Register input = ToRegister(instr->InputAt(0)); Register result = ToRegister(instr->result()); - NearLabel true_label; - NearLabel false_label; + Label true_label; + Label false_label; NearLabel done; - EmitIsConstructCall(result); - __ j(equal, &true_label); - + Condition final_branch_condition = EmitTypeofIs(&true_label, + &false_label, + input, + instr->type_literal()); + __ j(final_branch_condition, &true_label); + __ bind(&false_label); __ LoadRoot(result, Heap::kFalseValueRootIndex); __ jmp(&done); __ bind(&true_label); __ LoadRoot(result, Heap::kTrueValueRootIndex); - __ bind(&done); } -void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) { - Register temp = ToRegister(instr->TempAt(0)); - int true_block = chunk_->LookupDestination(instr->true_block_id()); - int false_block = chunk_->LookupDestination(instr->false_block_id()); - - EmitIsConstructCall(temp); - EmitBranch(true_block, false_block, 
equal); -} - - -void LCodeGen::EmitIsConstructCall(Register temp) { - // Get the frame pointer for the calling frame. - __ movq(temp, Operand(rbp, StandardFrameConstants::kCallerFPOffset)); - - // Skip the arguments adaptor frame if it exists. - NearLabel check_frame_marker; - __ SmiCompare(Operand(temp, StandardFrameConstants::kContextOffset), - Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)); - __ j(not_equal, &check_frame_marker); - __ movq(temp, Operand(rax, StandardFrameConstants::kCallerFPOffset)); - - // Check the marker in the calling frame. - __ bind(&check_frame_marker); - __ SmiCompare(Operand(temp, StandardFrameConstants::kMarkerOffset), - Smi::FromInt(StackFrame::CONSTRUCT)); +void LCodeGen::EmitPushConstantOperand(LOperand* operand) { + ASSERT(operand->IsConstantOperand()); + LConstantOperand* const_op = LConstantOperand::cast(operand); + Handle<Object> literal = chunk_->LookupLiteral(const_op); + Representation r = chunk_->LookupLiteralRepresentation(const_op); + if (r.IsInteger32()) { + ASSERT(literal->IsNumber()); + __ push(Immediate(static_cast<int32_t>(literal->Number()))); + } else if (r.IsDouble()) { + Abort("unsupported double immediate"); + } else { + ASSERT(r.IsTagged()); + __ Push(literal); + } } @@ -2712,6 +3316,54 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label, } +void LCodeGen::DoIsConstructCall(LIsConstructCall* instr) { + Register result = ToRegister(instr->result()); + NearLabel true_label; + NearLabel false_label; + NearLabel done; + + EmitIsConstructCall(result); + __ j(equal, &true_label); + + __ LoadRoot(result, Heap::kFalseValueRootIndex); + __ jmp(&done); + + __ bind(&true_label); + __ LoadRoot(result, Heap::kTrueValueRootIndex); + + + __ bind(&done); +} + + +void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) { + Register temp = ToRegister(instr->TempAt(0)); + int true_block = chunk_->LookupDestination(instr->true_block_id()); + int false_block = chunk_->LookupDestination(instr->false_block_id()); + 
+ EmitIsConstructCall(temp); + EmitBranch(true_block, false_block, equal); +} + + +void LCodeGen::EmitIsConstructCall(Register temp) { + // Get the frame pointer for the calling frame. + __ movq(temp, Operand(rbp, StandardFrameConstants::kCallerFPOffset)); + + // Skip the arguments adaptor frame if it exists. + NearLabel check_frame_marker; + __ SmiCompare(Operand(temp, StandardFrameConstants::kContextOffset), + Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)); + __ j(not_equal, &check_frame_marker); + __ movq(temp, Operand(rax, StandardFrameConstants::kCallerFPOffset)); + + // Check the marker in the calling frame. + __ bind(&check_frame_marker); + __ SmiCompare(Operand(temp, StandardFrameConstants::kMarkerOffset), + Smi::FromInt(StackFrame::CONSTRUCT)); +} + + void LCodeGen::DoLazyBailout(LLazyBailout* instr) { // No code for lazy bailout instruction. Used to capture environment after a // call for populating the safepoint data with deoptimization data. @@ -2724,7 +3376,36 @@ void LCodeGen::DoDeoptimize(LDeoptimize* instr) { void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) { - Abort("Unimplemented: %s", "DoDeleteProperty"); + LOperand* obj = instr->object(); + LOperand* key = instr->key(); + // Push object. + if (obj->IsRegister()) { + __ push(ToRegister(obj)); + } else { + __ push(ToOperand(obj)); + } + // Push key. 
+ if (key->IsConstantOperand()) { + EmitPushConstantOperand(key); + } else if (key->IsRegister()) { + __ push(ToRegister(key)); + } else { + __ push(ToOperand(key)); + } + ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment()); + LPointerMap* pointers = instr->pointer_map(); + LEnvironment* env = instr->deoptimization_environment(); + RecordPosition(pointers->position()); + RegisterEnvironmentForDeoptimization(env); + // Create safepoint generator that will also ensure enough space in the + // reloc info for patching in deoptimization (since this is invoking a + // builtin) + SafepointGenerator safepoint_generator(this, + pointers, + env->deoptimization_index(), + true); + __ Push(Smi::FromInt(strict_mode_flag())); + __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, &safepoint_generator); } diff --git a/deps/v8/src/x64/lithium-codegen-x64.h b/deps/v8/src/x64/lithium-codegen-x64.h index 6f8f06e345..52409f207b 100644 --- a/deps/v8/src/x64/lithium-codegen-x64.h +++ b/deps/v8/src/x64/lithium-codegen-x64.h @@ -90,8 +90,8 @@ class LCodeGen BASE_EMBEDDED { void DoDeferredTaggedToI(LTaggedToI* instr); void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr); void DoDeferredStackCheck(LGoto* instr); - void DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr, - Label* map_check); + void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr); + void DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr); // Parallel move support. void DoParallelMove(LParallelMove* move); @@ -117,6 +117,10 @@ class LCodeGen BASE_EMBEDDED { bool is_done() const { return status_ == DONE; } bool is_aborted() const { return status_ == ABORTED; } + int strict_mode_flag() const { + return info_->is_strict() ? 
kStrictMode : kNonStrictMode; + } + LChunk* chunk() const { return chunk_; } Scope* scope() const { return scope_; } HGraph* graph() const { return chunk_->graph(); } @@ -197,6 +201,7 @@ class LCodeGen BASE_EMBEDDED { int arguments, int deoptimization_index); void RecordSafepoint(LPointerMap* pointers, int deoptimization_index); + void RecordSafepoint(int deoptimization_index); void RecordSafepointWithRegisters(LPointerMap* pointers, int arguments, int deoptimization_index); @@ -225,6 +230,9 @@ class LCodeGen BASE_EMBEDDED { // Caller should branch on equal condition. void EmitIsConstructCall(Register temp); + // Emits code for pushing a constant operand. + void EmitPushConstantOperand(LOperand* operand); + LChunk* const chunk_; MacroAssembler* const masm_; CompilationInfo* const info_; diff --git a/deps/v8/src/x64/lithium-x64.cc b/deps/v8/src/x64/lithium-x64.cc index fba29a69e5..2ed109d137 100644 --- a/deps/v8/src/x64/lithium-x64.cc +++ b/deps/v8/src/x64/lithium-x64.cc @@ -296,8 +296,15 @@ void LLoadContextSlot::PrintDataTo(StringStream* stream) { } +void LStoreContextSlot::PrintDataTo(StringStream* stream) { + InputAt(0)->PrintTo(stream); + stream->Add("[%d] <- ", slot_index()); + InputAt(1)->PrintTo(stream); +} + + void LCallKeyed::PrintDataTo(StringStream* stream) { - stream->Add("[ecx] #%d / ", arity()); + stream->Add("[rcx] #%d / ", arity()); } @@ -398,7 +405,7 @@ void LChunk::MarkEmptyBlocks() { } -void LStoreNamed::PrintDataTo(StringStream* stream) { +void LStoreNamedField::PrintDataTo(StringStream* stream) { object()->PrintTo(stream); stream->Add("."); stream->Add(*String::cast(*name())->ToCString()); @@ -407,7 +414,25 @@ void LStoreNamed::PrintDataTo(StringStream* stream) { } -void LStoreKeyed::PrintDataTo(StringStream* stream) { +void LStoreNamedGeneric::PrintDataTo(StringStream* stream) { + object()->PrintTo(stream); + stream->Add("."); + stream->Add(*String::cast(*name())->ToCString()); + stream->Add(" <- "); + value()->PrintTo(stream); +} + + +void 
LStoreKeyedFastElement::PrintDataTo(StringStream* stream) { + object()->PrintTo(stream); + stream->Add("["); + key()->PrintTo(stream); + stream->Add("] <- "); + value()->PrintTo(stream); +} + + +void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) { object()->PrintTo(stream); stream->Add("["); key()->PrintTo(stream); @@ -1081,9 +1106,8 @@ LInstruction* LChunkBuilder::DoTest(HTest* instr) { } else if (v->IsInstanceOf()) { HInstanceOf* instance_of = HInstanceOf::cast(v); LInstanceOfAndBranch* result = - new LInstanceOfAndBranch( - UseFixed(instance_of->left(), InstanceofStub::left()), - UseFixed(instance_of->right(), InstanceofStub::right())); + new LInstanceOfAndBranch(UseFixed(instance_of->left(), rax), + UseFixed(instance_of->right(), rdx)); return MarkAsCall(result, instr); } else if (v->IsTypeofIs()) { HTypeofIs* typeof_is = HTypeofIs::cast(v); @@ -1124,21 +1148,32 @@ LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) { LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) { - Abort("Unimplemented: %s", "DoInstanceOf"); - return NULL; + LOperand* left = UseFixed(instr->left(), rax); + LOperand* right = UseFixed(instr->right(), rdx); + LInstanceOf* result = new LInstanceOf(left, right); + return MarkAsCall(DefineFixed(result, rax), instr); } LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal( HInstanceOfKnownGlobal* instr) { - Abort("Unimplemented: %s", "DoInstanceOfKnownGlobal"); - return NULL; + LInstanceOfKnownGlobal* result = + new LInstanceOfKnownGlobal(UseRegisterAtStart(instr->value())); + MarkAsSaveDoubles(result); + return AssignEnvironment(AssignPointerMap(DefineFixed(result, rax))); } LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) { - Abort("Unimplemented: %s", "DoApplyArguments"); - return NULL; + LOperand* function = UseFixed(instr->function(), rdi); + LOperand* receiver = UseFixed(instr->receiver(), rax); + LOperand* length = UseFixed(instr->length(), rbx); + LOperand* elements = 
UseFixed(instr->elements(), rcx); + LApplyArguments* result = new LApplyArguments(function, + receiver, + length, + elements); + return MarkAsCall(DefineFixed(result, rax), instr, CAN_DEOPTIMIZE_EAGERLY); } @@ -1155,8 +1190,8 @@ LInstruction* LChunkBuilder::DoContext(HContext* instr) { LInstruction* LChunkBuilder::DoOuterContext(HOuterContext* instr) { - Abort("Unimplemented: DoOuterContext"); - return NULL; + LOperand* context = UseRegisterAtStart(instr->value()); + return DefineAsRegister(new LOuterContext(context)); } @@ -1178,14 +1213,39 @@ LInstruction* LChunkBuilder::DoCallConstantFunction( LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) { - Abort("Unimplemented: %s", "DoUnaryMathOperation"); - return NULL; + BuiltinFunctionId op = instr->op(); + if (op == kMathLog || op == kMathSin || op == kMathCos) { + LOperand* input = UseFixedDouble(instr->value(), xmm1); + LUnaryMathOperation* result = new LUnaryMathOperation(input); + return MarkAsCall(DefineFixedDouble(result, xmm1), instr); + } else { + LOperand* input = UseRegisterAtStart(instr->value()); + LUnaryMathOperation* result = new LUnaryMathOperation(input); + switch (op) { + case kMathAbs: + return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result))); + case kMathFloor: + return AssignEnvironment(DefineAsRegister(result)); + case kMathRound: + return AssignEnvironment(DefineAsRegister(result)); + case kMathSqrt: + return DefineSameAsFirst(result); + case kMathPowHalf: + return DefineSameAsFirst(result); + default: + UNREACHABLE(); + return NULL; + } + } } LInstruction* LChunkBuilder::DoCallKeyed(HCallKeyed* instr) { - Abort("Unimplemented: %s", "DoCallKeyed"); - return NULL; + ASSERT(instr->key()->representation().IsTagged()); + LOperand* key = UseFixed(instr->key(), rcx); + argument_count_ -= instr->argument_count(); + LCallKeyed* result = new LCallKeyed(key); + return MarkAsCall(DefineFixed(result, rax), instr); } @@ -1216,8 +1276,9 @@ LInstruction* 
LChunkBuilder::DoCallNew(HCallNew* instr) { LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) { - Abort("Unimplemented: %s", "DoCallFunction"); - return NULL; + argument_count_ -= instr->argument_count(); + LCallFunction* result = new LCallFunction(); + return MarkAsCall(DefineFixed(result, rax), instr); } @@ -1285,8 +1346,32 @@ LInstruction* LChunkBuilder::DoDiv(HDiv* instr) { LInstruction* LChunkBuilder::DoMod(HMod* instr) { - Abort("Unimplemented: %s", "DoMod"); - return NULL; + if (instr->representation().IsInteger32()) { + ASSERT(instr->left()->representation().IsInteger32()); + ASSERT(instr->right()->representation().IsInteger32()); + // The temporary operand is necessary to ensure that right is not allocated + // into edx. + LOperand* temp = FixedTemp(rdx); + LOperand* value = UseFixed(instr->left(), rax); + LOperand* divisor = UseRegister(instr->right()); + LModI* mod = new LModI(value, divisor, temp); + LInstruction* result = DefineFixed(mod, rdx); + return (instr->CheckFlag(HValue::kBailoutOnMinusZero) || + instr->CheckFlag(HValue::kCanBeDivByZero)) + ? AssignEnvironment(result) + : result; + } else if (instr->representation().IsTagged()) { + return DoArithmeticT(Token::MOD, instr); + } else { + ASSERT(instr->representation().IsDouble()); + // We call a C function for double modulo. It can't trigger a GC. + // We need to use fixed result register for the call. + // TODO(fschneider): Allow any register as input registers. 
+ LOperand* left = UseFixedDouble(instr->left(), xmm1); + LOperand* right = UseFixedDouble(instr->right(), xmm2); + LArithmeticD* result = new LArithmeticD(Token::MOD, left, right); + return MarkAsCall(DefineFixedDouble(result, xmm1), instr); + } } @@ -1461,8 +1546,9 @@ LInstruction* LChunkBuilder::DoPixelArrayLength(HPixelArrayLength* instr) { LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) { - Abort("Unimplemented: %s", "DoValueOf"); - return NULL; + LOperand* object = UseRegister(instr->value()); + LValueOf* result = new LValueOf(object); + return AssignEnvironment(DefineSameAsFirst(result)); } @@ -1519,12 +1605,8 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) { return AssignPointerMap(Define(result, result_temp)); } else { ASSERT(to.IsInteger32()); - bool needs_temp = instr->CanTruncateToInt32() && - !CpuFeatures::IsSupported(SSE3); - LOperand* value = needs_temp ? - UseTempRegister(instr->value()) : UseRegister(instr->value()); - LOperand* temp = needs_temp ? TempRegister() : NULL; - return AssignEnvironment(DefineAsRegister(new LDoubleToI(value, temp))); + LOperand* value = UseRegister(instr->value()); + return AssignEnvironment(DefineAsRegister(new LDoubleToI(value))); } } else if (from.IsInteger32()) { if (to.IsTagged()) { @@ -1622,14 +1704,23 @@ LInstruction* LChunkBuilder::DoStoreGlobal(HStoreGlobal* instr) { LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) { - Abort("Unimplemented: %s", "DoLoadContextSlot"); - return NULL; + LOperand* context = UseRegisterAtStart(instr->value()); + return DefineAsRegister(new LLoadContextSlot(context)); } LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) { - Abort("Unimplemented: DoStoreContextSlot"); - return NULL; + Abort("Unimplemented: DoStoreContextSlot"); // Temporarily disabled (whesse). 
+ LOperand* context; + LOperand* value; + if (instr->NeedsWriteBarrier()) { + context = UseTempRegister(instr->context()); + value = UseTempRegister(instr->value()); + } else { + context = UseRegister(instr->context()); + value = UseRegister(instr->value()); + } + return new LStoreContextSlot(context, value); } @@ -1692,8 +1783,11 @@ LInstruction* LChunkBuilder::DoLoadPixelArrayElement( LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) { - Abort("Unimplemented: %s", "DoLoadKeyedGeneric"); - return NULL; + LOperand* object = UseFixed(instr->object(), rdx); + LOperand* key = UseFixed(instr->key(), rax); + + LLoadKeyedGeneric* result = new LLoadKeyedGeneric(object, key); + return MarkAsCall(DefineFixed(result, rax), instr); } @@ -1716,9 +1810,31 @@ LInstruction* LChunkBuilder::DoStoreKeyedFastElement( } +LInstruction* LChunkBuilder::DoStorePixelArrayElement( + HStorePixelArrayElement* instr) { + ASSERT(instr->value()->representation().IsInteger32()); + ASSERT(instr->external_pointer()->representation().IsExternal()); + ASSERT(instr->key()->representation().IsInteger32()); + + LOperand* external_pointer = UseRegister(instr->external_pointer()); + LOperand* val = UseTempRegister(instr->value()); + LOperand* key = UseRegister(instr->key()); + + return new LStorePixelArrayElement(external_pointer, key, val); +} + + LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) { - Abort("Unimplemented: %s", "DoStoreKeyedGeneric"); - return NULL; + LOperand* object = UseFixed(instr->object(), rdx); + LOperand* key = UseFixed(instr->key(), rcx); + LOperand* value = UseFixed(instr->value(), rax); + + ASSERT(instr->object()->representation().IsTagged()); + ASSERT(instr->key()->representation().IsTagged()); + ASSERT(instr->value()->representation().IsTagged()); + + LStoreKeyedGeneric* result = new LStoreKeyedGeneric(object, key, value); + return MarkAsCall(result, instr); } @@ -1743,14 +1859,19 @@ LInstruction* 
LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) { LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) { - Abort("Unimplemented: %s", "DoStoreNamedGeneric"); - return NULL; + LOperand* object = UseFixed(instr->object(), rdx); + LOperand* value = UseFixed(instr->value(), rax); + + LStoreNamedGeneric* result = new LStoreNamedGeneric(object, value); + return MarkAsCall(result, instr); } LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) { - Abort("Unimplemented: %s", "DoStringCharCodeAt"); - return NULL; + LOperand* string = UseRegister(instr->string()); + LOperand* index = UseRegisterOrConstant(instr->index()); + LStringCharCodeAt* result = new LStringCharCodeAt(string, index); + return AssignEnvironment(AssignPointerMap(DefineAsRegister(result))); } @@ -1771,8 +1892,7 @@ LInstruction* LChunkBuilder::DoObjectLiteral(HObjectLiteral* instr) { LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) { - Abort("Unimplemented: %s", "DoRegExpLiteral"); - return NULL; + return MarkAsCall(DefineFixed(new LRegExpLiteral, rax), instr); } @@ -1782,14 +1902,16 @@ LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) { LInstruction* LChunkBuilder::DoDeleteProperty(HDeleteProperty* instr) { - Abort("Unimplemented: %s", "DoDeleteProperty"); - return NULL; + LDeleteProperty* result = + new LDeleteProperty(Use(instr->object()), UseOrConstant(instr->key())); + return MarkAsCall(DefineFixed(result, rax), instr); } LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) { - Abort("Unimplemented: %s", "DoOsrEntry"); - return NULL; + allocator_->MarkAsOsrEntry(); + current_block_->last_environment()->set_ast_id(instr->ast_id()); + return AssignEnvironment(new LOsrEntry); } @@ -1800,8 +1922,8 @@ LInstruction* LChunkBuilder::DoParameter(HParameter* instr) { LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) { - Abort("Unimplemented: %s", "DoUnknownOSRValue"); - return NULL; + int 
spill_index = chunk()->GetNextSpillIndex(false); // Not double-width. + return DefineAsSpilled(new LUnknownOSRValue, spill_index); } @@ -1812,7 +1934,10 @@ LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) { LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) { - Abort("Unimplemented: %s", "DoArgumentsObject"); + // There are no real uses of the arguments object. + // arguments.length and element access are supported directly on + // stack arguments, and any real arguments object use causes a bailout. + // So this value is never used. return NULL; } @@ -1827,14 +1952,13 @@ LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) { LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) { - Abort("Unimplemented: %s", "DoTypeof"); - return NULL; + LTypeof* result = new LTypeof(UseAtStart(instr->value())); + return MarkAsCall(DefineFixed(result, rax), instr); } LInstruction* LChunkBuilder::DoTypeofIs(HTypeofIs* instr) { - Abort("Unimplemented: %s", "DoTypeofIs"); - return NULL; + return DefineSameAsFirst(new LTypeofIs(UseRegister(instr->value()))); } diff --git a/deps/v8/src/x64/lithium-x64.h b/deps/v8/src/x64/lithium-x64.h index abffe50b19..fed5b8cb88 100644 --- a/deps/v8/src/x64/lithium-x64.h +++ b/deps/v8/src/x64/lithium-x64.h @@ -42,8 +42,6 @@ class LCodeGen; #define LITHIUM_ALL_INSTRUCTION_LIST(V) \ V(ControlInstruction) \ V(Call) \ - V(StoreKeyed) \ - V(StoreNamed) \ LITHIUM_CONCRETE_INSTRUCTION_LIST(V) @@ -131,6 +129,7 @@ class LCodeGen; V(NumberUntagD) \ V(ObjectLiteral) \ V(OsrEntry) \ + V(OuterContext) \ V(Parameter) \ V(PixelArrayLength) \ V(Power) \ @@ -141,11 +140,14 @@ class LCodeGen; V(SmiTag) \ V(SmiUntag) \ V(StackCheck) \ + V(StoreContextSlot) \ V(StoreGlobal) \ V(StoreKeyedFastElement) \ V(StoreKeyedGeneric) \ V(StoreNamedField) \ V(StoreNamedGeneric) \ + V(StorePixelArrayElement) \ + V(StringCharCodeAt) \ V(StringLength) \ V(SubI) \ V(TaggedToI) \ @@ -830,11 +832,10 @@ class LInstanceOfAndBranch: public 
LControlInstruction<2, 0> { }; -class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 1> { +class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 0> { public: - LInstanceOfKnownGlobal(LOperand* value, LOperand* temp) { + explicit LInstanceOfKnownGlobal(LOperand* value) { inputs_[0] = value; - temps_[0] = temp; } DECLARE_CONCRETE_INSTRUCTION(InstanceOfKnownGlobal, @@ -1005,11 +1006,10 @@ class LFixedArrayLength: public LTemplateInstruction<1, 1, 0> { }; -class LValueOf: public LTemplateInstruction<1, 1, 1> { +class LValueOf: public LTemplateInstruction<1, 1, 0> { public: - LValueOf(LOperand* value, LOperand* temp) { + explicit LValueOf(LOperand* value) { inputs_[0] = value; - temps_[0] = temp; } DECLARE_CONCRETE_INSTRUCTION(ValueOf, "value-of") @@ -1246,6 +1246,25 @@ class LLoadContextSlot: public LTemplateInstruction<1, 1, 0> { }; +class LStoreContextSlot: public LTemplateInstruction<0, 2, 0> { + public: + LStoreContextSlot(LOperand* context, LOperand* value) { + inputs_[0] = context; + inputs_[1] = value; + } + + DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store-context-slot") + DECLARE_HYDROGEN_ACCESSOR(StoreContextSlot) + + LOperand* context() { return InputAt(0); } + LOperand* value() { return InputAt(1); } + int slot_index() { return hydrogen()->slot_index(); } + int needs_write_barrier() { return hydrogen()->NeedsWriteBarrier(); } + + virtual void PrintDataTo(StringStream* stream); +}; + + class LPushArgument: public LTemplateInstruction<0, 1, 0> { public: explicit LPushArgument(LOperand* value) { @@ -1262,6 +1281,18 @@ class LContext: public LTemplateInstruction<1, 0, 0> { }; +class LOuterContext: public LTemplateInstruction<1, 1, 0> { + public: + explicit LOuterContext(LOperand* context) { + inputs_[0] = context; + } + + DECLARE_CONCRETE_INSTRUCTION(OuterContext, "outer-context") + + LOperand* context() { return InputAt(0); } +}; + + class LGlobalObject: public LTemplateInstruction<1, 0, 0> { public: 
DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global-object") @@ -1295,6 +1326,8 @@ class LCallKeyed: public LTemplateInstruction<1, 1, 0> { DECLARE_CONCRETE_INSTRUCTION(CallKeyed, "call-keyed") DECLARE_HYDROGEN_ACCESSOR(CallKeyed) + LOperand* key() { return inputs_[0]; } + virtual void PrintDataTo(StringStream* stream); int arity() const { return hydrogen()->argument_count() - 1; } @@ -1315,6 +1348,8 @@ class LCallNamed: public LTemplateInstruction<1, 0, 0> { class LCallFunction: public LTemplateInstruction<1, 0, 0> { public: + LCallFunction() {} + DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function") DECLARE_HYDROGEN_ACCESSOR(CallFunction) @@ -1403,11 +1438,10 @@ class LNumberTagD: public LTemplateInstruction<1, 1, 1> { // Sometimes truncating conversion from a tagged value to an int32. -class LDoubleToI: public LTemplateInstruction<1, 1, 1> { +class LDoubleToI: public LTemplateInstruction<1, 1, 0> { public: - LDoubleToI(LOperand* value, LOperand* temp) { + explicit LDoubleToI(LOperand* value) { inputs_[0] = value; - temps_[0] = temp; } DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i") @@ -1468,34 +1502,23 @@ class LSmiUntag: public LTemplateInstruction<1, 1, 0> { }; -class LStoreNamed: public LTemplateInstruction<0, 2, 1> { +class LStoreNamedField: public LTemplateInstruction<0, 2, 1> { public: - LStoreNamed(LOperand* obj, LOperand* val) { - inputs_[0] = obj; - inputs_[1] = val; + LStoreNamedField(LOperand* object, LOperand* value, LOperand* temp) { + inputs_[0] = object; + inputs_[1] = value; + temps_[0] = temp; } - DECLARE_INSTRUCTION(StoreNamed) - DECLARE_HYDROGEN_ACCESSOR(StoreNamed) + DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field") + DECLARE_HYDROGEN_ACCESSOR(StoreNamedField) virtual void PrintDataTo(StringStream* stream); LOperand* object() { return inputs_[0]; } LOperand* value() { return inputs_[1]; } - Handle<Object> name() const { return hydrogen()->name(); } -}; - - -class LStoreNamedField: public LStoreNamed { - public: - 
LStoreNamedField(LOperand* obj, LOperand* val, LOperand* temp) - : LStoreNamed(obj, val) { - temps_[0] = temp; - } - - DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field") - DECLARE_HYDROGEN_ACCESSOR(StoreNamedField) + Handle<Object> name() const { return hydrogen()->name(); } bool is_in_object() { return hydrogen()->is_in_object(); } int offset() { return hydrogen()->offset(); } bool needs_write_barrier() { return hydrogen()->NeedsWriteBarrier(); } @@ -1503,25 +1526,35 @@ class LStoreNamedField: public LStoreNamed { }; -class LStoreNamedGeneric: public LStoreNamed { +class LStoreNamedGeneric: public LTemplateInstruction<0, 2, 0> { public: - LStoreNamedGeneric(LOperand* obj, LOperand* val) - : LStoreNamed(obj, val) { } + LStoreNamedGeneric(LOperand* object, LOperand* value) { + inputs_[0] = object; + inputs_[1] = value; + } DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic") DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric) + + virtual void PrintDataTo(StringStream* stream); + + LOperand* object() { return inputs_[0]; } + LOperand* value() { return inputs_[1]; } + Handle<Object> name() const { return hydrogen()->name(); } }; -class LStoreKeyed: public LTemplateInstruction<0, 3, 0> { +class LStoreKeyedFastElement: public LTemplateInstruction<0, 3, 0> { public: - LStoreKeyed(LOperand* obj, LOperand* key, LOperand* val) { + LStoreKeyedFastElement(LOperand* obj, LOperand* key, LOperand* val) { inputs_[0] = obj; inputs_[1] = key; inputs_[2] = val; } - DECLARE_INSTRUCTION(StoreKeyed) + DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement, + "store-keyed-fast-element") + DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastElement) virtual void PrintDataTo(StringStream* stream); @@ -1531,23 +1564,56 @@ class LStoreKeyed: public LTemplateInstruction<0, 3, 0> { }; -class LStoreKeyedFastElement: public LStoreKeyed { +class LStorePixelArrayElement: public LTemplateInstruction<0, 3, 0> { public: - LStoreKeyedFastElement(LOperand* obj, LOperand* key, LOperand* val) 
- : LStoreKeyed(obj, key, val) {} + LStorePixelArrayElement(LOperand* external_pointer, + LOperand* key, + LOperand* val) { + inputs_[0] = external_pointer; + inputs_[1] = key; + inputs_[2] = val; + } - DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement, - "store-keyed-fast-element") - DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastElement) + DECLARE_CONCRETE_INSTRUCTION(StorePixelArrayElement, + "store-pixel-array-element") + DECLARE_HYDROGEN_ACCESSOR(StorePixelArrayElement) + + LOperand* external_pointer() { return inputs_[0]; } + LOperand* key() { return inputs_[1]; } + LOperand* value() { return inputs_[2]; } }; -class LStoreKeyedGeneric: public LStoreKeyed { +class LStoreKeyedGeneric: public LTemplateInstruction<0, 3, 0> { public: - LStoreKeyedGeneric(LOperand* obj, LOperand* key, LOperand* val) - : LStoreKeyed(obj, key, val) { } + LStoreKeyedGeneric(LOperand* object, LOperand* key, LOperand* value) { + inputs_[0] = object; + inputs_[1] = key; + inputs_[2] = value; + } DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic") + + virtual void PrintDataTo(StringStream* stream); + + LOperand* object() { return inputs_[0]; } + LOperand* key() { return inputs_[1]; } + LOperand* value() { return inputs_[2]; } +}; + + +class LStringCharCodeAt: public LTemplateInstruction<1, 2, 0> { + public: + LStringCharCodeAt(LOperand* string, LOperand* index) { + inputs_[0] = string; + inputs_[1] = index; + } + + DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at") + DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt) + + LOperand* string() { return inputs_[0]; } + LOperand* index() { return inputs_[1]; } }; diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc index e7d02d2003..48e42c85ca 100644 --- a/deps/v8/src/x64/macro-assembler-x64.cc +++ b/deps/v8/src/x64/macro-assembler-x64.cc @@ -623,7 +623,9 @@ MaybeObject* MacroAssembler::TryJumpToExternalReference( } -void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, 
InvokeFlag flag) { +void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, + InvokeFlag flag, + PostCallGenerator* post_call_generator) { // Calls are not allowed in some stubs. ASSERT(flag == JUMP_FUNCTION || allow_stub_calls()); @@ -632,7 +634,7 @@ void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag) { // parameter count to avoid emitting code to do the check. ParameterCount expected(0); GetBuiltinEntry(rdx, id); - InvokeCode(rdx, expected, expected, flag); + InvokeCode(rdx, expected, expected, flag, post_call_generator); } @@ -1444,15 +1446,17 @@ void MacroAssembler::Pushad() { // r15 is kSmiConstantRegister STATIC_ASSERT(11 == kNumSafepointSavedRegisters); // Use lea for symmetry with Popad. - lea(rsp, Operand(rsp, - -(kNumSafepointRegisters-kNumSafepointSavedRegisters) * kPointerSize)); + int sp_delta = + (kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize; + lea(rsp, Operand(rsp, -sp_delta)); } void MacroAssembler::Popad() { // Popad must not change the flags, so use lea instead of addq. - lea(rsp, Operand(rsp, - (kNumSafepointRegisters-kNumSafepointSavedRegisters) * kPointerSize)); + int sp_delta = + (kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize; + lea(rsp, Operand(rsp, sp_delta)); pop(r14); pop(r12); pop(r11); @@ -1494,6 +1498,16 @@ int MacroAssembler::kSafepointPushRegisterIndices[Register::kNumRegisters] = { }; +void MacroAssembler::StoreToSafepointRegisterSlot(Register dst, Register src) { + movq(SafepointRegisterSlot(dst), src); +} + + +Operand MacroAssembler::SafepointRegisterSlot(Register reg) { + return Operand(rsp, SafepointRegisterStackIndex(reg.code()) * kPointerSize); +} + + void MacroAssembler::PushTryHandler(CodeLocation try_location, HandlerType type) { // Adjust this code if not the case. 
@@ -1835,11 +1849,19 @@ void MacroAssembler::DebugBreak() { void MacroAssembler::InvokeCode(Register code, const ParameterCount& expected, const ParameterCount& actual, - InvokeFlag flag) { + InvokeFlag flag, + PostCallGenerator* post_call_generator) { NearLabel done; - InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag); + InvokePrologue(expected, + actual, + Handle<Code>::null(), + code, + &done, + flag, + post_call_generator); if (flag == CALL_FUNCTION) { call(code); + if (post_call_generator != NULL) post_call_generator->Generate(); } else { ASSERT(flag == JUMP_FUNCTION); jmp(code); @@ -1852,12 +1874,20 @@ void MacroAssembler::InvokeCode(Handle<Code> code, const ParameterCount& expected, const ParameterCount& actual, RelocInfo::Mode rmode, - InvokeFlag flag) { + InvokeFlag flag, + PostCallGenerator* post_call_generator) { NearLabel done; Register dummy = rax; - InvokePrologue(expected, actual, code, dummy, &done, flag); + InvokePrologue(expected, + actual, + code, + dummy, + &done, + flag, + post_call_generator); if (flag == CALL_FUNCTION) { Call(code, rmode); + if (post_call_generator != NULL) post_call_generator->Generate(); } else { ASSERT(flag == JUMP_FUNCTION); Jump(code, rmode); @@ -1868,7 +1898,8 @@ void MacroAssembler::InvokeCode(Handle<Code> code, void MacroAssembler::InvokeFunction(Register function, const ParameterCount& actual, - InvokeFlag flag) { + InvokeFlag flag, + PostCallGenerator* post_call_generator) { ASSERT(function.is(rdi)); movq(rdx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset)); movq(rsi, FieldOperand(function, JSFunction::kContextOffset)); @@ -1879,13 +1910,14 @@ void MacroAssembler::InvokeFunction(Register function, movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset)); ParameterCount expected(rbx); - InvokeCode(rdx, expected, actual, flag); + InvokeCode(rdx, expected, actual, flag, post_call_generator); } void MacroAssembler::InvokeFunction(JSFunction* function, const ParameterCount& 
actual, - InvokeFlag flag) { + InvokeFlag flag, + PostCallGenerator* post_call_generator) { ASSERT(function->is_compiled()); // Get the function and setup the context. Move(rdi, Handle<JSFunction>(function)); @@ -1896,12 +1928,17 @@ void MacroAssembler::InvokeFunction(JSFunction* function, // the Code object every time we call the function. movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset)); ParameterCount expected(function->shared()->formal_parameter_count()); - InvokeCode(rdx, expected, actual, flag); + InvokeCode(rdx, expected, actual, flag, post_call_generator); } else { // Invoke the cached code. Handle<Code> code(function->code()); ParameterCount expected(function->shared()->formal_parameter_count()); - InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET, flag); + InvokeCode(code, + expected, + actual, + RelocInfo::CODE_TARGET, + flag, + post_call_generator); } } diff --git a/deps/v8/src/x64/macro-assembler-x64.h b/deps/v8/src/x64/macro-assembler-x64.h index 8352518323..4c5c60c8f0 100644 --- a/deps/v8/src/x64/macro-assembler-x64.h +++ b/deps/v8/src/x64/macro-assembler-x64.h @@ -58,6 +58,7 @@ typedef Operand MemOperand; // Forward declaration. class JumpTarget; +class PostCallGenerator; struct SmiIndex { SmiIndex(Register index_register, ScaleFactor scale) @@ -170,10 +171,9 @@ class MacroAssembler: public Assembler { // Push and pop the registers that can hold pointers. void PushSafepointRegisters() { Pushad(); } void PopSafepointRegisters() { Popad(); } - static int SafepointRegisterStackIndex(int reg_code) { - return kNumSafepointRegisters - 1 - - kSafepointPushRegisterIndices[reg_code]; - } + // Store the value in register src in the safepoint register stack + // slot for register dst. 
+ void StoreToSafepointRegisterSlot(Register dst, Register src); // --------------------------------------------------------------------------- @@ -183,27 +183,33 @@ class MacroAssembler: public Assembler { void InvokeCode(Register code, const ParameterCount& expected, const ParameterCount& actual, - InvokeFlag flag); + InvokeFlag flag, + PostCallGenerator* post_call_generator = NULL); void InvokeCode(Handle<Code> code, const ParameterCount& expected, const ParameterCount& actual, RelocInfo::Mode rmode, - InvokeFlag flag); + InvokeFlag flag, + PostCallGenerator* post_call_generator = NULL); // Invoke the JavaScript function in the given register. Changes the // current context to the context in the function before invoking. void InvokeFunction(Register function, const ParameterCount& actual, - InvokeFlag flag); + InvokeFlag flag, + PostCallGenerator* post_call_generator = NULL); void InvokeFunction(JSFunction* function, const ParameterCount& actual, - InvokeFlag flag); + InvokeFlag flag, + PostCallGenerator* post_call_generator = NULL); // Invoke specified builtin JavaScript function. Adds an entry to // the unresolved list if the name does not resolve. - void InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag); + void InvokeBuiltin(Builtins::JavaScript id, + InvokeFlag flag, + PostCallGenerator* post_call_generator = NULL); // Store the function for the given builtin in the target register. void GetBuiltinFunction(Register target, Builtins::JavaScript id); @@ -996,7 +1002,8 @@ class MacroAssembler: public Assembler { Handle<Code> code_constant, Register code_register, LabelType* done, - InvokeFlag flag); + InvokeFlag flag, + PostCallGenerator* post_call_generator); // Activation support. void EnterFrame(StackFrame::Type type); @@ -1027,6 +1034,17 @@ class MacroAssembler: public Assembler { Object* PopHandleScopeHelper(Register saved, Register scratch, bool gc_allowed); + + + // Compute memory operands for safepoint stack slots. 
+ Operand SafepointRegisterSlot(Register reg); + static int SafepointRegisterStackIndex(int reg_code) { + return kNumSafepointRegisters - kSafepointPushRegisterIndices[reg_code] - 1; + } + + // Needs access to SafepointRegisterStackIndex for optimized frame + // traversal. + friend class OptimizedFrame; }; @@ -1050,6 +1068,17 @@ class CodePatcher { }; +// Helper class for generating code or data associated with the code +// right after a call instruction. As an example this can be used to +// generate safepoint data after calls for crankshaft. +class PostCallGenerator { + public: + PostCallGenerator() { } + virtual ~PostCallGenerator() { } + virtual void Generate() = 0; +}; + + // ----------------------------------------------------------------------------- // Static helper functions. @@ -1756,7 +1785,8 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected, Handle<Code> code_constant, Register code_register, LabelType* done, - InvokeFlag flag) { + InvokeFlag flag, + PostCallGenerator* post_call_generator) { bool definitely_matches = false; NearLabel invoke; if (expected.is_immediate()) { @@ -1807,6 +1837,7 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected, if (flag == CALL_FUNCTION) { Call(adaptor, RelocInfo::CODE_TARGET); + if (post_call_generator != NULL) post_call_generator->Generate(); jmp(done); } else { Jump(adaptor, RelocInfo::CODE_TARGET); diff --git a/deps/v8/src/x64/stub-cache-x64.cc b/deps/v8/src/x64/stub-cache-x64.cc index 973fece32a..c27e1b8c42 100644 --- a/deps/v8/src/x64/stub-cache-x64.cc +++ b/deps/v8/src/x64/stub-cache-x64.cc @@ -2060,8 +2060,9 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object, break; case STRING_CHECK: - if (!function->IsBuiltin()) { - // Calling non-builtins with a value as receiver requires boxing. + if (!function->IsBuiltin() && !function_info->strict_mode()) { + // Calling non-strict non-builtins with a value as the receiver + // requires boxing. 
__ jmp(&miss); } else { // Check that the object is a two-byte string or a symbol. @@ -2076,8 +2077,9 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object, break; case NUMBER_CHECK: { - if (!function->IsBuiltin()) { - // Calling non-builtins with a value as receiver requires boxing. + if (!function->IsBuiltin() && !function_info->strict_mode()) { + // Calling non-strict non-builtins with a value as the receiver + // requires boxing. __ jmp(&miss); } else { Label fast; @@ -2096,8 +2098,9 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object, } case BOOLEAN_CHECK: { - if (!function->IsBuiltin()) { - // Calling non-builtins with a value as receiver requires boxing. + if (!function->IsBuiltin() && !function_info->strict_mode()) { + // Calling non-strict non-builtins with a value as the receiver + // requires boxing. __ jmp(&miss); } else { Label fast; diff --git a/deps/v8/test/cctest/cctest.status b/deps/v8/test/cctest/cctest.status index a7422c2543..e573eb29ba 100644 --- a/deps/v8/test/cctest/cctest.status +++ b/deps/v8/test/cctest/cctest.status @@ -47,29 +47,8 @@ test-serialize/TestThatAlwaysFails: FAIL test-serialize/DependentTestThatAlwaysFails: FAIL ############################################################################## -[ $arch == x64 ] - -# Optimization is currently not working on crankshaft x64 and ARM. -test-heap/TestInternalWeakLists: PASS || FAIL -test-heap/TestInternalWeakListsTraverseWithGC: PASS || FAIL - - -############################################################################## -[ $arch == x64 && $crankshaft ] - -# Tests that fail with crankshaft. 
-test-deoptimization/DeoptimizeBinaryOperationMOD: FAIL -test-deoptimization/DeoptimizeLoadICStoreIC: FAIL -test-deoptimization/DeoptimizeLoadICStoreICNested: FAIL -test-deoptimization/DeoptimizeCompare: PASS || FAIL - -############################################################################## [ $arch == arm ] -# Optimization is currently not working on crankshaft x64 and ARM. -test-heap/TestInternalWeakLists: PASS || FAIL -test-heap/TestInternalWeakListsTraverseWithGC: PASS || FAIL - # We cannot assume that we can throw OutOfMemory exceptions in all situations. # Apparently our ARM box is in such a state. Skip the test as it also runs for # a long time. diff --git a/deps/v8/test/cctest/test-api.cc b/deps/v8/test/cctest/test-api.cc index 3de5b92d48..0da3f1cdc7 100644 --- a/deps/v8/test/cctest/test-api.cc +++ b/deps/v8/test/cctest/test-api.cc @@ -5652,8 +5652,7 @@ TEST(AccessControl) { } -// This is a regression test for issue 1154. -TEST(AccessControlObjectKeys) { +TEST(AccessControlES5) { v8::HandleScope handle_scope; v8::Handle<v8::ObjectTemplate> global_template = v8::ObjectTemplate::New(); @@ -5677,7 +5676,29 @@ TEST(AccessControlObjectKeys) { v8::Handle<v8::Object> global1 = context1->Global(); global1->Set(v8_str("other"), global0); + // Regression test for issue 1154. ExpectTrue("Object.keys(other).indexOf('blocked_prop') == -1"); + + ExpectUndefined("other.blocked_prop"); + + // Regression test for issue 1027. + CompileRun("Object.defineProperty(\n" + " other, 'blocked_prop', {configurable: false})"); + ExpectUndefined("other.blocked_prop"); + ExpectUndefined( + "Object.getOwnPropertyDescriptor(other, 'blocked_prop')"); + + // Regression test for issue 1171. + ExpectTrue("Object.isExtensible(other)"); + CompileRun("Object.preventExtensions(other)"); + ExpectTrue("Object.isExtensible(other)"); + + // Object.seal and Object.freeze. 
+ CompileRun("Object.freeze(other)"); + ExpectTrue("Object.isExtensible(other)"); + + CompileRun("Object.seal(other)"); + ExpectTrue("Object.isExtensible(other)"); } @@ -10825,6 +10846,24 @@ THREADED_TEST(PixelArray) { "result"); CHECK_EQ(32640, result->Int32Value()); + // Make sure that pixel array stores are optimized by crankshaft. + result = CompileRun("function pa_init(p) {" + "for (var i = 0; i < 256; ++i) { p[i] = i; }" + "}" + "function pa_load(p) {" + " var sum = 0;" + " for (var i=0; i<256; ++i) {" + " sum += p[i];" + " }" + " return sum; " + "}" + "for (var i = 0; i < 100000; ++i) {" + " pa_init(pixels);" + "}" + "result = pa_load(pixels);" + "result"); + CHECK_EQ(32640, result->Int32Value()); + free(pixel_data); } @@ -10844,6 +10883,53 @@ THREADED_TEST(PixelArrayInfo) { } +static v8::Handle<Value> NotHandledIndexedPropertyGetter( + uint32_t index, + const AccessorInfo& info) { + ApiTestFuzzer::Fuzz(); + return v8::Handle<Value>(); +} + + +static v8::Handle<Value> NotHandledIndexedPropertySetter( + uint32_t index, + Local<Value> value, + const AccessorInfo& info) { + ApiTestFuzzer::Fuzz(); + return v8::Handle<Value>(); +} + + +THREADED_TEST(PixelArrayWithInterceptor) { + v8::HandleScope scope; + LocalContext context; + const int kElementCount = 260; + uint8_t* pixel_data = reinterpret_cast<uint8_t*>(malloc(kElementCount)); + i::Handle<i::PixelArray> pixels = + i::Factory::NewPixelArray(kElementCount, pixel_data); + for (int i = 0; i < kElementCount; i++) { + pixels->set(i, i % 256); + } + v8::Handle<v8::ObjectTemplate> templ = v8::ObjectTemplate::New(); + templ->SetIndexedPropertyHandler(NotHandledIndexedPropertyGetter, + NotHandledIndexedPropertySetter); + v8::Handle<v8::Object> obj = templ->NewInstance(); + obj->SetIndexedPropertiesToPixelData(pixel_data, kElementCount); + context->Global()->Set(v8_str("pixels"), obj); + v8::Handle<v8::Value> result = CompileRun("pixels[1]"); + CHECK_EQ(1, result->Int32Value()); + result = CompileRun("var sum = 0;" + 
"for (var i = 0; i < 8; i++) {" + " sum += pixels[i] = pixels[i] = -i;" + "}" + "sum;"); + CHECK_EQ(-28, result->Int32Value()); + result = CompileRun("pixels.hasOwnProperty('1')"); + CHECK(result->BooleanValue()); + free(pixel_data); +} + + static int ExternalArrayElementSize(v8::ExternalArrayType array_type) { switch (array_type) { case v8::kExternalByteArray: diff --git a/deps/v8/test/cctest/test-cpu-profiler.cc b/deps/v8/test/cctest/test-cpu-profiler.cc index 239d8ae695..7f06bc34d8 100644 --- a/deps/v8/test/cctest/test-cpu-profiler.cc +++ b/deps/v8/test/cctest/test-cpu-profiler.cc @@ -50,7 +50,7 @@ static void EnqueueTickSampleEvent(ProfilerEventsProcessor* proc, i::Address frame3 = NULL) { i::TickSample* sample = proc->TickSampleEvent(); sample->pc = frame1; - sample->function = frame1; + sample->tos = frame1; sample->frames_count = 0; if (frame2 != NULL) { sample->stack[0] = frame2; @@ -103,7 +103,8 @@ TEST(CodeEvents) { i::Heap::empty_string(), 0, ToAddress(0x1000), - 0x100); + 0x100, + ToAddress(0x10000)); processor.CodeCreateEvent(i::Logger::BUILTIN_TAG, "bbb", ToAddress(0x1200), @@ -116,8 +117,6 @@ TEST(CodeEvents) { processor.CodeMoveEvent(ToAddress(0x1400), ToAddress(0x1500)); processor.CodeCreateEvent(i::Logger::STUB_TAG, 3, ToAddress(0x1600), 0x10); processor.CodeDeleteEvent(ToAddress(0x1600)); - processor.FunctionCreateEvent(ToAddress(0x1700), ToAddress(0x1000), - TokenEnumerator::kNoSecurityToken); // Enqueue a tick event to enable code events processing. 
EnqueueTickSampleEvent(&processor, ToAddress(0x1000)); @@ -139,9 +138,6 @@ TEST(CodeEvents) { CHECK_NE(NULL, entry4); CHECK_EQ("ddd", entry4->name()); CHECK_EQ(NULL, generator.code_map()->FindEntry(ToAddress(0x1600))); - CodeEntry* entry5 = generator.code_map()->FindEntry(ToAddress(0x1700)); - CHECK_NE(NULL, entry5); - CHECK_EQ(aaa_str, entry5->name()); } diff --git a/deps/v8/test/cctest/test-decls.cc b/deps/v8/test/cctest/test-decls.cc index 88fa79b70e..6ea4c849ee 100644 --- a/deps/v8/test/cctest/test-decls.cc +++ b/deps/v8/test/cctest/test-decls.cc @@ -223,7 +223,7 @@ TEST(Unknown) { { DeclarationContext context; context.Check("function x() { }; x", 1, // access - 1, // declaration + 0, 0, EXPECT_RESULT); } @@ -278,7 +278,7 @@ TEST(Present) { { PresentPropertyContext context; context.Check("function x() { }; x", 1, // access - 1, // declaration + 0, 0, EXPECT_RESULT); } @@ -332,7 +332,7 @@ TEST(Absent) { { AbsentPropertyContext context; context.Check("function x() { }; x", 1, // access - 1, // declaration + 0, 0, EXPECT_RESULT); } @@ -422,7 +422,7 @@ TEST(Appearing) { { AppearingPropertyContext context; context.Check("function x() { }; x", 1, // access - 1, // declaration + 0, 0, EXPECT_RESULT); } diff --git a/deps/v8/test/cctest/test-log-stack-tracer.cc b/deps/v8/test/cctest/test-log-stack-tracer.cc index c85f6c0bc3..bf72184fb0 100644 --- a/deps/v8/test/cctest/test-log-stack-tracer.cc +++ b/deps/v8/test/cctest/test-log-stack-tracer.cc @@ -33,6 +33,7 @@ #include "v8.h" +#include "api.h" #include "codegen.h" #include "log.h" #include "top.h" @@ -200,16 +201,16 @@ static void InitializeVM() { } -static void CheckJSFunctionAtAddress(const char* func_name, Address addr) { - CHECK(i::Heap::Contains(addr)); - i::Object* obj = i::HeapObject::FromAddress(addr); - CHECK(obj->IsJSFunction()); - CHECK(JSFunction::cast(obj)->shared()->name()->IsString()); - i::SmartPointer<char> found_name = - i::String::cast( - JSFunction::cast( - obj)->shared()->name())->ToCString(); - 
CHECK_EQ(func_name, *found_name); +static bool IsAddressWithinFuncCode(JSFunction* function, Address addr) { + i::Code* code = function->code(); + return code->contains(addr); +} + +static bool IsAddressWithinFuncCode(const char* func_name, Address addr) { + v8::Local<v8::Value> func = env->Global()->Get(v8_str(func_name)); + CHECK(func->IsFunction()); + JSFunction* js_func = JSFunction::cast(*v8::Utils::OpenHandle(*func)); + return IsAddressWithinFuncCode(js_func, addr); } @@ -309,8 +310,8 @@ TEST(CFromJSStackTrace) { // Stack tracing will start from the first JS function, i.e. "JSFuncDoTrace" CHECK_GT(sample.frames_count, base + 1); - CheckJSFunctionAtAddress("JSFuncDoTrace", sample.stack[base + 0]); - CheckJSFunctionAtAddress("JSTrace", sample.stack[base + 1]); + CHECK(IsAddressWithinFuncCode("JSFuncDoTrace", sample.stack[base + 0])); + CHECK(IsAddressWithinFuncCode("JSTrace", sample.stack[base + 1])); } @@ -351,9 +352,6 @@ TEST(PureJSStackTrace) { // DoTraceHideCEntryFPAddress(EBP) [native] // StackTracer::Trace // - // The last JS function called. It is only visible through - // sample.function, as its return address is above captured EBP value. - CheckJSFunctionAtAddress("JSFuncDoTrace", sample.function); // The VM state tracking keeps track of external callbacks and puts // them at the top of the sample stack. @@ -363,8 +361,8 @@ TEST(PureJSStackTrace) { // Stack sampling will start from the caller of JSFuncDoTrace, i.e. 
"JSTrace" CHECK_GT(sample.frames_count, base + 1); - CheckJSFunctionAtAddress("JSTrace", sample.stack[base + 0]); - CheckJSFunctionAtAddress("OuterJSTrace", sample.stack[base + 1]); + CHECK(IsAddressWithinFuncCode("JSTrace", sample.stack[base + 0])); + CHECK(IsAddressWithinFuncCode("OuterJSTrace", sample.stack[base + 1])); } diff --git a/deps/v8/test/cctest/test-log.cc b/deps/v8/test/cctest/test-log.cc index 032a183661..30b8a48dbf 100644 --- a/deps/v8/test/cctest/test-log.cc +++ b/deps/v8/test/cctest/test-log.cc @@ -1053,10 +1053,10 @@ static bool AreFuncNamesEqual(CodeEntityInfo ref_s, CodeEntityInfo new_s) { // Skip size. ref_s = strchr(ref_s, ',') + 1; new_s = strchr(new_s, ',') + 1; - int ref_len = StrChrLen(ref_s, '\n'); - int new_len = StrChrLen(new_s, '\n'); - // If reference is anonymous (""), it's OK to have anything in new. - if (ref_len == 2) return true; + CHECK_EQ('"', ref_s[0]); + CHECK_EQ('"', new_s[0]); + int ref_len = StrChrLen(ref_s + 1, '\"'); + int new_len = StrChrLen(new_s + 1, '\"'); // A special case for ErrorPrototype. Haven't yet figured out why they // are different. const char* error_prototype = "\"ErrorPrototype"; @@ -1074,21 +1074,6 @@ static bool AreFuncNamesEqual(CodeEntityInfo ref_s, CodeEntityInfo new_s) { return true; } } - // Code objects can change their optimizability: code object may start - // as optimizable, but later be discovered to be actually not optimizable. - // Alas, we don't record this info as of now, so we allow cases when - // ref is thought to be optimizable while traverse finds it to be - // not optimizable. - if (ref_s[1] == '~') { // Code object used to be optimizable - if (new_s[1] == ' ') { // ...but later was set unoptimizable. - CHECK_EQ('"', ref_s[0]); - CHECK_EQ('"', new_s[0]); - ref_s += 2; // Cut the leading quote and the marker - ref_len -= 2; - new_s += 1; // Cut the leading quote only. 
- new_len -= 1; - } - } return ref_len == new_len && strncmp(ref_s, new_s, ref_len) == 0; } diff --git a/deps/v8/test/cctest/test-parsing.cc b/deps/v8/test/cctest/test-parsing.cc index 151cf50a5c..8ee40385a6 100755 --- a/deps/v8/test/cctest/test-parsing.cc +++ b/deps/v8/test/cctest/test-parsing.cc @@ -321,14 +321,17 @@ TEST(Regress928) { data->Initialize(); - int first_function = strstr(program, "function") - program; - int first_lbrace = first_function + strlen("function () "); + int first_function = + static_cast<int>(strstr(program, "function") - program); + int first_lbrace = first_function + static_cast<int>(strlen("function () ")); CHECK_EQ('{', program[first_lbrace]); i::FunctionEntry entry1 = data->GetFunctionEntry(first_lbrace); CHECK(!entry1.is_valid()); - int second_function = strstr(program + first_lbrace, "function") - program; - int second_lbrace = second_function + strlen("function () "); + int second_function = + static_cast<int>(strstr(program + first_lbrace, "function") - program); + int second_lbrace = + second_function + static_cast<int>(strlen("function () ")); CHECK_EQ('{', program[second_lbrace]); i::FunctionEntry entry2 = data->GetFunctionEntry(second_lbrace); CHECK(entry2.is_valid()); diff --git a/deps/v8/test/cctest/test-profile-generator.cc b/deps/v8/test/cctest/test-profile-generator.cc index f849d404ae..c60d0720aa 100644 --- a/deps/v8/test/cctest/test-profile-generator.cc +++ b/deps/v8/test/cctest/test-profile-generator.cc @@ -600,13 +600,13 @@ TEST(RecordTickSample) { // -> ccc -> aaa - sample3 TickSample sample1; sample1.pc = ToAddress(0x1600); - sample1.function = ToAddress(0x1500); + sample1.tos = ToAddress(0x1500); sample1.stack[0] = ToAddress(0x1510); sample1.frames_count = 1; generator.RecordTickSample(sample1); TickSample sample2; sample2.pc = ToAddress(0x1925); - sample2.function = ToAddress(0x1900); + sample2.tos = ToAddress(0x1900); sample2.stack[0] = ToAddress(0x1780); sample2.stack[1] = ToAddress(0x10000); // non-existent. 
sample2.stack[2] = ToAddress(0x1620); @@ -614,7 +614,7 @@ TEST(RecordTickSample) { generator.RecordTickSample(sample2); TickSample sample3; sample3.pc = ToAddress(0x1510); - sample3.function = ToAddress(0x1500); + sample3.tos = ToAddress(0x1500); sample3.stack[0] = ToAddress(0x1910); sample3.stack[1] = ToAddress(0x1610); sample3.frames_count = 2; diff --git a/deps/v8/test/cctest/test-serialize.cc b/deps/v8/test/cctest/test-serialize.cc index 1cbaf2bf66..80910c2564 100644 --- a/deps/v8/test/cctest/test-serialize.cc +++ b/deps/v8/test/cctest/test-serialize.cc @@ -104,7 +104,7 @@ TEST(ExternalReferenceEncoder) { ExternalReferenceEncoder encoder; CHECK_EQ(make_code(BUILTIN, Builtins::ArrayCode), Encode(encoder, Builtins::ArrayCode)); - CHECK_EQ(make_code(RUNTIME_FUNCTION, Runtime::kAbort), + CHECK_EQ(make_code(v8::internal::RUNTIME_FUNCTION, Runtime::kAbort), Encode(encoder, Runtime::kAbort)); CHECK_EQ(make_code(IC_UTILITY, IC::kLoadCallbackProperty), Encode(encoder, IC_Utility(IC::kLoadCallbackProperty))); @@ -142,7 +142,8 @@ TEST(ExternalReferenceDecoder) { CHECK_EQ(AddressOf(Builtins::ArrayCode), decoder.Decode(make_code(BUILTIN, Builtins::ArrayCode))); CHECK_EQ(AddressOf(Runtime::kAbort), - decoder.Decode(make_code(RUNTIME_FUNCTION, Runtime::kAbort))); + decoder.Decode(make_code(v8::internal::RUNTIME_FUNCTION, + Runtime::kAbort))); CHECK_EQ(AddressOf(IC_Utility(IC::kLoadCallbackProperty)), decoder.Decode(make_code(IC_UTILITY, IC::kLoadCallbackProperty))); ExternalReference keyed_load_function = diff --git a/deps/v8/test/mjsunit/regress/regress-1105.js b/deps/v8/test/mjsunit/compiler/regress-valueof.js index cfe2bd389c..7b29b46a66 100644 --- a/deps/v8/test/mjsunit/regress/regress-1105.js +++ b/deps/v8/test/mjsunit/compiler/regress-valueof.js @@ -25,14 +25,11 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-// This should properly catch the exception from the setter triggered -// by the loaded file, and it should not fail an assertion in debug mode. +// Flags: --allow-natives-syntax -__defineSetter__("x", function(){ throw 42; }); +// Test valueof with integer input. +function f(x) { var y = x + 1; return %_ValueOf(y); } -try { - this.eval('function x(){}'); - assertUnreachable(); -} catch (e) { - assertEquals(42, e); -} +for (var i=0; i<100000; i++) f(42); + +assertEquals(43, f(42)); diff --git a/deps/v8/test/mjsunit/mjsunit.js b/deps/v8/test/mjsunit/mjsunit.js index 558282f52b..fe580f3500 100644 --- a/deps/v8/test/mjsunit/mjsunit.js +++ b/deps/v8/test/mjsunit/mjsunit.js @@ -104,6 +104,13 @@ function deepEquals(a, b) { } +function assertSame(expected, found, name_opt) { + if (found !== expected) { + fail(expected, found, name_opt); + } +} + + function assertEquals(expected, found, name_opt) { if (!deepEquals(found, expected)) { fail(expected, found, name_opt); diff --git a/deps/v8/test/mjsunit/regress/regress-1151.js b/deps/v8/test/mjsunit/regress/regress-1151.js index d36126e6e8..8d0bca9057 100644 --- a/deps/v8/test/mjsunit/regress/regress-1151.js +++ b/deps/v8/test/mjsunit/regress/regress-1151.js @@ -30,3 +30,20 @@ __defineSetter__.__proto__ = function() {}; __defineSetter__['prototype'] + +eval.__proto__ = function () { }; +eval['prototype'] = {}; + +// Test that we are compatible with Safari on prototypes set locally and +// on the actual prototype set using __proto__ on objects that has the +// should_have_prototype set to false. +function f() { return 42; } +f.prototype = 43; +__defineGetter__.__proto__ = f; + +// Regression test for not returning undefined. +assertEquals(__defineGetter__.prototype, 43); + +// Regression test for not crashing. 
+__defineGetter__.prototype = "foo"; +assertEquals(__defineGetter__.prototype, "foo"); diff --git a/deps/v8/test/mjsunit/regress/regress-1166.js b/deps/v8/test/mjsunit/regress/regress-1166.js new file mode 100644 index 0000000000..d75d397eaa --- /dev/null +++ b/deps/v8/test/mjsunit/regress/regress-1166.js @@ -0,0 +1,35 @@ +// Copyright 2011 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +// Deoptimization after a short-circuit logical operation in an effect +// context should not see the value of the expression. +function observe(x, y) { return x; } + +function test(x) { return observe(1, ((false || false), x + 1)); } + +for (var i = 0; i < 10000000; ++i) test(0); +test("a"); diff --git a/deps/v8/test/mjsunit/regress/regress-1167.js b/deps/v8/test/mjsunit/regress/regress-1167.js new file mode 100644 index 0000000000..8437d83bcc --- /dev/null +++ b/deps/v8/test/mjsunit/regress/regress-1167.js @@ -0,0 +1,72 @@ +// Copyright 2011 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Deoptimization after a logical not in an effect context should not see a +// value for the logical not expression. +function test0(n) { + var a = new Array(n); + for (var i = 0; i < n; ++i) { + // ~ of a non-numeric value is used to trigger deoptimization. + a[i] = void(!(delete 'object')) % ~(delete 4); + } +} + +// OSR (after deoptimization) is used to observe the stack height mismatch. +for (var i = 0; i < 5; ++i) { + for (var j = 1; j < 12; ++j) { + test0(j * 1000); + } +} + + +// Similar test with a different subexpression of unary !. +function test1(n) { + var a = new Array(n); + for (var i = 0; i < n; ++i) { + a[i] = void(!(- 'object')) % ~(delete 4); + } +} + +for (i = 0; i < 5; ++i) { + for (j = 1; j < 12; ++j) { + test1(j * 1000); + } +} + + +// A similar issue, different subexpression of unary ! (e0 !== e1 is +// translated into !(e0 == e1)) and different effect context. +function side_effect() { } +function observe(x, y) { return x; } +function test2(x) { + return observe(this, + (((side_effect.observe <= side_effect.side_effect) !== false), + x + 1)); +} + +for (var i = 0; i < 1000000; ++i) test2(0); +test2(test2); diff --git a/deps/v8/test/mjsunit/regress/regress-1170.js b/deps/v8/test/mjsunit/regress/regress-1170.js new file mode 100644 index 0000000000..8a5a9cfb19 --- /dev/null +++ b/deps/v8/test/mjsunit/regress/regress-1170.js @@ -0,0 +1,66 @@ +// Copyright 2011 the V8 project authors. All rights reserved. 
+// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +var setter_value = 0; + +__proto__.__defineSetter__("a", function(v) { setter_value = v; }); +eval("var a = 1"); +assertEquals(1, setter_value); +assertFalse(hasOwnProperty("a")); + +eval("with({}) { eval('var a = 2') }"); +assertEquals(2, setter_value); +assertFalse(hasOwnProperty("a")); + +// Function declarations are treated specially to match Safari. We do +// not call setters for them. 
+eval("function a() {}"); +assertTrue(hasOwnProperty("a")); + +__proto__.__defineSetter__("b", function(v) { assertUnreachable(); }); +try { + eval("const b = 23"); + assertUnreachable(); +} catch(e) { + assertTrue(/TypeError/.test(e)); +} +try { + eval("with({}) { eval('const b = 23') }"); + assertUnreachable(); +} catch(e) { + assertTrue(/TypeError/.test(e)); +} + +__proto__.__defineSetter__("c", function(v) { throw 42; }); +try { + eval("var c = 1"); + assertUnreachable(); +} catch(e) { + assertEquals(42, e); + assertFalse(hasOwnProperty("c")); +} + diff --git a/deps/v8/test/mjsunit/regress/regress-1172.js b/deps/v8/test/mjsunit/regress/regress-1172.js new file mode 100644 index 0000000000..f5ef67b86e --- /dev/null +++ b/deps/v8/test/mjsunit/regress/regress-1172.js @@ -0,0 +1,39 @@ +// Copyright 2011 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Check that 'constructor' property is forcefully installed on +// function's prototype even in the presence of JS accessors. + +// Note: no setters would lead to runtime exception if we ever attempt +// to use JS accessors to set 'constructor' property. +Object.prototype.__defineGetter__('constructor', function() { throw 42; }); + +function f() {} +assertSame(f, f.prototype.constructor); + +var o = new f(); +assertSame(f, o.constructor); diff --git a/deps/v8/test/mjsunit/regress/regress-1174.js b/deps/v8/test/mjsunit/regress/regress-1174.js new file mode 100644 index 0000000000..7c014bf02f --- /dev/null +++ b/deps/v8/test/mjsunit/regress/regress-1174.js @@ -0,0 +1,43 @@ +// Copyright 2011 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Flags: --allow-natives-syntax + +// Test that we do not crash when doing deoptimization of a function that has +// reloc info that only take up 1 byte per call (like KeyedStoreIC). + +function Regular() { + this[0] >>= 0; + this[1] ^= 1; +} + +function foo() { + var regular = new Regular(); + %DeoptimizeFunction(Regular); +} + +foo(); diff --git a/deps/v8/test/mjsunit/regress/regress-1176.js b/deps/v8/test/mjsunit/regress/regress-1176.js new file mode 100644 index 0000000000..58eda1bf36 --- /dev/null +++ b/deps/v8/test/mjsunit/regress/regress-1176.js @@ -0,0 +1,33 @@ +// Copyright 2011 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. 
nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"use strict"; +function strict_delete_this() { + // "delete this" is allowed in strict mode. + delete this; +} +strict_delete_this(); diff --git a/deps/v8/test/mjsunit/regress/regress-1184.js b/deps/v8/test/mjsunit/regress/regress-1184.js new file mode 100644 index 0000000000..0bb1b3c0b9 --- /dev/null +++ b/deps/v8/test/mjsunit/regress/regress-1184.js @@ -0,0 +1,47 @@ +// Copyright 2011 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. 
nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Test the case when finally clause throws another exception (stack overflow) +// which goes through some try/catch block---we need to clear v8::TryCatch +// catcher as it doesn't catch original exception any more. + +o = {}; +o.__defineGetter__('foo', function() { throw 42; }); +function f() { + try { + // throw below sets up Top::thread_local_.catcher_... + throw 42; + } finally { + // ...JS accessor traverses v8 runtime/JS boundary and + // when coming back from JS to v8 runtime, retraverses + // stack with catcher set while processing exception + // which is not caught by external try catch. 
+ try { o.foo; } catch(e) { }; + return; + } +}; +f(); diff --git a/deps/v8/test/mjsunit/strict-mode.js b/deps/v8/test/mjsunit/strict-mode.js index fbba64ed66..ab3e535ec3 100644 --- a/deps/v8/test/mjsunit/strict-mode.js +++ b/deps/v8/test/mjsunit/strict-mode.js @@ -291,6 +291,13 @@ CheckStrictMode("function strict() { var variable; delete variable; }", SyntaxError); CheckStrictMode("var variable; delete variable;", SyntaxError); +(function TestStrictDelete() { + "use strict"; + // "delete this" is allowed in strict mode and should work. + function strict_delete() { delete this; } + strict_delete(); +})(); + // Prefix unary operators other than delete, ++, -- are valid in strict mode (function StrictModeUnaryOperators() { "use strict"; @@ -438,7 +445,7 @@ repeat(10, function() { testAssignToUndefined(false); }); })(); // Not transforming this in Function.call and Function.apply. -(function testThisTransform() { +(function testThisTransformCallApply() { function non_strict() { return this; } @@ -478,3 +485,218 @@ repeat(10, function() { testAssignToUndefined(false); }); assertEquals(typeof strict.apply("Hello"), "string"); assertTrue(strict.apply(object) === object); })(); + +(function testThisTransform() { + try { + function strict() { + "use strict"; + return typeof(this); + } + function nonstrict() { + return typeof(this); + } + + // Concat to avoid symbol. 
+ var strict_name = "str" + "ict"; + var nonstrict_name = "non" + "str" + "ict"; + var strict_number = 17; + var nonstrict_number = 19; + var strict_name_get = "str" + "ict" + "get"; + var nonstrict_name_get = "non" + "str" + "ict" + "get" + var strict_number_get = 23; + var nonstrict_number_get = 29; + + function install(t) { + t.prototype.strict = strict; + t.prototype.nonstrict = nonstrict; + t.prototype[strict_number] = strict; + t.prototype[nonstrict_number] = nonstrict; + Object.defineProperty(t.prototype, strict_name_get, + { get: function() { return strict; }, + configurable: true }); + Object.defineProperty(t.prototype, nonstrict_name_get, + { get: function() { return nonstrict; }, + configurable: true }); + Object.defineProperty(t.prototype, strict_number_get, + { get: function() { return strict; }, + configurable: true }); + Object.defineProperty(t.prototype, nonstrict_number_get, + { get: function() { return nonstrict; }, + configurable: true }); + } + + function cleanup(t) { + delete t.prototype.strict; + delete t.prototype.nonstrict; + delete t.prototype[strict_number]; + delete t.prototype[nonstrict_number]; + delete t.prototype[strict_name_get]; + delete t.prototype[nonstrict_name_get]; + delete t.prototype[strict_number_get]; + delete t.prototype[nonstrict_number_get]; + } + + // Set up fakes + install(String); + install(Number); + install(Boolean) + + function callStrict(o) { + return o.strict(); + } + function callNonStrict(o) { + return o.nonstrict(); + } + function callKeyedStrict(o) { + return o[strict_name](); + } + function callKeyedNonStrict(o) { + return o[nonstrict_name](); + } + function callIndexedStrict(o) { + return o[strict_number](); + } + function callIndexedNonStrict(o) { + return o[nonstrict_number](); + } + function callStrictGet(o) { + return o.strictget(); + } + function callNonStrictGet(o) { + return o.nonstrictget(); + } + function callKeyedStrictGet(o) { + return o[strict_name_get](); + } + function callKeyedNonStrictGet(o) 
{ + return o[nonstrict_name_get](); + } + function callIndexedStrictGet(o) { + return o[strict_number_get](); + } + function callIndexedNonStrictGet(o) { + return o[nonstrict_number_get](); + } + + for (var i = 0; i < 10; i ++) { + assertEquals(("hello").strict(), "string"); + assertEquals(("hello").nonstrict(), "object"); + assertEquals(("hello")[strict_name](), "string"); + assertEquals(("hello")[nonstrict_name](), "object"); + assertEquals(("hello")[strict_number](), "string"); + assertEquals(("hello")[nonstrict_number](), "object"); + + assertEquals((10 + i).strict(), "number"); + assertEquals((10 + i).nonstrict(), "object"); + assertEquals((10 + i)[strict_name](), "number"); + assertEquals((10 + i)[nonstrict_name](), "object"); + assertEquals((10 + i)[strict_number](), "number"); + assertEquals((10 + i)[nonstrict_number](), "object"); + + assertEquals((true).strict(), "boolean"); + assertEquals((true).nonstrict(), "object"); + assertEquals((true)[strict_name](), "boolean"); + assertEquals((true)[nonstrict_name](), "object"); + assertEquals((true)[strict_number](), "boolean"); + assertEquals((true)[nonstrict_number](), "object"); + + assertEquals((false).strict(), "boolean"); + assertEquals((false).nonstrict(), "object"); + assertEquals((false)[strict_name](), "boolean"); + assertEquals((false)[nonstrict_name](), "object"); + assertEquals((false)[strict_number](), "boolean"); + assertEquals((false)[nonstrict_number](), "object"); + + assertEquals(callStrict("howdy"), "string"); + assertEquals(callNonStrict("howdy"), "object"); + assertEquals(callKeyedStrict("howdy"), "string"); + assertEquals(callKeyedNonStrict("howdy"), "object"); + assertEquals(callIndexedStrict("howdy"), "string"); + assertEquals(callIndexedNonStrict("howdy"), "object"); + + assertEquals(callStrict(17 + i), "number"); + assertEquals(callNonStrict(17 + i), "object"); + assertEquals(callKeyedStrict(17 + i), "number"); + assertEquals(callKeyedNonStrict(17 + i), "object"); + 
assertEquals(callIndexedStrict(17 + i), "number"); + assertEquals(callIndexedNonStrict(17 + i), "object"); + + assertEquals(callStrict(true), "boolean"); + assertEquals(callNonStrict(true), "object"); + assertEquals(callKeyedStrict(true), "boolean"); + assertEquals(callKeyedNonStrict(true), "object"); + assertEquals(callIndexedStrict(true), "boolean"); + assertEquals(callIndexedNonStrict(true), "object"); + + assertEquals(callStrict(false), "boolean"); + assertEquals(callNonStrict(false), "object"); + assertEquals(callKeyedStrict(false), "boolean"); + assertEquals(callKeyedNonStrict(false), "object"); + assertEquals(callIndexedStrict(false), "boolean"); + assertEquals(callIndexedNonStrict(false), "object"); + + // All of the above, with getters + assertEquals(("hello").strictget(), "string"); + assertEquals(("hello").nonstrictget(), "object"); + assertEquals(("hello")[strict_name_get](), "string"); + assertEquals(("hello")[nonstrict_name_get](), "object"); + assertEquals(("hello")[strict_number_get](), "string"); + assertEquals(("hello")[nonstrict_number_get](), "object"); + + assertEquals((10 + i).strictget(), "number"); + assertEquals((10 + i).nonstrictget(), "object"); + assertEquals((10 + i)[strict_name_get](), "number"); + assertEquals((10 + i)[nonstrict_name_get](), "object"); + assertEquals((10 + i)[strict_number_get](), "number"); + assertEquals((10 + i)[nonstrict_number_get](), "object"); + + assertEquals((true).strictget(), "boolean"); + assertEquals((true).nonstrictget(), "object"); + assertEquals((true)[strict_name_get](), "boolean"); + assertEquals((true)[nonstrict_name_get](), "object"); + assertEquals((true)[strict_number_get](), "boolean"); + assertEquals((true)[nonstrict_number_get](), "object"); + + assertEquals((false).strictget(), "boolean"); + assertEquals((false).nonstrictget(), "object"); + assertEquals((false)[strict_name_get](), "boolean"); + assertEquals((false)[nonstrict_name_get](), "object"); + assertEquals((false)[strict_number_get](), 
"boolean"); + assertEquals((false)[nonstrict_number_get](), "object"); + + assertEquals(callStrictGet("howdy"), "string"); + assertEquals(callNonStrictGet("howdy"), "object"); + assertEquals(callKeyedStrictGet("howdy"), "string"); + assertEquals(callKeyedNonStrictGet("howdy"), "object"); + assertEquals(callIndexedStrictGet("howdy"), "string"); + assertEquals(callIndexedNonStrictGet("howdy"), "object"); + + assertEquals(callStrictGet(17 + i), "number"); + assertEquals(callNonStrictGet(17 + i), "object"); + assertEquals(callKeyedStrictGet(17 + i), "number"); + assertEquals(callKeyedNonStrictGet(17 + i), "object"); + assertEquals(callIndexedStrictGet(17 + i), "number"); + assertEquals(callIndexedNonStrictGet(17 + i), "object"); + + assertEquals(callStrictGet(true), "boolean"); + assertEquals(callNonStrictGet(true), "object"); + assertEquals(callKeyedStrictGet(true), "boolean"); + assertEquals(callKeyedNonStrictGet(true), "object"); + assertEquals(callIndexedStrictGet(true), "boolean"); + assertEquals(callIndexedNonStrictGet(true), "object"); + + assertEquals(callStrictGet(false), "boolean"); + assertEquals(callNonStrictGet(false), "object"); + assertEquals(callKeyedStrictGet(false), "boolean"); + assertEquals(callKeyedNonStrictGet(false), "object"); + assertEquals(callIndexedStrictGet(false), "boolean"); + assertEquals(callIndexedNonStrictGet(false), "object"); + + } + } finally { + // Cleanup + cleanup(String); + cleanup(Number); + cleanup(Boolean); + } +})(); diff --git a/deps/v8/test/sputnik/README b/deps/v8/test/sputnik/README index 94c689bd28..50d721f36e 100644 --- a/deps/v8/test/sputnik/README +++ b/deps/v8/test/sputnik/README @@ -1,6 +1,6 @@ To run the sputniktests you must check out the test suite from googlecode.com. The test expectations are currently relative to -version 28. To get the tests run the following command within +version 94. 
To get the tests run the following command within v8/test/sputnik/ - svn co http://sputniktests.googlecode.com/svn/trunk/ -r28 sputniktests + svn co http://sputniktests.googlecode.com/svn/trunk/ -r94 sputniktests diff --git a/deps/v8/test/sputnik/sputnik.status b/deps/v8/test/sputnik/sputnik.status index 966500d069..6da87eac95 100644 --- a/deps/v8/test/sputnik/sputnik.status +++ b/deps/v8/test/sputnik/sputnik.status @@ -102,33 +102,20 @@ S7.8.4_A4.3_T5: FAIL_OK S7.8.4_A7.2_T5: FAIL_OK # We allow some keywords to be used as identifiers -S7.5.3_A1.17: FAIL_OK S7.5.3_A1.26: FAIL_OK S7.5.3_A1.18: FAIL_OK S7.5.3_A1.27: FAIL_OK -S7.5.3_A1.28: FAIL_OK -S7.5.3_A1.19: FAIL_OK -S7.5.3_A1.29: FAIL_OK -S7.5.3_A1.1: FAIL_OK -S7.5.3_A1.2: FAIL_OK -S7.5.3_A1.3: FAIL_OK -S7.5.3_A1.4: FAIL_OK S7.5.3_A1.5: FAIL_OK -S7.5.3_A1.8: FAIL_OK S7.5.3_A1.9: FAIL_OK S7.5.3_A1.10: FAIL_OK S7.5.3_A1.11: FAIL_OK +# native +S7.5.3_A1.20: FAIL_OK S7.5.3_A1.21: FAIL_OK -S7.5.3_A1.12: FAIL_OK -S7.5.3_A1.30: FAIL_OK -S7.5.3_A1.31: FAIL_OK -S7.5.3_A1.13: FAIL_OK S7.5.3_A1.22: FAIL_OK S7.5.3_A1.23: FAIL_OK -S7.5.3_A1.14: FAIL_OK S7.5.3_A1.15: FAIL_OK S7.5.3_A1.24: FAIL_OK -S7.5.3_A1.25: FAIL_OK S7.5.3_A1.16: FAIL_OK # This checks for non-262 behavior @@ -199,10 +186,40 @@ S9.9_A2: FAIL_OK S15.1.3.2_A2.5_T1: PASS, SKIP if $mode == debug S15.1.3.1_A2.5_T1: PASS, SKIP if $mode == debug +# V8 Bug: http://code.google.com/p/v8/issues/detail?id=1196 +S8.7_A5_T2: FAIL + +# V8 bugs: http://code.google.com/p/v8/issues/detail?id=1198 +# V8 should not wrap this when calling builtin functions +S15.2.4.3_A12: FAIL +S15.2.4.7_A13: FAIL +# Object.prototype.toString +S15.2.4.2_A12: FAIL +S15.2.4.2_A13: FAIL +# Object.prototype.toLocaleString +S15.2.4.3_A13: FAIL +S15.2.4.4_A13: FAIL +S15.2.4.4_A12: FAIL +# Object.prototype.propertyIsEnumerable +S15.2.4.7_A12: FAIL +# Object.prototype.hasOwnProperty +S15.2.4.5_A12: FAIL +S15.2.4.5_A13: FAIL +# Object.prototype.isPrototypeOf +S15.2.4.6_A13: FAIL +S15.2.4.6_A12: FAIL + +# 
Invalid test case (recent change adding var changes semantics) +S8.3_A1_T1: FAIL +# Test bug: http://code.google.com/p/sputniktests/issues/detail?id=35 +S15.5.4.8_A1_T1: FAIL +# Invalid test case (recent change adding var changes semantics) +S15.3_A3_T1: FAIL +# Invalid test case (recent change adding var changes semantics) +S15.3_A3_T3: FAIL # These tests fail because we had to add bugs to be compatible with JSC. See # http://code.google.com/p/chromium/issues/detail?id=1717 -S15.4.4_A1.1_T2: FAIL_OK S15.5.4.1_A1_T2: FAIL_OK S15.5.4_A1: FAIL_OK S15.5.4_A3: FAIL_OK diff --git a/deps/v8/test/sputnik/testcfg.py b/deps/v8/test/sputnik/testcfg.py index f7a5edcca6..31e4b226f8 100644 --- a/deps/v8/test/sputnik/testcfg.py +++ b/deps/v8/test/sputnik/testcfg.py @@ -88,7 +88,8 @@ class SputnikTestConfiguration(test.TestConfiguration): sys.path.append(modroot) import sputnik globals()['sputnik'] = sputnik - test_suite = sputnik.TestSuite(testroot) + # Do not run strict mode tests yet. TODO(mmaly) + test_suite = sputnik.TestSuite(testroot, False) test_suite.Validate() tests = test_suite.EnumerateTests([]) result = [] diff --git a/deps/v8/tools/disasm.py b/deps/v8/tools/disasm.py new file mode 100644 index 0000000000..c326382dfb --- /dev/null +++ b/deps/v8/tools/disasm.py @@ -0,0 +1,92 @@ +#!/usr/bin/env python +# +# Copyright 2011 the V8 project authors. All rights reserved. +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following +# disclaimer in the documentation and/or other materials provided +# with the distribution. +# * Neither the name of Google Inc. 
nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import os +import re +import subprocess +import tempfile + + +# Avoid using the slow (google-specific) wrapper around objdump. +OBJDUMP_BIN = "/usr/bin/objdump" +if not os.path.exists(OBJDUMP_BIN): + OBJDUMP_BIN = "objdump" + + +_COMMON_DISASM_OPTIONS = ["-M", "intel-mnemonic", "-C"] + +_DISASM_HEADER_RE = re.compile(r"[a-f0-9]+\s+<.*:$") +_DISASM_LINE_RE = re.compile(r"\s*([a-f0-9]+):\s*(\S.*)") + +# Keys must match constants in Logger::LogCodeInfo. +_ARCH_MAP = { + "ia32": "-m i386", + "x64": "-m i386 -M x86-64", + "arm": "-m arm" # Not supported by our objdump build. +} + + +def GetDisasmLines(filename, offset, size, arch, inplace): + tmp_name = None + if not inplace: + # Create a temporary file containing a copy of the code. 
+ assert arch in _ARCH_MAP, "Unsupported architecture '%s'" % arch + arch_flags = _ARCH_MAP[arch] + tmp_name = tempfile.mktemp(".v8code") + command = "dd if=%s of=%s bs=1 count=%d skip=%d && " \ + "%s %s -D -b binary %s %s" % ( + filename, tmp_name, size, offset, + OBJDUMP_BIN, ' '.join(_COMMON_DISASM_OPTIONS), arch_flags, + tmp_name) + else: + command = "%s %s --start-address=%d --stop-address=%d -d %s " % ( + OBJDUMP_BIN, ' '.join(_COMMON_DISASM_OPTIONS), + offset, + offset + size, + filename) + process = subprocess.Popen(command, + shell=True, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT) + out, err = process.communicate() + lines = out.split("\n") + header_line = 0 + for i, line in enumerate(lines): + if _DISASM_HEADER_RE.match(line): + header_line = i + break + if tmp_name: + os.unlink(tmp_name) + split_lines = [] + for line in lines[header_line + 1:]: + match = _DISASM_LINE_RE.match(line) + if match: + line_address = int(match.group(1), 16) + split_lines.append((line_address, match.group(2))) + return split_lines diff --git a/deps/v8/tools/grokdump.py b/deps/v8/tools/grokdump.py new file mode 100755 index 0000000000..de681b2b4d --- /dev/null +++ b/deps/v8/tools/grokdump.py @@ -0,0 +1,840 @@ +#!/usr/bin/env python +# +# Copyright 2011 the V8 project authors. All rights reserved. +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following +# disclaimer in the documentation and/or other materials provided +# with the distribution. +# * Neither the name of Google Inc. 
nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import ctypes +import mmap +import optparse +import os +import disasm +import sys +import types +import codecs +import re + + +USAGE="""usage: %prog [OPTION]... + +Minidump analyzer. + +Shows the processor state at the point of exception including the +stack of the active thread and the referenced objects in the V8 +heap. Code objects are disassembled and the addresses linked from the +stack (pushed return addresses) are marked with "=>". 
+ + +Examples: + $ %prog 12345678-1234-1234-1234-123456789abcd-full.dmp +""" + +DEBUG=False + + +def DebugPrint(s): + if not DEBUG: return + print s + + +class Descriptor(object): + """Descriptor of a structure in a memory.""" + + def __init__(self, fields): + self.fields = fields + self.is_flexible = False + for _, type_or_func in fields: + if isinstance(type_or_func, types.FunctionType): + self.is_flexible = True + break + if not self.is_flexible: + self.ctype = Descriptor._GetCtype(fields) + self.size = ctypes.sizeof(self.ctype) + + def Read(self, memory, offset): + if self.is_flexible: + fields_copy = self.fields[:] + last = 0 + for name, type_or_func in fields_copy: + if isinstance(type_or_func, types.FunctionType): + partial_ctype = Descriptor._GetCtype(fields_copy[:last]) + partial_object = partial_ctype.from_buffer(memory, offset) + type = type_or_func(partial_object) + if type is not None: + fields_copy[last] = (name, type) + last += 1 + else: + last += 1 + complete_ctype = Descriptor._GetCtype(fields_copy[:last]) + else: + complete_ctype = self.ctype + return complete_ctype.from_buffer(memory, offset) + + @staticmethod + def _GetCtype(fields): + class Raw(ctypes.Structure): + _fields_ = fields + _pack_ = 1 + + def __str__(self): + return "{" + ", ".join("%s: %s" % (field, self.__getattribute__(field)) + for field, _ in Raw._fields_) + "}" + return Raw + + +# Set of structures and constants that describe the layout of minidump +# files. Based on MSDN and Google Breakpad. 
+ +MINIDUMP_HEADER = Descriptor([ + ("signature", ctypes.c_uint32), + ("version", ctypes.c_uint32), + ("stream_count", ctypes.c_uint32), + ("stream_directories_rva", ctypes.c_uint32), + ("checksum", ctypes.c_uint32), + ("time_date_stampt", ctypes.c_uint32), + ("flags", ctypes.c_uint64) +]) + +MINIDUMP_LOCATION_DESCRIPTOR = Descriptor([ + ("data_size", ctypes.c_uint32), + ("rva", ctypes.c_uint32) +]) + +MINIDUMP_DIRECTORY = Descriptor([ + ("stream_type", ctypes.c_uint32), + ("location", MINIDUMP_LOCATION_DESCRIPTOR.ctype) +]) + +MD_EXCEPTION_MAXIMUM_PARAMETERS = 15 + +MINIDUMP_EXCEPTION = Descriptor([ + ("code", ctypes.c_uint32), + ("flags", ctypes.c_uint32), + ("record", ctypes.c_uint64), + ("address", ctypes.c_uint64), + ("parameter_count", ctypes.c_uint32), + ("unused_alignment", ctypes.c_uint32), + ("information", ctypes.c_uint64 * MD_EXCEPTION_MAXIMUM_PARAMETERS) +]) + +MINIDUMP_EXCEPTION_STREAM = Descriptor([ + ("thread_id", ctypes.c_uint32), + ("unused_alignment", ctypes.c_uint32), + ("exception", MINIDUMP_EXCEPTION.ctype), + ("thread_context", MINIDUMP_LOCATION_DESCRIPTOR.ctype) +]) + +# Stream types. 
+MD_UNUSED_STREAM = 0 +MD_RESERVED_STREAM_0 = 1 +MD_RESERVED_STREAM_1 = 2 +MD_THREAD_LIST_STREAM = 3 +MD_MODULE_LIST_STREAM = 4 +MD_MEMORY_LIST_STREAM = 5 +MD_EXCEPTION_STREAM = 6 +MD_SYSTEM_INFO_STREAM = 7 +MD_THREAD_EX_LIST_STREAM = 8 +MD_MEMORY_64_LIST_STREAM = 9 +MD_COMMENT_STREAM_A = 10 +MD_COMMENT_STREAM_W = 11 +MD_HANDLE_DATA_STREAM = 12 +MD_FUNCTION_TABLE_STREAM = 13 +MD_UNLOADED_MODULE_LIST_STREAM = 14 +MD_MISC_INFO_STREAM = 15 +MD_MEMORY_INFO_LIST_STREAM = 16 +MD_THREAD_INFO_LIST_STREAM = 17 +MD_HANDLE_OPERATION_LIST_STREAM = 18 + +MD_FLOATINGSAVEAREA_X86_REGISTERAREA_SIZE = 80 + +MINIDUMP_FLOATING_SAVE_AREA_X86 = Descriptor([ + ("control_word", ctypes.c_uint32), + ("status_word", ctypes.c_uint32), + ("tag_word", ctypes.c_uint32), + ("error_offset", ctypes.c_uint32), + ("error_selector", ctypes.c_uint32), + ("data_offset", ctypes.c_uint32), + ("data_selector", ctypes.c_uint32), + ("register_area", ctypes.c_uint8 * MD_FLOATINGSAVEAREA_X86_REGISTERAREA_SIZE), + ("cr0_npx_state", ctypes.c_uint32) +]) + +MD_CONTEXT_X86_EXTENDED_REGISTERS_SIZE = 512 + +# Context flags. +MD_CONTEXT_X86 = 0x00010000 +MD_CONTEXT_X86_CONTROL = (MD_CONTEXT_X86 | 0x00000001) +MD_CONTEXT_X86_INTEGER = (MD_CONTEXT_X86 | 0x00000002) +MD_CONTEXT_X86_SEGMENTS = (MD_CONTEXT_X86 | 0x00000004) +MD_CONTEXT_X86_FLOATING_POINT = (MD_CONTEXT_X86 | 0x00000008) +MD_CONTEXT_X86_DEBUG_REGISTERS = (MD_CONTEXT_X86 | 0x00000010) +MD_CONTEXT_X86_EXTENDED_REGISTERS = (MD_CONTEXT_X86 | 0x00000020) + +def EnableOnFlag(type, flag): + return lambda o: [None, type][int((o.context_flags & flag) != 0)] + +MINIDUMP_CONTEXT_X86 = Descriptor([ + ("context_flags", ctypes.c_uint32), + # MD_CONTEXT_X86_DEBUG_REGISTERS. 
+ ("dr0", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_DEBUG_REGISTERS)), + ("dr1", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_DEBUG_REGISTERS)), + ("dr2", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_DEBUG_REGISTERS)), + ("dr3", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_DEBUG_REGISTERS)), + ("dr6", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_DEBUG_REGISTERS)), + ("dr7", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_DEBUG_REGISTERS)), + # MD_CONTEXT_X86_FLOATING_POINT. + ("float_save", EnableOnFlag(MINIDUMP_FLOATING_SAVE_AREA_X86.ctype, + MD_CONTEXT_X86_FLOATING_POINT)), + # MD_CONTEXT_X86_SEGMENTS. + ("gs", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_SEGMENTS)), + ("fs", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_SEGMENTS)), + ("es", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_SEGMENTS)), + ("ds", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_SEGMENTS)), + # MD_CONTEXT_X86_INTEGER. + ("edi", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_INTEGER)), + ("esi", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_INTEGER)), + ("ebx", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_INTEGER)), + ("edx", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_INTEGER)), + ("ecx", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_INTEGER)), + ("eax", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_INTEGER)), + # MD_CONTEXT_X86_CONTROL. + ("ebp", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_CONTROL)), + ("eip", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_CONTROL)), + ("cs", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_CONTROL)), + ("eflags", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_CONTROL)), + ("esp", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_CONTROL)), + ("ss", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_CONTROL)), + # MD_CONTEXT_X86_EXTENDED_REGISTERS. 
+ ("extended_registers", + EnableOnFlag(ctypes.c_uint8 * MD_CONTEXT_X86_EXTENDED_REGISTERS_SIZE, + MD_CONTEXT_X86_EXTENDED_REGISTERS)) +]) + +MINIDUMP_MEMORY_DESCRIPTOR = Descriptor([ + ("start", ctypes.c_uint64), + ("memory", MINIDUMP_LOCATION_DESCRIPTOR.ctype) +]) + +MINIDUMP_MEMORY_DESCRIPTOR64 = Descriptor([ + ("start", ctypes.c_uint64), + ("size", ctypes.c_uint64) +]) + +MINIDUMP_MEMORY_LIST = Descriptor([ + ("range_count", ctypes.c_uint32), + ("ranges", lambda m: MINIDUMP_MEMORY_DESCRIPTOR.ctype * m.range_count) +]) + +MINIDUMP_MEMORY_LIST64 = Descriptor([ + ("range_count", ctypes.c_uint64), + ("base_rva", ctypes.c_uint64), + ("ranges", lambda m: MINIDUMP_MEMORY_DESCRIPTOR64.ctype * m.range_count) +]) + +MINIDUMP_THREAD = Descriptor([ + ("id", ctypes.c_uint32), + ("suspend_count", ctypes.c_uint32), + ("priority_class", ctypes.c_uint32), + ("priority", ctypes.c_uint32), + ("ted", ctypes.c_uint64), + ("stack", MINIDUMP_MEMORY_DESCRIPTOR.ctype), + ("context", MINIDUMP_LOCATION_DESCRIPTOR.ctype) +]) + +MINIDUMP_THREAD_LIST = Descriptor([ + ("thread_count", ctypes.c_uint32), + ("threads", lambda t: MINIDUMP_THREAD.ctype * t.thread_count) +]) + + +class MinidumpReader(object): + """Minidump (.dmp) reader.""" + + _HEADER_MAGIC = 0x504d444d + + def __init__(self, options, minidump_name): + self.minidump_name = minidump_name + self.minidump_file = open(minidump_name, "r") + self.minidump = mmap.mmap(self.minidump_file.fileno(), 0, mmap.MAP_PRIVATE) + self.header = MINIDUMP_HEADER.Read(self.minidump, 0) + if self.header.signature != MinidumpReader._HEADER_MAGIC: + print >>sys.stderr, "Warning: unsupported minidump header magic" + DebugPrint(self.header) + directories = [] + offset = self.header.stream_directories_rva + for _ in xrange(self.header.stream_count): + directories.append(MINIDUMP_DIRECTORY.Read(self.minidump, offset)) + offset += MINIDUMP_DIRECTORY.size + self.exception = None + self.exception_context = None + self.memory_list = None + self.thread_map = {} + 
for d in directories: + DebugPrint(d) + # TODO(vitalyr): extract system info including CPU features. + if d.stream_type == MD_EXCEPTION_STREAM: + self.exception = MINIDUMP_EXCEPTION_STREAM.Read( + self.minidump, d.location.rva) + DebugPrint(self.exception) + self.exception_context = MINIDUMP_CONTEXT_X86.Read( + self.minidump, self.exception.thread_context.rva) + DebugPrint(self.exception_context) + elif d.stream_type == MD_THREAD_LIST_STREAM: + thread_list = MINIDUMP_THREAD_LIST.Read(self.minidump, d.location.rva) + assert ctypes.sizeof(thread_list) == d.location.data_size + DebugPrint(thread_list) + for thread in thread_list.threads: + DebugPrint(thread) + self.thread_map[thread.id] = thread + elif d.stream_type == MD_MEMORY_LIST_STREAM: + print >>sys.stderr, "Warning: not a full minidump" + ml = MINIDUMP_MEMORY_LIST.Read(self.minidump, d.location.rva) + DebugPrint(ml) + for m in ml.ranges: + DebugPrint(m) + elif d.stream_type == MD_MEMORY_64_LIST_STREAM: + assert self.memory_list is None + self.memory_list = MINIDUMP_MEMORY_LIST64.Read( + self.minidump, d.location.rva) + assert ctypes.sizeof(self.memory_list) == d.location.data_size + DebugPrint(self.memory_list) + + def IsValidAddress(self, address): + return self.FindLocation(address) is not None + + def ReadU8(self, address): + location = self.FindLocation(address) + return ctypes.c_uint8.from_buffer(self.minidump, location).value + + def ReadU32(self, address): + location = self.FindLocation(address) + return ctypes.c_uint32.from_buffer(self.minidump, location).value + + def ReadBytes(self, address, size): + location = self.FindLocation(address) + return self.minidump[location:location + size] + + def FindLocation(self, address): + # TODO(vitalyr): only works for full minidumps (...64 structure variants). 
+ offset = 0 + for r in self.memory_list.ranges: + if r.start <= address < r.start + r.size: + return self.memory_list.base_rva + offset + address - r.start + offset += r.size + return None + + def GetDisasmLines(self, address, size): + location = self.FindLocation(address) + if location is None: return [] + return disasm.GetDisasmLines(self.minidump_name, + location, + size, + "ia32", + False) + + + def Dispose(self): + self.minidump.close() + self.minidump_file.close() + + +# List of V8 instance types. Obtained by adding the code below to any .cc file. +# +# #define DUMP_TYPE(T) printf("%d: \"%s\",\n", T, #T); +# struct P { +# P() { +# printf("{\n"); +# INSTANCE_TYPE_LIST(DUMP_TYPE) +# printf("}\n"); +# } +# }; +# static P p; +INSTANCE_TYPES = { + 64: "SYMBOL_TYPE", + 68: "ASCII_SYMBOL_TYPE", + 65: "CONS_SYMBOL_TYPE", + 69: "CONS_ASCII_SYMBOL_TYPE", + 66: "EXTERNAL_SYMBOL_TYPE", + 74: "EXTERNAL_SYMBOL_WITH_ASCII_DATA_TYPE", + 70: "EXTERNAL_ASCII_SYMBOL_TYPE", + 0: "STRING_TYPE", + 4: "ASCII_STRING_TYPE", + 1: "CONS_STRING_TYPE", + 5: "CONS_ASCII_STRING_TYPE", + 2: "EXTERNAL_STRING_TYPE", + 10: "EXTERNAL_STRING_WITH_ASCII_DATA_TYPE", + 6: "EXTERNAL_ASCII_STRING_TYPE", + 6: "PRIVATE_EXTERNAL_ASCII_STRING_TYPE", + 128: "MAP_TYPE", + 129: "CODE_TYPE", + 130: "ODDBALL_TYPE", + 131: "JS_GLOBAL_PROPERTY_CELL_TYPE", + 132: "HEAP_NUMBER_TYPE", + 133: "PROXY_TYPE", + 134: "BYTE_ARRAY_TYPE", + 135: "PIXEL_ARRAY_TYPE", + 136: "EXTERNAL_BYTE_ARRAY_TYPE", + 137: "EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE", + 138: "EXTERNAL_SHORT_ARRAY_TYPE", + 139: "EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE", + 140: "EXTERNAL_INT_ARRAY_TYPE", + 141: "EXTERNAL_UNSIGNED_INT_ARRAY_TYPE", + 142: "EXTERNAL_FLOAT_ARRAY_TYPE", + 143: "FILLER_TYPE", + 144: "ACCESSOR_INFO_TYPE", + 145: "ACCESS_CHECK_INFO_TYPE", + 146: "INTERCEPTOR_INFO_TYPE", + 147: "CALL_HANDLER_INFO_TYPE", + 148: "FUNCTION_TEMPLATE_INFO_TYPE", + 149: "OBJECT_TEMPLATE_INFO_TYPE", + 150: "SIGNATURE_INFO_TYPE", + 151: "TYPE_SWITCH_INFO_TYPE", + 152: 
"SCRIPT_TYPE", + 153: "CODE_CACHE_TYPE", + 156: "FIXED_ARRAY_TYPE", + 157: "SHARED_FUNCTION_INFO_TYPE", + 158: "JS_MESSAGE_OBJECT_TYPE", + 159: "JS_VALUE_TYPE", + 160: "JS_OBJECT_TYPE", + 161: "JS_CONTEXT_EXTENSION_OBJECT_TYPE", + 162: "JS_GLOBAL_OBJECT_TYPE", + 163: "JS_BUILTINS_OBJECT_TYPE", + 164: "JS_GLOBAL_PROXY_TYPE", + 165: "JS_ARRAY_TYPE", + 166: "JS_REGEXP_TYPE", + 167: "JS_FUNCTION_TYPE", + 154: "DEBUG_INFO_TYPE", + 155: "BREAK_POINT_INFO_TYPE", +} + + +class Printer(object): + """Printer with indentation support.""" + + def __init__(self): + self.indent = 0 + + def Indent(self): + self.indent += 2 + + def Dedent(self): + self.indent -= 2 + + def Print(self, string): + print "%s%s" % (self._IndentString(), string) + + def PrintLines(self, lines): + indent = self._IndentString() + print "\n".join("%s%s" % (indent, line) for line in lines) + + def _IndentString(self): + return self.indent * " " + + +ADDRESS_RE = re.compile(r"0x[0-9a-fA-F]+") + + +def FormatDisasmLine(start, heap, line): + line_address = start + line[0] + stack_slot = heap.stack_map.get(line_address) + marker = " " + if stack_slot: + marker = "=>" + code = AnnotateAddresses(heap, line[1]) + return "%s%08x %08x: %s" % (marker, line_address, line[0], code) + + +def AnnotateAddresses(heap, line): + extra = [] + for m in ADDRESS_RE.finditer(line): + maybe_address = int(m.group(0), 16) + object = heap.FindObject(maybe_address) + if not object: continue + extra.append(str(object)) + if len(extra) == 0: return line + return "%s ;; %s" % (line, ", ".join(extra)) + + +class HeapObject(object): + def __init__(self, heap, map, address): + self.heap = heap + self.map = map + self.address = address + + def Is(self, cls): + return isinstance(self, cls) + + def Print(self, p): + p.Print(str(self)) + + def __str__(self): + return "HeapObject(%08x, %s)" % (self.address, + INSTANCE_TYPES[self.map.instance_type]) + + def ObjectField(self, offset): + field_value = self.heap.reader.ReadU32(self.address + offset) 
+ return self.heap.FindObjectOrSmi(field_value) + + def SmiField(self, offset): + field_value = self.heap.reader.ReadU32(self.address + offset) + assert (field_value & 1) == 0 + return field_value / 2 + + +class Map(HeapObject): + INSTANCE_TYPE_OFFSET = 8 + + def __init__(self, heap, map, address): + HeapObject.__init__(self, heap, map, address) + self.instance_type = \ + heap.reader.ReadU8(self.address + Map.INSTANCE_TYPE_OFFSET) + + +class String(HeapObject): + LENGTH_OFFSET = 4 + + def __init__(self, heap, map, address): + HeapObject.__init__(self, heap, map, address) + self.length = self.SmiField(String.LENGTH_OFFSET) + + def GetChars(self): + return "?string?" + + def Print(self, p): + p.Print(str(self)) + + def __str__(self): + return "\"%s\"" % self.GetChars() + + +class SeqString(String): + CHARS_OFFSET = 12 + + def __init__(self, heap, map, address): + String.__init__(self, heap, map, address) + self.chars = heap.reader.ReadBytes(self.address + SeqString.CHARS_OFFSET, + self.length) + + def GetChars(self): + return self.chars + + +class ExternalString(String): + RESOURCE_OFFSET = 12 + + WEBKIT_RESOUCE_STRING_IMPL_OFFSET = 4 + WEBKIT_STRING_IMPL_CHARS_OFFSET = 8 + + def __init__(self, heap, map, address): + String.__init__(self, heap, map, address) + reader = heap.reader + self.resource = \ + reader.ReadU32(self.address + ExternalString.RESOURCE_OFFSET) + self.chars = "?external string?" 
+ if not reader.IsValidAddress(self.resource): return + string_impl_address = self.resource + \ + ExternalString.WEBKIT_RESOUCE_STRING_IMPL_OFFSET + if not reader.IsValidAddress(string_impl_address): return + string_impl = reader.ReadU32(string_impl_address) + chars_ptr_address = string_impl + \ + ExternalString.WEBKIT_STRING_IMPL_CHARS_OFFSET + if not reader.IsValidAddress(chars_ptr_address): return + chars_ptr = reader.ReadU32(chars_ptr_address) + if not reader.IsValidAddress(chars_ptr): return + raw_chars = reader.ReadBytes(chars_ptr, 2 * self.length) + self.chars = codecs.getdecoder("utf16")(raw_chars)[0] + + def GetChars(self): + return self.chars + + +class ConsString(String): + LEFT_OFFSET = 12 + RIGHT_OFFSET = 16 + + def __init__(self, heap, map, address): + String.__init__(self, heap, map, address) + self.left = self.ObjectField(ConsString.LEFT_OFFSET) + self.right = self.ObjectField(ConsString.RIGHT_OFFSET) + + def GetChars(self): + return self.left.GetChars() + self.right.GetChars() + + +class Oddball(HeapObject): + TO_STRING_OFFSET = 4 + + def __init__(self, heap, map, address): + HeapObject.__init__(self, heap, map, address) + self.to_string = self.ObjectField(Oddball.TO_STRING_OFFSET) + + def Print(self, p): + p.Print(str(self)) + + def __str__(self): + return "<%s>" % self.to_string.GetChars() + + +class FixedArray(HeapObject): + LENGTH_OFFSET = 4 + ELEMENTS_OFFSET = 8 + + def __init__(self, heap, map, address): + HeapObject.__init__(self, heap, map, address) + self.length = self.SmiField(FixedArray.LENGTH_OFFSET) + + def Print(self, p): + p.Print("FixedArray(%08x) {" % self.address) + p.Indent() + p.Print("length: %d" % self.length) + for i in xrange(self.length): + offset = FixedArray.ELEMENTS_OFFSET + 4 * i + p.Print("[%08d] = %s" % (i, self.ObjectField(offset))) + p.Dedent() + p.Print("}") + + def __str__(self): + return "FixedArray(%08x, length=%d)" % (self.address, self.length) + + +class JSFunction(HeapObject): + CODE_ENTRY_OFFSET = 12 + 
SHARED_OFFSET = 20 + + def __init__(self, heap, map, address): + HeapObject.__init__(self, heap, map, address) + code_entry = \ + heap.reader.ReadU32(self.address + JSFunction.CODE_ENTRY_OFFSET) + self.code = heap.FindObject(code_entry - Code.ENTRY_OFFSET + 1) + self.shared = self.ObjectField(JSFunction.SHARED_OFFSET) + + def Print(self, p): + source = "\n".join(" %s" % line for line in self._GetSource().split("\n")) + p.Print("JSFunction(%08x) {" % self.address) + p.Indent() + p.Print("inferred name: %s" % self.shared.inferred_name) + if self.shared.script.Is(Script) and self.shared.script.name.Is(String): + p.Print("script name: %s" % self.shared.script.name) + p.Print("source:") + p.PrintLines(self._GetSource().split("\n")) + p.Print("code:") + self.code.Print(p) + if self.code != self.shared.code: + p.Print("unoptimized code:") + self.shared.code.Print(p) + p.Dedent() + p.Print("}") + + def __str__(self): + inferred_name = "" + if self.shared.Is(SharedFunctionInfo): + inferred_name = self.shared.inferred_name + return "JSFunction(%08x, %s)" % (self.address, inferred_name) + + def _GetSource(self): + source = "?source?" 
+ start = self.shared.start_position + end = self.shared.end_position + if not self.shared.script.Is(Script): return source + script_source = self.shared.script.source + if not script_source.Is(String): return source + return script_source.GetChars()[start:end] + + +class SharedFunctionInfo(HeapObject): + CODE_OFFSET = 2 * 4 + SCRIPT_OFFSET = 7 * 4 + INFERRED_NAME_OFFSET = 9 * 4 + START_POSITION_AND_TYPE_OFFSET = 17 * 4 + END_POSITION_OFFSET = 18 * 4 + + def __init__(self, heap, map, address): + HeapObject.__init__(self, heap, map, address) + self.code = self.ObjectField(SharedFunctionInfo.CODE_OFFSET) + self.script = self.ObjectField(SharedFunctionInfo.SCRIPT_OFFSET) + self.inferred_name = \ + self.ObjectField(SharedFunctionInfo.INFERRED_NAME_OFFSET) + start_position_and_type = \ + self.SmiField(SharedFunctionInfo.START_POSITION_AND_TYPE_OFFSET) + self.start_position = start_position_and_type >> 2 + self.end_position = self.SmiField(SharedFunctionInfo.END_POSITION_OFFSET) + + +class Script(HeapObject): + SOURCE_OFFSET = 4 + NAME_OFFSET = 8 + + def __init__(self, heap, map, address): + HeapObject.__init__(self, heap, map, address) + self.source = self.ObjectField(Script.SOURCE_OFFSET) + self.name = self.ObjectField(Script.NAME_OFFSET) + + +class Code(HeapObject): + INSTRUCTION_SIZE_OFFSET = 4 + ENTRY_OFFSET = 32 + + def __init__(self, heap, map, address): + HeapObject.__init__(self, heap, map, address) + self.entry = self.address + Code.ENTRY_OFFSET + self.instruction_size = \ + heap.reader.ReadU32(self.address + Code.INSTRUCTION_SIZE_OFFSET) + + def Print(self, p): + lines = self.heap.reader.GetDisasmLines(self.entry, self.instruction_size) + p.Print("Code(%08x) {" % self.address) + p.Indent() + p.Print("instruction_size: %d" % self.instruction_size) + p.PrintLines(self._FormatLine(line) for line in lines) + p.Dedent() + p.Print("}") + + def _FormatLine(self, line): + return FormatDisasmLine(self.entry, self.heap, line) + + +class V8Heap(object): + CLASS_MAP = { + 
"SYMBOL_TYPE": SeqString, + "ASCII_SYMBOL_TYPE": SeqString, + "CONS_SYMBOL_TYPE": ConsString, + "CONS_ASCII_SYMBOL_TYPE": ConsString, + "EXTERNAL_SYMBOL_TYPE": ExternalString, + "EXTERNAL_SYMBOL_WITH_ASCII_DATA_TYPE": ExternalString, + "EXTERNAL_ASCII_SYMBOL_TYPE": ExternalString, + "STRING_TYPE": SeqString, + "ASCII_STRING_TYPE": SeqString, + "CONS_STRING_TYPE": ConsString, + "CONS_ASCII_STRING_TYPE": ConsString, + "EXTERNAL_STRING_TYPE": ExternalString, + "EXTERNAL_STRING_WITH_ASCII_DATA_TYPE": ExternalString, + "EXTERNAL_ASCII_STRING_TYPE": ExternalString, + + "MAP_TYPE": Map, + "ODDBALL_TYPE": Oddball, + "FIXED_ARRAY_TYPE": FixedArray, + "JS_FUNCTION_TYPE": JSFunction, + "SHARED_FUNCTION_INFO_TYPE": SharedFunctionInfo, + "SCRIPT_TYPE": Script, + "CODE_TYPE": Code + } + + def __init__(self, reader, stack_map): + self.reader = reader + self.stack_map = stack_map + self.objects = {} + + def FindObjectOrSmi(self, tagged_address): + if (tagged_address & 1) == 0: return tagged_address / 2 + return self.FindObject(tagged_address) + + def FindObject(self, tagged_address): + if tagged_address in self.objects: + return self.objects[tagged_address] + if (tagged_address & 1) != 1: return None + address = tagged_address - 1 + if not self.reader.IsValidAddress(address): return None + map_tagged_address = self.reader.ReadU32(address) + if tagged_address == map_tagged_address: + # Meta map? 
+ meta_map = Map(self, None, address) + instance_type_name = INSTANCE_TYPES.get(meta_map.instance_type) + if instance_type_name != "MAP_TYPE": return None + meta_map.map = meta_map + object = meta_map + else: + map = self.FindObject(map_tagged_address) + if map is None: return None + instance_type_name = INSTANCE_TYPES.get(map.instance_type) + if instance_type_name is None: return None + cls = V8Heap.CLASS_MAP.get(instance_type_name, HeapObject) + object = cls(self, map, address) + self.objects[tagged_address] = object + return object + + +EIP_PROXIMITY = 64 + + +def AnalyzeMinidump(options, minidump_name): + reader = MinidumpReader(options, minidump_name) + DebugPrint("========================================") + if reader.exception is None: + print "Minidump has no exception info" + return + print "Exception info:" + exception_thread = reader.thread_map[reader.exception.thread_id] + print " thread id: %d" % exception_thread.id + print " code: %08X" % reader.exception.exception.code + print " context:" + print " eax: %08x" % reader.exception_context.eax + print " ebx: %08x" % reader.exception_context.ebx + print " ecx: %08x" % reader.exception_context.ecx + print " edx: %08x" % reader.exception_context.edx + print " edi: %08x" % reader.exception_context.edi + print " esi: %08x" % reader.exception_context.esi + print " ebp: %08x" % reader.exception_context.ebp + print " esp: %08x" % reader.exception_context.esp + print " eip: %08x" % reader.exception_context.eip + # TODO(vitalyr): decode eflags. 
+ print " eflags: %s" % bin(reader.exception_context.eflags)[2:] + print + + stack_bottom = exception_thread.stack.start + \ + exception_thread.stack.memory.data_size + stack_map = {reader.exception_context.eip: -1} + for slot in xrange(reader.exception_context.esp, stack_bottom, 4): + maybe_address = reader.ReadU32(slot) + if not maybe_address in stack_map: + stack_map[maybe_address] = slot + heap = V8Heap(reader, stack_map) + + print "Disassembly around exception.eip:" + start = reader.exception_context.eip - EIP_PROXIMITY + lines = reader.GetDisasmLines(start, 2 * EIP_PROXIMITY) + for line in lines: + print FormatDisasmLine(start, heap, line) + print + + print "Annotated stack (from exception.esp to bottom):" + for slot in xrange(reader.exception_context.esp, stack_bottom, 4): + maybe_address = reader.ReadU32(slot) + heap_object = heap.FindObject(maybe_address) + print "%08x: %08x" % (slot, maybe_address) + if heap_object: + heap_object.Print(Printer()) + print + + reader.Dispose() + + +if __name__ == "__main__": + parser = optparse.OptionParser(USAGE) + options, args = parser.parse_args() + if len(args) != 1: + parser.print_help() + sys.exit(1) + AnalyzeMinidump(options, args[0]) diff --git a/deps/v8/tools/gyp/v8.gyp b/deps/v8/tools/gyp/v8.gyp index 15185671b4..6dab52d877 100644 --- a/deps/v8/tools/gyp/v8.gyp +++ b/deps/v8/tools/gyp/v8.gyp @@ -598,6 +598,8 @@ '../../src/arm/lithium-arm.h', '../../src/arm/lithium-codegen-arm.cc', '../../src/arm/lithium-codegen-arm.h', + '../../src/arm/lithium-gap-resolver-arm.cc', + '../../src/arm/lithium-gap-resolver-arm.h', '../../src/arm/macro-assembler-arm.cc', '../../src/arm/macro-assembler-arm.h', '../../src/arm/regexp-macro-assembler-arm.cc', diff --git a/deps/v8/tools/linux-tick-processor.py b/deps/v8/tools/linux-tick-processor.py deleted file mode 100755 index 67c3b95525..0000000000 --- a/deps/v8/tools/linux-tick-processor.py +++ /dev/null @@ -1,78 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2008 the V8 project 
authors. All rights reserved. -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following -# disclaimer in the documentation and/or other materials provided -# with the distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -# Usage: process-ticks.py <logfile> -# Where <logfile> is the log file name (eg, v8.log). - -import subprocess, re, sys, tickprocessor - -class LinuxTickProcessor(tickprocessor.TickProcessor): - - def ParseVMSymbols(self, filename, start, end): - """Extract symbols and add them to the cpp entries.""" - # Extra both dynamic and non-dynamic symbols. 
- command = 'nm -C -n "%s"; nm -C -n -D "%s"' % (filename, filename) - process = subprocess.Popen(command, shell=True, - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT) - pipe = process.stdout - try: - for line in pipe: - row = re.match('^([0-9a-fA-F]{8}) . (.*)$', line) - if row: - addr = int(row.group(1), 16) - if addr < start and addr < end - start: - addr += start - self.cpp_entries.Insert(addr, tickprocessor.CodeEntry(addr, row.group(2))) - finally: - pipe.close() - - -class LinuxCmdLineProcessor(tickprocessor.CmdLineProcessor): - - def GetRequiredArgsNames(self): - return 'log_file' - - def ProcessRequiredArgs(self, args): - if len(args) != 1: - self.PrintUsageAndExit() - else: - self.log_file = args[0] - - -def Main(): - cmdline_processor = LinuxCmdLineProcessor() - cmdline_processor.ProcessArguments() - tick_processor = LinuxTickProcessor() - cmdline_processor.RunLogfileProcessing(tick_processor) - tick_processor.PrintResults() - - -if __name__ == '__main__': - Main() diff --git a/deps/v8/tools/ll_prof.py b/deps/v8/tools/ll_prof.py index 8390d4afe3..7f12c133c5 100755 --- a/deps/v8/tools/ll_prof.py +++ b/deps/v8/tools/ll_prof.py @@ -30,13 +30,13 @@ import bisect import collections import ctypes +import disasm import mmap import optparse import os import re import subprocess import sys -import tempfile import time @@ -74,27 +74,12 @@ V8_GC_FAKE_MMAP = "/tmp/__v8_gc__" JS_ORIGIN = "js" JS_SNAPSHOT_ORIGIN = "js-snapshot" -# Avoid using the slow (google-specific) wrapper around objdump. -OBJDUMP_BIN = "/usr/bin/objdump" -if not os.path.exists(OBJDUMP_BIN): - OBJDUMP_BIN = "objdump" +OBJDUMP_BIN = disasm.OBJDUMP_BIN class Code(object): """Code object.""" - _COMMON_DISASM_OPTIONS = ["-M", "intel-mnemonic", "-C"] - - _DISASM_HEADER_RE = re.compile(r"[a-f0-9]+\s+<.*:$") - _DISASM_LINE_RE = re.compile(r"\s*([a-f0-9]+):.*") - - # Keys must match constants in Logger::LogCodeInfo. 
- _ARCH_MAP = { - "ia32": "-m i386", - "x64": "-m i386 -M x86-64", - "arm": "-m arm" # Not supported by our objdump build. - } - _id = 0 def __init__(self, name, start_address, end_address, origin, origin_offset): @@ -150,12 +135,7 @@ class Code(object): ticks_offsets = [t[0] for t in ticks_map] ticks_counts = [t[1] for t in ticks_map] # Get a list of disassembled lines and their addresses. - lines = [] - for line in self._GetDisasmLines(code_info, options): - match = Code._DISASM_LINE_RE.match(line) - if match: - line_address = int(match.group(1), 16) - lines.append((line_address, line)) + lines = self._GetDisasmLines(code_info, options) if len(lines) == 0: return # Print annotated lines. @@ -179,9 +159,9 @@ class Code(object): total_count += count count = 100.0 * count / self.self_ticks if count >= 0.01: - print "%15.2f %s" % (count, lines[i][1]) + print "%15.2f %x: %s" % (count, lines[i][0], lines[i][1]) else: - print "%s %s" % (" " * 15, lines[i][1]) + print "%s %x: %s" % (" " * 15, lines[i][0], lines[i][1]) print assert total_count == self.self_ticks, \ "Lost ticks (%d != %d) in %s" % (total_count, self.self_ticks, self) @@ -195,39 +175,17 @@ class Code(object): self.origin) def _GetDisasmLines(self, code_info, options): - tmp_name = None if self.origin == JS_ORIGIN or self.origin == JS_SNAPSHOT_ORIGIN: - assert code_info.arch in Code._ARCH_MAP, \ - "Unsupported architecture '%s'" % arch - arch_flags = Code._ARCH_MAP[code_info.arch] - # Create a temporary file just with this code object. 
- tmp_name = tempfile.mktemp(".v8code") - size = self.end_address - self.start_address - command = "dd if=%s.code of=%s bs=1 count=%d skip=%d && " \ - "%s %s -D -b binary %s %s" % ( - options.log, tmp_name, size, self.origin_offset, - OBJDUMP_BIN, ' '.join(Code._COMMON_DISASM_OPTIONS), arch_flags, - tmp_name) + inplace = False + filename = options.log + ".code" else: - command = "%s %s --start-address=%d --stop-address=%d -d %s " % ( - OBJDUMP_BIN, ' '.join(Code._COMMON_DISASM_OPTIONS), - self.origin_offset, - self.origin_offset + self.end_address - self.start_address, - self.origin) - process = subprocess.Popen(command, - shell=True, - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT) - out, err = process.communicate() - lines = out.split("\n") - header_line = 0 - for i, line in enumerate(lines): - if Code._DISASM_HEADER_RE.match(line): - header_line = i - break - if tmp_name: - os.unlink(tmp_name) - return lines[header_line + 1:] + inplace = True + filename = self.origin + return disasm.GetDisasmLines(filename, + self.origin_offset, + self.end_address - self.start_address, + code_info.arch, + inplace) class CodePage(object): @@ -353,7 +311,7 @@ class CodeLogReader(object): r"code-info,([^,]+),(\d+)") _CODE_CREATE_RE = re.compile( - r"code-creation,([^,]+),(0x[a-f0-9]+),(\d+),\"(.*)\"(?:,(\d+))?") + r"code-creation,([^,]+),(0x[a-f0-9]+),(\d+),\"(.*)\"(?:,(0x[a-f0-9]+),([~*])?)?(?:,(\d+))?") _CODE_MOVE_RE = re.compile( r"code-move,(0x[a-f0-9]+),(0x[a-f0-9]+)") @@ -400,12 +358,18 @@ class CodeLogReader(object): name = self.address_to_snapshot_name[start_address] origin = JS_SNAPSHOT_ORIGIN else: - name = "%s:%s" % (match.group(1), match.group(4)) + tag = match.group(1) + optimization_status = match.group(6) + func_name = match.group(4) + if optimization_status: + name = "%s:%s%s" % (tag, optimization_status, func_name) + else: + name = "%s:%s" % (tag, func_name) origin = JS_ORIGIN if self.is_snapshot: origin_offset = 0 else: - origin_offset = int(match.group(5)) + 
origin_offset = int(match.group(7)) code = Code(name, start_address, end_address, origin, origin_offset) conficting_code = self.code_map.Find(start_address) if conficting_code: diff --git a/deps/v8/tools/profile.js b/deps/v8/tools/profile.js index 03bee8397d..c9c9437e93 100644 --- a/deps/v8/tools/profile.js +++ b/deps/v8/tools/profile.js @@ -38,11 +38,6 @@ function Profile() { this.bottomUpTree_ = new CallTree(); }; -/** - * Version of profiler log. - */ -Profile.VERSION = 2; - /** * Returns whether a function with the specified name must be skipped. @@ -69,6 +64,18 @@ Profile.Operation = { /** + * Enum for code state regarding its dynamic optimization. + * + * @enum {number} + */ +Profile.CodeState = { + COMPILED: 0, + OPTIMIZABLE: 1, + OPTIMIZED: 2 +}; + + +/** * Called whenever the specified operation has failed finding a function * containing the specified address. Should be overriden by subclasses. * See the Profile.Operation enum for the list of @@ -134,17 +141,30 @@ Profile.prototype.addCode = function( /** - * Creates an alias entry for a code entry. + * Registers dynamic (JIT-compiled) code entry. * - * @param {number} aliasAddr Alias address. - * @param {number} addr Code entry address. - */ -Profile.prototype.addCodeAlias = function( - aliasAddr, addr) { - var entry = this.codeMap_.findDynamicEntryByStartAddress(addr); - if (entry) { - this.codeMap_.addCode(aliasAddr, entry); + * @param {string} type Code entry type. + * @param {string} name Code entry name. + * @param {number} start Starting address. + * @param {number} size Code entry size. + * @param {number} funcAddr Shared function object address. + * @param {Profile.CodeState} state Optimization state. + */ +Profile.prototype.addFuncCode = function( + type, name, start, size, funcAddr, state) { + // As code and functions are in the same address space, + // it is safe to put them in a single code map. 
+ var func = this.codeMap_.findDynamicEntryByStartAddress(funcAddr); + if (!func) { + func = new Profile.FunctionEntry(name); + this.codeMap_.addCode(funcAddr, func); + } else if (func.name !== name) { + // Function object has been overwritten with a new one. + func.name = name; } + var entry = new Profile.DynamicFuncCodeEntry(size, type, func, state); + this.codeMap_.addCode(start, entry); + return entry; }; @@ -183,7 +203,7 @@ Profile.prototype.deleteCode = function(start) { * @param {number} from Current code entry address. * @param {number} to New code entry address. */ -Profile.prototype.safeMoveDynamicCode = function(from, to) { +Profile.prototype.moveFunc = function(from, to) { if (this.codeMap_.findDynamicEntryByStartAddress(from)) { this.codeMap_.moveCode(from, to); } @@ -191,18 +211,6 @@ Profile.prototype.safeMoveDynamicCode = function(from, to) { /** - * Reports about deletion of a dynamic code entry. - * - * @param {number} start Starting address. - */ -Profile.prototype.safeDeleteDynamicCode = function(start) { - if (this.codeMap_.findDynamicEntryByStartAddress(start)) { - this.codeMap_.deleteCode(start); - } -}; - - -/** * Retrieves a code entry by an address. * * @param {number} addr Entry address. @@ -383,14 +391,7 @@ Profile.DynamicCodeEntry = function(size, type, name) { * Returns node name. */ Profile.DynamicCodeEntry.prototype.getName = function() { - var name = this.name; - if (name.length == 0) { - name = '<anonymous>'; - } else if (name.charAt(0) == ' ') { - // An anonymous function with location: " aaa.js:10". - name = '<anonymous>' + name; - } - return this.type + ': ' + name; + return this.type + ': ' + this.name; }; @@ -403,9 +404,73 @@ Profile.DynamicCodeEntry.prototype.getRawName = function() { Profile.DynamicCodeEntry.prototype.isJSFunction = function() { - return this.type == "Function" || - this.type == "LazyCompile" || - this.type == "Script"; + return false; +}; + + +/** + * Creates a dynamic code entry. 
+ * + * @param {number} size Code size. + * @param {string} type Code type. + * @param {Profile.FunctionEntry} func Shared function entry. + * @param {Profile.CodeState} state Code optimization state. + * @constructor + */ +Profile.DynamicFuncCodeEntry = function(size, type, func, state) { + CodeMap.CodeEntry.call(this, size); + this.type = type; + this.func = func; + this.state = state; +}; + +Profile.DynamicFuncCodeEntry.STATE_PREFIX = ["", "~", "*"]; + +/** + * Returns node name. + */ +Profile.DynamicFuncCodeEntry.prototype.getName = function() { + var name = this.func.getName(); + return this.type + ': ' + Profile.DynamicFuncCodeEntry.STATE_PREFIX[this.state] + name; +}; + + +/** + * Returns raw node name (without type decoration). + */ +Profile.DynamicFuncCodeEntry.prototype.getRawName = function() { + return this.func.getName(); +}; + + +Profile.DynamicFuncCodeEntry.prototype.isJSFunction = function() { + return true; +}; + + +/** + * Creates a shared function object entry. + * + * @param {string} name Function name. + * @constructor + */ +Profile.FunctionEntry = function(name) { + CodeMap.CodeEntry.call(this, 0, name); +}; + + +/** + * Returns node name. + */ +Profile.FunctionEntry.prototype.getName = function() { + var name = this.name; + if (name.length == 0) { + name = '<anonymous>'; + } else if (name.charAt(0) == ' ') { + // An anonymous function with location: " aaa.js:10". + name = '<anonymous>' + name; + } + return name; }; diff --git a/deps/v8/tools/splaytree.py b/deps/v8/tools/splaytree.py deleted file mode 100644 index 8c3c4fe14e..0000000000 --- a/deps/v8/tools/splaytree.py +++ /dev/null @@ -1,226 +0,0 @@ -# Copyright 2008 the V8 project authors. All rights reserved. 
-# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following -# disclaimer in the documentation and/or other materials provided -# with the distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- - -class Node(object): - """Nodes in the splay tree.""" - - def __init__(self, key, value): - self.key = key - self.value = value - self.left = None - self.right = None - - -class KeyNotFoundError(Exception): - """KeyNotFoundError is raised when removing a non-existing node.""" - - def __init__(self, key): - self.key = key - - -class SplayTree(object): - """The splay tree itself is just a reference to the root of the tree.""" - - def __init__(self): - """Create a new SplayTree.""" - self.root = None - - def IsEmpty(self): - """Is the SplayTree empty?""" - return not self.root - - def Insert(self, key, value): - """Insert a new node in the SplayTree.""" - # If the tree is empty, insert the new node. - if self.IsEmpty(): - self.root = Node(key, value) - return - # Splay on the key to move the last node on the search path for - # the key to the root of the tree. - self.Splay(key) - # Ignore repeated insertions with the same key. - if self.root.key == key: - return - # Insert the new node. - node = Node(key, value) - if key > self.root.key: - node.left = self.root - node.right = self.root.right - self.root.right = None - else: - node.right = self.root - node.left = self.root.left - self.root.left = None - self.root = node - - def Remove(self, key): - """Remove the node with the given key from the SplayTree.""" - # Raise exception for key that is not found if the tree is empty. - if self.IsEmpty(): - raise KeyNotFoundError(key) - # Splay on the key to move the node with the given key to the top. - self.Splay(key) - # Raise exception for key that is not found. - if self.root.key != key: - raise KeyNotFoundError(key) - removed = self.root - # Link out the root node. - if not self.root.left: - # No left child, so the new tree is just the right child. - self.root = self.root.right - else: - # Left child exists. - right = self.root.right - # Make the original left child the new root. 
- self.root = self.root.left - # Splay to make sure that the new root has an empty right child. - self.Splay(key) - # Insert the original right child as the right child of the new - # root. - self.root.right = right - return removed - - def Find(self, key): - """Returns the node with the given key or None if no such node exists.""" - if self.IsEmpty(): - return None - self.Splay(key) - if self.root.key == key: - return self.root - return None - - def FindMax(self): - """Returns the node with the largest key value.""" - if self.IsEmpty(): - return None - current = self.root - while current.right != None: - current = current.right - return current - - # Returns the node with the smallest key value. - def FindMin(self): - if self.IsEmpty(): - return None - current = self.root - while current.left != None: - current = current.left - return current - - def FindGreatestsLessThan(self, key): - """Returns node with greatest key less than or equal to the given key.""" - if self.IsEmpty(): - return None - # Splay on the key to move the node with the given key or the last - # node on the search path to the top of the tree. - self.Splay(key) - # Now the result is either the root node or the greatest node in - # the left subtree. - if self.root.key <= key: - return self.root - else: - tmp = self.root - self.root = self.root.left - result = self.FindMax() - self.root = tmp - return result - - def ExportValueList(self): - """Returns a list containing all the values of the nodes in the tree.""" - result = [] - nodes_to_visit = [self.root] - while len(nodes_to_visit) > 0: - node = nodes_to_visit.pop() - if not node: - continue - result.append(node.value) - nodes_to_visit.append(node.left) - nodes_to_visit.append(node.right) - return result - - def Splay(self, key): - """Perform splay operation. - - Perform the splay operation for the given key. Moves the node with - the given key to the top of the tree. 
If no node has the given - key, the last node on the search path is moved to the top of the - tree. - - This uses the simplified top-down splaying algorithm from: - - "Self-adjusting Binary Search Trees" by Sleator and Tarjan - - """ - if self.IsEmpty(): - return - # Create a dummy node. The use of the dummy node is a bit - # counter-intuitive: The right child of the dummy node will hold - # the L tree of the algorithm. The left child of the dummy node - # will hold the R tree of the algorithm. Using a dummy node, left - # and right will always be nodes and we avoid special cases. - dummy = left = right = Node(None, None) - current = self.root - while True: - if key < current.key: - if not current.left: - break - if key < current.left.key: - # Rotate right. - tmp = current.left - current.left = tmp.right - tmp.right = current - current = tmp - if not current.left: - break - # Link right. - right.left = current - right = current - current = current.left - elif key > current.key: - if not current.right: - break - if key > current.right.key: - # Rotate left. - tmp = current.right - current.right = tmp.left - tmp.left = current - current = tmp - if not current.right: - break - # Link left. - left.right = current - left = current - current = current.right - else: - break - # Assemble. - left.right = current.left - right.left = current.right - current.left = dummy.right - current.right = dummy.left - self.root = current diff --git a/deps/v8/tools/tickprocessor.js b/deps/v8/tools/tickprocessor.js index db2f3c9b90..f105a21c19 100644 --- a/deps/v8/tools/tickprocessor.js +++ b/deps/v8/tools/tickprocessor.js @@ -57,10 +57,23 @@ function readFile(fileName) { } +/** + * Parser for dynamic code optimization state. 
+ */ +function parseState(s) { + switch (s) { + case "": return Profile.CodeState.COMPILED; + case "~": return Profile.CodeState.OPTIMIZABLE; + case "*": return Profile.CodeState.OPTIMIZED; + } + throw new Error("unknown code state: " + s); +} + + function SnapshotLogProcessor() { LogReader.call(this, { 'code-creation': { - parsers: [null, parseInt, parseInt, null], + parsers: [null, parseInt, parseInt, null, 'var-args'], processor: this.processCodeCreation }, 'code-move': { parsers: [parseInt, parseInt], processor: this.processCodeMove }, @@ -69,6 +82,7 @@ function SnapshotLogProcessor() { 'function-creation': null, 'function-move': null, 'function-delete': null, + 'sfi-move': null, 'snapshot-pos': { parsers: [parseInt, parseInt], processor: this.processSnapshotPosition }}); @@ -93,8 +107,14 @@ inherits(SnapshotLogProcessor, LogReader); SnapshotLogProcessor.prototype.processCodeCreation = function( - type, start, size, name) { - var entry = this.profile_.addCode(type, name, start, size); + type, start, size, name, maybe_func) { + if (maybe_func.length) { + var funcAddr = parseInt(maybe_func[0]); + var state = parseState(maybe_func[1]); + this.profile_.addFuncCode(type, name, start, size, funcAddr, state); + } else { + this.profile_.addCode(type, name, start, size); + } }; @@ -131,18 +151,14 @@ function TickProcessor( 'shared-library': { parsers: [null, parseInt, parseInt], processor: this.processSharedLibrary }, 'code-creation': { - parsers: [null, parseInt, parseInt, null], + parsers: [null, parseInt, parseInt, null, 'var-args'], processor: this.processCodeCreation }, 'code-move': { parsers: [parseInt, parseInt], processor: this.processCodeMove }, 'code-delete': { parsers: [parseInt], processor: this.processCodeDelete }, - 'function-creation': { parsers: [parseInt, parseInt], - processor: this.processFunctionCreation }, - 'function-move': { parsers: [parseInt, parseInt], + 'sfi-move': { parsers: [parseInt, parseInt], processor: this.processFunctionMove }, - 
'function-delete': { parsers: [parseInt], - processor: this.processFunctionDelete }, 'snapshot-pos': { parsers: [parseInt, parseInt], processor: this.processSnapshotPosition }, 'tick': { parsers: [parseInt, parseInt, parseInt, parseInt, 'var-args'], @@ -155,6 +171,9 @@ function TickProcessor( processor: this.processJSProducer }, // Ignored events. 'profiler': null, + 'function-creation': null, + 'function-move': null, + 'function-delete': null, 'heap-sample-stats': null, 'heap-sample-item': null, 'heap-js-cons-item': null, @@ -285,9 +304,15 @@ TickProcessor.prototype.processSharedLibrary = function( TickProcessor.prototype.processCodeCreation = function( - type, start, size, name) { + type, start, size, name, maybe_func) { name = this.deserializedEntriesNames_[start] || name; - var entry = this.profile_.addCode(type, name, start, size); + if (maybe_func.length) { + var funcAddr = parseInt(maybe_func[0]); + var state = parseState(maybe_func[1]); + this.profile_.addFuncCode(type, name, start, size, funcAddr, state); + } else { + this.profile_.addCode(type, name, start, size); + } }; @@ -301,19 +326,8 @@ TickProcessor.prototype.processCodeDelete = function(start) { }; -TickProcessor.prototype.processFunctionCreation = function( - functionAddr, codeAddr) { - this.profile_.addCodeAlias(functionAddr, codeAddr); -}; - - TickProcessor.prototype.processFunctionMove = function(from, to) { - this.profile_.safeMoveDynamicCode(from, to); -}; - - -TickProcessor.prototype.processFunctionDelete = function(start) { - this.profile_.safeDeleteDynamicCode(start); + this.profile_.moveFunc(from, to); }; @@ -330,7 +344,7 @@ TickProcessor.prototype.includeTick = function(vmState) { }; -TickProcessor.prototype.processTick = function(pc, sp, func, vmState, stack) { +TickProcessor.prototype.processTick = function(pc, sp, tos, vmState, stack) { this.ticks_.total++; if (vmState == TickProcessor.VmStates.GC) this.ticks_.gc++; if (!this.includeTick(vmState)) { @@ -338,19 +352,14 @@ 
TickProcessor.prototype.processTick = function(pc, sp, func, vmState, stack) { return; } - if (func) { - var funcEntry = this.profile_.findEntry(func); + if (tos) { + var funcEntry = this.profile_.findEntry(tos); if (!funcEntry || !funcEntry.isJSFunction || !funcEntry.isJSFunction()) { - func = 0; - } else { - var currEntry = this.profile_.findEntry(pc); - if (!currEntry || !currEntry.isJSFunction || currEntry.isJSFunction()) { - func = 0; - } + tos = 0; } } - this.profile_.recordTick(this.processStack(pc, func, stack)); + this.profile_.recordTick(this.processStack(pc, tos, stack)); }; diff --git a/deps/v8/tools/tickprocessor.py b/deps/v8/tools/tickprocessor.py deleted file mode 100644 index c932e3fc48..0000000000 --- a/deps/v8/tools/tickprocessor.py +++ /dev/null @@ -1,571 +0,0 @@ -# Copyright 2008 the V8 project authors. All rights reserved. -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following -# disclaimer in the documentation and/or other materials provided -# with the distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -import csv, splaytree, sys, re -from operator import itemgetter -import getopt, os, string - -class CodeEntry(object): - - def __init__(self, start_addr, name): - self.start_addr = start_addr - self.tick_count = 0 - self.name = name - self.stacks = {} - - def Tick(self, pc, stack): - self.tick_count += 1 - if len(stack) > 0: - stack.insert(0, self.ToString()) - stack_key = tuple(stack) - self.stacks[stack_key] = self.stacks.setdefault(stack_key, 0) + 1 - - def RegionTicks(self): - return None - - def SetStartAddress(self, start_addr): - self.start_addr = start_addr - - def ToString(self): - return self.name - - def IsSharedLibraryEntry(self): - return False - - def IsICEntry(self): - return False - - def IsJSFunction(self): - return False - -class SharedLibraryEntry(CodeEntry): - - def __init__(self, start_addr, name): - CodeEntry.__init__(self, start_addr, name) - - def IsSharedLibraryEntry(self): - return True - - -class JSCodeEntry(CodeEntry): - - def __init__(self, start_addr, name, type, size, assembler): - CodeEntry.__init__(self, start_addr, name) - self.type = type - self.size = size - self.assembler = assembler - self.region_ticks = None - self.builtin_ic_re = re.compile('^(Keyed)?(Call|Load|Store)IC_') - - def Tick(self, pc, stack): - super(JSCodeEntry, self).Tick(pc, stack) - if not pc is None: - offset = pc - self.start_addr - seen = [] - narrowest = None - narrowest_width = None - for region in self.Regions(): - if 
region.Contains(offset): - if (not region.name in seen): - seen.append(region.name) - if narrowest is None or region.Width() < narrowest.Width(): - narrowest = region - if len(seen) == 0: - return - if self.region_ticks is None: - self.region_ticks = {} - for name in seen: - if not name in self.region_ticks: - self.region_ticks[name] = [0, 0] - self.region_ticks[name][0] += 1 - if name == narrowest.name: - self.region_ticks[name][1] += 1 - - def RegionTicks(self): - return self.region_ticks - - def Regions(self): - if self.assembler: - return self.assembler.regions - else: - return [] - - def ToString(self): - name = self.name - if name == '': - name = '<anonymous>' - elif name.startswith(' '): - name = '<anonymous>' + name - return self.type + ': ' + name - - def IsICEntry(self): - return self.type in ('CallIC', 'LoadIC', 'StoreIC') or \ - (self.type == 'Builtin' and self.builtin_ic_re.match(self.name)) - - def IsJSFunction(self): - return self.type in ('Function', 'LazyCompile', 'Script') - -class CodeRegion(object): - - def __init__(self, start_offset, name): - self.start_offset = start_offset - self.name = name - self.end_offset = None - - def Contains(self, pc): - return (self.start_offset <= pc) and (pc <= self.end_offset) - - def Width(self): - return self.end_offset - self.start_offset - - -class Assembler(object): - - def __init__(self): - # Mapping from region ids to open regions - self.pending_regions = {} - self.regions = [] - - -class FunctionEnumerator(object): - - def __init__(self): - self.known_funcs = {} - self.next_func_id = 0 - - def GetFunctionId(self, name): - if not self.known_funcs.has_key(name): - self.known_funcs[name] = self.next_func_id - self.next_func_id += 1 - return self.known_funcs[name] - - def GetKnownFunctions(self): - known_funcs_items = self.known_funcs.items(); - known_funcs_items.sort(key = itemgetter(1)) - result = [] - for func, id_not_used in known_funcs_items: - result.append(func) - return result - - -VMStates = { 'JS': 
0, 'GC': 1, 'COMPILER': 2, 'OTHER': 3, 'EXTERNAL' : 4 } - - -class TickProcessor(object): - - def __init__(self): - self.log_file = '' - self.deleted_code = [] - self.vm_extent = {} - # Map from assembler ids to the pending assembler objects - self.pending_assemblers = {} - # Map from code addresses the have been allocated but not yet officially - # created to their assemblers. - self.assemblers = {} - self.js_entries = splaytree.SplayTree() - self.cpp_entries = splaytree.SplayTree() - self.total_number_of_ticks = 0 - self.number_of_library_ticks = 0 - self.unaccounted_number_of_ticks = 0 - self.excluded_number_of_ticks = 0 - self.number_of_gc_ticks = 0 - # Flag indicating whether to ignore unaccounted ticks in the report - self.ignore_unknown = False - self.func_enum = FunctionEnumerator() - self.packed_stacks = [] - - def ProcessLogfile(self, filename, included_state = None, ignore_unknown = False, separate_ic = False, call_graph_json = False): - self.log_file = filename - self.included_state = included_state - self.ignore_unknown = ignore_unknown - self.separate_ic = separate_ic - self.call_graph_json = call_graph_json - - try: - logfile = open(filename, 'rb') - except IOError: - sys.exit("Could not open logfile: " + filename) - try: - try: - logreader = csv.reader(logfile) - row_num = 1 - for row in logreader: - row_num += 1 - if row[0] == 'tick': - self.ProcessTick(int(row[1], 16), int(row[2], 16), int(row[3], 16), int(row[4]), self.PreprocessStack(row[5:])) - elif row[0] == 'code-creation': - self.ProcessCodeCreation(row[1], int(row[2], 16), int(row[3]), row[4]) - elif row[0] == 'code-move': - self.ProcessCodeMove(int(row[1], 16), int(row[2], 16)) - elif row[0] == 'code-delete': - self.ProcessCodeDelete(int(row[1], 16)) - elif row[0] == 'function-creation': - self.ProcessFunctionCreation(int(row[1], 16), int(row[2], 16)) - elif row[0] == 'function-move': - self.ProcessFunctionMove(int(row[1], 16), int(row[2], 16)) - elif row[0] == 'function-delete': - 
self.ProcessFunctionDelete(int(row[1], 16)) - elif row[0] == 'shared-library': - self.AddSharedLibraryEntry(row[1], int(row[2], 16), int(row[3], 16)) - self.ParseVMSymbols(row[1], int(row[2], 16), int(row[3], 16)) - elif row[0] == 'begin-code-region': - self.ProcessBeginCodeRegion(int(row[1], 16), int(row[2], 16), int(row[3], 16), row[4]) - elif row[0] == 'end-code-region': - self.ProcessEndCodeRegion(int(row[1], 16), int(row[2], 16), int(row[3], 16)) - elif row[0] == 'code-allocate': - self.ProcessCodeAllocate(int(row[1], 16), int(row[2], 16)) - except csv.Error: - print("parse error in line " + str(row_num)) - raise - finally: - logfile.close() - - def AddSharedLibraryEntry(self, filename, start, end): - # Mark the pages used by this library. - i = start - while i < end: - page = i >> 12 - self.vm_extent[page] = 1 - i += 4096 - # Add the library to the entries so that ticks for which we do not - # have symbol information is reported as belonging to the library. - self.cpp_entries.Insert(start, SharedLibraryEntry(start, filename)) - - def ParseVMSymbols(self, filename, start, end): - return - - def ProcessCodeAllocate(self, addr, assem): - if assem in self.pending_assemblers: - assembler = self.pending_assemblers.pop(assem) - self.assemblers[addr] = assembler - - def ProcessCodeCreation(self, type, addr, size, name): - if addr in self.assemblers: - assembler = self.assemblers.pop(addr) - else: - assembler = None - self.js_entries.Insert(addr, JSCodeEntry(addr, name, type, size, assembler)) - - def ProcessCodeMove(self, from_addr, to_addr): - try: - removed_node = self.js_entries.Remove(from_addr) - removed_node.value.SetStartAddress(to_addr); - self.js_entries.Insert(to_addr, removed_node.value) - except splaytree.KeyNotFoundError: - print('Code move event for unknown code: 0x%x' % from_addr) - - def ProcessCodeDelete(self, from_addr): - try: - removed_node = self.js_entries.Remove(from_addr) - self.deleted_code.append(removed_node.value) - except 
splaytree.KeyNotFoundError: - print('Code delete event for unknown code: 0x%x' % from_addr) - - def ProcessFunctionCreation(self, func_addr, code_addr): - js_entry_node = self.js_entries.Find(code_addr) - if js_entry_node: - js_entry = js_entry_node.value - self.js_entries.Insert(func_addr, JSCodeEntry(func_addr, js_entry.name, js_entry.type, 1, None)) - - def ProcessFunctionMove(self, from_addr, to_addr): - try: - removed_node = self.js_entries.Remove(from_addr) - removed_node.value.SetStartAddress(to_addr); - self.js_entries.Insert(to_addr, removed_node.value) - except splaytree.KeyNotFoundError: - return - - def ProcessFunctionDelete(self, from_addr): - try: - removed_node = self.js_entries.Remove(from_addr) - self.deleted_code.append(removed_node.value) - except splaytree.KeyNotFoundError: - return - - def ProcessBeginCodeRegion(self, id, assm, start, name): - if not assm in self.pending_assemblers: - self.pending_assemblers[assm] = Assembler() - assembler = self.pending_assemblers[assm] - assembler.pending_regions[id] = CodeRegion(start, name) - - def ProcessEndCodeRegion(self, id, assm, end): - assm = self.pending_assemblers[assm] - region = assm.pending_regions.pop(id) - region.end_offset = end - assm.regions.append(region) - - def IncludeTick(self, pc, sp, state): - return (self.included_state is None) or (self.included_state == state) - - def FindEntry(self, pc): - page = pc >> 12 - if page in self.vm_extent: - entry = self.cpp_entries.FindGreatestsLessThan(pc) - if entry != None: - return entry.value - else: - return entry - max = self.js_entries.FindMax() - min = self.js_entries.FindMin() - if max != None and pc < (max.key + max.value.size) and pc > min.key: - return self.js_entries.FindGreatestsLessThan(pc).value - return None - - def PreprocessStack(self, stack): - # remove all non-addresses (e.g. 
'overflow') and convert to int - result = [] - for frame in stack: - if frame.startswith('0x'): - result.append(int(frame, 16)) - return result - - def ProcessStack(self, stack): - result = [] - for frame in stack: - entry = self.FindEntry(frame) - if entry != None: - result.append(entry.ToString()) - return result - - def ProcessTick(self, pc, sp, func, state, stack): - if state == VMStates['GC']: - self.number_of_gc_ticks += 1 - if not self.IncludeTick(pc, sp, state): - self.excluded_number_of_ticks += 1; - return - self.total_number_of_ticks += 1 - entry = self.FindEntry(pc) - if entry == None: - self.unaccounted_number_of_ticks += 1 - return - if entry.IsSharedLibraryEntry(): - self.number_of_library_ticks += 1 - if entry.IsICEntry() and not self.separate_ic: - if len(stack) > 0: - caller_pc = stack.pop(0) - self.total_number_of_ticks -= 1 - self.ProcessTick(caller_pc, sp, func, state, stack) - else: - self.unaccounted_number_of_ticks += 1 - else: - processed_stack = self.ProcessStack(stack) - if not entry.IsSharedLibraryEntry() and not entry.IsJSFunction(): - func_entry_node = self.js_entries.Find(func) - if func_entry_node and func_entry_node.value.IsJSFunction(): - processed_stack.insert(0, func_entry_node.value.ToString()) - entry.Tick(pc, processed_stack) - if self.call_graph_json: - self.AddToPackedStacks(pc, stack) - - def AddToPackedStacks(self, pc, stack): - full_stack = stack - full_stack.insert(0, pc) - func_names = self.ProcessStack(full_stack) - func_ids = [] - for func in func_names: - func_ids.append(self.func_enum.GetFunctionId(func)) - self.packed_stacks.append(func_ids) - - def PrintResults(self): - if not self.call_graph_json: - self.PrintStatistics() - else: - self.PrintCallGraphJSON() - - def PrintStatistics(self): - print('Statistical profiling result from %s, (%d ticks, %d unaccounted, %d excluded).' 
% - (self.log_file, - self.total_number_of_ticks, - self.unaccounted_number_of_ticks, - self.excluded_number_of_ticks)) - if self.total_number_of_ticks > 0: - js_entries = self.js_entries.ExportValueList() - js_entries.extend(self.deleted_code) - cpp_entries = self.cpp_entries.ExportValueList() - # Print the unknown ticks percentage if they are not ignored. - if not self.ignore_unknown and self.unaccounted_number_of_ticks > 0: - self.PrintHeader('Unknown') - self.PrintCounter(self.unaccounted_number_of_ticks) - # Print the library ticks. - self.PrintHeader('Shared libraries') - self.PrintEntries(cpp_entries, lambda e:e.IsSharedLibraryEntry()) - # Print the JavaScript ticks. - self.PrintHeader('JavaScript') - self.PrintEntries(js_entries, lambda e:not e.IsSharedLibraryEntry()) - # Print the C++ ticks. - self.PrintHeader('C++') - self.PrintEntries(cpp_entries, lambda e:not e.IsSharedLibraryEntry()) - # Print the GC ticks. - self.PrintHeader('GC') - self.PrintCounter(self.number_of_gc_ticks) - # Print call profile. 
- print('\n [Call profile]:') - print(' total call path') - js_entries.extend(cpp_entries) - self.PrintCallProfile(js_entries) - - def PrintHeader(self, header_title): - print('\n [%s]:' % header_title) - print(' ticks total nonlib name') - - def PrintCounter(self, ticks_count): - percentage = ticks_count * 100.0 / self.total_number_of_ticks - print(' %(ticks)5d %(total)5.1f%%' % { - 'ticks' : ticks_count, - 'total' : percentage, - }) - - def PrintEntries(self, entries, condition): - # If ignoring unaccounted ticks don't include these in percentage - # calculations - number_of_accounted_ticks = self.total_number_of_ticks - if self.ignore_unknown: - number_of_accounted_ticks -= self.unaccounted_number_of_ticks - - number_of_non_library_ticks = number_of_accounted_ticks - self.number_of_library_ticks - entries.sort(key=lambda e: (e.tick_count, e.ToString()), reverse=True) - for entry in entries: - if entry.tick_count > 0 and condition(entry): - total_percentage = entry.tick_count * 100.0 / number_of_accounted_ticks - if entry.IsSharedLibraryEntry(): - non_library_percentage = 0 - else: - non_library_percentage = entry.tick_count * 100.0 / number_of_non_library_ticks - print(' %(ticks)5d %(total)5.1f%% %(nonlib)6.1f%% %(name)s' % { - 'ticks' : entry.tick_count, - 'total' : total_percentage, - 'nonlib' : non_library_percentage, - 'name' : entry.ToString() - }) - region_ticks = entry.RegionTicks() - if not region_ticks is None: - items = region_ticks.items() - items.sort(key=lambda e: e[1][1], reverse=True) - for (name, ticks) in items: - print(' flat cum') - print(' %(flat)5.1f%% %(accum)5.1f%% %(name)s' % { - 'flat' : ticks[1] * 100.0 / entry.tick_count, - 'accum' : ticks[0] * 100.0 / entry.tick_count, - 'name': name - }) - - def PrintCallProfile(self, entries): - all_stacks = {} - total_stacks = 0 - for entry in entries: - all_stacks.update(entry.stacks) - for count in entry.stacks.itervalues(): - total_stacks += count - all_stacks_items = all_stacks.items(); - 
all_stacks_items.sort(key = itemgetter(1), reverse=True) - missing_percentage = (self.total_number_of_ticks - total_stacks) * 100.0 / self.total_number_of_ticks - print(' %(ticks)5d %(total)5.1f%% <no call path information>' % { - 'ticks' : self.total_number_of_ticks - total_stacks, - 'total' : missing_percentage - }) - for stack, count in all_stacks_items: - total_percentage = count * 100.0 / self.total_number_of_ticks - print(' %(ticks)5d %(total)5.1f%% %(call_path)s' % { - 'ticks' : count, - 'total' : total_percentage, - 'call_path' : stack[0] + ' <- ' + stack[1] - }) - - def PrintCallGraphJSON(self): - print('\nvar __profile_funcs = ["' + - '",\n"'.join(self.func_enum.GetKnownFunctions()) + - '"];') - print('var __profile_ticks = [') - str_packed_stacks = [] - for stack in self.packed_stacks: - str_packed_stacks.append('[' + ','.join(map(str, stack)) + ']') - print(',\n'.join(str_packed_stacks)) - print('];') - -class CmdLineProcessor(object): - - def __init__(self): - self.options = ["js", - "gc", - "compiler", - "other", - "external", - "ignore-unknown", - "separate-ic", - "call-graph-json"] - # default values - self.state = None - self.ignore_unknown = False - self.log_file = None - self.separate_ic = False - self.call_graph_json = False - - def ProcessArguments(self): - try: - opts, args = getopt.getopt(sys.argv[1:], "jgcoe", self.options) - except getopt.GetoptError: - self.PrintUsageAndExit() - for key, value in opts: - if key in ("-j", "--js"): - self.state = VMStates['JS'] - if key in ("-g", "--gc"): - self.state = VMStates['GC'] - if key in ("-c", "--compiler"): - self.state = VMStates['COMPILER'] - if key in ("-o", "--other"): - self.state = VMStates['OTHER'] - if key in ("-e", "--external"): - self.state = VMStates['EXTERNAL'] - if key in ("--ignore-unknown"): - self.ignore_unknown = True - if key in ("--separate-ic"): - self.separate_ic = True - if key in ("--call-graph-json"): - self.call_graph_json = True - self.ProcessRequiredArgs(args) - - def 
ProcessRequiredArgs(self, args): - return - - def GetRequiredArgsNames(self): - return - - def PrintUsageAndExit(self): - print('Usage: %(script_name)s --{%(opts)s} %(req_opts)s' % { - 'script_name': os.path.basename(sys.argv[0]), - 'opts': string.join(self.options, ','), - 'req_opts': self.GetRequiredArgsNames() - }) - sys.exit(2) - - def RunLogfileProcessing(self, tick_processor): - tick_processor.ProcessLogfile(self.log_file, self.state, self.ignore_unknown, - self.separate_ic, self.call_graph_json) - - -if __name__ == '__main__': - sys.exit('You probably want to run windows-tick-processor.py or linux-tick-processor.py.') diff --git a/deps/v8/tools/v8.xcodeproj/project.pbxproj b/deps/v8/tools/v8.xcodeproj/project.pbxproj index 24321e52c7..10fbc58a6f 100644 --- a/deps/v8/tools/v8.xcodeproj/project.pbxproj +++ b/deps/v8/tools/v8.xcodeproj/project.pbxproj @@ -211,6 +211,7 @@ 895692A512D4ED240072C313 /* objects-printer.cc in Sources */ = {isa = PBXBuildFile; fileRef = 8946827412C26EB700C914BC /* objects-printer.cc */; }; 8956B6CF0F5D86730033B5A2 /* debug-agent.cc in Sources */ = {isa = PBXBuildFile; fileRef = 8956B6CD0F5D86570033B5A2 /* debug-agent.cc */; }; 895FA753107FFED3006F39D4 /* constants-arm.cc in Sources */ = {isa = PBXBuildFile; fileRef = 895FA748107FFE73006F39D4 /* constants-arm.cc */; }; + 896FA1E5130F93D300042054 /* lithium-gap-resolver-arm.cc in Sources */ = {isa = PBXBuildFile; fileRef = 896FA1E3130F93D300042054 /* lithium-gap-resolver-arm.cc */; }; 896FD03A0E78D717003DFB6A /* libv8-arm.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 89F23C870E78D5B2006B2466 /* libv8-arm.a */; }; 897C77D012B68E3D000767A8 /* d8-debug.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893988150F2A3686007D5254 /* d8-debug.cc */; }; 897C77D112B68E3D000767A8 /* d8-js.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893988320F2A3B8B007D5254 /* d8-js.cc */; }; @@ -647,6 +648,8 @@ 895FA751107FFEAE006F39D4 /* register-allocator-arm.h */ = {isa = PBXFileReference; 
fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "register-allocator-arm.h"; path = "arm/register-allocator-arm.h"; sourceTree = "<group>"; }; 8964482B0E9C00F700E7C516 /* codegen-ia32.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "codegen-ia32.h"; path = "ia32/codegen-ia32.h"; sourceTree = "<group>"; }; 896448BC0E9D530500E7C516 /* codegen-arm.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "codegen-arm.h"; path = "arm/codegen-arm.h"; sourceTree = "<group>"; }; + 896FA1E3130F93D300042054 /* lithium-gap-resolver-arm.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "lithium-gap-resolver-arm.cc"; path = "arm/lithium-gap-resolver-arm.cc"; sourceTree = "<group>"; }; + 896FA1E4130F93D300042054 /* lithium-gap-resolver-arm.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "lithium-gap-resolver-arm.h"; path = "arm/lithium-gap-resolver-arm.h"; sourceTree = "<group>"; }; 8970F2F00E719FB2006AE7B5 /* libv8.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libv8.a; sourceTree = BUILT_PRODUCTS_DIR; }; 897C77D912B68E3D000767A8 /* d8-arm */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = "d8-arm"; sourceTree = BUILT_PRODUCTS_DIR; }; 897F767A0E71B4CC007ACF34 /* v8_shell */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = v8_shell; sourceTree = BUILT_PRODUCTS_DIR; }; @@ -1538,6 +1541,8 @@ 893E24C812B14B510083370F /* lithium-arm.h */, 893E24C912B14B520083370F /* lithium-codegen-arm.cc */, 893E24CA12B14B520083370F /* lithium-codegen-arm.h */, + 896FA1E3130F93D300042054 /* lithium-gap-resolver-arm.cc */, + 896FA1E4130F93D300042054 /* lithium-gap-resolver-arm.h */, 897FF1540E719B8F00D62E90 /* macro-assembler-arm.cc */, 897FF1550E719B8F00D62E90 /* 
macro-assembler-arm.h */, 89A15C700EE466D000B48DEB /* regexp-macro-assembler-arm.cc */, @@ -2290,6 +2295,7 @@ 894A59EA12D777E80000766D /* lithium.cc in Sources */, 89D7DDDC12E8DE09001E2B82 /* gdb-jit.cc in Sources */, 89D7DDDD12E8DE09001E2B82 /* inspector.cc in Sources */, + 896FA1E5130F93D300042054 /* lithium-gap-resolver-arm.cc in Sources */, ); runOnlyForDeploymentPostprocessing = 0; }; diff --git a/deps/v8/tools/windows-tick-processor.py b/deps/v8/tools/windows-tick-processor.py deleted file mode 100755 index ade2bf27e1..0000000000 --- a/deps/v8/tools/windows-tick-processor.py +++ /dev/null @@ -1,137 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2008 the V8 project authors. All rights reserved. -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following -# disclaimer in the documentation and/or other materials provided -# with the distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -# Usage: process-ticks.py <binary> <logfile> -# -# Where <binary> is the binary program name (eg, v8_shell.exe) and -# <logfile> is the log file name (eg, v8.log). -# -# This tick processor expects to find a map file for the binary named -# binary.map if the binary is named binary.exe. The tick processor -# only works for statically linked executables - no information about -# shared libraries is logged from v8 on Windows. - -import os, re, sys, tickprocessor - -class WindowsTickProcessor(tickprocessor.TickProcessor): - - def Unmangle(self, name): - """Performs very simple unmangling of C++ names. - - Does not handle arguments and template arguments. The mangled names have - the form: - - ?LookupInDescriptor@JSObject@internal@v8@@...arguments info... - - """ - # Name is mangled if it starts with a question mark. - is_mangled = re.match("^\?(.*)", name) - if is_mangled: - substrings = is_mangled.group(1).split('@') - try: - # The function name is terminated by two @s in a row. Find the - # substrings that are part of the function name. - index = substrings.index('') - substrings = substrings[0:index] - except ValueError: - # If we did not find two @s in a row, the mangled name is not in - # the format we expect and we give up. 
- return name - substrings.reverse() - function_name = "::".join(substrings) - return function_name - return name - - - def ParseMapFile(self, filename): - """Parse map file and add symbol information to the cpp entries.""" - # Locate map file. - has_dot = re.match('^([a-zA-F0-9_-]*)[\.]?.*$', filename) - if has_dot: - map_file_name = has_dot.group(1) + '.map' - try: - map_file = open(map_file_name, 'rb') - except IOError: - sys.exit("Could not open map file: " + map_file_name) - else: - sys.exit("Could not find map file for executable: " + filename) - try: - max_addr = 0 - min_addr = 2**30 - # Process map file and search for function entries. - row_regexp = re.compile(' 0001:[0-9a-fA-F]{8}\s*([_\?@$0-9a-zA-Z]*)\s*([0-9a-fA-F]{8}).*') - for line in map_file: - row = re.match(row_regexp, line) - if row: - addr = int(row.group(2), 16) - if addr > max_addr: - max_addr = addr - if addr < min_addr: - min_addr = addr - mangled_name = row.group(1) - name = self.Unmangle(mangled_name) - self.cpp_entries.Insert(addr, tickprocessor.CodeEntry(addr, name)); - i = min_addr - # Mark the pages for which there are functions in the map file. - while i < max_addr: - page = i >> 12 - self.vm_extent[page] = 1 - i += 4096 - finally: - map_file.close() - - -class WindowsCmdLineProcessor(tickprocessor.CmdLineProcessor): - - def __init__(self): - super(WindowsCmdLineProcessor, self).__init__() - self.binary_file = None - - def GetRequiredArgsNames(self): - return 'binary log_file' - - def ProcessRequiredArgs(self, args): - if len(args) != 2: - self.PrintUsageAndExit() - else: - self.binary_file = args[0] - self.log_file = args[1] - - -def Main(): - cmdline_processor = WindowsCmdLineProcessor() - cmdline_processor.ProcessArguments() - tickprocessor = WindowsTickProcessor() - tickprocessor.ParseMapFile(cmdline_processor.binary_file) - cmdline_processor.RunLogfileProcessing(tickprocessor) - tickprocessor.PrintResults() - -if __name__ == '__main__': - Main() |