| author | isaacs <i@izs.me> | 2012-02-23 16:35:46 -0800 |
|---|---|---|
| committer | isaacs <i@izs.me> | 2012-02-27 12:11:09 -0800 |
| commit | f4641bd4de566145e99b05c47c8f3d629b0223cd (patch) | |
| tree | 2d0ccb1c338bec65881b30c49707d9e688fed154 /deps/v8 | |
| parent | 82ad1f87fa99b420a97cc9bfae727fce0b1bf8a4 (diff) | |
| download | node-new-f4641bd4de566145e99b05c47c8f3d629b0223cd.tar.gz | |
Update v8 to 3.9.9
Diffstat (limited to 'deps/v8')
179 files changed, 7422 insertions, 2793 deletions
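The headline ChangeLog item below is fast-case for-in support in Crankshaft; the ARM port later in this diff wires it up through the new LForInPrepareMap, LForInCacheArray, LCheckMapValue, and LLoadFieldByIndex instructions. As a hedged sketch (illustrative JavaScript, not code from this commit), this is the loop shape that fast path targets:

```js
// Illustrative only: a for-in loop eligible for the new Crankshaft fast
// path. It relies on the receiver's map having a valid enum cache, which
// the new MacroAssembler::CheckEnumCache (further down) verifies.
function sumOwnProperties(obj) {
  var total = 0;
  for (var key in obj) {    // own, named, enumerable properties
    total += obj[key];
  }
  return total;
}

sumOwnProperties({ a: 1, b: 2, c: 3 });  // 6

// Null/undefined, smi, and proxy receivers deoptimize (see
// LCodeGen::DoForInPrepareMap), and objects with elements or without an
// enum cache fall back to Runtime::kGetPropertyNamesFast.
```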
diff --git a/deps/v8/.gitignore b/deps/v8/.gitignore
index b61faef74c..088daeabf9 100644
--- a/deps/v8/.gitignore
+++ b/deps/v8/.gitignore
@@ -23,10 +23,10 @@ shell_g
 /build/gyp
 /obj/
 /out/
-/test/es5conform/data/
-/test/mozilla/data/
-/test/sputnik/sputniktests/
-/test/test262/data/
+/test/es5conform/data
+/test/mozilla/data
+/test/sputnik/sputniktests
+/test/test262/data
 /tools/oom_dump/oom_dump
 /tools/oom_dump/oom_dump.o
 /tools/visual_studio/Debug
diff --git a/deps/v8/AUTHORS b/deps/v8/AUTHORS
index 1ff5ff604d..869be2b74b 100644
--- a/deps/v8/AUTHORS
+++ b/deps/v8/AUTHORS
@@ -23,14 +23,18 @@ Daniel James <dnljms@gmail.com>
 Dineel D Sule <dsule@codeaurora.org>
 Erich Ocean <erich.ocean@me.com>
 Fedor Indutny <fedor@indutny.com>
+Ioseb Dzmanashvili <ioseb.dzmanashvili@gmail.com>
 Jan de Mooij <jandemooij@gmail.com>
 Jay Freeman <saurik@saurik.com>
 Joel Stanley <joel.stan@gmail.com>
 John Jozwiak <jjozwiak@codeaurora.org>
+Jonathan Liu <net147@gmail.com>
 Kun Zhang <zhangk@codeaurora.org>
 Martyn Capewell <martyn.capewell@arm.com>
+Mathias Bynens <mathias@qiwi.be>
 Matt Hanselman <mjhanselman@gmail.com>
 Maxim Mossienko <maxim.mossienko@gmail.com>
+Michael Lutz <michi@icosahedron.de>
 Michael Smith <mike@w3.org>
 Mike Gilbert <floppymaster@gmail.com>
 Paolo Giarrusso <p.giarrusso@gmail.com>
diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog
index 482cca8946..b299d99b09 100644
--- a/deps/v8/ChangeLog
+++ b/deps/v8/ChangeLog
@@ -1,3 +1,58 @@
+2012-02-23: Version 3.9.9
+
+        Supported fast case for-in in Crankshaft.
+
+        Sped up heap snapshot serialization and dominators construction.
+
+        Randomized allocation addresses on windows. (Chromium issue 115151)
+
+        Fixed compilation with MinGW-w64. (issue 1943)
+
+        Fixed incorrect value of assignments to non-extensible properties.
+
+        Fixed a crash bug in generated code on ia32.
+
+        Performance and stability improvements on all platforms.
+
+
+2012-02-21: Version 3.9.8
+
+        Fixed memory leak and missing #include in StartupDataDecompressor
+        (issue 1960).
+
+        Renamed static methods to avoid shadowing virtual methods and fix Clang
+        C++11 compile error.
+
+        Fixed sequence of element access in array builtins (issue 1790).
+
+        Performance and stability improvements on all platforms.
+
+
+2012-02-16: Version 3.9.7
+
+        Fixed V8 issues 1322, 1878, 1942, 1945 and Chromium issue 113924.
+
+        Fixed GCC-4.7 warnings.
+
+        Added Navier-Stokes benchmark.
+
+        Performance and stability improvements on all platforms.
+
+
+2012-02-14: Version 3.9.6
+
+        Fix template-related linker error. (issue 1936)
+
+        Allow inlining of functions containing object literals. (issue 1322)
+
+        Add --call-graph-size option to tickprocessor. (issue 1937)
+
+        Heap Snapshot maximum size limit is too low for really big apps. At the
+        moment the limit is 256MB. (Chromium issue 113015)
+
+        Performance and stability improvements on all platforms.
+
+
 2012-02-09: Version 3.9.5
 
         Removed unused command line flags.
diff --git a/deps/v8/SConstruct b/deps/v8/SConstruct
index d4eaebef8d..84863e38ca 100644
--- a/deps/v8/SConstruct
+++ b/deps/v8/SConstruct
@@ -1,4 +1,4 @@
-# Copyright 2011 the V8 project authors. All rights reserved.
+# Copyright 2012 the V8 project authors. All rights reserved.
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions are
 # met:
@@ -296,10 +296,11 @@ V8_EXTRA_FLAGS = {
                      '-Werror',
                      '-W',
                      '-Wno-unused-parameter',
+                     '-Woverloaded-virtual',
                      '-Wnon-virtual-dtor']
   },
   'os:win32': {
-    'WARNINGFLAGS': ['-pedantic', '-Wno-long-long']
+    'WARNINGFLAGS': ['-pedantic', '-Wno-long-long', '-Wno-pedantic-ms-format']
   },
   'os:linux': {
     'WARNINGFLAGS': ['-pedantic'],
diff --git a/deps/v8/benchmarks/base.js b/deps/v8/benchmarks/base.js
index ffabf24dda..62c37e1208 100644
--- a/deps/v8/benchmarks/base.js
+++ b/deps/v8/benchmarks/base.js
@@ -1,4 +1,4 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -78,7 +78,7 @@ BenchmarkSuite.suites = [];
 // Scores are not comparable across versions. Bump the version if
 // you're making changes that will affect that scores, e.g. if you add
 // a new benchmark or change an existing one.
-BenchmarkSuite.version = '6';
+BenchmarkSuite.version = '7';
 
 // To make the benchmark results predictable, we replace Math.random
diff --git a/deps/v8/benchmarks/navier-stokes.js b/deps/v8/benchmarks/navier-stokes.js
new file mode 100644
index 0000000000..b0dc3c8645
--- /dev/null
+++ b/deps/v8/benchmarks/navier-stokes.js
@@ -0,0 +1,387 @@
+/**
+ * Copyright 2012 the V8 project authors. All rights reserved.
+ * Copyright 2009 Oliver Hunt <http://nerget.com>
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+var NavierStokes = new BenchmarkSuite('NavierStokes', 1484000,
+                                      [new Benchmark('NavierStokes',
+                                                     runNavierStokes,
+                                                     setupNavierStokes,
+                                                     tearDownNavierStokes)]);
+
+var solver = null;
+
+function runNavierStokes()
+{
+    solver.update();
+}
+
+function setupNavierStokes()
+{
+    solver = new FluidField(null);
+    solver.setResolution(128, 128);
+    solver.setIterations(20);
+    solver.setDisplayFunction(function(){});
+    solver.setUICallback(prepareFrame);
+    solver.reset();
+}
+
+function tearDownNavierStokes()
+{
+    solver = null;
+}
+
+function addPoints(field) {
+    var n = 64;
+    for (var i = 1; i <= n; i++) {
+        field.setVelocity(i, i, n, n);
+        field.setDensity(i, i, 5);
+        field.setVelocity(i, n - i, -n, -n);
+        field.setDensity(i, n - i, 20);
+        field.setVelocity(128 - i, n + i, -n, -n);
+        field.setDensity(128 - i, n + i, 30);
+    }
+}
+
+var framesTillAddingPoints = 0;
+var framesBetweenAddingPoints = 5;
+
+function prepareFrame(field)
+{
+    if (framesTillAddingPoints == 0) {
+        addPoints(field);
+        framesTillAddingPoints = framesBetweenAddingPoints;
+        framesBetweenAddingPoints++;
+    } else {
+        framesTillAddingPoints--;
+    }
+}
+
+// Code from Oliver Hunt (http://nerget.com/fluidSim/pressure.js) starts here.
+function FluidField(canvas) {
+    function addFields(x, s, dt)
+    {
+        for (var i=0; i<size ; i++ ) x[i] += dt*s[i];
+    }
+
+    function set_bnd(b, x)
+    {
+        if (b===1) {
+            for (var i = 1; i <= width; i++) {
+                x[i] = x[i + rowSize];
+                x[i + (height+1) *rowSize] = x[i + height * rowSize];
+            }
+
+            for (var j = 1; j <= height; j++) {
+                x[j * rowSize] = -x[1 + j * rowSize];
+                x[(width + 1) + j * rowSize] = -x[width + j * rowSize];
+            }
+        } else if (b === 2) {
+            for (var i = 1; i <= width; i++) {
+                x[i] = -x[i + rowSize];
+                x[i + (height + 1) * rowSize] = -x[i + height * rowSize];
+            }
+
+            for (var j = 1; j <= height; j++) {
+                x[j * rowSize] = x[1 + j * rowSize];
+                x[(width + 1) + j * rowSize] = x[width + j * rowSize];
+            }
+        } else {
+            for (var i = 1; i <= width; i++) {
+                x[i] = x[i + rowSize];
+                x[i + (height + 1) * rowSize] = x[i + height * rowSize];
+            }
+
+            for (var j = 1; j <= height; j++) {
+                x[j * rowSize] = x[1 + j * rowSize];
+                x[(width + 1) + j * rowSize] = x[width + j * rowSize];
+            }
+        }
+        var maxEdge = (height + 1) * rowSize;
+        x[0] = 0.5 * (x[1] + x[rowSize]);
+        x[maxEdge] = 0.5 * (x[1 + maxEdge] + x[height * rowSize]);
+        x[(width+1)] = 0.5 * (x[width] + x[(width + 1) + rowSize]);
+        x[(width+1)+maxEdge] = 0.5 * (x[width + maxEdge] + x[(width + 1) + height * rowSize]);
+    }
+
+    function lin_solve(b, x, x0, a, c)
+    {
+        if (a === 0 && c === 1) {
+            for (var j=1 ; j<=height; j++) {
+                var currentRow = j * rowSize;
+                ++currentRow;
+                for (var i = 0; i < width; i++) {
+                    x[currentRow] = x0[currentRow];
+                    ++currentRow;
+                }
+            }
+            set_bnd(b, x);
+        } else {
+            var invC = 1 / c;
+            for (var k=0 ; k<iterations; k++) {
+                for (var j=1 ; j<=height; j++) {
+                    var lastRow = (j - 1) * rowSize;
+                    var currentRow = j * rowSize;
+                    var nextRow = (j + 1) * rowSize;
+                    var lastX = x[currentRow];
+                    ++currentRow;
+                    for (var i=1; i<=width; i++)
+                        lastX = x[currentRow] = (x0[currentRow] + a*(lastX+x[++currentRow]+x[++lastRow]+x[++nextRow])) * invC;
+                }
+                set_bnd(b, x);
+            }
+        }
+    }
+
+    function diffuse(b, x, x0, dt)
+    {
+        var a = 0;
+        lin_solve(b, x, x0, a, 1 + 4*a);
+    }
+
+    function lin_solve2(x, x0, y, y0, a, c)
+    {
+        if (a === 0 && c === 1) {
+            for (var j=1 ; j <= height; j++) {
+                var currentRow = j * rowSize;
+                ++currentRow;
+                for (var i = 0; i < width; i++) {
+                    x[currentRow] = x0[currentRow];
+                    y[currentRow] = y0[currentRow];
+                    ++currentRow;
+                }
+            }
+            set_bnd(1, x);
+            set_bnd(2, y);
+        } else {
+            var invC = 1/c;
+            for (var k=0 ; k<iterations; k++) {
+                for (var j=1 ; j <= height; j++) {
+                    var lastRow = (j - 1) * rowSize;
+                    var currentRow = j * rowSize;
+                    var nextRow = (j + 1) * rowSize;
+                    var lastX = x[currentRow];
+                    var lastY = y[currentRow];
+                    ++currentRow;
+                    for (var i = 1; i <= width; i++) {
+                        lastX = x[currentRow] = (x0[currentRow] + a * (lastX + x[currentRow] + x[lastRow] + x[nextRow])) * invC;
+                        lastY = y[currentRow] = (y0[currentRow] + a * (lastY + y[++currentRow] + y[++lastRow] + y[++nextRow])) * invC;
+                    }
+                }
+                set_bnd(1, x);
+                set_bnd(2, y);
+            }
+        }
+    }
+
+    function diffuse2(x, x0, y, y0, dt)
+    {
+        var a = 0;
+        lin_solve2(x, x0, y, y0, a, 1 + 4 * a);
+    }
+
+    function advect(b, d, d0, u, v, dt)
+    {
+        var Wdt0 = dt * width;
+        var Hdt0 = dt * height;
+        var Wp5 = width + 0.5;
+        var Hp5 = height + 0.5;
+        for (var j = 1; j<= height; j++) {
+            var pos = j * rowSize;
+            for (var i = 1; i <= width; i++) {
+                var x = i - Wdt0 * u[++pos];
+                var y = j - Hdt0 * v[pos];
+                if (x < 0.5)
+                    x = 0.5;
+                else if (x > Wp5)
+                    x = Wp5;
+                var i0 = x | 0;
+                var i1 = i0 + 1;
+                if (y < 0.5)
+                    y = 0.5;
+                else if (y > Hp5)
+                    y = Hp5;
+                var j0 = y | 0;
+                var j1 = j0 + 1;
+                var s1 = x - i0;
+                var s0 = 1 - s1;
+                var t1 = y - j0;
+                var t0 = 1 - t1;
+                var row1 = j0 * rowSize;
+                var row2 = j1 * rowSize;
+                d[pos] = s0 * (t0 * d0[i0 + row1] + t1 * d0[i0 + row2]) + s1 * (t0 * d0[i1 + row1] + t1 * d0[i1 + row2]);
+            }
+        }
+        set_bnd(b, d);
+    }
+
+    function project(u, v, p, div)
+    {
+        var h = -0.5 / Math.sqrt(width * height);
+        for (var j = 1 ; j <= height; j++ ) {
+            var row = j * rowSize;
+            var previousRow = (j - 1) * rowSize;
+            var prevValue = row - 1;
+            var currentRow = row;
+            var nextValue = row + 1;
+            var nextRow = (j + 1) * rowSize;
+            for (var i = 1; i <= width; i++ ) {
+                div[++currentRow] = h * (u[++nextValue] - u[++prevValue] + v[++nextRow] - v[++previousRow]);
+                p[currentRow] = 0;
+            }
+        }
+        set_bnd(0, div);
+        set_bnd(0, p);
+
+        lin_solve(0, p, div, 1, 4 );
+        var wScale = 0.5 * width;
+        var hScale = 0.5 * height;
+        for (var j = 1; j<= height; j++ ) {
+            var prevPos = j * rowSize - 1;
+            var currentPos = j * rowSize;
+            var nextPos = j * rowSize + 1;
+            var prevRow = (j - 1) * rowSize;
+            var currentRow = j * rowSize;
+            var nextRow = (j + 1) * rowSize;
+
+            for (var i = 1; i<= width; i++) {
+                u[++currentPos] -= wScale * (p[++nextPos] - p[++prevPos]);
+                v[currentPos] -= hScale * (p[++nextRow] - p[++prevRow]);
+            }
+        }
+        set_bnd(1, u);
+        set_bnd(2, v);
+    }
+
+    function dens_step(x, x0, u, v, dt)
+    {
+        addFields(x, x0, dt);
+        diffuse(0, x0, x, dt );
+        advect(0, x, x0, u, v, dt );
+    }
+
+    function vel_step(u, v, u0, v0, dt)
+    {
+        addFields(u, u0, dt );
+        addFields(v, v0, dt );
+        var temp = u0; u0 = u; u = temp;
+        var temp = v0; v0 = v; v = temp;
+        diffuse2(u,u0,v,v0, dt);
+        project(u, v, u0, v0);
+        var temp = u0; u0 = u; u = temp;
+        var temp = v0; v0 = v; v = temp;
+        advect(1, u, u0, u0, v0, dt);
+        advect(2, v, v0, u0, v0, dt);
+        project(u, v, u0, v0 );
+    }
+    var uiCallback = function(d,u,v) {};
+
+    function Field(dens, u, v) {
+        // Just exposing the fields here rather than using accessors is a
+        // measurable win during display (maybe 5%) but makes the code ugly.
+        this.setDensity = function(x, y, d) {
+            dens[(x + 1) + (y + 1) * rowSize] = d;
+        }
+        this.getDensity = function(x, y) {
+            return dens[(x + 1) + (y + 1) * rowSize];
+        }
+        this.setVelocity = function(x, y, xv, yv) {
+            u[(x + 1) + (y + 1) * rowSize] = xv;
+            v[(x + 1) + (y + 1) * rowSize] = yv;
+        }
+        this.getXVelocity = function(x, y) {
+            return u[(x + 1) + (y + 1) * rowSize];
+        }
+        this.getYVelocity = function(x, y) {
+            return v[(x + 1) + (y + 1) * rowSize];
+        }
+        this.width = function() { return width; }
+        this.height = function() { return height; }
+    }
+    function queryUI(d, u, v)
+    {
+        for (var i = 0; i < size; i++)
+            u[i] = v[i] = d[i] = 0.0;
+        uiCallback(new Field(d, u, v));
+    }
+
+    this.update = function () {
+        queryUI(dens_prev, u_prev, v_prev);
+        vel_step(u, v, u_prev, v_prev, dt);
+        dens_step(dens, dens_prev, u, v, dt);
+        displayFunc(new Field(dens, u, v));
+    }
+    this.setDisplayFunction = function(func) {
+        displayFunc = func;
+    }
+
+    this.iterations = function() { return iterations; }
+    this.setIterations = function(iters) {
+        if (iters > 0 && iters <= 100)
+            iterations = iters;
+    }
+    this.setUICallback = function(callback) {
+        uiCallback = callback;
+    }
+    var iterations = 10;
+    var visc = 0.5;
+    var dt = 0.1;
+    var dens;
+    var dens_prev;
+    var u;
+    var u_prev;
+    var v;
+    var v_prev;
+    var width;
+    var height;
+    var rowSize;
+    var size;
+    var displayFunc;
+    function reset()
+    {
+        rowSize = width + 2;
+        size = (width+2)*(height+2);
+        dens = new Array(size);
+        dens_prev = new Array(size);
+        u = new Array(size);
+        u_prev = new Array(size);
+        v = new Array(size);
+        v_prev = new Array(size);
+        for (var i = 0; i < size; i++)
+            dens_prev[i] = u_prev[i] = v_prev[i] = dens[i] = u[i] = v[i] = 0;
+    }
+    this.reset = reset;
+    this.setResolution = function (hRes, wRes)
+    {
+        var res = wRes * hRes;
+        if (res > 0 && res < 1000000 && (wRes != width || hRes != height)) {
+            width = wRes;
+            height = hRes;
+            reset();
+            return true;
+        }
+        return false;
+    }
+    this.setResolution(64, 64);
+}
diff --git a/deps/v8/benchmarks/run.html b/deps/v8/benchmarks/run.html
index 36d2ad511b..8786d1fb0c 100644
--- a/deps/v8/benchmarks/run.html
+++ b/deps/v8/benchmarks/run.html
@@ -14,6 +14,7 @@
 <script type="text/javascript" src="earley-boyer.js"></script>
 <script type="text/javascript" src="regexp.js"></script>
 <script type="text/javascript" src="splay.js"></script>
+<script type="text/javascript" src="navier-stokes.js"></script>
 <link type="text/css" rel="stylesheet" href="style.css" />
 <script type="text/javascript">
 var completed = 0;
@@ -117,6 +118,7 @@ higher scores means better performance: <em>Bigger is better!</em>
 (<i>1761 lines</i>).
 </li>
 <li><b>Splay</b><br>Data manipulation benchmark that deals with splay trees and
 exercises the automatic memory management subsystem (<i>394 lines</i>).</li>
+<li><b>NavierStokes (beta)</b><br>Solves NavierStokes equations in 2D, heavily manipulating double precision arrays.
Based on Oliver Hunt's code (<i>396 lines</i>).</li> </ul> <p> diff --git a/deps/v8/benchmarks/run.js b/deps/v8/benchmarks/run.js index da95fb498f..58f6265800 100644 --- a/deps/v8/benchmarks/run.js +++ b/deps/v8/benchmarks/run.js @@ -34,6 +34,7 @@ load('raytrace.js'); load('earley-boyer.js'); load('regexp.js'); load('splay.js'); +load('navier-stokes.js'); var success = true; diff --git a/deps/v8/build/common.gypi b/deps/v8/build/common.gypi index 3637082bc1..a9c279e3ac 100644 --- a/deps/v8/build/common.gypi +++ b/deps/v8/build/common.gypi @@ -83,7 +83,6 @@ 'v8_use_snapshot%': 'true', 'host_os%': '<(OS)', 'v8_use_liveobjectlist%': 'false', - 'werror%': '-Werror', # With post mortem support enabled, metadata is embedded into libv8 that # describes various parameters of the VM for use by debuggers. See @@ -305,8 +304,8 @@ 'cflags': [ '-I/usr/pkg/include' ], }], ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd"', { - 'cflags': [ '-Wall', '<(werror)', '-W', '-Wno-unused-parameter', - '-Wnon-virtual-dtor' ], + 'cflags': [ '<(werror)', '-W', '-Wno-unused-parameter', + '-Wnon-virtual-dtor', '-Woverloaded-virtual' ], }], ], }, # Debug @@ -352,6 +351,7 @@ }], # OS=="mac" ['OS=="win"', { 'msvs_configuration_attributes': { + 'OutputDirectory': '<(DEPTH)\\build\\$(ConfigurationName)', 'IntermediateDirectory': '$(OutDir)\\obj\\$(ProjectName)', 'CharacterSet': '1', }, diff --git a/deps/v8/build/standalone.gypi b/deps/v8/build/standalone.gypi index e9b056580d..add9d3ba53 100644 --- a/deps/v8/build/standalone.gypi +++ b/deps/v8/build/standalone.gypi @@ -61,7 +61,6 @@ 'host_arch%': '<(host_arch)', 'target_arch%': '<(target_arch)', 'v8_target_arch%': '<(v8_target_arch)', - 'werror%': '-Werror', 'conditions': [ ['(v8_target_arch=="arm" and host_arch!="arm") or \ (v8_target_arch=="mips" and host_arch!="mips") or \ @@ -84,7 +83,7 @@ ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris" \ or OS=="netbsd"', { 'target_defaults': { - 'cflags': [ '-Wall', '<(werror)', '-W', '-Wno-unused-parameter', + 'cflags': [ '-W', '-Wno-unused-parameter', '-Wnon-virtual-dtor', '-pthread', '-fno-rtti', '-fno-exceptions', '-pedantic' ], 'ldflags': [ '-pthread', ], diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h index 66a649d1e0..524fcb2b46 100644 --- a/deps/v8/include/v8.h +++ b/deps/v8/include/v8.h @@ -1198,7 +1198,7 @@ class String : public Primitive { * passed in as parameters. */ V8EXPORT static Local<String> Concat(Handle<String> left, - Handle<String>right); + Handle<String> right); /** * Creates a new external string using the data defined in the given @@ -3850,7 +3850,7 @@ class Internals { static const int kFullStringRepresentationMask = 0x07; static const int kExternalTwoByteRepresentationTag = 0x02; - static const int kJSObjectType = 0xa7; + static const int kJSObjectType = 0xa8; static const int kFirstNonstringType = 0x80; static const int kForeignType = 0x85; diff --git a/deps/v8/preparser/preparser-process.cc b/deps/v8/preparser/preparser-process.cc index b0aeb81e2a..368f63f6ce 100644 --- a/deps/v8/preparser/preparser-process.cc +++ b/deps/v8/preparser/preparser-process.cc @@ -200,12 +200,14 @@ void fail(v8::PreParserData* data, const char* message, ...) { vfprintf(stderr, message, args); va_end(args); fflush(stderr); - // Print preparser data to stdout. 
- uint32_t size = data->size(); - fprintf(stderr, "LOG: data size: %u\n", size); - if (!WriteBuffer(stdout, data->data(), size)) { - perror("ERROR: Writing data"); - fflush(stderr); + if (data != NULL) { + // Print preparser data to stdout. + uint32_t size = data->size(); + fprintf(stderr, "LOG: data size: %u\n", size); + if (!WriteBuffer(stdout, data->data(), size)) { + perror("ERROR: Writing data"); + fflush(stderr); + } } exit(EXIT_FAILURE); } diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc index 2f8f1d15da..67fded8eb0 100644 --- a/deps/v8/src/api.cc +++ b/deps/v8/src/api.cc @@ -42,6 +42,9 @@ #include "global-handles.h" #include "heap-profiler.h" #include "messages.h" +#ifdef COMPRESS_STARTUP_DATA_BZ2 +#include "natives.h" +#endif #include "parser.h" #include "platform.h" #include "profile-generator-inl.h" @@ -357,6 +360,7 @@ int StartupDataDecompressor::Decompress() { compressed_data[i].data = decompressed; } V8::SetDecompressedStartupData(compressed_data); + i::DeleteArray(compressed_data); return 0; } diff --git a/deps/v8/src/apinatives.js b/deps/v8/src/apinatives.js index e94da9f065..79b41dd88c 100644 --- a/deps/v8/src/apinatives.js +++ b/deps/v8/src/apinatives.js @@ -37,8 +37,8 @@ function CreateDate(time) { } -const kApiFunctionCache = {}; -const functionCache = kApiFunctionCache; +var kApiFunctionCache = {}; +var functionCache = kApiFunctionCache; function Instantiate(data, name) { diff --git a/deps/v8/src/arm/builtins-arm.cc b/deps/v8/src/arm/builtins-arm.cc index 50b6bce30b..993addca9e 100644 --- a/deps/v8/src/arm/builtins-arm.cc +++ b/deps/v8/src/arm/builtins-arm.cc @@ -313,7 +313,7 @@ static void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code) { Counters* counters = masm->isolate()->counters(); Label argc_one_or_more, argc_two_or_more, not_empty_array, empty_array, - has_non_smi_element; + has_non_smi_element, finish, cant_transition_map, not_double; // Check for array construction with zero arguments or one. __ cmp(r0, Operand(0, RelocInfo::NONE)); @@ -418,6 +418,8 @@ static void ArrayNativeCode(MacroAssembler* masm, __ bind(&entry); __ cmp(r4, r5); __ b(lt, &loop); + + __ bind(&finish); __ mov(sp, r7); // Remove caller arguments and receiver from the stack, setup return value and @@ -430,8 +432,39 @@ static void ArrayNativeCode(MacroAssembler* masm, __ Jump(lr); __ bind(&has_non_smi_element); + // Double values are handled by the runtime. + __ CheckMap( + r2, r9, Heap::kHeapNumberMapRootIndex, ¬_double, DONT_DO_SMI_CHECK); + __ bind(&cant_transition_map); __ UndoAllocationInNewSpace(r3, r4); __ b(call_generic_code); + + __ bind(¬_double); + // Transition FAST_SMI_ONLY_ELEMENTS to FAST_ELEMENTS. 
+ // r3: JSArray + __ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset)); + __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS, + FAST_ELEMENTS, + r2, + r9, + &cant_transition_map); + __ str(r2, FieldMemOperand(r3, HeapObject::kMapOffset)); + __ RecordWriteField(r3, + HeapObject::kMapOffset, + r2, + r9, + kLRHasNotBeenSaved, + kDontSaveFPRegs, + EMIT_REMEMBERED_SET, + OMIT_SMI_CHECK); + Label loop2; + __ sub(r7, r7, Operand(kPointerSize)); + __ bind(&loop2); + __ ldr(r2, MemOperand(r7, kPointerSize, PostIndex)); + __ str(r2, MemOperand(r5, -kPointerSize, PreIndex)); + __ cmp(r4, r5); + __ b(lt, &loop2); + __ b(&finish); } diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc index c65f5bdf84..62e6c80271 100644 --- a/deps/v8/src/arm/code-stubs-arm.cc +++ b/deps/v8/src/arm/code-stubs-arm.cc @@ -3439,6 +3439,11 @@ void StackCheckStub::Generate(MacroAssembler* masm) { } +void InterruptStub::Generate(MacroAssembler* masm) { + __ TailCallRuntime(Runtime::kInterrupt, 0, 1); +} + + void MathPowStub::Generate(MacroAssembler* masm) { CpuFeatures::Scope vfp3_scope(VFP3); const Register base = r1; @@ -3674,17 +3679,6 @@ void CEntryStub::GenerateAheadOfTime() { } -void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) { - __ Throw(r0); -} - - -void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm, - UncatchableExceptionType type) { - __ ThrowUncatchable(type, r0); -} - - void CEntryStub::GenerateCore(MacroAssembler* masm, Label* throw_normal_exception, Label* throw_termination_exception, @@ -3865,13 +3859,27 @@ void CEntryStub::Generate(MacroAssembler* masm) { true); __ bind(&throw_out_of_memory_exception); - GenerateThrowUncatchable(masm, OUT_OF_MEMORY); + // Set external caught exception to false. + Isolate* isolate = masm->isolate(); + ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress, + isolate); + __ mov(r0, Operand(false, RelocInfo::NONE)); + __ mov(r2, Operand(external_caught)); + __ str(r0, MemOperand(r2)); + + // Set pending exception and r0 to out of memory exception. + Failure* out_of_memory = Failure::OutOfMemoryException(); + __ mov(r0, Operand(reinterpret_cast<int32_t>(out_of_memory))); + __ mov(r2, Operand(ExternalReference(Isolate::kPendingExceptionAddress, + isolate))); + __ str(r0, MemOperand(r2)); + // Fall through to the next label. __ bind(&throw_termination_exception); - GenerateThrowUncatchable(masm, TERMINATION); + __ ThrowUncatchable(r0); __ bind(&throw_normal_exception); - GenerateThrowTOS(masm); + __ Throw(r0); } @@ -4912,10 +4920,10 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { Label termination_exception; __ b(eq, &termination_exception); - __ Throw(r0); // Expects thrown value in r0. + __ Throw(r0); __ bind(&termination_exception); - __ ThrowUncatchable(TERMINATION, r0); // Expects thrown value in r0. + __ ThrowUncatchable(r0); __ bind(&failure); // For failure and exception return null. @@ -7059,11 +7067,13 @@ struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = { { r2, r1, r3, EMIT_REMEMBERED_SET }, { r3, r1, r2, EMIT_REMEMBERED_SET }, // KeyedStoreStubCompiler::GenerateStoreFastElement. 
- { r4, r2, r3, EMIT_REMEMBERED_SET }, + { r3, r2, r4, EMIT_REMEMBERED_SET }, + { r2, r3, r4, EMIT_REMEMBERED_SET }, // ElementsTransitionGenerator::GenerateSmiOnlyToObject // and ElementsTransitionGenerator::GenerateSmiOnlyToDouble // and ElementsTransitionGenerator::GenerateDoubleToObject { r2, r3, r9, EMIT_REMEMBERED_SET }, + { r2, r3, r9, OMIT_REMEMBERED_SET }, // ElementsTransitionGenerator::GenerateDoubleToObject { r6, r2, r0, EMIT_REMEMBERED_SET }, { r2, r6, r9, EMIT_REMEMBERED_SET }, diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc index ce35b97c18..506f9b2d5d 100644 --- a/deps/v8/src/arm/codegen-arm.cc +++ b/deps/v8/src/arm/codegen-arm.cc @@ -90,11 +90,16 @@ void ElementsTransitionGenerator::GenerateSmiOnlyToDouble( // -- r3 : target map, scratch for subsequent call // -- r4 : scratch (elements) // ----------------------------------- - Label loop, entry, convert_hole, gc_required; + Label loop, entry, convert_hole, gc_required, only_change_map, done; bool vfp3_supported = CpuFeatures::IsSupported(VFP3); - __ push(lr); + // Check for empty arrays, which only require a map transition and no changes + // to the backing store. __ ldr(r4, FieldMemOperand(r2, JSObject::kElementsOffset)); + __ CompareRoot(r4, Heap::kEmptyFixedArrayRootIndex); + __ b(eq, &only_change_map); + + __ push(lr); __ ldr(r5, FieldMemOperand(r4, FixedArray::kLengthOffset)); // r4: source FixedArray // r5: number of elements (smi-tagged) @@ -117,7 +122,7 @@ void ElementsTransitionGenerator::GenerateSmiOnlyToDouble( r9, kLRHasBeenSaved, kDontSaveFPRegs, - EMIT_REMEMBERED_SET, + OMIT_REMEMBERED_SET, OMIT_SMI_CHECK); // Replace receiver's backing store with newly created FixedDoubleArray. __ add(r3, r6, Operand(kHeapObjectTag)); @@ -146,6 +151,18 @@ void ElementsTransitionGenerator::GenerateSmiOnlyToDouble( __ b(&entry); + __ bind(&only_change_map); + __ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset)); + __ RecordWriteField(r2, + HeapObject::kMapOffset, + r3, + r9, + kLRHasBeenSaved, + kDontSaveFPRegs, + OMIT_REMEMBERED_SET, + OMIT_SMI_CHECK); + __ b(&done); + // Call into runtime if GC is required. __ bind(&gc_required); __ pop(lr); @@ -194,6 +211,7 @@ void ElementsTransitionGenerator::GenerateSmiOnlyToDouble( if (!vfp3_supported) __ Pop(r1, r0); __ pop(lr); + __ bind(&done); } @@ -207,10 +225,15 @@ void ElementsTransitionGenerator::GenerateDoubleToObject( // -- r3 : target map, scratch for subsequent call // -- r4 : scratch (elements) // ----------------------------------- - Label entry, loop, convert_hole, gc_required; + Label entry, loop, convert_hole, gc_required, only_change_map; - __ push(lr); + // Check for empty arrays, which only require a map transition and no changes + // to the backing store. __ ldr(r4, FieldMemOperand(r2, JSObject::kElementsOffset)); + __ CompareRoot(r4, Heap::kEmptyFixedArrayRootIndex); + __ b(eq, &only_change_map); + + __ push(lr); __ Push(r3, r2, r1, r0); __ ldr(r5, FieldMemOperand(r4, FixedArray::kLengthOffset)); // r4: source FixedDoubleArray @@ -280,16 +303,6 @@ void ElementsTransitionGenerator::GenerateDoubleToObject( __ b(lt, &loop); __ Pop(r3, r2, r1, r0); - // Update receiver's map. - __ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset)); - __ RecordWriteField(r2, - HeapObject::kMapOffset, - r3, - r9, - kLRHasBeenSaved, - kDontSaveFPRegs, - EMIT_REMEMBERED_SET, - OMIT_SMI_CHECK); // Replace receiver's backing store with newly created and filled FixedArray. 
__ str(r6, FieldMemOperand(r2, JSObject::kElementsOffset)); __ RecordWriteField(r2, @@ -301,6 +314,18 @@ void ElementsTransitionGenerator::GenerateDoubleToObject( EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); __ pop(lr); + + __ bind(&only_change_map); + // Update receiver's map. + __ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset)); + __ RecordWriteField(r2, + HeapObject::kMapOffset, + r3, + r9, + kLRHasNotBeenSaved, + kDontSaveFPRegs, + OMIT_REMEMBERED_SET, + OMIT_SMI_CHECK); } diff --git a/deps/v8/src/arm/full-codegen-arm.cc b/deps/v8/src/arm/full-codegen-arm.cc index 2adddef111..b48e842be7 100644 --- a/deps/v8/src/arm/full-codegen-arm.cc +++ b/deps/v8/src/arm/full-codegen-arm.cc @@ -123,10 +123,8 @@ class JumpPatchSite BASE_EMBEDDED { // // The function builds a JS frame. Please see JavaScriptFrameConstants in // frames-arm.h for its layout. -void FullCodeGenerator::Generate(CompilationInfo* info) { - ASSERT(info_ == NULL); - info_ = info; - scope_ = info->scope(); +void FullCodeGenerator::Generate() { + CompilationInfo* info = info_; handler_table_ = isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED); SetFunctionPosition(function()); @@ -142,7 +140,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info) { // We can optionally optimize based on counters rather than statistical // sampling. if (info->ShouldSelfOptimize()) { - if (FLAG_trace_opt) { + if (FLAG_trace_opt_verbose) { PrintF("[adding self-optimization header to %s]\n", *info->function()->debug_name()->ToCString()); } @@ -331,7 +329,8 @@ void FullCodeGenerator::ClearAccumulator() { } -void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt) { +void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt, + Label* back_edge_target) { Comment cmnt(masm_, "[ Stack check"); Label ok; __ LoadRoot(ip, Heap::kStackLimitRootIndex); @@ -935,6 +934,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) { __ cmp(r0, null_value); __ b(eq, &exit); + PrepareForBailoutForId(stmt->PrepareId(), TOS_REG); + // Convert the object to a JS object. Label convert, done_convert; __ JumpIfSmi(r0, &convert); @@ -956,48 +957,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) { // the JSObject::IsSimpleEnum cache validity checks. If we cannot // guarantee cache validity, call the runtime system to check cache // validity or get the property names in a fixed array. - Label next; - // Preload a couple of values used in the loop. - Register empty_fixed_array_value = r6; - __ LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex); - Register empty_descriptor_array_value = r7; - __ LoadRoot(empty_descriptor_array_value, - Heap::kEmptyDescriptorArrayRootIndex); - __ mov(r1, r0); - __ bind(&next); - - // Check that there are no elements. Register r1 contains the - // current JS object we've reached through the prototype chain. - __ ldr(r2, FieldMemOperand(r1, JSObject::kElementsOffset)); - __ cmp(r2, empty_fixed_array_value); - __ b(ne, &call_runtime); - - // Check that instance descriptors are not empty so that we can - // check for an enum cache. Leave the map in r2 for the subsequent - // prototype load. - __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset)); - __ ldr(r3, FieldMemOperand(r2, Map::kInstanceDescriptorsOrBitField3Offset)); - __ JumpIfSmi(r3, &call_runtime); - - // Check that there is an enum cache in the non-empty instance - // descriptors (r3). This is the case if the next enumeration - // index field does not contain a smi. 
- __ ldr(r3, FieldMemOperand(r3, DescriptorArray::kEnumerationIndexOffset)); - __ JumpIfSmi(r3, &call_runtime); - - // For all objects but the receiver, check that the cache is empty. - Label check_prototype; - __ cmp(r1, r0); - __ b(eq, &check_prototype); - __ ldr(r3, FieldMemOperand(r3, DescriptorArray::kEnumCacheBridgeCacheOffset)); - __ cmp(r3, empty_fixed_array_value); - __ b(ne, &call_runtime); - - // Load the prototype from the map and loop if non-null. - __ bind(&check_prototype); - __ ldr(r1, FieldMemOperand(r2, Map::kPrototypeOffset)); - __ cmp(r1, null_value); - __ b(ne, &next); + __ CheckEnumCache(null_value, &call_runtime); // The enum cache is valid. Load the map of the object being // iterated over and use the cache for the iteration. @@ -1050,6 +1010,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) { __ Push(r1, r0); // Fixed array length (as smi) and initial index. // Generate code for doing the condition check. + PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS); __ bind(&loop); // Load the current count to r0, load the length to r1. __ Ldrd(r0, r1, MemOperand(sp, 0 * kPointerSize)); @@ -1093,7 +1054,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) { __ mov(result_register(), r3); // Perform the assignment as if via '='. { EffectContext context(this); - EmitAssignment(stmt->each(), stmt->AssignmentId()); + EmitAssignment(stmt->each()); } // Generate code for the body of the loop. @@ -1106,7 +1067,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) { __ add(r0, r0, Operand(Smi::FromInt(1))); __ push(r0); - EmitStackCheck(stmt); + EmitStackCheck(stmt, &loop); __ b(&loop); // Remove the pointers stored on the stack. @@ -1114,6 +1075,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) { __ Drop(5); // Exit and decrement the loop depth. + PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS); __ bind(&exit); decrement_loop_depth(); } @@ -1524,7 +1486,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) { Smi::FromInt(0))); __ push(r1); VisitForStackValue(value); - __ CallRuntime(Runtime::kDefineAccessor, 4); + __ mov(r0, Operand(Smi::FromInt(NONE))); + __ push(r0); + __ CallRuntime(Runtime::kDefineOrRedefineAccessorProperty, 5); break; } } @@ -1875,7 +1839,7 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, } -void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) { +void FullCodeGenerator::EmitAssignment(Expression* expr) { // Invalid left-hand sides are rewritten to have a 'throw // ReferenceError' on the left-hand side. 
if (!expr->IsValidLeftHandSide()) { @@ -1927,7 +1891,6 @@ void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) { break; } } - PrepareForBailoutForId(bailout_ast_id, TOS_REG); context()->Plug(r0); } diff --git a/deps/v8/src/arm/lithium-arm.cc b/deps/v8/src/arm/lithium-arm.cc index 1111c67faf..a934aacd36 100644 --- a/deps/v8/src/arm/lithium-arm.cc +++ b/deps/v8/src/arm/lithium-arm.cc @@ -1125,6 +1125,11 @@ LInstruction* LChunkBuilder::DoOuterContext(HOuterContext* instr) { } +LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) { + return MarkAsCall(new LDeclareGlobals, instr); +} + + LInstruction* LChunkBuilder::DoGlobalObject(HGlobalObject* instr) { LOperand* context = UseRegisterAtStart(instr->value()); return DefineAsRegister(new LGlobalObject(context)); @@ -2088,19 +2093,18 @@ LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) { } -LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) { - return MarkAsCall(DefineFixed(new LArrayLiteral, r0), instr); +LInstruction* LChunkBuilder::DoFastLiteral(HFastLiteral* instr) { + return MarkAsCall(DefineFixed(new LFastLiteral, r0), instr); } -LInstruction* LChunkBuilder::DoObjectLiteralFast(HObjectLiteralFast* instr) { - return MarkAsCall(DefineFixed(new LObjectLiteralFast, r0), instr); +LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) { + return MarkAsCall(DefineFixed(new LArrayLiteral, r0), instr); } -LInstruction* LChunkBuilder::DoObjectLiteralGeneric( - HObjectLiteralGeneric* instr) { - return MarkAsCall(DefineFixed(new LObjectLiteralGeneric, r0), instr); +LInstruction* LChunkBuilder::DoObjectLiteral(HObjectLiteral* instr) { + return MarkAsCall(DefineFixed(new LObjectLiteral, r0), instr); } @@ -2264,4 +2268,32 @@ LInstruction* LChunkBuilder::DoIn(HIn* instr) { } +LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) { + LOperand* object = UseFixed(instr->enumerable(), r0); + LForInPrepareMap* result = new LForInPrepareMap(object); + return MarkAsCall(DefineFixed(result, r0), instr, CAN_DEOPTIMIZE_EAGERLY); +} + + +LInstruction* LChunkBuilder::DoForInCacheArray(HForInCacheArray* instr) { + LOperand* map = UseRegister(instr->map()); + return AssignEnvironment(DefineAsRegister( + new LForInCacheArray(map))); +} + + +LInstruction* LChunkBuilder::DoCheckMapValue(HCheckMapValue* instr) { + LOperand* value = UseRegisterAtStart(instr->value()); + LOperand* map = UseRegisterAtStart(instr->map()); + return AssignEnvironment(new LCheckMapValue(value, map)); +} + + +LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) { + LOperand* object = UseRegister(instr->object()); + LOperand* index = UseRegister(instr->index()); + return DefineAsRegister(new LLoadFieldByIndex(object, index)); +} + + } } // namespace v8::internal diff --git a/deps/v8/src/arm/lithium-arm.h b/deps/v8/src/arm/lithium-arm.h index 45043593bd..1846922dbc 100644 --- a/deps/v8/src/arm/lithium-arm.h +++ b/deps/v8/src/arm/lithium-arm.h @@ -87,11 +87,13 @@ class LCodeGen; V(ConstantI) \ V(ConstantT) \ V(Context) \ + V(DeclareGlobals) \ V(DeleteProperty) \ V(Deoptimize) \ V(DivI) \ V(DoubleToI) \ V(ElementsKind) \ + V(FastLiteral) \ V(FixedArrayBaseLength) \ V(FunctionLiteral) \ V(GetCachedArrayIndex) \ @@ -134,8 +136,7 @@ class LCodeGen; V(NumberTagD) \ V(NumberTagI) \ V(NumberUntagD) \ - V(ObjectLiteralFast) \ - V(ObjectLiteralGeneric) \ + V(ObjectLiteral) \ V(OsrEntry) \ V(OuterContext) \ V(Parameter) \ @@ -171,7 +172,12 @@ class LCodeGen; V(TypeofIsAndBranch) \ 
V(UnaryMathOperation) \ V(UnknownOSRValue) \ - V(ValueOf) + V(ValueOf) \ + V(ForInPrepareMap) \ + V(ForInCacheArray) \ + V(CheckMapValue) \ + V(LoadFieldByIndex) + #define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \ @@ -1346,6 +1352,13 @@ class LOuterContext: public LTemplateInstruction<1, 1, 0> { }; +class LDeclareGlobals: public LTemplateInstruction<0, 0, 0> { + public: + DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals, "declare-globals") + DECLARE_HYDROGEN_ACCESSOR(DeclareGlobals) +}; + + class LGlobalObject: public LTemplateInstruction<1, 1, 0> { public: explicit LGlobalObject(LOperand* context) { @@ -1909,24 +1922,24 @@ class LClampTToUint8: public LTemplateInstruction<1, 1, 1> { }; -class LArrayLiteral: public LTemplateInstruction<1, 0, 0> { +class LFastLiteral: public LTemplateInstruction<1, 0, 0> { public: - DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral, "array-literal") - DECLARE_HYDROGEN_ACCESSOR(ArrayLiteral) + DECLARE_CONCRETE_INSTRUCTION(FastLiteral, "fast-literal") + DECLARE_HYDROGEN_ACCESSOR(FastLiteral) }; -class LObjectLiteralFast: public LTemplateInstruction<1, 0, 0> { +class LArrayLiteral: public LTemplateInstruction<1, 0, 0> { public: - DECLARE_CONCRETE_INSTRUCTION(ObjectLiteralFast, "object-literal-fast") - DECLARE_HYDROGEN_ACCESSOR(ObjectLiteralFast) + DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral, "array-literal") + DECLARE_HYDROGEN_ACCESSOR(ArrayLiteral) }; -class LObjectLiteralGeneric: public LTemplateInstruction<1, 0, 0> { +class LObjectLiteral: public LTemplateInstruction<1, 0, 0> { public: - DECLARE_CONCRETE_INSTRUCTION(ObjectLiteralGeneric, "object-literal-generic") - DECLARE_HYDROGEN_ACCESSOR(ObjectLiteralGeneric) + DECLARE_CONCRETE_INSTRUCTION(ObjectLiteral, "object-literal") + DECLARE_HYDROGEN_ACCESSOR(ObjectLiteral) }; @@ -2056,6 +2069,62 @@ class LIn: public LTemplateInstruction<1, 2, 0> { }; +class LForInPrepareMap: public LTemplateInstruction<1, 1, 0> { + public: + explicit LForInPrepareMap(LOperand* object) { + inputs_[0] = object; + } + + LOperand* object() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(ForInPrepareMap, "for-in-prepare-map") +}; + + +class LForInCacheArray: public LTemplateInstruction<1, 1, 0> { + public: + explicit LForInCacheArray(LOperand* map) { + inputs_[0] = map; + } + + LOperand* map() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(ForInCacheArray, "for-in-cache-array") + + int idx() { + return HForInCacheArray::cast(this->hydrogen_value())->idx(); + } +}; + + +class LCheckMapValue: public LTemplateInstruction<0, 2, 0> { + public: + LCheckMapValue(LOperand* value, LOperand* map) { + inputs_[0] = value; + inputs_[1] = map; + } + + LOperand* value() { return inputs_[0]; } + LOperand* map() { return inputs_[1]; } + + DECLARE_CONCRETE_INSTRUCTION(CheckMapValue, "check-map-value") +}; + + +class LLoadFieldByIndex: public LTemplateInstruction<1, 2, 0> { + public: + LLoadFieldByIndex(LOperand* object, LOperand* index) { + inputs_[0] = object; + inputs_[1] = index; + } + + LOperand* object() { return inputs_[0]; } + LOperand* index() { return inputs_[1]; } + + DECLARE_CONCRETE_INSTRUCTION(LoadFieldByIndex, "load-field-by-index") +}; + + class LChunkBuilder; class LChunk: public ZoneObject { public: diff --git a/deps/v8/src/arm/lithium-codegen-arm.cc b/deps/v8/src/arm/lithium-codegen-arm.cc index 64ca1a37bf..8045556406 100644 --- a/deps/v8/src/arm/lithium-codegen-arm.cc +++ b/deps/v8/src/arm/lithium-codegen-arm.cc @@ -2873,6 +2873,16 @@ void LCodeGen::DoOuterContext(LOuterContext* instr) { } +void 
LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) { + __ push(cp); // The context is the first argument. + __ LoadHeapObject(scratch0(), instr->hydrogen()->pairs()); + __ push(scratch0()); + __ mov(scratch0(), Operand(Smi::FromInt(instr->hydrogen()->flags()))); + __ push(scratch0()); + CallRuntime(Runtime::kDeclareGlobals, 3, instr); +} + + void LCodeGen::DoGlobalObject(LGlobalObject* instr) { Register result = ToRegister(instr->result()); __ ldr(result, ContextOperand(cp, Context::GLOBAL_INDEX)); @@ -4370,26 +4380,35 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object, ASSERT(!source.is(r2)); ASSERT(!result.is(r2)); + // Only elements backing stores for non-COW arrays need to be copied. + Handle<FixedArrayBase> elements(object->elements()); + bool has_elements = elements->length() > 0 && + elements->map() != isolate()->heap()->fixed_cow_array_map(); + // Increase the offset so that subsequent objects end up right after - // this one. - int current_offset = *offset; - int size = object->map()->instance_size(); - *offset += size; + // this object and its backing store. + int object_offset = *offset; + int object_size = object->map()->instance_size(); + int elements_offset = *offset + object_size; + int elements_size = has_elements ? elements->Size() : 0; + *offset += object_size + elements_size; // Copy object header. ASSERT(object->properties()->length() == 0); - ASSERT(object->elements()->length() == 0 || - object->elements()->map() == isolate()->heap()->fixed_cow_array_map()); int inobject_properties = object->map()->inobject_properties(); - int header_size = size - inobject_properties * kPointerSize; + int header_size = object_size - inobject_properties * kPointerSize; for (int i = 0; i < header_size; i += kPointerSize) { - __ ldr(r2, FieldMemOperand(source, i)); - __ str(r2, FieldMemOperand(result, current_offset + i)); + if (has_elements && i == JSObject::kElementsOffset) { + __ add(r2, result, Operand(elements_offset)); + } else { + __ ldr(r2, FieldMemOperand(source, i)); + } + __ str(r2, FieldMemOperand(result, object_offset + i)); } // Copy in-object properties. for (int i = 0; i < inobject_properties; i++) { - int total_offset = current_offset + object->GetInObjectPropertyOffset(i); + int total_offset = object_offset + object->GetInObjectPropertyOffset(i); Handle<Object> value = Handle<Object>(object->InObjectPropertyAt(i)); if (value->IsJSObject()) { Handle<JSObject> value_object = Handle<JSObject>::cast(value); @@ -4405,10 +4424,41 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object, __ str(r2, FieldMemOperand(result, total_offset)); } } + + // Copy elements backing store header. + ASSERT(!has_elements || elements->IsFixedArray()); + if (has_elements) { + __ LoadHeapObject(source, elements); + for (int i = 0; i < FixedArray::kHeaderSize; i += kPointerSize) { + __ ldr(r2, FieldMemOperand(source, i)); + __ str(r2, FieldMemOperand(result, elements_offset + i)); + } + } + + // Copy elements backing store content. + ASSERT(!has_elements || elements->IsFixedArray()); + int elements_length = has_elements ? 
elements->length() : 0; + for (int i = 0; i < elements_length; i++) { + int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i); + Handle<Object> value = JSObject::GetElement(object, i); + if (value->IsJSObject()) { + Handle<JSObject> value_object = Handle<JSObject>::cast(value); + __ add(r2, result, Operand(*offset)); + __ str(r2, FieldMemOperand(result, total_offset)); + __ LoadHeapObject(source, value_object); + EmitDeepCopy(value_object, result, source, offset); + } else if (value->IsHeapObject()) { + __ LoadHeapObject(r2, Handle<HeapObject>::cast(value)); + __ str(r2, FieldMemOperand(result, total_offset)); + } else { + __ mov(r2, Operand(value)); + __ str(r2, FieldMemOperand(result, total_offset)); + } + } } -void LCodeGen::DoObjectLiteralFast(LObjectLiteralFast* instr) { +void LCodeGen::DoFastLiteral(LFastLiteral* instr) { int size = instr->hydrogen()->total_size(); // Allocate all objects that are part of the literal in one big @@ -4430,12 +4480,13 @@ void LCodeGen::DoObjectLiteralFast(LObjectLiteralFast* instr) { } -void LCodeGen::DoObjectLiteralGeneric(LObjectLiteralGeneric* instr) { +void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) { + Handle<FixedArray> literals(instr->environment()->closure()->literals()); Handle<FixedArray> constant_properties = instr->hydrogen()->constant_properties(); - __ ldr(r4, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); - __ ldr(r4, FieldMemOperand(r4, JSFunction::kLiteralsOffset)); + // Set up the parameters to the stub/runtime call. + __ LoadHeapObject(r4, literals); __ mov(r3, Operand(Smi::FromInt(instr->hydrogen()->literal_index()))); __ mov(r2, Operand(constant_properties)); int flags = instr->hydrogen()->fast_elements() @@ -4444,7 +4495,7 @@ void LCodeGen::DoObjectLiteralGeneric(LObjectLiteralGeneric* instr) { __ mov(r1, Operand(Smi::FromInt(flags))); __ Push(r4, r3, r2, r1); - // Pick the right runtime function to call. + // Pick the right runtime function or stub to call. int properties_count = constant_properties->length() / 2; if (instr->hydrogen()->depth() > 1) { CallRuntime(Runtime::kCreateObjectLiteral, 4, instr); @@ -4799,6 +4850,88 @@ void LCodeGen::DoOsrEntry(LOsrEntry* instr) { } +void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) { + __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); + __ cmp(r0, ip); + DeoptimizeIf(eq, instr->environment()); + + Register null_value = r5; + __ LoadRoot(null_value, Heap::kNullValueRootIndex); + __ cmp(r0, null_value); + DeoptimizeIf(eq, instr->environment()); + + __ tst(r0, Operand(kSmiTagMask)); + DeoptimizeIf(eq, instr->environment()); + + STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE); + __ CompareObjectType(r0, r1, r1, LAST_JS_PROXY_TYPE); + DeoptimizeIf(le, instr->environment()); + + Label use_cache, call_runtime; + __ CheckEnumCache(null_value, &call_runtime); + + __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset)); + __ b(&use_cache); + + // Get the set of properties to enumerate. 
+ __ bind(&call_runtime); + __ push(r0); + CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr); + + __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset)); + __ LoadRoot(ip, Heap::kMetaMapRootIndex); + __ cmp(r1, ip); + DeoptimizeIf(ne, instr->environment()); + __ bind(&use_cache); +} + + +void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) { + Register map = ToRegister(instr->map()); + Register result = ToRegister(instr->result()); + __ LoadInstanceDescriptors(map, result); + __ ldr(result, + FieldMemOperand(result, DescriptorArray::kEnumerationIndexOffset)); + __ ldr(result, + FieldMemOperand(result, FixedArray::SizeFor(instr->idx()))); + __ cmp(result, Operand(0)); + DeoptimizeIf(eq, instr->environment()); +} + + +void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) { + Register object = ToRegister(instr->value()); + Register map = ToRegister(instr->map()); + __ ldr(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset)); + __ cmp(map, scratch0()); + DeoptimizeIf(ne, instr->environment()); +} + + +void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) { + Register object = ToRegister(instr->object()); + Register index = ToRegister(instr->index()); + Register result = ToRegister(instr->result()); + Register scratch = scratch0(); + + Label out_of_object, done; + __ cmp(index, Operand(0)); + __ b(lt, &out_of_object); + + STATIC_ASSERT(kPointerSizeLog2 > kSmiTagSize); + __ add(scratch, object, Operand(index, LSL, kPointerSizeLog2 - kSmiTagSize)); + __ ldr(result, FieldMemOperand(scratch, JSObject::kHeaderSize)); + + __ b(&done); + + __ bind(&out_of_object); + __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset)); + // Index is equal to negated out of object property index plus 1. + __ sub(scratch, result, Operand(index, LSL, kPointerSizeLog2 - kSmiTagSize)); + __ ldr(result, FieldMemOperand(scratch, + FixedArray::kHeaderSize - kPointerSize)); + __ bind(&done); +} #undef __ diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc index 2f0e5fa459..45dd80ffb7 100644 --- a/deps/v8/src/arm/macro-assembler-arm.cc +++ b/deps/v8/src/arm/macro-assembler-arm.cc @@ -1281,8 +1281,7 @@ void MacroAssembler::Throw(Register value) { } -void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type, - Register value) { +void MacroAssembler::ThrowUncatchable(Register value) { // Adjust this code if not the case. STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize); STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize); @@ -1292,24 +1291,9 @@ void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type, STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize); // The exception is expected in r0. - if (type == OUT_OF_MEMORY) { - // Set external caught exception to false. - ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress, - isolate()); - mov(r0, Operand(false, RelocInfo::NONE)); - mov(r2, Operand(external_caught)); - str(r0, MemOperand(r2)); - - // Set pending exception and r0 to out of memory exception. - Failure* out_of_memory = Failure::OutOfMemoryException(); - mov(r0, Operand(reinterpret_cast<int32_t>(out_of_memory))); - mov(r2, Operand(ExternalReference(Isolate::kPendingExceptionAddress, - isolate()))); - str(r0, MemOperand(r2)); - } else if (!value.is(r0)) { + if (!value.is(r0)) { mov(r0, value); } - // Drop the stack pointer to the top of the top stack handler. 
mov(r3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate()))); ldr(sp, MemOperand(r3)); @@ -3680,6 +3664,52 @@ void MacroAssembler::LoadInstanceDescriptors(Register map, } +void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) { + Label next; + // Preload a couple of values used in the loop. + Register empty_fixed_array_value = r6; + LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex); + Register empty_descriptor_array_value = r7; + LoadRoot(empty_descriptor_array_value, + Heap::kEmptyDescriptorArrayRootIndex); + mov(r1, r0); + bind(&next); + + // Check that there are no elements. Register r1 contains the + // current JS object we've reached through the prototype chain. + ldr(r2, FieldMemOperand(r1, JSObject::kElementsOffset)); + cmp(r2, empty_fixed_array_value); + b(ne, call_runtime); + + // Check that instance descriptors are not empty so that we can + // check for an enum cache. Leave the map in r2 for the subsequent + // prototype load. + ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset)); + ldr(r3, FieldMemOperand(r2, Map::kInstanceDescriptorsOrBitField3Offset)); + JumpIfSmi(r3, call_runtime); + + // Check that there is an enum cache in the non-empty instance + // descriptors (r3). This is the case if the next enumeration + // index field does not contain a smi. + ldr(r3, FieldMemOperand(r3, DescriptorArray::kEnumerationIndexOffset)); + JumpIfSmi(r3, call_runtime); + + // For all objects but the receiver, check that the cache is empty. + Label check_prototype; + cmp(r1, r0); + b(eq, &check_prototype); + ldr(r3, FieldMemOperand(r3, DescriptorArray::kEnumCacheBridgeCacheOffset)); + cmp(r3, empty_fixed_array_value); + b(ne, call_runtime); + + // Load the prototype from the map and loop if non-null. + bind(&check_prototype); + ldr(r1, FieldMemOperand(r2, Map::kPrototypeOffset)); + cmp(r1, null_value); + b(ne, &next); +} + + bool AreAliased(Register r1, Register r2, Register r3, Register r4) { if (r1.is(r2)) return true; if (r1.is(r3)) return true; diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h index 45cca9042a..47afa93a6e 100644 --- a/deps/v8/src/arm/macro-assembler-arm.h +++ b/deps/v8/src/arm/macro-assembler-arm.h @@ -588,12 +588,12 @@ class MacroAssembler: public Assembler { // Must preserve the result register. void PopTryHandler(); - // Passes thrown value (in r0) to the handler of top of the try handler chain. + // Passes thrown value to the handler of top of the try handler chain. void Throw(Register value); // Propagates an uncatchable exception to the top of the current JS stack's // handler chain. - void ThrowUncatchable(UncatchableExceptionType type, Register value); + void ThrowUncatchable(Register value); // --------------------------------------------------------------------------- // Inline caching support @@ -1259,6 +1259,10 @@ class MacroAssembler: public Assembler { void EnterFrame(StackFrame::Type type); void LeaveFrame(StackFrame::Type type); + // Expects object in r0 and returns map with validated enum cache + // in r0. Assumes that any other register can be used as a scratch. 
+ void CheckEnumCache(Register null_value, Label* call_runtime); + private: void CallCFunctionHelper(Register function, int num_reg_arguments, diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc index 1ae172c008..629c209ea2 100644 --- a/deps/v8/src/arm/simulator-arm.cc +++ b/deps/v8/src/arm/simulator-arm.cc @@ -1277,9 +1277,9 @@ void Simulator::WriteDW(int32_t addr, int32_t value1, int32_t value2) { // Returns the limit of the stack area to enable checking for stack overflows. uintptr_t Simulator::StackLimit() const { - // Leave a safety margin of 512 bytes to prevent overrunning the stack when + // Leave a safety margin of 1024 bytes to prevent overrunning the stack when // pushing values. - return reinterpret_cast<uintptr_t>(stack_) + 512; + return reinterpret_cast<uintptr_t>(stack_) + 1024; } diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/arm/stub-cache-arm.cc index 2f2c5a838d..9a0793e12f 100644 --- a/deps/v8/src/arm/stub-cache-arm.cc +++ b/deps/v8/src/arm/stub-cache-arm.cc @@ -3076,7 +3076,7 @@ Handle<Code> KeyedStoreStubCompiler::CompileStoreElement( ElementsKind elements_kind = receiver_map->elements_kind(); bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE; Handle<Code> stub = - KeyedStoreElementStub(is_js_array, elements_kind).GetCode(); + KeyedStoreElementStub(is_js_array, elements_kind, grow_mode_).GetCode(); __ DispatchMap(r2, r3, receiver_map, stub, DO_SMI_CHECK); @@ -4121,7 +4121,8 @@ void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement( void KeyedStoreStubCompiler::GenerateStoreFastElement( MacroAssembler* masm, bool is_js_array, - ElementsKind elements_kind) { + ElementsKind elements_kind, + KeyedAccessGrowMode grow_mode) { // ----------- S t a t e ------------- // -- r0 : value // -- r1 : key @@ -4130,13 +4131,16 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement( // -- r3 : scratch // -- r4 : scratch (elements) // ----------------------------------- - Label miss_force_generic, transition_elements_kind; + Label miss_force_generic, transition_elements_kind, grow, slow; + Label finish_store, check_capacity; Register value_reg = r0; Register key_reg = r1; Register receiver_reg = r2; - Register scratch = r3; - Register elements_reg = r4; + Register scratch = r4; + Register elements_reg = r3; + Register length_reg = r5; + Register scratch2 = r6; // This stub is meant to be tail-jumped to, the receiver must already // have been verified by the caller to not be a smi. @@ -4144,16 +4148,13 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement( // Check that the key is a smi. __ JumpIfNotSmi(key_reg, &miss_force_generic); - // Get the elements array and make sure it is a fast element array, not 'cow'. - __ ldr(elements_reg, - FieldMemOperand(receiver_reg, JSObject::kElementsOffset)); - __ CheckMap(elements_reg, - scratch, - Heap::kFixedArrayMapRootIndex, - &miss_force_generic, - DONT_DO_SMI_CHECK); + if (elements_kind == FAST_SMI_ONLY_ELEMENTS) { + __ JumpIfNotSmi(value_reg, &transition_elements_kind); + } // Check that the key is within bounds. + __ ldr(elements_reg, + FieldMemOperand(receiver_reg, JSObject::kElementsOffset)); if (is_js_array) { __ ldr(scratch, FieldMemOperand(receiver_reg, JSArray::kLengthOffset)); } else { @@ -4161,10 +4162,21 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement( } // Compare smis. 
__ cmp(key_reg, scratch); - __ b(hs, &miss_force_generic); + if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) { + __ b(hs, &grow); + } else { + __ b(hs, &miss_force_generic); + } + // Make sure elements is a fast element array, not 'cow'. + __ CheckMap(elements_reg, + scratch, + Heap::kFixedArrayMapRootIndex, + &miss_force_generic, + DONT_DO_SMI_CHECK); + + __ bind(&finish_store); if (elements_kind == FAST_SMI_ONLY_ELEMENTS) { - __ JumpIfNotSmi(value_reg, &transition_elements_kind); __ add(scratch, elements_reg, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); @@ -4202,12 +4214,80 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement( __ bind(&transition_elements_kind); Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss(); __ Jump(ic_miss, RelocInfo::CODE_TARGET); + + if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) { + // Grow the array by a single element if possible. + __ bind(&grow); + + // Make sure the array is only growing by a single element, anything else + // must be handled by the runtime. Flags already set by previous compare. + __ b(ne, &miss_force_generic); + + // Check for the empty array, and preallocate a small backing store if + // possible. + __ ldr(length_reg, + FieldMemOperand(receiver_reg, JSArray::kLengthOffset)); + __ ldr(elements_reg, + FieldMemOperand(receiver_reg, JSObject::kElementsOffset)); + __ CompareRoot(elements_reg, Heap::kEmptyFixedArrayRootIndex); + __ b(ne, &check_capacity); + + int size = FixedArray::SizeFor(JSArray::kPreallocatedArrayElements); + __ AllocateInNewSpace(size, elements_reg, scratch, scratch2, &slow, + TAG_OBJECT); + + __ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex); + __ str(scratch, FieldMemOperand(elements_reg, JSObject::kMapOffset)); + __ mov(scratch, Operand(Smi::FromInt(JSArray::kPreallocatedArrayElements))); + __ str(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset)); + __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex); + for (int i = 1; i < JSArray::kPreallocatedArrayElements; ++i) { + __ str(scratch, FieldMemOperand(elements_reg, FixedArray::SizeFor(i))); + } + + // Store the element at index zero. + __ str(value_reg, FieldMemOperand(elements_reg, FixedArray::SizeFor(0))); + + // Install the new backing store in the JSArray. + __ str(elements_reg, + FieldMemOperand(receiver_reg, JSObject::kElementsOffset)); + __ RecordWriteField(receiver_reg, JSObject::kElementsOffset, elements_reg, + scratch, kLRHasNotBeenSaved, kDontSaveFPRegs, + EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); + + // Increment the length of the array. + __ mov(length_reg, Operand(Smi::FromInt(1))); + __ str(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset)); + __ Ret(); + + __ bind(&check_capacity); + // Check for cow elements, in general they are not handled by this stub + __ CheckMap(elements_reg, + scratch, + Heap::kFixedCOWArrayMapRootIndex, + &miss_force_generic, + DONT_DO_SMI_CHECK); + + __ ldr(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset)); + __ cmp(length_reg, scratch); + __ b(hs, &slow); + + // Grow the array and finish the store. 
+ __ add(length_reg, length_reg, Operand(Smi::FromInt(1))); + __ str(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset)); + __ jmp(&finish_store); + + __ bind(&slow); + Handle<Code> ic_slow = masm->isolate()->builtins()->KeyedStoreIC_Slow(); + __ Jump(ic_slow, RelocInfo::CODE_TARGET); + } } void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( MacroAssembler* masm, - bool is_js_array) { + bool is_js_array, + KeyedAccessGrowMode grow_mode) { // ----------- S t a t e ------------- // -- r0 : value // -- r1 : key @@ -4217,7 +4297,8 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( // -- r4 : scratch // -- r5 : scratch // ----------------------------------- - Label miss_force_generic, transition_elements_kind; + Label miss_force_generic, transition_elements_kind, grow, slow; + Label finish_store, check_capacity; Register value_reg = r0; Register key_reg = r1; @@ -4227,6 +4308,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( Register scratch2 = r5; Register scratch3 = r6; Register scratch4 = r7; + Register length_reg = r7; // This stub is meant to be tail-jumped to, the receiver must already // have been verified by the caller to not be a smi. @@ -4245,8 +4327,13 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( // Compare smis, unsigned compare catches both negative and out-of-bound // indexes. __ cmp(key_reg, scratch1); - __ b(hs, &miss_force_generic); + if (grow_mode == ALLOW_JSARRAY_GROWTH) { + __ b(hs, &grow); + } else { + __ b(hs, &miss_force_generic); + } + __ bind(&finish_store); __ StoreNumberToDoubleElements(value_reg, key_reg, receiver_reg, @@ -4267,6 +4354,73 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( __ bind(&transition_elements_kind); Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss(); __ Jump(ic_miss, RelocInfo::CODE_TARGET); + + if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) { + // Grow the array by a single element if possible. + __ bind(&grow); + + // Make sure the array is only growing by a single element, anything else + // must be handled by the runtime. Flags already set by previous compare. + __ b(ne, &miss_force_generic); + + // Transition on values that can't be stored in a FixedDoubleArray. + Label value_is_smi; + __ JumpIfSmi(value_reg, &value_is_smi); + __ ldr(scratch1, FieldMemOperand(value_reg, HeapObject::kMapOffset)); + __ CompareRoot(scratch1, Heap::kHeapNumberMapRootIndex); + __ b(ne, &transition_elements_kind); + __ bind(&value_is_smi); + + // Check for the empty array, and preallocate a small backing store if + // possible. + __ ldr(length_reg, + FieldMemOperand(receiver_reg, JSArray::kLengthOffset)); + __ ldr(elements_reg, + FieldMemOperand(receiver_reg, JSObject::kElementsOffset)); + __ CompareRoot(elements_reg, Heap::kEmptyFixedArrayRootIndex); + __ b(ne, &check_capacity); + + int size = FixedDoubleArray::SizeFor(JSArray::kPreallocatedArrayElements); + __ AllocateInNewSpace(size, elements_reg, scratch1, scratch2, &slow, + TAG_OBJECT); + + // Initialize the new FixedDoubleArray. Leave elements unitialized for + // efficiency, they are guaranteed to be initialized before use. + __ LoadRoot(scratch1, Heap::kFixedDoubleArrayMapRootIndex); + __ str(scratch1, FieldMemOperand(elements_reg, JSObject::kMapOffset)); + __ mov(scratch1, + Operand(Smi::FromInt(JSArray::kPreallocatedArrayElements))); + __ str(scratch1, + FieldMemOperand(elements_reg, FixedDoubleArray::kLengthOffset)); + + // Install the new backing store in the JSArray. 
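Both grow paths being added to this file (the FixedArray and FixedDoubleArray store stubs) implement the same JavaScript-visible rule: an out-of-bounds keyed store stays on the fast path only when it appends exactly one element to a JSArray. A small sketch of the assumed behavior:

var a = [1, 2, 3];
a[3] = 4;    // key == length: the grow stub extends the backing store in place
a[10] = 9;   // key > length: the flags from the bounds compare route this to
             // the miss/runtime path, which handles larger gaps generically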
+ __ str(elements_reg, + FieldMemOperand(receiver_reg, JSObject::kElementsOffset)); + __ RecordWriteField(receiver_reg, JSObject::kElementsOffset, elements_reg, + scratch1, kLRHasNotBeenSaved, kDontSaveFPRegs, + EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); + + // Increment the length of the array. + __ mov(length_reg, Operand(Smi::FromInt(1))); + __ str(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset)); + __ jmp(&finish_store); + + __ bind(&check_capacity); + // Make sure that the backing store can hold additional elements. + __ ldr(scratch1, + FieldMemOperand(elements_reg, FixedDoubleArray::kLengthOffset)); + __ cmp(length_reg, scratch1); + __ b(hs, &slow); + + // Grow the array and finish the store. + __ add(length_reg, length_reg, Operand(Smi::FromInt(1))); + __ str(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset)); + __ jmp(&finish_store); + + __ bind(&slow); + Handle<Code> ic_slow = masm->isolate()->builtins()->KeyedStoreIC_Slow(); + __ Jump(ic_slow, RelocInfo::CODE_TARGET); + } } diff --git a/deps/v8/src/array.js b/deps/v8/src/array.js index 16e37c5a7b..daa75d5753 100644 --- a/deps/v8/src/array.js +++ b/deps/v8/src/array.js @@ -27,7 +27,7 @@ // This file relies on the fact that the following declarations have been made // in runtime.js: -// const $Array = global.Array; +// var $Array = global.Array; // ------------------------------------------------------------------- @@ -757,7 +757,7 @@ function ArraySort(comparefn) { } var receiver = %GetDefaultReceiver(comparefn); - function InsertionSort(a, from, to) { + var InsertionSort = function InsertionSort(a, from, to) { for (var i = from + 1; i < to; i++) { var element = a[i]; for (var j = i - 1; j >= from; j--) { @@ -771,9 +771,9 @@ function ArraySort(comparefn) { } a[j + 1] = element; } - } + }; - function QuickSort(a, from, to) { + var QuickSort = function QuickSort(a, from, to) { // Insertion sort is faster for short arrays. if (to - from <= 10) { InsertionSort(a, from, to); @@ -841,12 +841,12 @@ function ArraySort(comparefn) { } QuickSort(a, from, low_end); QuickSort(a, high_start, to); - } + }; // Copy elements in the range 0..length from obj's prototype chain // to obj itself, if obj has holes. Return one more than the maximal index // of a prototype property. - function CopyFromPrototype(obj, length) { + var CopyFromPrototype = function CopyFromPrototype(obj, length) { var max = 0; for (var proto = obj.__proto__; proto; proto = proto.__proto__) { var indices = %GetArrayKeys(proto, length); @@ -873,12 +873,12 @@ function ArraySort(comparefn) { } } return max; - } + }; // Set a value of "undefined" on all indices in the range from..to // where a prototype of obj has an element. I.e., shadow all prototype // elements in that range. - function ShadowPrototypeElements(obj, from, to) { + var ShadowPrototypeElements = function(obj, from, to) { for (var proto = obj.__proto__; proto; proto = proto.__proto__) { var indices = %GetArrayKeys(proto, to); if (indices.length > 0) { @@ -901,9 +901,9 @@ function ArraySort(comparefn) { } } } - } + }; - function SafeRemoveArrayHoles(obj) { + var SafeRemoveArrayHoles = function SafeRemoveArrayHoles(obj) { // Copy defined elements from the end to fill in all holes and undefineds // in the beginning of the array. Write undefineds and holes at the end // after loop is finished. @@ -958,7 +958,7 @@ function ArraySort(comparefn) { // Return the number of defined elements. 
return first_undefined; - } + }; var length = TO_UINT32(this.length); if (length < 2) return this; @@ -1024,10 +1024,10 @@ function ArrayFilter(f, receiver) { var accumulator = new InternalArray(); var accumulator_length = 0; for (var i = 0; i < length; i++) { - var current = array[i]; - if (!IS_UNDEFINED(current) || i in array) { - if (%_CallFunction(receiver, current, i, array, f)) { - accumulator[accumulator_length++] = current; + if (i in array) { + var element = array[i]; + if (%_CallFunction(receiver, element, i, array, f)) { + accumulator[accumulator_length++] = element; } } } @@ -1057,9 +1057,9 @@ function ArrayForEach(f, receiver) { } for (var i = 0; i < length; i++) { - var current = array[i]; - if (!IS_UNDEFINED(current) || i in array) { - %_CallFunction(receiver, current, i, array, f); + if (i in array) { + var element = array[i]; + %_CallFunction(receiver, element, i, array, f); } } } @@ -1088,9 +1088,9 @@ function ArraySome(f, receiver) { } for (var i = 0; i < length; i++) { - var current = array[i]; - if (!IS_UNDEFINED(current) || i in array) { - if (%_CallFunction(receiver, current, i, array, f)) return true; + if (i in array) { + var element = array[i]; + if (%_CallFunction(receiver, element, i, array, f)) return true; } } return false; @@ -1118,9 +1118,9 @@ function ArrayEvery(f, receiver) { } for (var i = 0; i < length; i++) { - var current = array[i]; - if (!IS_UNDEFINED(current) || i in array) { - if (!%_CallFunction(receiver, current, i, array, f)) return false; + if (i in array) { + var element = array[i]; + if (!%_CallFunction(receiver, element, i, array, f)) return false; } } return true; @@ -1149,9 +1149,9 @@ function ArrayMap(f, receiver) { var result = new $Array(); var accumulator = new InternalArray(length); for (var i = 0; i < length; i++) { - var current = array[i]; - if (!IS_UNDEFINED(current) || i in array) { - accumulator[i] = %_CallFunction(receiver, current, i, array, f); + if (i in array) { + var element = array[i]; + accumulator[i] = %_CallFunction(receiver, element, i, array, f); } } %MoveArrayContents(accumulator, result); @@ -1308,8 +1308,8 @@ function ArrayReduce(callback, current) { var receiver = %GetDefaultReceiver(callback); for (; i < length; i++) { - var element = array[i]; - if (!IS_UNDEFINED(element) || i in array) { + if (i in array) { + var element = array[i]; current = %_CallFunction(receiver, current, element, i, array, callback); } } @@ -1345,8 +1345,8 @@ function ArrayReduceRight(callback, current) { var receiver = %GetDefaultReceiver(callback); for (; i >= 0; i--) { - var element = array[i]; - if (!IS_UNDEFINED(element) || i in array) { + if (i in array) { + var element = array[i]; current = %_CallFunction(receiver, current, element, i, array, callback); } } @@ -1373,7 +1373,7 @@ function SetUpArray() { var specialFunctions = %SpecialArrayFunctions({}); - function getFunction(name, jsBuiltin, len) { + var getFunction = function(name, jsBuiltin, len) { var f = jsBuiltin; if (specialFunctions.hasOwnProperty(name)) { f = specialFunctions[name]; @@ -1382,7 +1382,7 @@ function SetUpArray() { %FunctionSetLength(f, len); } return f; - } + }; // Set up non-enumerable functions of the Array.prototype object and // set their names. 
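The ArrayFilter/ForEach/Some/Every/Map/Reduce/ReduceRight hunks above all make the same fix (issue 1790 in the changelog): the presence check `i in array` now runs before the element is read, where previously `array[i]` was read first and the check applied second. A sketch of the contract, runnable under d8:

var sparse = [undefined, , 2];
sparse.forEach(function (x, i) { print(i, x); });
// 0 undefined -- a present element whose value is undefined is still visited
// 2 2         -- the hole at index 1 is skipped, and with this change its
//                slot is never read at all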
diff --git a/deps/v8/src/ast.cc b/deps/v8/src/ast.cc index 980dba6371..c98aaa916b 100644 --- a/deps/v8/src/ast.cc +++ b/deps/v8/src/ast.cc @@ -1009,6 +1009,7 @@ INCREASE_NODE_COUNT(BreakStatement) INCREASE_NODE_COUNT(ReturnStatement) INCREASE_NODE_COUNT(Conditional) INCREASE_NODE_COUNT(Literal) +INCREASE_NODE_COUNT(ObjectLiteral) INCREASE_NODE_COUNT(Assignment) INCREASE_NODE_COUNT(Throw) INCREASE_NODE_COUNT(Property) @@ -1017,6 +1018,8 @@ INCREASE_NODE_COUNT(CountOperation) INCREASE_NODE_COUNT(BinaryOperation) INCREASE_NODE_COUNT(CompareOperation) INCREASE_NODE_COUNT(ThisFunction) +INCREASE_NODE_COUNT(Call) +INCREASE_NODE_COUNT(CallNew) #undef INCREASE_NODE_COUNT @@ -1112,33 +1115,14 @@ void AstConstructionVisitor::VisitRegExpLiteral(RegExpLiteral* node) { } -void AstConstructionVisitor::VisitObjectLiteral(ObjectLiteral* node) { - increase_node_count(); - add_flag(kDontInline); // TODO(1322): Allow materialized literals. -} - - void AstConstructionVisitor::VisitArrayLiteral(ArrayLiteral* node) { increase_node_count(); add_flag(kDontInline); // TODO(1322): Allow materialized literals. } -void AstConstructionVisitor::VisitCall(Call* node) { - increase_node_count(); - add_flag(kDontSelfOptimize); -} - - -void AstConstructionVisitor::VisitCallNew(CallNew* node) { - increase_node_count(); - add_flag(kDontSelfOptimize); -} - - void AstConstructionVisitor::VisitCallRuntime(CallRuntime* node) { increase_node_count(); - add_flag(kDontSelfOptimize); if (node->is_jsruntime()) { // Don't try to inline JS runtime calls because we don't (currently) even // optimize them. diff --git a/deps/v8/src/ast.h b/deps/v8/src/ast.h index 7f812326fa..3acd121582 100644 --- a/deps/v8/src/ast.h +++ b/deps/v8/src/ast.h @@ -154,7 +154,7 @@ typedef ZoneList<Handle<Object> > ZoneObjectList; #define DECLARE_NODE_TYPE(type) \ virtual void Accept(AstVisitor* v); \ - virtual AstNode::Type node_type() const { return AstNode::k##type; } \ + virtual AstNode::Type node_type() const { return AstNode::k##type; } enum AstPropertiesFlag { @@ -223,8 +223,6 @@ class AstNode: public ZoneObject { virtual IterationStatement* AsIterationStatement() { return NULL; } virtual MaterializedLiteral* AsMaterializedLiteral() { return NULL; } - static void ResetIds() { Isolate::Current()->set_ast_node_id(0); } - protected: static int GetNextId(Isolate* isolate) { return ReserveIdRange(isolate, 1); @@ -245,11 +243,6 @@ class AstNode: public ZoneObject { }; -#define DECLARE_NODE_TYPE(type) \ - virtual void Accept(AstVisitor* v); \ - virtual AstNode::Type node_type() const { return AstNode::k##type; } \ - - class Statement: public AstNode { public: Statement() : statement_pos_(RelocInfo::kNoPosition) {} @@ -555,17 +548,17 @@ class ModuleVariable: public Module { public: DECLARE_NODE_TYPE(ModuleVariable) - Variable* var() const { return var_; } + VariableProxy* proxy() const { return proxy_; } protected: template<class> friend class AstNodeFactory; - explicit ModuleVariable(Variable* var) - : var_(var) { + explicit ModuleVariable(VariableProxy* proxy) + : proxy_(proxy) { } private: - Variable* var_; + VariableProxy* proxy_; }; @@ -793,10 +786,10 @@ class ForInStatement: public IterationStatement { Expression* each() const { return each_; } Expression* enumerable() const { return enumerable_; } - // Bailout support. 
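The ast.cc hunk above — deleting the VisitObjectLiteral, VisitCall and VisitCallNew overrides and folding them into plain INCREASE_NODE_COUNT cases — implements the changelog's "allow inlining of functions containing object literals" (issue 1322): object literals no longer set kDontInline, and ordinary calls no longer set kDontSelfOptimize. Illustrative JavaScript that becomes eligible under this change (an assumed example, not from the patch):

function makePoint(x, y) {
  return { x: x, y: y };    // object literal no longer marks this kDontInline
}
function norm2(x, y) {
  var p = makePoint(x, y);  // a call no longer blocks self-optimization here
  return p.x * p.x + p.y * p.y;
}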
- int AssignmentId() const { return assignment_id_; } virtual int ContinueId() const { return EntryId(); } - virtual int StackCheckId() const { return EntryId(); } + virtual int StackCheckId() const { return body_id_; } + int BodyId() const { return body_id_; } + int PrepareId() const { return prepare_id_; } protected: template<class> friend class AstNodeFactory; @@ -805,13 +798,15 @@ class ForInStatement: public IterationStatement { : IterationStatement(isolate, labels), each_(NULL), enumerable_(NULL), - assignment_id_(GetNextId(isolate)) { + body_id_(GetNextId(isolate)), + prepare_id_(GetNextId(isolate)) { } private: Expression* each_; Expression* enumerable_; - int assignment_id_; + int body_id_; + int prepare_id_; }; @@ -1910,6 +1905,16 @@ class FunctionLiteral: public Expression { DECLARATION }; + enum ParameterFlag { + kNoDuplicateParameters = 0, + kHasDuplicateParameters = 1 + }; + + enum IsFunctionFlag { + kGlobalOrEval, + kIsFunction + }; + DECLARE_NODE_TYPE(FunctionLiteral) Handle<String> name() const { return name_; } @@ -1919,6 +1924,7 @@ class FunctionLiteral: public Expression { int function_token_position() const { return function_token_position_; } int start_position() const; int end_position() const; + int SourceSize() const { return end_position() - start_position(); } bool is_expression() const { return IsExpression::decode(bitfield_); } bool is_anonymous() const { return IsAnonymous::decode(bitfield_); } bool is_classic_mode() const { return language_mode() == CLASSIC_MODE; } @@ -1954,6 +1960,8 @@ class FunctionLiteral: public Expression { return HasDuplicateParameters::decode(bitfield_); } + bool is_function() { return IsFunction::decode(bitfield_) == kIsFunction; } + int ast_node_count() { return ast_properties_.node_count(); } AstProperties::Flags* flags() { return ast_properties_.flags(); } void set_ast_properties(AstProperties* ast_properties) { @@ -1974,7 +1982,8 @@ class FunctionLiteral: public Expression { Handle<FixedArray> this_property_assignments, int parameter_count, Type type, - bool has_duplicate_parameters) + ParameterFlag has_duplicate_parameters, + IsFunctionFlag is_function) : Expression(isolate), name_(name), scope_(scope), @@ -1992,7 +2001,8 @@ class FunctionLiteral: public Expression { IsExpression::encode(type != DECLARATION) | IsAnonymous::encode(type == ANONYMOUS_EXPRESSION) | Pretenure::encode(false) | - HasDuplicateParameters::encode(has_duplicate_parameters); + HasDuplicateParameters::encode(has_duplicate_parameters) | + IsFunction::encode(is_function); } private: @@ -2014,7 +2024,8 @@ class FunctionLiteral: public Expression { class IsExpression: public BitField<bool, 1, 1> {}; class IsAnonymous: public BitField<bool, 2, 1> {}; class Pretenure: public BitField<bool, 3, 1> {}; - class HasDuplicateParameters: public BitField<bool, 4, 1> {}; + class HasDuplicateParameters: public BitField<ParameterFlag, 4, 1> {}; + class IsFunction: public BitField<IsFunctionFlag, 5, 1> {}; }; @@ -2050,6 +2061,8 @@ class ThisFunction: public Expression { explicit ThisFunction(Isolate* isolate): Expression(isolate) {} }; +#undef DECLARE_NODE_TYPE + // ---------------------------------------------------------------------------- // Regular expressions @@ -2525,19 +2538,19 @@ class AstNodeFactory BASE_EMBEDDED { VISIT_AND_RETURN(ModuleLiteral, module) } - ModuleVariable* NewModuleVariable(Variable* var) { - ModuleVariable* module = new(zone_) ModuleVariable(var); - VISIT_AND_RETURN(ModuleLiteral, module) + ModuleVariable* NewModuleVariable(VariableProxy* proxy) { + 
ModuleVariable* module = new(zone_) ModuleVariable(proxy); + VISIT_AND_RETURN(ModuleVariable, module) } ModulePath* NewModulePath(Module* origin, Handle<String> name) { ModulePath* module = new(zone_) ModulePath(origin, name); - VISIT_AND_RETURN(ModuleLiteral, module) + VISIT_AND_RETURN(ModulePath, module) } ModuleUrl* NewModuleUrl(Handle<String> url) { ModuleUrl* module = new(zone_) ModuleUrl(url); - VISIT_AND_RETURN(ModuleLiteral, module) + VISIT_AND_RETURN(ModuleUrl, module) } Block* NewBlock(ZoneStringList* labels, @@ -2781,15 +2794,16 @@ class AstNodeFactory BASE_EMBEDDED { bool has_only_simple_this_property_assignments, Handle<FixedArray> this_property_assignments, int parameter_count, - bool has_duplicate_parameters, + FunctionLiteral::ParameterFlag has_duplicate_parameters, FunctionLiteral::Type type, - bool visit_with_visitor) { + FunctionLiteral::IsFunctionFlag is_function) { FunctionLiteral* lit = new(zone_) FunctionLiteral( isolate_, name, scope, body, materialized_literal_count, expected_property_count, handler_count, has_only_simple_this_property_assignments, this_property_assignments, - parameter_count, type, has_duplicate_parameters); - if (visit_with_visitor) { + parameter_count, type, has_duplicate_parameters, is_function); + // Top-level literal doesn't count for the AST's properties. + if (is_function == FunctionLiteral::kIsFunction) { visitor_.VisitFunctionLiteral(lit); } return lit; diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc index 31a771fbeb..0f5b9c84a9 100644 --- a/deps/v8/src/bootstrapper.cc +++ b/deps/v8/src/bootstrapper.cc @@ -172,6 +172,10 @@ class Genesis BASE_EMBEDDED { Handle<JSFunction> GetThrowTypeErrorFunction(); void CreateStrictModeFunctionMaps(Handle<JSFunction> empty); + + // Make the "arguments" and "caller" properties throw a TypeError on access. + void PoisonArgumentsAndCaller(Handle<Map> map); + // Creates the global objects using the global and the template passed in // through the API. We call this regardless of whether we are building a // context from scratch or using a deserialized one from the partial snapshot @@ -192,7 +196,7 @@ class Genesis BASE_EMBEDDED { // detached from the other objects in the snapshot. void HookUpInnerGlobal(Handle<GlobalObject> inner_global); // New context initialization. Used for creating a context from scratch. - void InitializeGlobal(Handle<GlobalObject> inner_global, + bool InitializeGlobal(Handle<GlobalObject> inner_global, Handle<JSFunction> empty_function); void InitializeExperimentalGlobal(); // Installs the contents of the native .js files on the global objects. @@ -256,14 +260,10 @@ class Genesis BASE_EMBEDDED { Handle<Map> CreateStrictModeFunctionMap( PrototypePropertyMode prototype_mode, - Handle<JSFunction> empty_function, - Handle<AccessorPair> arguments_callbacks, - Handle<AccessorPair> caller_callbacks); + Handle<JSFunction> empty_function); Handle<DescriptorArray> ComputeStrictFunctionInstanceDescriptor( - PrototypePropertyMode propertyMode, - Handle<AccessorPair> arguments, - Handle<AccessorPair> caller); + PrototypePropertyMode propertyMode); static bool CompileBuiltin(Isolate* isolate, int index); static bool CompileExperimentalBuiltin(Isolate* isolate, int index); @@ -384,44 +384,40 @@ static Handle<JSFunction> InstallFunction(Handle<JSObject> target, Handle<DescriptorArray> Genesis::ComputeFunctionInstanceDescriptor( PrototypePropertyMode prototypeMode) { - Handle<DescriptorArray> descriptors = - factory()->NewDescriptorArray(prototypeMode == DONT_ADD_PROTOTYPE - ? 
4 - : 5); - PropertyAttributes attributes = - static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY); + int size = (prototypeMode == DONT_ADD_PROTOTYPE) ? 4 : 5; + Handle<DescriptorArray> descriptors(factory()->NewDescriptorArray(size)); + PropertyAttributes attribs = static_cast<PropertyAttributes>( + DONT_ENUM | DONT_DELETE | READ_ONLY); DescriptorArray::WhitenessWitness witness(*descriptors); { // Add length. - Handle<Foreign> foreign = factory()->NewForeign(&Accessors::FunctionLength); - CallbacksDescriptor d(*factory()->length_symbol(), *foreign, attributes); + Handle<Foreign> f(factory()->NewForeign(&Accessors::FunctionLength)); + CallbacksDescriptor d(*factory()->length_symbol(), *f, attribs); descriptors->Set(0, &d, witness); } { // Add name. - Handle<Foreign> foreign = factory()->NewForeign(&Accessors::FunctionName); - CallbacksDescriptor d(*factory()->name_symbol(), *foreign, attributes); + Handle<Foreign> f(factory()->NewForeign(&Accessors::FunctionName)); + CallbacksDescriptor d(*factory()->name_symbol(), *f, attribs); descriptors->Set(1, &d, witness); } { // Add arguments. - Handle<Foreign> foreign = - factory()->NewForeign(&Accessors::FunctionArguments); - CallbacksDescriptor d(*factory()->arguments_symbol(), *foreign, attributes); + Handle<Foreign> f(factory()->NewForeign(&Accessors::FunctionArguments)); + CallbacksDescriptor d(*factory()->arguments_symbol(), *f, attribs); descriptors->Set(2, &d, witness); } { // Add caller. - Handle<Foreign> foreign = factory()->NewForeign(&Accessors::FunctionCaller); - CallbacksDescriptor d(*factory()->caller_symbol(), *foreign, attributes); + Handle<Foreign> f(factory()->NewForeign(&Accessors::FunctionCaller)); + CallbacksDescriptor d(*factory()->caller_symbol(), *f, attribs); descriptors->Set(3, &d, witness); } if (prototypeMode != DONT_ADD_PROTOTYPE) { // Add prototype. if (prototypeMode == ADD_WRITEABLE_PROTOTYPE) { - attributes = static_cast<PropertyAttributes>(attributes & ~READ_ONLY); + attribs = static_cast<PropertyAttributes>(attribs & ~READ_ONLY); } - Handle<Foreign> foreign = - factory()->NewForeign(&Accessors::FunctionPrototype); - CallbacksDescriptor d(*factory()->prototype_symbol(), *foreign, attributes); + Handle<Foreign> f(factory()->NewForeign(&Accessors::FunctionPrototype)); + CallbacksDescriptor d(*factory()->prototype_symbol(), *f, attribs); descriptors->Set(4, &d, witness); } descriptors->Sort(witness); @@ -532,47 +528,42 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) { Handle<DescriptorArray> Genesis::ComputeStrictFunctionInstanceDescriptor( - PrototypePropertyMode prototypeMode, - Handle<AccessorPair> arguments, - Handle<AccessorPair> caller) { - Handle<DescriptorArray> descriptors = - factory()->NewDescriptorArray(prototypeMode == DONT_ADD_PROTOTYPE - ? 4 - : 5); - PropertyAttributes attributes = static_cast<PropertyAttributes>( + PrototypePropertyMode prototypeMode) { + int size = (prototypeMode == DONT_ADD_PROTOTYPE) ? 4 : 5; + Handle<DescriptorArray> descriptors(factory()->NewDescriptorArray(size)); + PropertyAttributes attribs = static_cast<PropertyAttributes>( DONT_ENUM | DONT_DELETE); DescriptorArray::WhitenessWitness witness(*descriptors); - { // length - Handle<Foreign> foreign = factory()->NewForeign(&Accessors::FunctionLength); - CallbacksDescriptor d(*factory()->length_symbol(), *foreign, attributes); + { // Add length. 
+ Handle<Foreign> f(factory()->NewForeign(&Accessors::FunctionLength)); + CallbacksDescriptor d(*factory()->length_symbol(), *f, attribs); descriptors->Set(0, &d, witness); } - { // name - Handle<Foreign> foreign = factory()->NewForeign(&Accessors::FunctionName); - CallbacksDescriptor d(*factory()->name_symbol(), *foreign, attributes); + { // Add name. + Handle<Foreign> f(factory()->NewForeign(&Accessors::FunctionName)); + CallbacksDescriptor d(*factory()->name_symbol(), *f, attribs); descriptors->Set(1, &d, witness); } - { // arguments - CallbacksDescriptor d(*factory()->arguments_symbol(), - *arguments, - attributes); + { // Add arguments. + Handle<AccessorPair> arguments(factory()->NewAccessorPair()); + CallbacksDescriptor d(*factory()->arguments_symbol(), *arguments, attribs); descriptors->Set(2, &d, witness); } - { // caller - CallbacksDescriptor d(*factory()->caller_symbol(), *caller, attributes); + { // Add caller. + Handle<AccessorPair> caller(factory()->NewAccessorPair()); + CallbacksDescriptor d(*factory()->caller_symbol(), *caller, attribs); descriptors->Set(3, &d, witness); } - // prototype if (prototypeMode != DONT_ADD_PROTOTYPE) { + // Add prototype. if (prototypeMode != ADD_WRITEABLE_PROTOTYPE) { - attributes = static_cast<PropertyAttributes>(attributes | READ_ONLY); + attribs = static_cast<PropertyAttributes>(attribs | READ_ONLY); } - Handle<Foreign> foreign = - factory()->NewForeign(&Accessors::FunctionPrototype); - CallbacksDescriptor d(*factory()->prototype_symbol(), *foreign, attributes); + Handle<Foreign> f(factory()->NewForeign(&Accessors::FunctionPrototype)); + CallbacksDescriptor d(*factory()->prototype_symbol(), *f, attribs); descriptors->Set(4, &d, witness); } @@ -603,14 +594,10 @@ Handle<JSFunction> Genesis::GetThrowTypeErrorFunction() { Handle<Map> Genesis::CreateStrictModeFunctionMap( PrototypePropertyMode prototype_mode, - Handle<JSFunction> empty_function, - Handle<AccessorPair> arguments_callbacks, - Handle<AccessorPair> caller_callbacks) { + Handle<JSFunction> empty_function) { Handle<Map> map = factory()->NewMap(JS_FUNCTION_TYPE, JSFunction::kSize); Handle<DescriptorArray> descriptors = - ComputeStrictFunctionInstanceDescriptor(prototype_mode, - arguments_callbacks, - caller_callbacks); + ComputeStrictFunctionInstanceDescriptor(prototype_mode); map->set_instance_descriptors(*descriptors); map->set_function_with_prototype(prototype_mode != DONT_ADD_PROTOTYPE); map->set_prototype(*empty_function); @@ -619,23 +606,15 @@ Handle<Map> Genesis::CreateStrictModeFunctionMap( void Genesis::CreateStrictModeFunctionMaps(Handle<JSFunction> empty) { - // Create the callbacks arrays for ThrowTypeError functions. - // The get/set callacks are filled in after the maps are created below. - Factory* factory = empty->GetIsolate()->factory(); - Handle<AccessorPair> arguments(factory->NewAccessorPair()); - Handle<AccessorPair> caller(factory->NewAccessorPair()); - // Allocate map for the strict mode function instances. Handle<Map> strict_mode_function_instance_map = - CreateStrictModeFunctionMap( - ADD_WRITEABLE_PROTOTYPE, empty, arguments, caller); + CreateStrictModeFunctionMap(ADD_WRITEABLE_PROTOTYPE, empty); global_context()->set_strict_mode_function_instance_map( *strict_mode_function_instance_map); // Allocate map for the prototype-less strict mode instances. 
Handle<Map> strict_mode_function_without_prototype_map = - CreateStrictModeFunctionMap( - DONT_ADD_PROTOTYPE, empty, arguments, caller); + CreateStrictModeFunctionMap(DONT_ADD_PROTOTYPE, empty); global_context()->set_strict_mode_function_without_prototype_map( *strict_mode_function_without_prototype_map); @@ -643,26 +622,38 @@ void Genesis::CreateStrictModeFunctionMaps(Handle<JSFunction> empty) { // only for processing of builtins. // Later the map is replaced with writable prototype map, allocated below. Handle<Map> strict_mode_function_map = - CreateStrictModeFunctionMap( - ADD_READONLY_PROTOTYPE, empty, arguments, caller); + CreateStrictModeFunctionMap(ADD_READONLY_PROTOTYPE, empty); global_context()->set_strict_mode_function_map( *strict_mode_function_map); // The final map for the strict mode functions. Writeable prototype. // This map is installed in MakeFunctionInstancePrototypeWritable. strict_mode_function_instance_map_writable_prototype_ = - CreateStrictModeFunctionMap( - ADD_WRITEABLE_PROTOTYPE, empty, arguments, caller); - - // Create the ThrowTypeError function instance. - Handle<JSFunction> throw_function = - GetThrowTypeErrorFunction(); + CreateStrictModeFunctionMap(ADD_WRITEABLE_PROTOTYPE, empty); // Complete the callbacks. - arguments->set_getter(*throw_function); - arguments->set_setter(*throw_function); - caller->set_getter(*throw_function); - caller->set_setter(*throw_function); + PoisonArgumentsAndCaller(strict_mode_function_instance_map); + PoisonArgumentsAndCaller(strict_mode_function_without_prototype_map); + PoisonArgumentsAndCaller(strict_mode_function_map); + PoisonArgumentsAndCaller( + strict_mode_function_instance_map_writable_prototype_); +} + + +static void SetAccessors(Handle<Map> map, + Handle<String> name, + Handle<JSFunction> func) { + DescriptorArray* descs = map->instance_descriptors(); + int number = descs->Search(*name); + AccessorPair* accessors = AccessorPair::cast(descs->GetValue(number)); + accessors->set_getter(*func); + accessors->set_setter(*func); +} + + +void Genesis::PoisonArgumentsAndCaller(Handle<Map> map) { + SetAccessors(map, factory()->arguments_symbol(), GetThrowTypeErrorFunction()); + SetAccessors(map, factory()->caller_symbol(), GetThrowTypeErrorFunction()); } @@ -837,7 +828,7 @@ void Genesis::HookUpInnerGlobal(Handle<GlobalObject> inner_global) { // This is only called if we are not using snapshots. The equivalent // work in the snapshot case is done in HookUpInnerGlobal. -void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global, +bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global, Handle<JSFunction> empty_function) { // --- G l o b a l C o n t e x t --- // Use the empty function as closure (no scope info). 
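PoisonArgumentsAndCaller above now installs the throwing accessor pair after the strict-mode maps exist, instead of threading AccessorPair handles through map creation; the JavaScript-visible behavior should be unchanged. For reference, a sketch of that behavior:

function f() { "use strict"; }
try { f.caller; } catch (e) { print(e instanceof TypeError); }     // true
try { f.arguments; } catch (e) { print(e instanceof TypeError); }  // true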
@@ -1041,7 +1032,10 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global, Handle<String> name = factory->NewStringFromAscii(CStrVector("JSON")); Handle<JSFunction> cons = factory->NewFunction(name, factory->the_hole_value()); - cons->SetInstancePrototype(global_context()->initial_object_prototype()); + { MaybeObject* result = cons->SetInstancePrototype( + global_context()->initial_object_prototype()); + if (result->IsFailure()) return false; + } cons->SetInstanceClassName(*name); Handle<JSObject> json_object = factory->NewJSObject(cons, TENURED); ASSERT(json_object->IsJSObject()); @@ -1252,6 +1246,7 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global, global_context()->set_random_seed(*zeroed_byte_array); memset(zeroed_byte_array->GetDataStartAddress(), 0, kRandomStateSize); } + return true; } @@ -1743,7 +1738,9 @@ bool Genesis::InstallNatives() { Handle<DescriptorArray> array_descriptors( array_function->initial_map()->instance_descriptors()); int index = array_descriptors->SearchWithCache(heap()->length_symbol()); - reresult_descriptors->CopyFrom(0, *array_descriptors, index, witness); + MaybeObject* copy_result = + reresult_descriptors->CopyFrom(0, *array_descriptors, index, witness); + if (copy_result->IsFailure()) return false; int enum_index = 0; { @@ -2321,7 +2318,7 @@ Genesis::Genesis(Isolate* isolate, Handle<JSGlobalProxy> global_proxy = CreateNewGlobals(global_template, global_object, &inner_global); HookUpGlobalProxy(inner_global, global_proxy); - InitializeGlobal(inner_global, empty_function); + if (!InitializeGlobal(inner_global, empty_function)) return; InstallJSFunctionResultCaches(); InitializeNormalizedMapCaches(); if (!InstallNatives()) return; diff --git a/deps/v8/src/builtins.cc b/deps/v8/src/builtins.cc index 1badca7bc5..7290a2cf1c 100644 --- a/deps/v8/src/builtins.cc +++ b/deps/v8/src/builtins.cc @@ -206,8 +206,7 @@ static MaybeObject* ArrayCodeGenericCommon(Arguments* args, } } else { // Allocate the JS Array - MaybeObject* maybe_obj = - heap->AllocateEmptyJSArray(FAST_SMI_ONLY_ELEMENTS); + MaybeObject* maybe_obj = heap->AllocateJSObject(constructor); if (!maybe_obj->To(&array)) return maybe_obj; } @@ -218,12 +217,13 @@ static MaybeObject* ArrayCodeGenericCommon(Arguments* args, if (obj->IsSmi()) { int len = Smi::cast(obj)->value(); if (len >= 0 && len < JSObject::kInitialMaxFastElementArray) { - Object* obj; + Object* fixed_array; { MaybeObject* maybe_obj = heap->AllocateFixedArrayWithHoles(len); - if (!maybe_obj->ToObject(&obj)) return maybe_obj; + if (!maybe_obj->ToObject(&fixed_array)) return maybe_obj; } - MaybeObject* maybe_obj = array->SetContent(FixedArray::cast(obj)); - if (maybe_obj->IsFailure()) return maybe_obj; + // We do not use SetContent to skip the unnecessary elements type check. + array->set_elements(FixedArray::cast(fixed_array)); + array->set_length(Smi::cast(obj)); return array; } } diff --git a/deps/v8/src/char-predicates.h b/deps/v8/src/char-predicates.h index 5a901a26aa..b97191f5cc 100644 --- a/deps/v8/src/char-predicates.h +++ b/deps/v8/src/char-predicates.h @@ -57,6 +57,8 @@ struct IdentifierPart { static inline bool Is(uc32 c) { return IdentifierStart::Is(c) || unibrow::Number::Is(c) + || c == 0x200C // U+200C is Zero-Width Non-Joiner. + || c == 0x200D // U+200D is Zero-Width Joiner. 
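The two lines just added to char-predicates.h make ZWNJ and ZWJ valid IdentifierPart characters, matching ES5.1's identifier grammar. A quick sketch of what now parses (and was a SyntaxError before):

var a\u200Cb = 1;   // ZERO WIDTH NON-JOINER inside an identifier
var a\u200Db = 2;   // ZERO WIDTH JOINER inside an identifier
print(a\u200Cb + a\u200Db);   // 3 -- two distinct identifiers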
|| unibrow::CombiningMark::Is(c) || unibrow::ConnectorPunctuation::Is(c); } diff --git a/deps/v8/src/code-stubs.cc b/deps/v8/src/code-stubs.cc index dd1cc5e47a..11016c8238 100644 --- a/deps/v8/src/code-stubs.cc +++ b/deps/v8/src/code-stubs.cc @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -296,12 +296,14 @@ void KeyedStoreElementStub::Generate(MacroAssembler* masm) { case FAST_SMI_ONLY_ELEMENTS: { KeyedStoreStubCompiler::GenerateStoreFastElement(masm, is_js_array_, - elements_kind_); + elements_kind_, + grow_mode_); } break; case FAST_DOUBLE_ELEMENTS: KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(masm, - is_js_array_); + is_js_array_, + grow_mode_); break; case EXTERNAL_BYTE_ELEMENTS: case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: @@ -440,10 +442,13 @@ void ElementsTransitionAndStoreStub::Generate(MacroAssembler* masm) { } KeyedStoreStubCompiler::GenerateStoreFastElement(masm, is_jsarray_, - FAST_ELEMENTS); + FAST_ELEMENTS, + grow_mode_); } else if (from_ == FAST_SMI_ONLY_ELEMENTS && to_ == FAST_DOUBLE_ELEMENTS) { ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &fail); - KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(masm, is_jsarray_); + KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(masm, + is_jsarray_, + grow_mode_); } else { UNREACHABLE(); } diff --git a/deps/v8/src/code-stubs.h b/deps/v8/src/code-stubs.h index 78ff554fdb..b67e961ac7 100644 --- a/deps/v8/src/code-stubs.h +++ b/deps/v8/src/code-stubs.h @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -55,6 +55,7 @@ namespace internal { V(ConvertToDouble) \ V(WriteInt32ToHeapNumber) \ V(StackCheck) \ + V(Interrupt) \ V(FastNewClosure) \ V(FastNewContext) \ V(FastNewBlockContext) \ @@ -297,6 +298,18 @@ class StackCheckStub : public CodeStub { }; +class InterruptStub : public CodeStub { + public: + InterruptStub() { } + + void Generate(MacroAssembler* masm); + + private: + Major MajorKey() { return Interrupt; } + int MinorKey() { return 0; } +}; + + class ToNumberStub: public CodeStub { public: ToNumberStub() { } @@ -632,9 +645,6 @@ class CEntryStub : public CodeStub { Label* throw_out_of_memory_exception, bool do_gc, bool always_allocate_scope); - void GenerateThrowTOS(MacroAssembler* masm); - void GenerateThrowUncatchable(MacroAssembler* masm, - UncatchableExceptionType type); // Number of pointers/values returned. const int result_size_; @@ -985,20 +995,29 @@ class KeyedLoadElementStub : public CodeStub { class KeyedStoreElementStub : public CodeStub { public: KeyedStoreElementStub(bool is_js_array, - ElementsKind elements_kind) - : is_js_array_(is_js_array), - elements_kind_(elements_kind) { } + ElementsKind elements_kind, + KeyedAccessGrowMode grow_mode) + : is_js_array_(is_js_array), + elements_kind_(elements_kind), + grow_mode_(grow_mode) { } Major MajorKey() { return KeyedStoreElement; } int MinorKey() { - return (is_js_array_ ? 
0 : kElementsKindCount) + elements_kind_; + return ElementsKindBits::encode(elements_kind_) | + IsJSArrayBits::encode(is_js_array_) | + GrowModeBits::encode(grow_mode_); } void Generate(MacroAssembler* masm); private: + class ElementsKindBits: public BitField<ElementsKind, 0, 8> {}; + class GrowModeBits: public BitField<KeyedAccessGrowMode, 8, 1> {}; + class IsJSArrayBits: public BitField<bool, 9, 1> {}; + bool is_js_array_; ElementsKind elements_kind_; + KeyedAccessGrowMode grow_mode_; DISALLOW_COPY_AND_ASSIGN(KeyedStoreElementStub); }; @@ -1076,24 +1095,28 @@ class ElementsTransitionAndStoreStub : public CodeStub { ElementsTransitionAndStoreStub(ElementsKind from, ElementsKind to, bool is_jsarray, - StrictModeFlag strict_mode) + StrictModeFlag strict_mode, + KeyedAccessGrowMode grow_mode) : from_(from), to_(to), is_jsarray_(is_jsarray), - strict_mode_(strict_mode) {} + strict_mode_(strict_mode), + grow_mode_(grow_mode) {} private: - class FromBits: public BitField<ElementsKind, 0, 8> {}; - class ToBits: public BitField<ElementsKind, 8, 8> {}; - class IsJSArrayBits: public BitField<bool, 16, 8> {}; - class StrictModeBits: public BitField<StrictModeFlag, 24, 8> {}; + class FromBits: public BitField<ElementsKind, 0, 8> {}; + class ToBits: public BitField<ElementsKind, 8, 8> {}; + class IsJSArrayBits: public BitField<bool, 16, 1> {}; + class StrictModeBits: public BitField<StrictModeFlag, 17, 1> {}; + class GrowModeBits: public BitField<KeyedAccessGrowMode, 18, 1> {}; Major MajorKey() { return ElementsTransitionAndStore; } int MinorKey() { return FromBits::encode(from_) | ToBits::encode(to_) | IsJSArrayBits::encode(is_jsarray_) | - StrictModeBits::encode(strict_mode_); + StrictModeBits::encode(strict_mode_) | + GrowModeBits::encode(grow_mode_); } void Generate(MacroAssembler* masm); @@ -1102,6 +1125,7 @@ class ElementsTransitionAndStoreStub : public CodeStub { ElementsKind to_; bool is_jsarray_; StrictModeFlag strict_mode_; + KeyedAccessGrowMode grow_mode_; DISALLOW_COPY_AND_ASSIGN(ElementsTransitionAndStoreStub); }; diff --git a/deps/v8/src/collection.js b/deps/v8/src/collection.js index fcf4d38d94..75fe3d541d 100644 --- a/deps/v8/src/collection.js +++ b/deps/v8/src/collection.js @@ -25,10 +25,11 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +"use strict"; -const $Set = global.Set; -const $Map = global.Map; -const $WeakMap = global.WeakMap; +var $Set = global.Set; +var $Map = global.Map; +var $WeakMap = global.WeakMap; //------------------------------------------------------------------- diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc index aea889f8be..5d7dbd1624 100644 --- a/deps/v8/src/compiler.cc +++ b/deps/v8/src/compiler.cc @@ -61,7 +61,7 @@ CompilationInfo::CompilationInfo(Handle<Script> script) extension_(NULL), pre_parse_data_(NULL), osr_ast_id_(AstNode::kNoNumber) { - Initialize(NONOPT); + Initialize(BASE); } @@ -182,10 +182,8 @@ static void FinishOptimization(Handle<JSFunction> function, int64_t start) { static bool MakeCrankshaftCode(CompilationInfo* info) { // Test if we can optimize this function when asked to. We can only // do this after the scopes are computed. 
- if (!info->AllowOptimize()) { + if (!V8::UseCrankshaft()) { info->DisableOptimization(); - } else if (info->IsOptimizable()) { - info->EnableDeoptimizationSupport(); } // In case we are not optimizing simply return the code from @@ -217,8 +215,7 @@ static bool MakeCrankshaftCode(CompilationInfo* info) { FLAG_deopt_every_n_times == 0 ? Compiler::kDefaultMaxOptCount : 1000; if (info->shared_info()->opt_count() > kMaxOptCount) { info->AbortOptimization(); - Handle<JSFunction> closure = info->closure(); - info->shared_info()->DisableOptimization(*closure); + info->shared_info()->DisableOptimization(); // True indicates the compilation pipeline is still going, not // necessarily that we optimized the code. return true; @@ -238,8 +235,7 @@ static bool MakeCrankshaftCode(CompilationInfo* info) { (info->osr_ast_id() != AstNode::kNoNumber && scope->num_parameters() + 1 + scope->num_stack_slots() > locals_limit)) { info->AbortOptimization(); - Handle<JSFunction> closure = info->closure(); - info->shared_info()->DisableOptimization(*closure); + info->shared_info()->DisableOptimization(); // True indicates the compilation pipeline is still going, not // necessarily that we optimized the code. return true; @@ -317,8 +313,7 @@ static bool MakeCrankshaftCode(CompilationInfo* info) { if (!builder.inline_bailout()) { // Mark the shared code as unoptimizable unless it was an inlined // function that bailed out. - Handle<JSFunction> closure = info->closure(); - info->shared_info()->DisableOptimization(*closure); + info->shared_info()->DisableOptimization(); } // True indicates the compilation pipeline is still going, not necessarily // that we optimized the code. @@ -502,13 +497,6 @@ Handle<SharedFunctionInfo> Compiler::Compile(Handle<String> source, // for small sources, odds are that there aren't many functions // that would be compiled lazily anyway, so we skip the preparse step // in that case too. - int flags = kNoParsingFlags; - if ((natives == NATIVES_CODE) || FLAG_allow_natives_syntax) { - flags |= kAllowNativesSyntax; - } - if (natives != NATIVES_CODE && FLAG_harmony_scoping) { - flags |= EXTENDED_MODE; - } // Create a script object describing the script to be compiled. Handle<Script> script = FACTORY->NewScript(source); @@ -529,6 +517,7 @@ Handle<SharedFunctionInfo> Compiler::Compile(Handle<String> source, info.MarkAsGlobal(); info.SetExtension(extension); info.SetPreParseData(pre_data); + if (FLAG_use_strict) info.SetLanguageMode(STRICT_MODE); result = MakeFunctionInfo(&info); if (extension == NULL && !result.is_null()) { compilation_cache->PutScript(source, result); @@ -573,6 +562,10 @@ Handle<SharedFunctionInfo> Compiler::CompileEval(Handle<String> source, info.SetCallingContext(context); result = MakeFunctionInfo(&info); if (!result.is_null()) { + // Explicitly disable optimization for eval code. We're not yet prepared + // to handle eval-code in the optimizing compiler. + result->DisableOptimization(); + // If caller is strict mode, the result must be in strict mode or // extended mode as well, but not the other way around. Consider: // eval("'use strict'; ..."); @@ -664,11 +657,13 @@ bool Compiler::CompileLazy(CompilationInfo* info) { // Check the function has compiled code. 
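CompileEval above now calls DisableOptimization() on its result up front, with the stated reason that the optimizing compiler is not yet prepared for eval code. A sketch of the shape this affects — only the eval script's own top-level code, per the comment:

eval("for (var i = 0; i < 100000; i++) { }");  // top-level eval code: never optimized
function hot(a, b) { return a + b; }           // ordinary functions are unaffected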
ASSERT(shared->is_compiled()); shared->set_code_age(0); - shared->set_dont_crankshaft(lit->flags()->Contains(kDontOptimize)); + shared->set_dont_optimize(lit->flags()->Contains(kDontOptimize)); shared->set_dont_inline(lit->flags()->Contains(kDontInline)); shared->set_ast_node_count(lit->ast_node_count()); - if (info->AllowOptimize() && !shared->optimization_disabled()) { + if (V8::UseCrankshaft()&& + !function.is_null() && + !shared->optimization_disabled()) { // If we're asked to always optimize, we compile the optimized // version of the function right away - unless the debugger is // active as it makes no sense to compile optimized code then. @@ -766,7 +761,8 @@ void Compiler::SetFunctionInfo(Handle<SharedFunctionInfo> function_info, function_info->set_uses_arguments(lit->scope()->arguments() != NULL); function_info->set_has_duplicate_parameters(lit->has_duplicate_parameters()); function_info->set_ast_node_count(lit->ast_node_count()); - function_info->set_dont_crankshaft(lit->flags()->Contains(kDontOptimize)); + function_info->set_is_function(lit->is_function()); + function_info->set_dont_optimize(lit->flags()->Contains(kDontOptimize)); function_info->set_dont_inline(lit->flags()->Contains(kDontInline)); } diff --git a/deps/v8/src/compiler.h b/deps/v8/src/compiler.h index 38252871ea..44df9e090f 100644 --- a/deps/v8/src/compiler.h +++ b/deps/v8/src/compiler.h @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -163,11 +163,6 @@ class CompilationInfo BASE_EMBEDDED { flags_ |= SupportsDeoptimization::encode(true); } - // Determine whether or not we can adaptively optimize. - bool AllowOptimize() { - return V8::UseCrankshaft() && !closure_.is_null(); - } - // Determines whether or not to insert a self-optimization header. bool ShouldSelfOptimize(); @@ -181,9 +176,8 @@ class CompilationInfo BASE_EMBEDDED { // Compilation mode. // BASE is generated by the full codegen, optionally prepared for bailouts. // OPTIMIZE is optimized code generated by the Hydrogen-based backend. - // NONOPT is generated by the full codegen or the classic backend - // and is not prepared for recompilation/bailouts. These functions - // are never recompiled. + // NONOPT is generated by the full codegen and is not prepared for + // recompilation/bailouts. These functions are never recompiled. enum Mode { BASE, OPTIMIZE, diff --git a/deps/v8/src/contexts.h b/deps/v8/src/contexts.h index 1f88c946de..af5cb036c6 100644 --- a/deps/v8/src/contexts.h +++ b/deps/v8/src/contexts.h @@ -356,6 +356,10 @@ class Context: public FixedArray { Map* map = this->map(); return map == map->GetHeap()->block_context_map(); } + bool IsModuleContext() { + Map* map = this->map(); + return map == map->GetHeap()->module_context_map(); + } // Tells whether the global context is marked with out of memory. inline bool has_out_of_memory(); diff --git a/deps/v8/src/d8.js b/deps/v8/src/d8.js index 86b8c8106c..d136393e78 100644 --- a/deps/v8/src/d8.js +++ b/deps/v8/src/d8.js @@ -25,6 +25,8 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+"use strict"; + String.prototype.startsWith = function (str) { if (str.length > this.length) { return false; @@ -76,7 +78,7 @@ function GetCompletions(global, last, full) { // Global object holding debugger related constants and state. -const Debug = {}; +var Debug = {}; // Debug events which can occour in the V8 JavaScript engine. These originate @@ -111,7 +113,7 @@ Debug.ScopeType = { Global: 0, // Current debug state. -const kNoFrame = -1; +var kNoFrame = -1; Debug.State = { currentFrame: kNoFrame, displaySourceStartLine: -1, @@ -123,8 +125,8 @@ var trace_debug_json = false; // Tracing all debug json packets? var last_cmd_line = ''; //var lol_is_enabled; // Set to true in d8.cc if LIVE_OBJECT_LIST is defined. var lol_next_dump_index = 0; -const kDefaultLolLinesToPrintAtATime = 10; -const kMaxLolLinesToPrintAtATime = 1000; +var kDefaultLolLinesToPrintAtATime = 10; +var kMaxLolLinesToPrintAtATime = 1000; var repeat_cmd_line = ''; var is_running = true; @@ -2629,7 +2631,7 @@ function NumberToJSON_(value) { // Mapping of some control characters to avoid the \uXXXX syntax for most // commonly used control cahracters. -const ctrlCharMap_ = { +var ctrlCharMap_ = { '\b': '\\b', '\t': '\\t', '\n': '\\n', @@ -2641,12 +2643,12 @@ const ctrlCharMap_ = { // Regular expression testing for ", \ and control characters (0x00 - 0x1F). -const ctrlCharTest_ = new RegExp('["\\\\\x00-\x1F]'); +var ctrlCharTest_ = new RegExp('["\\\\\x00-\x1F]'); // Regular expression matching ", \ and control characters (0x00 - 0x1F) // globally. -const ctrlCharMatch_ = new RegExp('["\\\\\x00-\x1F]', 'g'); +var ctrlCharMatch_ = new RegExp('["\\\\\x00-\x1F]', 'g'); /** @@ -2688,12 +2690,12 @@ function StringToJSON_(value) { * @return {string} JSON formatted Date value */ function DateToISO8601_(value) { - function f(n) { + var f = function(n) { return n < 10 ? '0' + n : n; - } - function g(n) { + }; + var g = function(n) { return n < 10 ? '00' + n : n < 100 ? '0' + n : n; - } + }; return builtins.GetUTCFullYearFrom(value) + '-' + f(builtins.GetUTCMonthFrom(value) + 1) + '-' + f(builtins.GetUTCDateFrom(value)) + 'T' + diff --git a/deps/v8/src/data-flow.h b/deps/v8/src/data-flow.h index d69d6c7a52..71f56e718b 100644 --- a/deps/v8/src/data-flow.h +++ b/deps/v8/src/data-flow.h @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. 
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -85,18 +85,18 @@ class BitVector: public ZoneObject { friend class BitVector; }; - explicit BitVector(int length) + BitVector(int length, Zone* zone) : length_(length), data_length_(SizeFor(length)), - data_(ZONE->NewArray<uint32_t>(data_length_)) { + data_(zone->NewArray<uint32_t>(data_length_)) { ASSERT(length > 0); Clear(); } - BitVector(const BitVector& other) + BitVector(const BitVector& other, Zone* zone) : length_(other.length()), data_length_(SizeFor(length_)), - data_(ZONE->NewArray<uint32_t>(data_length_)) { + data_(zone->NewArray<uint32_t>(data_length_)) { CopyFrom(other); } diff --git a/deps/v8/src/date.js b/deps/v8/src/date.js index 999009e863..8c51a931e4 100644 --- a/deps/v8/src/date.js +++ b/deps/v8/src/date.js @@ -28,17 +28,16 @@ // This file relies on the fact that the following declarations have been made // in v8natives.js: -// const $isFinite = GlobalIsFinite; +// var $isFinite = GlobalIsFinite; // ------------------------------------------------------------------- // This file contains date support implemented in JavaScript. - // Keep reference to original values of some global properties. This // has the added benefit that the code in this file is isolated from // changes to these properties. -const $Date = global.Date; +var $Date = global.Date; // Helper function to throw error. function ThrowDateTypeError() { diff --git a/deps/v8/src/debug-debugger.js b/deps/v8/src/debug-debugger.js index 120a297007..91c70a027b 100644 --- a/deps/v8/src/debug-debugger.js +++ b/deps/v8/src/debug-debugger.js @@ -26,14 +26,14 @@ // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // Default number of frames to include in the response to backtrace request. -const kDefaultBacktraceLength = 10; +var kDefaultBacktraceLength = 10; -const Debug = {}; +var Debug = {}; // Regular expression to skip "crud" at the beginning of a source line which is // not really code. Currently the regular expression matches whitespace and // comments. -const sourceLineBeginningSkip = /^(?:\s*(?:\/\*.*?\*\/)*)*/; +var sourceLineBeginningSkip = /^(?:\s*(?:\/\*.*?\*\/)*)*/; // Debug events which can occour in the V8 JavaScript engine. These originate // from the API include file debug.h. 
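The const-to-var rewrites running through array.js, collection.js, d8.js, date.js and debug-debugger.js all follow from the new "use strict" directives: V8's legacy const declaration is not legal inside strict-mode code at this revision, so the builtins switch to var, and nested helpers become var-bound named function expressions. A sketch of the constraint:

"use strict";
// const kLimit = 10;    // rejected inside strict-mode code at this revision
var kLimit = 10;         // the form these files use instead
var helper = function helper() { return kLimit; };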
diff --git a/deps/v8/src/deoptimizer.h b/deps/v8/src/deoptimizer.h index 44189d96cd..68bc48d5cb 100644 --- a/deps/v8/src/deoptimizer.h +++ b/deps/v8/src/deoptimizer.h @@ -267,11 +267,7 @@ class Deoptimizer : public Malloced { int ConvertJSFrameIndexToFrameIndex(int jsframe_index); private: -#ifdef V8_TARGET_ARCH_MIPS - static const int kNumberOfEntries = 4096; -#else static const int kNumberOfEntries = 16384; -#endif Deoptimizer(Isolate* isolate, JSFunction* function, diff --git a/deps/v8/src/elements.cc b/deps/v8/src/elements.cc index e54ec62691..c15c44d2ea 100644 --- a/deps/v8/src/elements.cc +++ b/deps/v8/src/elements.cc @@ -109,30 +109,29 @@ class ElementsAccessorBase : public ElementsAccessor { uint32_t key, JSObject* obj, Object* receiver) { - return ElementsAccessorSubclass::Get( + return ElementsAccessorSubclass::GetImpl( BackingStoreClass::cast(backing_store), key, obj, receiver); } - static MaybeObject* Get(BackingStoreClass* backing_store, - uint32_t key, - JSObject* obj, - Object* receiver) { - if (key < ElementsAccessorSubclass::GetCapacity(backing_store)) { - return backing_store->get(key); - } - return backing_store->GetHeap()->the_hole_value(); + static MaybeObject* GetImpl(BackingStoreClass* backing_store, + uint32_t key, + JSObject* obj, + Object* receiver) { + return (key < ElementsAccessorSubclass::GetCapacityImpl(backing_store)) + ? backing_store->get(key) + : backing_store->GetHeap()->the_hole_value(); } virtual MaybeObject* SetLength(JSObject* obj, Object* length) { ASSERT(obj->IsJSArray()); - return ElementsAccessorSubclass::SetLength( + return ElementsAccessorSubclass::SetLengthImpl( BackingStoreClass::cast(obj->elements()), obj, length); } - static MaybeObject* SetLength(BackingStoreClass* backing_store, - JSObject* obj, - Object* length); + static MaybeObject* SetLengthImpl(BackingStoreClass* backing_store, + JSObject* obj, + Object* length); virtual MaybeObject* SetCapacityAndLength(JSArray* array, int capacity, @@ -167,7 +166,7 @@ class ElementsAccessorBase : public ElementsAccessor { } #endif BackingStoreClass* backing_store = BackingStoreClass::cast(from); - uint32_t len1 = ElementsAccessorSubclass::GetCapacity(backing_store); + uint32_t len1 = ElementsAccessorSubclass::GetCapacityImpl(backing_store); // Optimize if 'other' is empty. // We cannot optimize if 'this' is empty, as other may have holes. @@ -176,14 +175,13 @@ class ElementsAccessorBase : public ElementsAccessor { // Compute how many elements are not in other. int extra = 0; for (uint32_t y = 0; y < len1; y++) { - if (ElementsAccessorSubclass::HasElementAtIndex(backing_store, - y, - holder, - receiver)) { + if (ElementsAccessorSubclass::HasElementAtIndexImpl( + backing_store, y, holder, receiver)) { uint32_t key = - ElementsAccessorSubclass::GetKeyForIndex(backing_store, y); + ElementsAccessorSubclass::GetKeyForIndexImpl(backing_store, y); MaybeObject* maybe_value = - ElementsAccessorSubclass::Get(backing_store, key, holder, receiver); + ElementsAccessorSubclass::GetImpl(backing_store, key, + holder, receiver); Object* value; if (!maybe_value->ToObject(&value)) return maybe_value; ASSERT(!value->IsTheHole()); @@ -214,14 +212,13 @@ class ElementsAccessorBase : public ElementsAccessor { // Fill in the extra values. 
int index = 0; for (uint32_t y = 0; y < len1; y++) { - if (ElementsAccessorSubclass::HasElementAtIndex(backing_store, - y, - holder, - receiver)) { + if (ElementsAccessorSubclass::HasElementAtIndexImpl( + backing_store, y, holder, receiver)) { uint32_t key = - ElementsAccessorSubclass::GetKeyForIndex(backing_store, y); + ElementsAccessorSubclass::GetKeyForIndexImpl(backing_store, y); MaybeObject* maybe_value = - ElementsAccessorSubclass::Get(backing_store, key, holder, receiver); + ElementsAccessorSubclass::GetImpl(backing_store, key, + holder, receiver); Object* value; if (!maybe_value->ToObject(&value)) return maybe_value; if (!value->IsTheHole() && !HasKey(to, value)) { @@ -235,25 +232,23 @@ class ElementsAccessorBase : public ElementsAccessor { } protected: - static uint32_t GetCapacity(BackingStoreClass* backing_store) { + static uint32_t GetCapacityImpl(BackingStoreClass* backing_store) { return backing_store->length(); } virtual uint32_t GetCapacity(FixedArrayBase* backing_store) { - return ElementsAccessorSubclass::GetCapacity( + return ElementsAccessorSubclass::GetCapacityImpl( BackingStoreClass::cast(backing_store)); } - static bool HasElementAtIndex(BackingStoreClass* backing_store, - uint32_t index, - JSObject* holder, - Object* receiver) { + static bool HasElementAtIndexImpl(BackingStoreClass* backing_store, + uint32_t index, + JSObject* holder, + Object* receiver) { uint32_t key = - ElementsAccessorSubclass::GetKeyForIndex(backing_store, index); - MaybeObject* element = ElementsAccessorSubclass::Get(backing_store, - key, - holder, - receiver); + ElementsAccessorSubclass::GetKeyForIndexImpl(backing_store, index); + MaybeObject* element = + ElementsAccessorSubclass::GetImpl(backing_store, key, holder, receiver); return !element->IsTheHole(); } @@ -261,18 +256,18 @@ class ElementsAccessorBase : public ElementsAccessor { uint32_t index, JSObject* holder, Object* receiver) { - return ElementsAccessorSubclass::HasElementAtIndex( + return ElementsAccessorSubclass::HasElementAtIndexImpl( BackingStoreClass::cast(backing_store), index, holder, receiver); } - static uint32_t GetKeyForIndex(BackingStoreClass* backing_store, - uint32_t index) { + static uint32_t GetKeyForIndexImpl(BackingStoreClass* backing_store, + uint32_t index) { return index; } virtual uint32_t GetKeyForIndex(FixedArrayBase* backing_store, uint32_t index) { - return ElementsAccessorSubclass::GetKeyForIndex( + return ElementsAccessorSubclass::GetKeyForIndexImpl( BackingStoreClass::cast(backing_store), index); } @@ -446,10 +441,10 @@ class FastDoubleElementsAccessor return obj->GetHeap()->true_value(); } - static bool HasElementAtIndex(FixedDoubleArray* backing_store, - uint32_t index, - JSObject* holder, - Object* receiver) { + static bool HasElementAtIndexImpl(FixedDoubleArray* backing_store, + uint32_t index, + JSObject* holder, + Object* receiver) { return !backing_store->is_the_hole(index); } }; @@ -465,20 +460,19 @@ class ExternalElementsAccessor friend class ElementsAccessorBase<ExternalElementsAccessorSubclass, ExternalArray>; - static MaybeObject* Get(ExternalArray* backing_store, - uint32_t key, - JSObject* obj, - Object* receiver) { - if (key < ExternalElementsAccessorSubclass::GetCapacity(backing_store)) { - return backing_store->get(key); - } else { - return backing_store->GetHeap()->undefined_value(); - } + static MaybeObject* GetImpl(ExternalArray* backing_store, + uint32_t key, + JSObject* obj, + Object* receiver) { + return + key < ExternalElementsAccessorSubclass::GetCapacityImpl(backing_store) + ? 
backing_store->get(key) + : backing_store->GetHeap()->undefined_value(); } - static MaybeObject* SetLength(ExternalArray* backing_store, - JSObject* obj, - Object* length) { + static MaybeObject* SetLengthImpl(ExternalArray* backing_store, + JSObject* obj, + Object* length) { // External arrays do not support changing their length. UNREACHABLE(); return obj; @@ -663,10 +657,10 @@ class DictionaryElementsAccessor return DeleteCommon(obj, key, mode); } - static MaybeObject* Get(SeededNumberDictionary* backing_store, - uint32_t key, - JSObject* obj, - Object* receiver) { + static MaybeObject* GetImpl(SeededNumberDictionary* backing_store, + uint32_t key, + JSObject* obj, + Object* receiver) { int entry = backing_store->FindEntry(key); if (entry != SeededNumberDictionary::kNotFound) { Object* element = backing_store->ValueAt(entry); @@ -683,8 +677,8 @@ class DictionaryElementsAccessor return obj->GetHeap()->the_hole_value(); } - static uint32_t GetKeyForIndex(SeededNumberDictionary* dict, - uint32_t index) { + static uint32_t GetKeyForIndexImpl(SeededNumberDictionary* dict, + uint32_t index) { Object* key = dict->KeyAt(index); return Smi::cast(key)->value(); } @@ -698,10 +692,10 @@ class NonStrictArgumentsElementsAccessor friend class ElementsAccessorBase<NonStrictArgumentsElementsAccessor, FixedArray>; - static MaybeObject* Get(FixedArray* parameter_map, - uint32_t key, - JSObject* obj, - Object* receiver) { + static MaybeObject* GetImpl(FixedArray* parameter_map, + uint32_t key, + JSObject* obj, + Object* receiver) { Object* probe = GetParameterMapArg(parameter_map, key); if (!probe->IsTheHole()) { Context* context = Context::cast(parameter_map->get(0)); @@ -718,9 +712,9 @@ class NonStrictArgumentsElementsAccessor } } - static MaybeObject* SetLength(FixedArray* parameter_map, - JSObject* obj, - Object* length) { + static MaybeObject* SetLengthImpl(FixedArray* parameter_map, + JSObject* obj, + Object* length) { // TODO(mstarzinger): This was never implemented but will be used once we // correctly implement [[DefineOwnProperty]] on arrays. UNIMPLEMENTED(); @@ -748,21 +742,21 @@ class NonStrictArgumentsElementsAccessor return obj->GetHeap()->true_value(); } - static uint32_t GetCapacity(FixedArray* parameter_map) { + static uint32_t GetCapacityImpl(FixedArray* parameter_map) { FixedArrayBase* arguments = FixedArrayBase::cast(parameter_map->get(1)); return Max(static_cast<uint32_t>(parameter_map->length() - 2), ForArray(arguments)->GetCapacity(arguments)); } - static uint32_t GetKeyForIndex(FixedArray* dict, - uint32_t index) { + static uint32_t GetKeyForIndexImpl(FixedArray* dict, + uint32_t index) { return index; } - static bool HasElementAtIndex(FixedArray* parameter_map, - uint32_t index, - JSObject* holder, - Object* receiver) { + static bool HasElementAtIndexImpl(FixedArray* parameter_map, + uint32_t index, + JSObject* holder, + Object* receiver) { Object* probe = GetParameterMapArg(parameter_map, index); if (!probe->IsTheHole()) { return true; @@ -866,9 +860,9 @@ void ElementsAccessor::InitializeOncePerProcess() { template <typename ElementsAccessorSubclass, typename BackingStoreClass> MaybeObject* ElementsAccessorBase<ElementsAccessorSubclass, BackingStoreClass>:: - SetLength(BackingStoreClass* backing_store, - JSObject* obj, - Object* length) { + SetLengthImpl(BackingStoreClass* backing_store, + JSObject* obj, + Object* length) { JSArray* array = JSArray::cast(obj); // Fast case: The new length fits into a Smi. 
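On the Get/SetLength/GetCapacity renames running through elements.cc above: the ElementsAccessor hierarchy dispatches statically through a CRTP template parameter, and giving the static helpers an Impl suffix keeps them from shadowing the virtual methods of the same name. A minimal model of that dispatch, with illustrative names rather than V8's classes:

#include <cstdio>

class Accessor {
 public:
  virtual ~Accessor() {}
  virtual int Get(int key) = 0;
};

// CRTP base: the virtual Get forwards to the subclass's static GetImpl.
// Keeping the two names distinct avoids a static member hiding the virtual.
template <typename Subclass>
class AccessorBase : public Accessor {
 public:
  virtual int Get(int key) { return Subclass::GetImpl(key); }
  static int GetImpl(int key) { return key; }  // default behavior
};

class DoublingAccessor : public AccessorBase<DoublingAccessor> {
 public:
  static int GetImpl(int key) { return 2 * key; }  // statically overrides
};

int main() {
  DoublingAccessor d;
  Accessor* a = &d;
  std::printf("%d\n", a->Get(21));  // 42: virtual call, static dispatch inside
}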
@@ -917,7 +911,9 @@ MaybeObject* ElementsAccessorBase<ElementsAccessorSubclass, BackingStoreClass>:: MaybeObject* maybe_obj = array->GetHeap()->AllocateFixedArray(1); if (!maybe_obj->To(&new_backing_store)) return maybe_obj; new_backing_store->set(0, length); - array->SetContent(new_backing_store); + { MaybeObject* result = array->SetContent(new_backing_store); + if (result->IsFailure()) return result; + } return array; } diff --git a/deps/v8/src/execution.cc b/deps/v8/src/execution.cc index 71e8ea34a1..00806a7ce2 100644 --- a/deps/v8/src/execution.cc +++ b/deps/v8/src/execution.cc @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -882,7 +882,9 @@ MaybeObject* Execution::HandleStackGuardInterrupt() { } isolate->counters()->stack_interrupts()->Increment(); - if (stack_guard->IsRuntimeProfilerTick()) { + // If FLAG_count_based_interrupts, every interrupt is a profiler interrupt. + if (FLAG_count_based_interrupts || + stack_guard->IsRuntimeProfilerTick()) { isolate->counters()->runtime_profiler_ticks()->Increment(); stack_guard->Continue(RUNTIME_PROFILER_TICK); isolate->runtime_profiler()->OptimizeNow(); @@ -904,4 +906,5 @@ MaybeObject* Execution::HandleStackGuardInterrupt() { return isolate->heap()->undefined_value(); } + } } // namespace v8::internal diff --git a/deps/v8/src/factory.cc b/deps/v8/src/factory.cc index 5915f487de..15ded01e7b 100644 --- a/deps/v8/src/factory.cc +++ b/deps/v8/src/factory.cc @@ -148,6 +148,13 @@ Handle<AccessorPair> Factory::NewAccessorPair() { } +Handle<TypeFeedbackInfo> Factory::NewTypeFeedbackInfo() { + CALL_HEAP_FUNCTION(isolate(), + isolate()->heap()->AllocateTypeFeedbackInfo(), + TypeFeedbackInfo); +} + + // Symbols are created in the old generation (data space). Handle<String> Factory::LookupSymbol(Vector<const char> string) { CALL_HEAP_FUNCTION(isolate(), @@ -540,11 +547,7 @@ Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo( context->global_context()); } result->set_literals(*literals); - } else { - result->set_function_bindings(isolate()->heap()->empty_fixed_array()); } - result->set_next_function_link(isolate()->heap()->undefined_value()); - if (V8::UseCrankshaft() && FLAG_always_opt && result->is_compiled() && @@ -865,7 +868,7 @@ Handle<DescriptorArray> Factory::CopyAppendCallbackDescriptors( // Copy the descriptors from the array. for (int i = 0; i < array->number_of_descriptors(); i++) { if (!array->IsNullDescriptor(i)) { - result->CopyFrom(descriptor_count++, *array, i, witness); + DescriptorArray::CopyFrom(result, descriptor_count++, array, i, witness); } } @@ -899,7 +902,7 @@ Handle<DescriptorArray> Factory::CopyAppendCallbackDescriptors( Handle<DescriptorArray> new_result = NewDescriptorArray(number_of_descriptors); for (int i = 0; i < number_of_descriptors; i++) { - new_result->CopyFrom(i, *result, i, witness); + DescriptorArray::CopyFrom(new_result, i, result, i, witness); } result = new_result; } diff --git a/deps/v8/src/factory.h b/deps/v8/src/factory.h index 121d34cfed..786d4a983a 100644 --- a/deps/v8/src/factory.h +++ b/deps/v8/src/factory.h @@ -76,6 +76,8 @@ class Factory { // Allocates a pre-tenured empty AccessorPair. 
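One detail in the first hunk above is easy to miss: SetContent returns a MaybeObject because it can itself fail on allocation, and the fix captures that result and propagates the failure instead of dropping it. A simplified stand-in for the pattern (a toy Maybe type, not V8's MaybeObject):

#include <cstdio>

template <typename T>
struct Maybe {
  bool is_failure;
  T value;
  static Maybe Failure() { return Maybe{true, T()}; }
  static Maybe Just(T v) { return Maybe{false, v}; }
};

Maybe<int> AllocateSlot(bool out_of_memory) {
  if (out_of_memory) return Maybe<int>::Failure();  // e.g. a failed GC retry
  return Maybe<int>::Just(7);
}

Maybe<int> SetContent(bool oom) {
  // The bug pattern: calling a fallible operation and ignoring its result.
  // The fix: capture the result and propagate failures to the caller.
  Maybe<int> result = AllocateSlot(oom);
  if (result.is_failure) return result;
  return Maybe<int>::Just(result.value + 1);
}

int main() {
  std::printf("ok=%d\n", !SetContent(false).is_failure);   // ok=1
  std::printf("fail=%d\n", SetContent(true).is_failure);   // fail=1
}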
Handle<AccessorPair> NewAccessorPair(); + Handle<TypeFeedbackInfo> NewTypeFeedbackInfo(); + Handle<String> LookupSymbol(Vector<const char> str); Handle<String> LookupSymbol(Handle<String> str); Handle<String> LookupAsciiSymbol(Vector<const char> str); diff --git a/deps/v8/src/flag-definitions.h b/deps/v8/src/flag-definitions.h index 59e54dd354..bec85bfa2a 100644 --- a/deps/v8/src/flag-definitions.h +++ b/deps/v8/src/flag-definitions.h @@ -106,10 +106,13 @@ private: // #define FLAG FLAG_FULL -// Flags for experimental language features. +// Flags for language modes and experimental language features. +DEFINE_bool(use_strict, false, "enforce strict mode") + DEFINE_bool(harmony_typeof, false, "enable harmony semantics for typeof") DEFINE_bool(harmony_scoping, false, "enable harmony block scoping") -DEFINE_bool(harmony_modules, false, "enable harmony modules") +DEFINE_bool(harmony_modules, false, + "enable harmony modules (implies block scoping)") DEFINE_bool(harmony_proxies, false, "enable harmony proxies") DEFINE_bool(harmony_collections, false, "enable harmony collections (sets, maps, and weak maps)") @@ -118,9 +121,10 @@ DEFINE_implication(harmony, harmony_scoping) DEFINE_implication(harmony, harmony_modules) DEFINE_implication(harmony, harmony_proxies) DEFINE_implication(harmony, harmony_collections) +DEFINE_implication(harmony_modules, harmony_scoping) // Flags for experimental implementation features. -DEFINE_bool(smi_only_arrays, false, "tracks arrays with only smi values") +DEFINE_bool(smi_only_arrays, true, "tracks arrays with only smi values") DEFINE_bool(clever_optimizations, true, "Optimize object size, Array shift, DOM strings and string +") @@ -168,11 +172,28 @@ DEFINE_int(loop_weight, 1, "loop weight for representation inference") // Experimental profiler changes. 
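The DEFINE_implication entries above (for example, --harmony-modules now implying --harmony-scoping) define a small dependency graph over flags. One plausible way to resolve such implications, including chains like harmony -> harmony_modules -> harmony_scoping, is a fixed-point pass over the rules; the registry below is a hypothetical sketch, not V8's actual flag machinery:

#include <cstdio>
#include <map>
#include <string>
#include <utility>
#include <vector>

std::map<std::string, bool> flags = {
    {"harmony", false},          {"harmony_scoping", false},
    {"harmony_modules", false},  {"harmony_proxies", false},
    {"harmony_collections", false},
};

// (if_flag, then_flag): setting if_flag forces then_flag on.
const std::vector<std::pair<std::string, std::string>> implications = {
    {"harmony", "harmony_scoping"},   {"harmony", "harmony_modules"},
    {"harmony", "harmony_proxies"},   {"harmony", "harmony_collections"},
    {"harmony_modules", "harmony_scoping"},  // modules imply block scoping
};

void ComputeFlagImplications() {
  bool changed = true;
  while (changed) {  // iterate to a fixed point so chained implications land
    changed = false;
    for (const auto& imp : implications) {
      if (flags[imp.first] && !flags[imp.second]) {
        flags[imp.second] = true;
        changed = true;
      }
    }
  }
}

int main() {
  flags["harmony_modules"] = true;  // like passing --harmony-modules
  ComputeFlagImplications();
  for (const auto& f : flags)
    std::printf("%s = %s\n", f.first.c_str(), f.second ? "true" : "false");
}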
DEFINE_bool(experimental_profiler, false, "enable all profiler experiments") DEFINE_bool(watch_ic_patching, false, "profiler considers IC stability") +DEFINE_int(frame_count, 2, "number of stack frames inspected by the profiler") DEFINE_bool(self_optimization, false, "primitive functions trigger their own optimization") +DEFINE_bool(count_based_interrupts, false, + "trigger profiler ticks based on counting instead of timing") +DEFINE_bool(interrupt_at_exit, false, + "insert an interrupt check at function exit") +DEFINE_bool(weighted_back_edges, false, + "weight back edges by jump distance for interrupt triggering") +DEFINE_int(interrupt_budget, 10000, + "execution budget before interrupt is triggered") +DEFINE_int(type_info_threshold, 0, + "percentage of ICs that must have type info to allow optimization") DEFINE_implication(experimental_profiler, watch_ic_patching) DEFINE_implication(experimental_profiler, self_optimization) +DEFINE_implication(experimental_profiler, count_based_interrupts) +DEFINE_implication(experimental_profiler, interrupt_at_exit) +DEFINE_implication(experimental_profiler, weighted_back_edges) + +DEFINE_bool(trace_opt_verbose, false, "extra verbose compilation tracing") +DEFINE_implication(trace_opt_verbose, trace_opt) // assembler-ia32.cc / assembler-arm.cc / assembler-x64.cc DEFINE_bool(debug_code, false, diff --git a/deps/v8/src/full-codegen.cc b/deps/v8/src/full-codegen.cc index 5c5ba6b256..3d10e96b53 100644 --- a/deps/v8/src/full-codegen.cc +++ b/deps/v8/src/full-codegen.cc @@ -291,8 +291,8 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) { masm.positions_recorder()->StartGDBJITLineInfoRecording(); #endif - FullCodeGenerator cgen(&masm); - cgen.Generate(info); + FullCodeGenerator cgen(&masm, info); + cgen.Generate(); if (cgen.HasStackOverflow()) { ASSERT(!isolate->has_pending_exception()); return false; @@ -303,6 +303,7 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) { Handle<Code> code = CodeGenerator::MakeCodeEpilogue(&masm, flags, info); code->set_optimizable(info->IsOptimizable()); cgen.PopulateDeoptimizationData(code); + cgen.PopulateTypeFeedbackInfo(code); cgen.PopulateTypeFeedbackCells(code); code->set_has_deoptimization_support(info->HasDeoptimizationSupport()); code->set_handler_table(*cgen.handler_table()); @@ -361,6 +362,13 @@ void FullCodeGenerator::PopulateDeoptimizationData(Handle<Code> code) { } +void FullCodeGenerator::PopulateTypeFeedbackInfo(Handle<Code> code) { + Handle<TypeFeedbackInfo> info = isolate()->factory()->NewTypeFeedbackInfo(); + info->set_ic_total_count(ic_total_count_); + code->set_type_feedback_info(*info); +} + + void FullCodeGenerator::PopulateTypeFeedbackCells(Handle<Code> code) { if (type_feedback_cells_.is_empty()) return; int length = type_feedback_cells_.length(); @@ -371,7 +379,8 @@ void FullCodeGenerator::PopulateTypeFeedbackCells(Handle<Code> code) { cache->SetAstId(i, Smi::FromInt(type_feedback_cells_[i].ast_id)); cache->SetCell(i, *type_feedback_cells_[i].cell); } - code->set_type_feedback_cells(*cache); + TypeFeedbackInfo::cast(code->type_feedback_info())->set_type_feedback_cells( + *cache); } @@ -404,6 +413,7 @@ void FullCodeGenerator::PrepareForBailoutForId(unsigned id, State state) { if (!info_->HasDeoptimizationSupport()) return; unsigned pc_and_state = StateField::encode(state) | PcField::encode(masm_->pc_offset()); + ASSERT(Smi::IsValid(pc_and_state)); BailoutEntry entry = { id, pc_and_state }; #ifdef DEBUG if (FLAG_enable_slow_asserts) { @@ -1073,7 +1083,7 @@ void 
FullCodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) { // Check stack before looping. PrepareForBailoutForId(stmt->BackEdgeId(), NO_REGISTERS); __ bind(&stack_check); - EmitStackCheck(stmt); + EmitStackCheck(stmt, &body); __ jmp(&body); PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS); @@ -1102,7 +1112,7 @@ void FullCodeGenerator::VisitWhileStatement(WhileStatement* stmt) { SetStatementPosition(stmt); // Check stack before looping. - EmitStackCheck(stmt); + EmitStackCheck(stmt, &body); __ bind(&test); VisitForControl(stmt->cond(), @@ -1145,7 +1155,7 @@ void FullCodeGenerator::VisitForStatement(ForStatement* stmt) { SetStatementPosition(stmt); // Check stack before looping. - EmitStackCheck(stmt); + EmitStackCheck(stmt, &body); __ bind(&test); if (stmt->cond() != NULL) { diff --git a/deps/v8/src/full-codegen.h b/deps/v8/src/full-codegen.h index f9b7c3842a..c1dec15e81 100644 --- a/deps/v8/src/full-codegen.h +++ b/deps/v8/src/full-codegen.h @@ -77,29 +77,27 @@ class FullCodeGenerator: public AstVisitor { TOS_REG }; - explicit FullCodeGenerator(MacroAssembler* masm) + FullCodeGenerator(MacroAssembler* masm, CompilationInfo* info) : masm_(masm), - info_(NULL), - scope_(NULL), + info_(info), + scope_(info->scope()), nesting_stack_(NULL), loop_depth_(0), global_count_(0), context_(NULL), - bailout_entries_(0), + bailout_entries_(info->HasDeoptimizationSupport() + ? info->function()->ast_node_count() : 0), stack_checks_(2), // There's always at least one. - type_feedback_cells_(0) { - } + type_feedback_cells_(info->HasDeoptimizationSupport() + ? info->function()->ast_node_count() : 0), + ic_total_count_(0) { } static bool MakeCode(CompilationInfo* info); - void Generate(CompilationInfo* info); - void PopulateDeoptimizationData(Handle<Code> code); - void PopulateTypeFeedbackCells(Handle<Code> code); - - Handle<FixedArray> handler_table() { return handler_table_; } - - class StateField : public BitField<State, 0, 8> { }; - class PcField : public BitField<unsigned, 8, 32-8> { }; + // Encode state and pc-offset as a BitField<type, start, size>. + // Only use 30 bits because we encode the result as a smi. + class StateField : public BitField<State, 0, 1> { }; + class PcField : public BitField<unsigned, 1, 30-1> { }; static const char* State2String(State state) { switch (state) { @@ -424,7 +422,10 @@ class FullCodeGenerator: public AstVisitor { // Platform-specific code for checking the stack limit at the back edge of // a loop. - void EmitStackCheck(IterationStatement* stmt); + // This is meant to be called at loop back edges, |back_edge_target| is + // the jump target of the back edge and is used to approximate the amount + // of code inside the loop. + void EmitStackCheck(IterationStatement* stmt, Label* back_edge_target); // Record the OSR AST id corresponding to a stack check in the code. void RecordStackCheck(unsigned osr_ast_id); // Emit a table of stack check ids and pcs into the code stream. Return @@ -494,7 +495,7 @@ class FullCodeGenerator: public AstVisitor { // Assign to the given expression as if via '='. The right-hand-side value // is expected in the accumulator. - void EmitAssignment(Expression* expr, int bailout_ast_id); + void EmitAssignment(Expression* expr); // Complete a variable assignment. The right-hand-side value is expected // in the accumulator. @@ -510,6 +511,10 @@ class FullCodeGenerator: public AstVisitor { // accumulator. 
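A note on the StateField/PcField change above: the old encoding used 8 + 24 = 32 bits, which cannot be guaranteed to fit in a smi, while the new one packs one bit of State and a 29-bit pc offset into 30 bits, which always can; the new ASSERT(Smi::IsValid(pc_and_state)) in full-codegen.cc enforces exactly that. A self-contained sketch of the bit-field packing, with a toy smi check standing in for Smi::IsValid:

#include <cassert>
#include <cstdint>
#include <cstdio>

template <class T, int shift, int size>
class BitField {
 public:
  static const uint32_t kMask = ((1u << size) - 1) << shift;
  static uint32_t encode(T value) {
    assert((static_cast<uint32_t>(value) & ~((1u << size) - 1)) == 0);
    return static_cast<uint32_t>(value) << shift;
  }
  static T decode(uint32_t value) {
    return static_cast<T>((value & kMask) >> shift);
  }
};

enum State { NO_REGISTERS = 0, TOS_REG = 1 };
typedef BitField<State, 0, 1> StateField;   // 1 bit of state
typedef BitField<unsigned, 1, 29> PcField;  // 29 bits of pc offset (30-1)

// Toy stand-in: non-negative smis need at most 30 bits on 32-bit targets.
bool IsValidSmi(uint32_t v) { return v < (1u << 30); }

int main() {
  uint32_t pc_and_state = StateField::encode(TOS_REG) | PcField::encode(123456);
  assert(IsValidSmi(pc_and_state));  // mirrors ASSERT(Smi::IsValid(...))
  std::printf("state=%d pc=%u\n", StateField::decode(pc_and_state),
              PcField::decode(pc_and_state));
}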
void EmitKeyedPropertyAssignment(Assignment* expr); + void CallIC(Handle<Code> code, + RelocInfo::Mode rmode = RelocInfo::CODE_TARGET, + unsigned ast_id = kNoASTId); + void SetFunctionPosition(FunctionLiteral* fun); void SetReturnPosition(FunctionLiteral* fun); void SetStatementPosition(Statement* stmt); @@ -575,6 +580,13 @@ class FullCodeGenerator: public AstVisitor { void VisitForTypeofValue(Expression* expr); + void Generate(); + void PopulateDeoptimizationData(Handle<Code> code); + void PopulateTypeFeedbackInfo(Handle<Code> code); + void PopulateTypeFeedbackCells(Handle<Code> code); + + Handle<FixedArray> handler_table() { return handler_table_; } + struct BailoutEntry { unsigned id; unsigned pc_and_state; @@ -773,7 +785,9 @@ class FullCodeGenerator: public AstVisitor { ZoneList<BailoutEntry> bailout_entries_; ZoneList<BailoutEntry> stack_checks_; ZoneList<TypeFeedbackCellEntry> type_feedback_cells_; + int ic_total_count_; Handle<FixedArray> handler_table_; + Handle<JSGlobalPropertyCell> profiling_counter_; friend class NestedStatement; diff --git a/deps/v8/src/globals.h b/deps/v8/src/globals.h index 30b676c8bd..9f13780802 100644 --- a/deps/v8/src/globals.h +++ b/deps/v8/src/globals.h @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -191,6 +191,11 @@ typedef byte* Address; #define V8_PTR_PREFIX "" #endif // V8_HOST_ARCH_64_BIT +#ifdef __MINGW64__ +#undef V8_PTR_PREFIX +#define V8_PTR_PREFIX "I64" +#endif // __MINGW64__ + // The following macro works on both 32 and 64-bit platforms. // Usage: instead of writing 0x1234567890123456 // write V8_2PART_UINT64_C(0x12345678,90123456); diff --git a/deps/v8/src/handles.cc b/deps/v8/src/handles.cc index 943a1c0b6a..1bb258e475 100644 --- a/deps/v8/src/handles.cc +++ b/deps/v8/src/handles.cc @@ -711,26 +711,57 @@ Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object, isolate); } isolate->counters()->enum_cache_misses()->Increment(); + Handle<Map> map(object->map()); int num_enum = object->NumberOfLocalProperties(DONT_ENUM); + Handle<FixedArray> storage = isolate->factory()->NewFixedArray(num_enum); Handle<FixedArray> sort_array = isolate->factory()->NewFixedArray(num_enum); + + Handle<FixedArray> indices; + Handle<FixedArray> sort_array2; + + if (cache_result) { + indices = isolate->factory()->NewFixedArray(num_enum); + sort_array2 = isolate->factory()->NewFixedArray(num_enum); + } + Handle<DescriptorArray> descs = Handle<DescriptorArray>(object->map()->instance_descriptors(), isolate); + for (int i = 0; i < descs->number_of_descriptors(); i++) { if (descs->IsProperty(i) && !descs->IsDontEnum(i)) { - (*storage)->set(index, descs->GetKey(i)); + storage->set(index, descs->GetKey(i)); PropertyDetails details(descs->GetDetails(i)); - (*sort_array)->set(index, Smi::FromInt(details.index())); + sort_array->set(index, Smi::FromInt(details.index())); + if (!indices.is_null()) { + if (details.type() != FIELD) { + indices = Handle<FixedArray>(); + sort_array2 = Handle<FixedArray>(); + } else { + int field_index = Descriptor::IndexFromValue(descs->GetValue(i)); + if (field_index >= map->inobject_properties()) { + field_index = -(field_index - map->inobject_properties() + 1); + } + indices->set(index, Smi::FromInt(field_index)); + sort_array2->set(index, Smi::FromInt(details.index())); + } + } index++; } } 
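GetEnumPropertyKeys above now builds a second pair of arrays (indices and sort_array2) so the enum cache can memoize each enumerable property's field index alongside its name, and the SortPairs calls just below this point sort each value array by its companion insertion-order array. A sketch of that parallel pair sort, using the standard library in place of FixedArray::SortPairs:

#include <algorithm>
#include <cstdio>
#include <numeric>
#include <string>
#include <vector>

// Sort `values` in place by the order given in `keys`, like SortPairs.
template <typename V>
void SortPairs(std::vector<V>& values, std::vector<int> keys) {
  std::vector<size_t> order(values.size());
  std::iota(order.begin(), order.end(), 0);
  std::sort(order.begin(), order.end(),
            [&](size_t a, size_t b) { return keys[a] < keys[b]; });
  std::vector<V> sorted;
  for (size_t i : order) sorted.push_back(values[i]);
  values.swap(sorted);
}

int main() {
  // Property names and their field indices, plus insertion order
  // (the details.index() analogue from the hunk above).
  std::vector<std::string> names = {"b", "a", "c"};
  std::vector<int> field_indices = {1, 0, 2};
  std::vector<int> insertion_order = {2, 0, 1};

  SortPairs(names, insertion_order);          // storage->SortPairs(sort_array)
  SortPairs(field_indices, insertion_order);  // indices->SortPairs(sort_array2)

  for (size_t i = 0; i < names.size(); i++)
    std::printf("%s -> field %d\n", names[i].c_str(), field_indices[i]);
}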
- (*storage)->SortPairs(*sort_array, sort_array->length()); + storage->SortPairs(*sort_array, sort_array->length()); + if (!indices.is_null()) { + indices->SortPairs(*sort_array2, sort_array2->length()); + } if (cache_result) { Handle<FixedArray> bridge_storage = isolate->factory()->NewFixedArray( DescriptorArray::kEnumCacheBridgeLength); DescriptorArray* desc = object->map()->instance_descriptors(); - desc->SetEnumCache(*bridge_storage, *storage); + desc->SetEnumCache(*bridge_storage, + *storage, + indices.is_null() ? Object::cast(Smi::FromInt(0)) + : Object::cast(*indices)); } ASSERT(storage->length() == index); return storage; diff --git a/deps/v8/src/heap.cc b/deps/v8/src/heap.cc index b082886394..4c54e84f6c 100644 --- a/deps/v8/src/heap.cc +++ b/deps/v8/src/heap.cc @@ -1938,6 +1938,19 @@ MaybeObject* Heap::AllocateAccessorPair() { } +MaybeObject* Heap::AllocateTypeFeedbackInfo() { + TypeFeedbackInfo* info; + { MaybeObject* maybe_result = AllocateStruct(TYPE_FEEDBACK_INFO_TYPE); + if (!maybe_result->To(&info)) return maybe_result; + } + info->set_ic_total_count(0); + info->set_ic_with_typeinfo_count(0); + info->set_type_feedback_cells(TypeFeedbackCells::cast(empty_fixed_array()), + SKIP_WRITE_BARRIER); + return info; +} + + const Heap::StringTypeTable Heap::string_type_table[] = { #define STRING_TYPE_ELEMENT(type, size, name, camel_name) \ {type, size, k##camel_name##MapRootIndex}, @@ -2221,6 +2234,12 @@ bool Heap::CreateInitialMaps() { AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel); if (!maybe_obj->ToObject(&obj)) return false; } + set_module_context_map(Map::cast(obj)); + + { MaybeObject* maybe_obj = + AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel); + if (!maybe_obj->ToObject(&obj)) return false; + } Map* global_context_map = Map::cast(obj); global_context_map->set_visitor_id(StaticVisitorBase::kVisitGlobalContext); set_global_context_map(global_context_map); @@ -3361,8 +3380,7 @@ MaybeObject* Heap::CreateCode(const CodeDesc& desc, code->set_check_type(RECEIVER_MAP_CHECK); } code->set_deoptimization_data(empty_fixed_array(), SKIP_WRITE_BARRIER); - code->set_type_feedback_cells(TypeFeedbackCells::cast(empty_fixed_array()), - SKIP_WRITE_BARRIER); + code->set_type_feedback_info(undefined_value(), SKIP_WRITE_BARRIER); code->set_handler_table(empty_fixed_array(), SKIP_WRITE_BARRIER); code->set_gc_metadata(Smi::FromInt(0)); // Allow self references to created code object by patching the handle to @@ -4361,10 +4379,10 @@ MaybeObject* Heap::AllocateJSArray( Context* global_context = isolate()->context()->global_context(); JSFunction* array_function = global_context->array_function(); Map* map = array_function->initial_map(); - if (elements_kind == FAST_ELEMENTS || !FLAG_smi_only_arrays) { - map = Map::cast(global_context->object_js_array_map()); - } else if (elements_kind == FAST_DOUBLE_ELEMENTS) { + if (elements_kind == FAST_DOUBLE_ELEMENTS) { map = Map::cast(global_context->double_js_array_map()); + } else if (elements_kind == FAST_ELEMENTS || !FLAG_smi_only_arrays) { + map = Map::cast(global_context->object_js_array_map()); } else { ASSERT(elements_kind == FAST_SMI_ONLY_ELEMENTS); ASSERT(map == global_context->smi_js_array_map()); @@ -4562,7 +4580,7 @@ MaybeObject* Heap::AllocateEmptyFixedDoubleArray() { MaybeObject* Heap::AllocateUninitializedFixedDoubleArray( int length, PretenureFlag pretenure) { - if (length == 0) return empty_fixed_double_array(); + if (length == 0) return empty_fixed_array(); Object* elements_object; MaybeObject* maybe_obj = 
AllocateRawFixedDoubleArray(length, pretenure); @@ -4579,7 +4597,7 @@ MaybeObject* Heap::AllocateUninitializedFixedDoubleArray( MaybeObject* Heap::AllocateFixedDoubleArrayWithHoles( int length, PretenureFlag pretenure) { - if (length == 0) return empty_fixed_double_array(); + if (length == 0) return empty_fixed_array(); Object* elements_object; MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(length, pretenure); @@ -5062,8 +5080,37 @@ void Heap::Verify() { cell_space_->Verify(&no_dirty_regions_visitor); lo_space_->Verify(); -} + VerifyNoAccessorPairSharing(); +} + + +void Heap::VerifyNoAccessorPairSharing() { + // Verification is done in 2 phases: First we mark all AccessorPairs, checking + // that we mark only unmarked pairs, then we clear all marks, restoring the + // initial state. We use the Smi tag of the AccessorPair's getter as the + // marking bit, because we can never see a Smi as the getter. + for (int phase = 0; phase < 2; phase++) { + HeapObjectIterator iter(map_space()); + for (HeapObject* obj = iter.Next(); obj != NULL; obj = iter.Next()) { + if (obj->IsMap()) { + DescriptorArray* descs = Map::cast(obj)->instance_descriptors(); + for (int i = 0; i < descs->number_of_descriptors(); i++) { + if (descs->GetType(i) == CALLBACKS && + descs->GetValue(i)->IsAccessorPair()) { + AccessorPair* accessors = AccessorPair::cast(descs->GetValue(i)); + uintptr_t before = reinterpret_cast<intptr_t>(accessors->getter()); + uintptr_t after = (phase == 0) ? + ((before & ~kSmiTagMask) | kSmiTag) : + ((before & ~kHeapObjectTag) | kHeapObjectTag); + CHECK(before != after); + accessors->set_getter(reinterpret_cast<Object*>(after)); + } + } + } + } + } +} #endif // DEBUG diff --git a/deps/v8/src/heap.h b/deps/v8/src/heap.h index 83e9b61e84..bb5c37562e 100644 --- a/deps/v8/src/heap.h +++ b/deps/v8/src/heap.h @@ -74,7 +74,6 @@ namespace internal { V(Map, hash_table_map, HashTableMap) \ V(FixedArray, empty_fixed_array, EmptyFixedArray) \ V(ByteArray, empty_byte_array, EmptyByteArray) \ - V(FixedDoubleArray, empty_fixed_double_array, EmptyFixedDoubleArray) \ V(String, empty_string, EmptyString) \ V(DescriptorArray, empty_descriptor_array, EmptyDescriptorArray) \ V(Smi, stack_limit, StackLimit) \ @@ -131,6 +130,7 @@ namespace internal { V(Map, catch_context_map, CatchContextMap) \ V(Map, with_context_map, WithContextMap) \ V(Map, block_context_map, BlockContextMap) \ + V(Map, module_context_map, ModuleContextMap) \ V(Map, oddball_map, OddballMap) \ V(Map, message_object_map, JSMessageObjectMap) \ V(Map, foreign_map, ForeignMap) \ @@ -205,12 +205,10 @@ namespace internal { V(InitializeConstGlobal_symbol, "InitializeConstGlobal") \ V(KeyedLoadElementMonomorphic_symbol, \ "KeyedLoadElementMonomorphic") \ - V(KeyedLoadElementPolymorphic_symbol, \ - "KeyedLoadElementPolymorphic") \ V(KeyedStoreElementMonomorphic_symbol, \ "KeyedStoreElementMonomorphic") \ - V(KeyedStoreElementPolymorphic_symbol, \ - "KeyedStoreElementPolymorphic") \ + V(KeyedStoreAndGrowElementMonomorphic_symbol, \ + "KeyedStoreAndGrowElementMonomorphic") \ V(stack_overflow_symbol, "kStackOverflowBoilerplate") \ V(illegal_access_symbol, "illegal access") \ V(out_of_memory_symbol, "out-of-memory") \ @@ -643,6 +641,9 @@ class Heap { // Allocates a pre-tenured empty AccessorPair. MUST_USE_RESULT MaybeObject* AllocateAccessorPair(); + // Allocates an empty TypeFeedbackInfo. + MUST_USE_RESULT MaybeObject* AllocateTypeFeedbackInfo(); + // Clear the Instanceof cache (used when a prototype changes). 
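The VerifyNoAccessorPairSharing checker above leans on pointer tagging: a getter can never be a smi, so phase 0 retags every getter with the smi tag as a mark (the CHECK fires if a pair is reached a second time, i.e. shared), and phase 1 restores the heap-object tag. A standalone sketch of the trick with toy tag constants:

#include <cassert>
#include <cstdint>
#include <vector>

const uintptr_t kTagMask = 1;  // low bit: 0 = "smi", 1 = "heap object"
const uintptr_t kSmiTag = 0;
const uintptr_t kHeapObjectTag = 1;

int main() {
  // Three distinct "AccessorPair getters": tagged heap pointers.
  std::vector<uintptr_t> getters = {0x1001, 0x2001, 0x3001};

  // Phase 0: mark every pair by retagging its getter as a smi. Seeing a
  // pair that is already marked would mean it is reachable twice.
  for (uintptr_t& g : getters) {
    uintptr_t before = g;
    uintptr_t after = (before & ~kTagMask) | kSmiTag;
    assert(before != after);  // fails if this pair was visited already
    g = after;
  }

  // Phase 1: clear all marks, restoring the original heap-object tags.
  for (uintptr_t& g : getters) {
    uintptr_t before = g;
    uintptr_t after = (before & ~kTagMask) | kHeapObjectTag;
    assert(before != after);
    g = after;
  }

  assert(getters[0] == 0x1001);  // state fully restored
}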
inline void ClearInstanceofCache(); @@ -1222,6 +1223,10 @@ class Heap { // Verify the heap is in its normal state before or after a GC. void Verify(); + // Verify that AccessorPairs are not shared, i.e. make sure that they have + // exactly one pointer to them. + void VerifyNoAccessorPairSharing(); + void OldPointerSpaceCheckStoreBuffer(); void MapSpaceCheckStoreBuffer(); void LargeObjectSpaceCheckStoreBuffer(); diff --git a/deps/v8/src/hydrogen-instructions.cc b/deps/v8/src/hydrogen-instructions.cc index cdc3e233f2..c59c9e4f94 100644 --- a/deps/v8/src/hydrogen-instructions.cc +++ b/deps/v8/src/hydrogen-instructions.cc @@ -786,6 +786,33 @@ void HTypeofIsAndBranch::PrintDataTo(StringStream* stream) { } +void HCheckMapValue::PrintDataTo(StringStream* stream) { + value()->PrintNameTo(stream); + stream->Add(" "); + map()->PrintNameTo(stream); +} + + +void HForInPrepareMap::PrintDataTo(StringStream* stream) { + enumerable()->PrintNameTo(stream); +} + + +void HForInCacheArray::PrintDataTo(StringStream* stream) { + enumerable()->PrintNameTo(stream); + stream->Add(" "); + map()->PrintNameTo(stream); + stream->Add("[%d]", idx_); +} + + +void HLoadFieldByIndex::PrintDataTo(StringStream* stream) { + object()->PrintNameTo(stream); + stream->Add(" "); + index()->PrintNameTo(stream); +} + + HValue* HConstant::Canonicalize() { return HasNoUses() && !IsBlockEntry() ? NULL : this; } @@ -1519,10 +1546,15 @@ void HLoadKeyedFastElement::PrintDataTo(StringStream* stream) { bool HLoadKeyedFastElement::RequiresHoleCheck() { + if (hole_check_mode_ == OMIT_HOLE_CHECK) { + return false; + } + for (HUseIterator it(uses()); !it.Done(); it.Advance()) { HValue* use = it.value(); if (!use->IsChange()) return true; } + return false; } @@ -1543,6 +1575,39 @@ void HLoadKeyedGeneric::PrintDataTo(StringStream* stream) { } +HValue* HLoadKeyedGeneric::Canonicalize() { + // Recognize generic keyed loads that use property name generated + // by for-in statement as a key and rewrite them into fast property load + // by index. + if (key()->IsLoadKeyedFastElement()) { + HLoadKeyedFastElement* key_load = HLoadKeyedFastElement::cast(key()); + if (key_load->object()->IsForInCacheArray()) { + HForInCacheArray* names_cache = + HForInCacheArray::cast(key_load->object()); + + if (names_cache->enumerable() == object()) { + HForInCacheArray* index_cache = + names_cache->index_cache(); + HCheckMapValue* map_check = + new(block()->zone()) HCheckMapValue(object(), names_cache->map()); + HInstruction* index = new(block()->zone()) HLoadKeyedFastElement( + index_cache, + key_load->key(), + HLoadKeyedFastElement::OMIT_HOLE_CHECK); + HLoadFieldByIndex* load = new(block()->zone()) HLoadFieldByIndex( + object(), index); + map_check->InsertBefore(this); + index->InsertBefore(this); + load->InsertBefore(this); + return load; + } + } + } + + return this; +} + + void HLoadKeyedSpecializedArrayElement::PrintDataTo( StringStream* stream) { external_pointer()->PrintNameTo(stream); @@ -1841,17 +1906,18 @@ HType HStringCharFromCode::CalculateInferredType() { } -HType HArrayLiteral::CalculateInferredType() { - return HType::JSArray(); +HType HFastLiteral::CalculateInferredType() { + // TODO(mstarzinger): Be smarter, could also be JSArray here. 
+ return HType::JSObject(); } -HType HObjectLiteralFast::CalculateInferredType() { - return HType::JSObject(); +HType HArrayLiteral::CalculateInferredType() { + return HType::JSArray(); } -HType HObjectLiteralGeneric::CalculateInferredType() { +HType HObjectLiteral::CalculateInferredType() { return HType::JSObject(); } diff --git a/deps/v8/src/hydrogen-instructions.h b/deps/v8/src/hydrogen-instructions.h index 39e3950251..4d7b7baa88 100644 --- a/deps/v8/src/hydrogen-instructions.h +++ b/deps/v8/src/hydrogen-instructions.h @@ -97,11 +97,13 @@ class LChunkBuilder; V(CompareConstantEqAndBranch) \ V(Constant) \ V(Context) \ + V(DeclareGlobals) \ V(DeleteProperty) \ V(Deoptimize) \ V(Div) \ V(ElementsKind) \ V(EnterInlined) \ + V(FastLiteral) \ V(FixedArrayBaseLength) \ V(ForceRepresentation) \ V(FunctionLiteral) \ @@ -139,8 +141,7 @@ class LChunkBuilder; V(LoadNamedGeneric) \ V(Mod) \ V(Mul) \ - V(ObjectLiteralFast) \ - V(ObjectLiteralGeneric) \ + V(ObjectLiteral) \ V(OsrEntry) \ V(OuterContext) \ V(Parameter) \ @@ -179,7 +180,11 @@ class LChunkBuilder; V(UnaryMathOperation) \ V(UnknownOSRValue) \ V(UseConst) \ - V(ValueOf) + V(ValueOf) \ + V(ForInPrepareMap) \ + V(ForInCacheArray) \ + V(CheckMapValue) \ + V(LoadFieldByIndex) #define GVN_FLAG_LIST(V) \ V(Calls) \ @@ -1486,6 +1491,33 @@ class HOuterContext: public HUnaryOperation { }; +class HDeclareGlobals: public HUnaryOperation { + public: + HDeclareGlobals(HValue* context, + Handle<FixedArray> pairs, + int flags) + : HUnaryOperation(context), + pairs_(pairs), + flags_(flags) { + set_representation(Representation::Tagged()); + SetAllSideEffects(); + } + + HValue* context() { return OperandAt(0); } + Handle<FixedArray> pairs() const { return pairs_; } + int flags() const { return flags_; } + + DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals) + + virtual Representation RequiredInputRepresentation(int index) { + return Representation::Tagged(); + } + private: + Handle<FixedArray> pairs_; + int flags_; +}; + + class HGlobalObject: public HUnaryOperation { public: explicit HGlobalObject(HValue* context) : HUnaryOperation(context) { @@ -1983,7 +2015,8 @@ class HLoadExternalArrayPointer: public HUnaryOperation { class HCheckMap: public HTemplateInstruction<2> { public: - HCheckMap(HValue* value, Handle<Map> map, + HCheckMap(HValue* value, + Handle<Map> map, HValue* typecheck = NULL, CompareMapMode mode = REQUIRE_EXACT_MAP) : map_(map), @@ -3786,7 +3819,12 @@ class HLoadFunctionPrototype: public HUnaryOperation { class HLoadKeyedFastElement: public HTemplateInstruction<2> { public: - HLoadKeyedFastElement(HValue* obj, HValue* key) { + enum HoleCheckMode { PERFORM_HOLE_CHECK, OMIT_HOLE_CHECK }; + + HLoadKeyedFastElement(HValue* obj, + HValue* key, + HoleCheckMode hole_check_mode = PERFORM_HOLE_CHECK) + : hole_check_mode_(hole_check_mode) { SetOperandAt(0, obj); SetOperandAt(1, key); set_representation(Representation::Tagged()); @@ -3811,7 +3849,14 @@ class HLoadKeyedFastElement: public HTemplateInstruction<2> { DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastElement) protected: - virtual bool DataEquals(HValue* other) { return true; } + virtual bool DataEquals(HValue* other) { + if (!other->IsLoadKeyedFastElement()) return false; + HLoadKeyedFastElement* other_load = HLoadKeyedFastElement::cast(other); + return hole_check_mode_ == other_load->hole_check_mode_; + } + + private: + HoleCheckMode hole_check_mode_; }; @@ -3915,6 +3960,8 @@ class HLoadKeyedGeneric: public HTemplateInstruction<3> { return Representation::Tagged(); } + virtual HValue* Canonicalize(); 
+ DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric) }; @@ -4163,17 +4210,8 @@ class HTransitionElementsKind: public HTemplateInstruction<1> { transitioned_map_(transitioned_map) { SetOperandAt(0, object); SetFlag(kUseGVN); - SetGVNFlag(kDependsOnMaps); SetGVNFlag(kChangesElementsKind); - if (original_map->has_fast_double_elements()) { - SetGVNFlag(kChangesElementsPointer); - SetGVNFlag(kDependsOnElementsPointer); - SetGVNFlag(kDependsOnDoubleArrayElements); - } else if (transitioned_map->has_fast_double_elements()) { - SetGVNFlag(kChangesElementsPointer); - SetGVNFlag(kDependsOnElementsPointer); - SetGVNFlag(kDependsOnArrayElements); - } + SetGVNFlag(kChangesElementsPointer); set_representation(Representation::Tagged()); } @@ -4329,6 +4367,41 @@ class HMaterializedLiteral: public HTemplateInstruction<V> { }; +class HFastLiteral: public HMaterializedLiteral<1> { + public: + HFastLiteral(HValue* context, + Handle<JSObject> boilerplate, + int total_size, + int literal_index, + int depth) + : HMaterializedLiteral<1>(literal_index, depth), + boilerplate_(boilerplate), + total_size_(total_size) { + SetOperandAt(0, context); + } + + // Maximum depth and total number of elements and properties for literal + // graphs to be considered for fast deep-copying. + static const int kMaxLiteralDepth = 3; + static const int kMaxLiteralProperties = 8; + + HValue* context() { return OperandAt(0); } + Handle<JSObject> boilerplate() const { return boilerplate_; } + int total_size() const { return total_size_; } + + virtual Representation RequiredInputRepresentation(int index) { + return Representation::Tagged(); + } + virtual HType CalculateInferredType(); + + DECLARE_CONCRETE_INSTRUCTION(FastLiteral) + + private: + Handle<JSObject> boilerplate_; + int total_size_; +}; + + class HArrayLiteral: public HMaterializedLiteral<1> { public: HArrayLiteral(HValue* context, @@ -4367,49 +4440,14 @@ class HArrayLiteral: public HMaterializedLiteral<1> { }; -class HObjectLiteralFast: public HMaterializedLiteral<1> { - public: - HObjectLiteralFast(HValue* context, - Handle<JSObject> boilerplate, - int total_size, - int literal_index, - int depth) - : HMaterializedLiteral<1>(literal_index, depth), - boilerplate_(boilerplate), - total_size_(total_size) { - SetOperandAt(0, context); - } - - // Maximum depth and total number of properties for object literal - // graphs to be considered for fast deep-copying. 
- static const int kMaxObjectLiteralDepth = 3; - static const int kMaxObjectLiteralProperties = 8; - - HValue* context() { return OperandAt(0); } - Handle<JSObject> boilerplate() const { return boilerplate_; } - int total_size() const { return total_size_; } - - virtual Representation RequiredInputRepresentation(int index) { - return Representation::Tagged(); - } - virtual HType CalculateInferredType(); - - DECLARE_CONCRETE_INSTRUCTION(ObjectLiteralFast) - - private: - Handle<JSObject> boilerplate_; - int total_size_; -}; - - -class HObjectLiteralGeneric: public HMaterializedLiteral<1> { +class HObjectLiteral: public HMaterializedLiteral<1> { public: - HObjectLiteralGeneric(HValue* context, - Handle<FixedArray> constant_properties, - bool fast_elements, - int literal_index, - int depth, - bool has_function) + HObjectLiteral(HValue* context, + Handle<FixedArray> constant_properties, + bool fast_elements, + int literal_index, + int depth, + bool has_function) : HMaterializedLiteral<1>(literal_index, depth), constant_properties_(constant_properties), fast_elements_(fast_elements), @@ -4429,7 +4467,7 @@ class HObjectLiteralGeneric: public HMaterializedLiteral<1> { } virtual HType CalculateInferredType(); - DECLARE_CONCRETE_INSTRUCTION(ObjectLiteralGeneric) + DECLARE_CONCRETE_INSTRUCTION(ObjectLiteral) private: Handle<FixedArray> constant_properties_; @@ -4524,7 +4562,7 @@ class HToFastProperties: public HUnaryOperation { // This instruction is not marked as having side effects, but // changes the map of the input operand. Use it only when creating // object literals. - ASSERT(value->IsObjectLiteralGeneric() || value->IsObjectLiteralFast()); + ASSERT(value->IsObjectLiteral() || value->IsFastLiteral()); set_representation(Representation::Tagged()); } @@ -4598,6 +4636,134 @@ class HIn: public HTemplateInstruction<3> { DECLARE_CONCRETE_INSTRUCTION(In) }; + +class HCheckMapValue: public HTemplateInstruction<2> { + public: + HCheckMapValue(HValue* value, + HValue* map) { + SetOperandAt(0, value); + SetOperandAt(1, map); + set_representation(Representation::Tagged()); + SetFlag(kUseGVN); + SetGVNFlag(kDependsOnMaps); + SetGVNFlag(kDependsOnElementsKind); + } + + virtual Representation RequiredInputRepresentation(int index) { + return Representation::Tagged(); + } + + virtual void PrintDataTo(StringStream* stream); + + virtual HType CalculateInferredType() { + return HType::Tagged(); + } + + HValue* value() { return OperandAt(0); } + HValue* map() { return OperandAt(1); } + + DECLARE_CONCRETE_INSTRUCTION(CheckMapValue) + + protected: + virtual bool DataEquals(HValue* other) { + return true; + } +}; + + +class HForInPrepareMap : public HTemplateInstruction<2> { + public: + HForInPrepareMap(HValue* context, + HValue* object) { + SetOperandAt(0, context); + SetOperandAt(1, object); + set_representation(Representation::Tagged()); + SetAllSideEffects(); + } + + virtual Representation RequiredInputRepresentation(int index) { + return Representation::Tagged(); + } + + HValue* context() { return OperandAt(0); } + HValue* enumerable() { return OperandAt(1); } + + virtual void PrintDataTo(StringStream* stream); + + virtual HType CalculateInferredType() { + return HType::Tagged(); + } + + DECLARE_CONCRETE_INSTRUCTION(ForInPrepareMap); +}; + + +class HForInCacheArray : public HTemplateInstruction<2> { + public: + HForInCacheArray(HValue* enumerable, + HValue* keys, + int idx) : idx_(idx) { + SetOperandAt(0, enumerable); + SetOperandAt(1, keys); + set_representation(Representation::Tagged()); + } + + virtual 
Representation RequiredInputRepresentation(int index) { + return Representation::Tagged(); + } + + HValue* enumerable() { return OperandAt(0); } + HValue* map() { return OperandAt(1); } + int idx() { return idx_; } + + HForInCacheArray* index_cache() { + return index_cache_; + } + + void set_index_cache(HForInCacheArray* index_cache) { + index_cache_ = index_cache; + } + + virtual void PrintDataTo(StringStream* stream); + + virtual HType CalculateInferredType() { + return HType::Tagged(); + } + + DECLARE_CONCRETE_INSTRUCTION(ForInCacheArray); + + private: + int idx_; + HForInCacheArray* index_cache_; +}; + + +class HLoadFieldByIndex : public HTemplateInstruction<2> { + public: + HLoadFieldByIndex(HValue* object, + HValue* index) { + SetOperandAt(0, object); + SetOperandAt(1, index); + set_representation(Representation::Tagged()); + } + + virtual Representation RequiredInputRepresentation(int index) { + return Representation::Tagged(); + } + + HValue* object() { return OperandAt(0); } + HValue* index() { return OperandAt(1); } + + virtual void PrintDataTo(StringStream* stream); + + virtual HType CalculateInferredType() { + return HType::Tagged(); + } + + DECLARE_CONCRETE_INSTRUCTION(LoadFieldByIndex); +}; + + #undef DECLARE_INSTRUCTION #undef DECLARE_CONCRETE_INSTRUCTION diff --git a/deps/v8/src/hydrogen.cc b/deps/v8/src/hydrogen.cc index e2505876d8..9918e85180 100644 --- a/deps/v8/src/hydrogen.cc +++ b/deps/v8/src/hydrogen.cc @@ -446,7 +446,7 @@ class ReachabilityAnalyzer BASE_EMBEDDED { HBasicBlock* dont_visit) : visited_count_(0), stack_(16), - reachable_(block_count), + reachable_(block_count, ZONE), dont_visit_(dont_visit) { PushBlock(entry_block); Analyze(); @@ -744,7 +744,7 @@ void HGraph::Canonicalize() { void HGraph::OrderBlocks() { HPhase phase("Block ordering"); - BitVector visited(blocks_.length()); + BitVector visited(blocks_.length(), zone()); ZoneList<HBasicBlock*> reverse_result(8); HBasicBlock* start = blocks_[0]; @@ -955,7 +955,7 @@ void HGraph::CollectPhis() { void HGraph::InferTypes(ZoneList<HValue*>* worklist) { - BitVector in_worklist(GetMaximumValueID()); + BitVector in_worklist(GetMaximumValueID(), zone()); for (int i = 0; i < worklist->length(); ++i) { ASSERT(!in_worklist.Contains(worklist->at(i)->id())); in_worklist.Add(worklist->at(i)->id()); @@ -1431,7 +1431,8 @@ class HGlobalValueNumberer BASE_EMBEDDED { void ProcessLoopBlock(HBasicBlock* block, HBasicBlock* before_loop, GVNFlagSet loop_kills, - GVNFlagSet* accumulated_first_time_depends); + GVNFlagSet* accumulated_first_time_depends, + GVNFlagSet* accumulated_first_time_changes); bool AllowCodeMotion(); bool ShouldMove(HInstruction* instr, HBasicBlock* loop_header); @@ -1512,10 +1513,12 @@ void HGlobalValueNumberer::LoopInvariantCodeMotion() { side_effects.ToIntegral()); GVNFlagSet accumulated_first_time_depends; + GVNFlagSet accumulated_first_time_changes; HBasicBlock* last = block->loop_information()->GetLastBackEdge(); for (int j = block->block_id(); j <= last->block_id(); ++j) { ProcessLoopBlock(graph_->blocks()->at(j), block, side_effects, - &accumulated_first_time_depends); + &accumulated_first_time_depends, + &accumulated_first_time_changes); } } } @@ -1526,7 +1529,8 @@ void HGlobalValueNumberer::ProcessLoopBlock( HBasicBlock* block, HBasicBlock* loop_header, GVNFlagSet loop_kills, - GVNFlagSet* accumulated_first_time_depends) { + GVNFlagSet* first_time_depends, + GVNFlagSet* first_time_changes) { HBasicBlock* pre_header = loop_header->predecessors()->at(0); GVNFlagSet depends_flags = 
HValue::ConvertChangesToDependsFlags(loop_kills);
  TraceGVN("Loop invariant motion for B%d depends_flags=0x%x\n",
@@ -1544,28 +1548,47 @@ void HGlobalValueNumberer::ProcessLoopBlock(
          instr->gvn_flags().ToIntegral(),
          depends_flags.ToIntegral());
      bool can_hoist = !instr->gvn_flags().ContainsAnyOf(depends_flags);
-      if (!can_hoist && instr->IsTransitionElementsKind()) {
-        // It's only possible to hoist one time side effects if there are no
-        // dependencies on their changes from the loop header to the current
-        // instruction.
-        GVNFlagSet converted_changes =
-            HValue::ConvertChangesToDependsFlags(instr->ChangesFlags());
-        TraceGVN("Checking dependencies on one-time instruction %d (%s) "
-                 "converted changes 0x%X, accumulated depends 0x%X\n",
+      if (instr->IsTransitionElementsKind()) {
+        // It's possible to hoist transitions out of a loop as long as the
+        // hoisting wouldn't move the transition past a DependsOn of one of its
+        // changes or any instructions that might change an object's map or
+        // elements contents.
+        GVNFlagSet changes = instr->ChangesFlags();
+        GVNFlagSet hoist_depends_blockers =
+            HValue::ConvertChangesToDependsFlags(changes);
+        // In addition to not hoisting transitions above other instructions that
+        // change dependencies that the transition changes, it must not be
+        // hoisted above map changes and stores to an elements backing store
+        // that the transition might change.
+        GVNFlagSet hoist_change_blockers = changes;
+        hoist_change_blockers.Add(kChangesMaps);
+        HTransitionElementsKind* trans = HTransitionElementsKind::cast(instr);
+        if (trans->original_map()->has_fast_double_elements()) {
+          hoist_change_blockers.Add(kChangesDoubleArrayElements);
+        }
+        if (trans->transitioned_map()->has_fast_double_elements()) {
+          hoist_change_blockers.Add(kChangesArrayElements);
+        }
+        TraceGVN("Checking dependencies on HTransitionElementsKind %d (%s) "
+                 "hoist depends blockers 0x%X, hoist change blockers 0x%X, "
+                 "accumulated depends 0x%X, accumulated changes 0x%X\n",
                 instr->id(),
                 instr->Mnemonic(),
-                 converted_changes.ToIntegral(),
-                 accumulated_first_time_depends->ToIntegral());
-        // It's possible to hoist one-time side effects from the current loop
-        // loop only if they dominate all of the successor blocks in the same
-        // loop and there are not any instructions that have Changes/DependsOn
-        // that intervene between it and the beginning of the loop header.
+                 hoist_depends_blockers.ToIntegral(),
+                 hoist_change_blockers.ToIntegral(),
+                 first_time_depends->ToIntegral(),
+                 first_time_changes->ToIntegral());
+        // It's possible to hoist transitions from the current loop only if
+        // they dominate all of the successor blocks in the same loop and there
+        // are not any instructions that have Changes/DependsOn that intervene
+        // between it and the beginning of the loop header.
        bool in_nested_loop = block != loop_header &&
            ((block->parent_loop_header() != loop_header) ||
             block->IsLoopHeader());
        can_hoist = !in_nested_loop &&
            block->IsLoopSuccessorDominator() &&
-            !accumulated_first_time_depends->ContainsAnyOf(converted_changes);
+            !first_time_depends->ContainsAnyOf(hoist_depends_blockers) &&
+            !first_time_changes->ContainsAnyOf(hoist_change_blockers);
      }

      if (can_hoist) {
@@ -1589,10 +1612,8 @@ void HGlobalValueNumberer::ProcessLoopBlock(
       if (!hoisted) {
         // If an instruction is not hoisted, we have to account for its side
         // effects when hoisting later HTransitionElementsKind instructions.
- accumulated_first_time_depends->Add(instr->DependsOnFlags()); - GVNFlagSet converted_changes = - HValue::ConvertChangesToDependsFlags(instr->SideEffectFlags()); - accumulated_first_time_depends->Add(converted_changes); + first_time_depends->Add(instr->DependsOnFlags()); + first_time_changes->Add(instr->ChangesFlags()); } instr = next; } @@ -1698,7 +1719,9 @@ void HGlobalValueNumberer::AnalyzeBlock(HBasicBlock* block, HValueMap* map) { class HInferRepresentation BASE_EMBEDDED { public: explicit HInferRepresentation(HGraph* graph) - : graph_(graph), worklist_(8), in_worklist_(graph->GetMaximumValueID()) {} + : graph_(graph), + worklist_(8), + in_worklist_(graph->GetMaximumValueID(), graph->zone()) { } void Analyze(); @@ -1815,7 +1838,7 @@ void HInferRepresentation::Analyze() { ZoneList<BitVector*> connected_phis(phi_count); for (int i = 0; i < phi_count; ++i) { phi_list->at(i)->InitRealUses(i); - BitVector* connected_set = new(zone()) BitVector(phi_count); + BitVector* connected_set = new(zone()) BitVector(phi_count, graph_->zone()); connected_set->Add(i); connected_phis.Add(connected_set); } @@ -2105,7 +2128,7 @@ void HGraph::MarkDeoptimizeOnUndefined() { void HGraph::ComputeMinusZeroChecks() { - BitVector visited(GetMaximumValueID()); + BitVector visited(GetMaximumValueID(), zone()); for (int i = 0; i < blocks_.length(); ++i) { for (HInstruction* current = blocks_[i]->first(); current != NULL; @@ -2443,7 +2466,7 @@ HGraph* HGraphBuilder::CreateGraph() { // Handle implicit declaration of the function name in named function // expressions before other declarations. if (scope->is_function_scope() && scope->function() != NULL) { - HandleVariableDeclaration(scope->function(), CONST, NULL); + HandleVariableDeclaration(scope->function(), CONST, NULL, NULL); } VisitDeclarations(scope->declarations()); AddSimulate(AstNode::kDeclarationsId); @@ -2721,12 +2744,20 @@ void HGraphBuilder::VisitIfStatement(IfStatement* stmt) { HBasicBlock* HGraphBuilder::BreakAndContinueScope::Get( BreakableStatement* stmt, - BreakType type) { + BreakType type, + int* drop_extra) { + *drop_extra = 0; BreakAndContinueScope* current = this; while (current != NULL && current->info()->target() != stmt) { + *drop_extra += current->info()->drop_extra(); current = current->next(); } ASSERT(current != NULL); // Always found (unless stack is malformed). 
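To summarize the ProcessLoopBlock rewrite above: the pass accumulates what the first loop iteration depends on and what it changes, and an elements-kind transition may be hoisted only if neither accumulated set intersects the transition's blocker sets. A tiny bitset model of that final test (flag values are illustrative, and the depends/changes flag spaces are folded into one namespace for brevity):

#include <cstdint>
#include <cstdio>

enum GVNFlag : uint32_t {
  kChangesMaps = 1 << 0,
  kChangesArrayElements = 1 << 1,
  kChangesDoubleArrayElements = 1 << 2,
  kChangesElementsKind = 1 << 3,
};

int main() {
  // Accumulated effects of the not-yet-hoisted instructions between the
  // loop header and the transition (first_time_depends / first_time_changes).
  uint32_t first_time_depends = 0;
  uint32_t first_time_changes = kChangesArrayElements;

  // Blockers for hoisting a FAST_ELEMENTS -> FAST_DOUBLE_ELEMENTS
  // transition, following the hunk above: the transition's own changes,
  // plus map changes, plus stores to the backing store it would rewrite.
  uint32_t hoist_depends_blockers = kChangesElementsKind;
  uint32_t hoist_change_blockers =
      kChangesElementsKind | kChangesMaps | kChangesArrayElements;

  bool can_hoist =
      (first_time_depends & hoist_depends_blockers) == 0 &&
      (first_time_changes & hoist_change_blockers) == 0;

  // Here an earlier store to the array's elements blocks the hoist.
  std::printf("can_hoist = %d\n", can_hoist);  // prints 0
}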
+ + if (type == BREAK) { + *drop_extra += current->info()->drop_extra(); + } + HBasicBlock* block = NULL; switch (type) { case BREAK: @@ -2754,7 +2785,11 @@ void HGraphBuilder::VisitContinueStatement(ContinueStatement* stmt) { ASSERT(!HasStackOverflow()); ASSERT(current_block() != NULL); ASSERT(current_block()->HasPredecessor()); - HBasicBlock* continue_block = break_scope()->Get(stmt->target(), CONTINUE); + int drop_extra = 0; + HBasicBlock* continue_block = break_scope()->Get(stmt->target(), + CONTINUE, + &drop_extra); + Drop(drop_extra); current_block()->Goto(continue_block); set_current_block(NULL); } @@ -2764,7 +2799,11 @@ void HGraphBuilder::VisitBreakStatement(BreakStatement* stmt) { ASSERT(!HasStackOverflow()); ASSERT(current_block() != NULL); ASSERT(current_block()->HasPredecessor()); - HBasicBlock* break_block = break_scope()->Get(stmt->target(), BREAK); + int drop_extra = 0; + HBasicBlock* break_block = break_scope()->Get(stmt->target(), + BREAK, + &drop_extra); + Drop(drop_extra); current_block()->Goto(break_block); set_current_block(NULL); } @@ -3019,15 +3058,24 @@ void HGraphBuilder::PreProcessOsrEntry(IterationStatement* statement) { set_current_block(osr_entry); int osr_entry_id = statement->OsrEntryId(); - // We want the correct environment at the OsrEntry instruction. Build - // it explicitly. The expression stack should be empty. - ASSERT(environment()->ExpressionStackIsEmpty()); - for (int i = 0; i < environment()->length(); ++i) { + int first_expression_index = environment()->first_expression_index(); + int length = environment()->length(); + for (int i = 0; i < first_expression_index; ++i) { HUnknownOSRValue* osr_value = new(zone()) HUnknownOSRValue; AddInstruction(osr_value); environment()->Bind(i, osr_value); } + if (first_expression_index != length) { + environment()->Drop(length - first_expression_index); + for (int i = first_expression_index; i < length; ++i) { + HUnknownOSRValue* osr_value = new(zone()) HUnknownOSRValue; + AddInstruction(osr_value); + environment()->Push(osr_value); + } + } + + AddSimulate(osr_entry_id); AddInstruction(new(zone()) HOsrEntry(osr_entry_id)); HContext* context = new(zone()) HContext; @@ -3125,7 +3173,6 @@ void HGraphBuilder::VisitWhileStatement(WhileStatement* stmt) { BreakAndContinueInfo break_info(stmt); if (current_block() != NULL) { - BreakAndContinueScope push(&break_info, this); CHECK_BAILOUT(VisitLoopBody(stmt, loop_entry, &break_info)); } HBasicBlock* body_exit = @@ -3170,7 +3217,6 @@ void HGraphBuilder::VisitForStatement(ForStatement* stmt) { BreakAndContinueInfo break_info(stmt); if (current_block() != NULL) { - BreakAndContinueScope push(&break_info, this); CHECK_BAILOUT(VisitLoopBody(stmt, loop_entry, &break_info)); } HBasicBlock* body_exit = @@ -3195,7 +3241,110 @@ void HGraphBuilder::VisitForInStatement(ForInStatement* stmt) { ASSERT(!HasStackOverflow()); ASSERT(current_block() != NULL); ASSERT(current_block()->HasPredecessor()); - return Bailout("ForInStatement"); + + if (!stmt->each()->IsVariableProxy() || + !stmt->each()->AsVariableProxy()->var()->IsStackLocal()) { + return Bailout("ForInStatement with non-local each variable"); + } + + Variable* each_var = stmt->each()->AsVariableProxy()->var(); + + CHECK_ALIVE(VisitForValue(stmt->enumerable())); + HValue* enumerable = Top(); // Leave enumerable at the top. 
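BreakAndContinueScope::Get above now also reports, through drop_extra, how many extra expression-stack values must be popped when a break or continue unwinds to its target; the fast for-in path below registers five such values. A small model of that scope walk (hypothetical structs, same shape as the diff):

#include <cassert>
#include <cstddef>

struct BreakableStatement { int id; };

struct BreakAndContinueInfo {
  BreakableStatement* target;
  int drop_extra;  // e.g. 5 for the fast for-in loop's stack slots
};

struct Scope {
  BreakAndContinueInfo* info;
  Scope* next;  // enclosing scope
};

enum BreakType { BREAK, CONTINUE };

// Returns how many expression-stack values must be dropped before jumping.
int DropCount(Scope* innermost, BreakableStatement* stmt, BreakType type) {
  int drop = 0;
  Scope* current = innermost;
  while (current != NULL && current->info->target != stmt) {
    drop += current->info->drop_extra;  // crossing this construct entirely
    current = current->next;
  }
  assert(current != NULL);  // always found unless the scope chain is malformed
  // A break leaves the target construct too, so its own extras go as well;
  // a continue stays inside it and keeps them.
  if (type == BREAK) drop += current->info->drop_extra;
  return drop;
}

int main() {
  BreakableStatement for_in{1}, while_loop{2};
  BreakAndContinueInfo outer{&for_in, 5}, inner{&while_loop, 0};
  Scope outer_scope{&outer, NULL};
  Scope inner_scope{&inner, &outer_scope};

  assert(DropCount(&inner_scope, &for_in, CONTINUE) == 0);  // stay in for-in
  assert(DropCount(&inner_scope, &for_in, BREAK) == 5);     // leave for-in
}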
+ + HValue* context = environment()->LookupContext(); + + HInstruction* map = AddInstruction(new(zone()) HForInPrepareMap( + context, enumerable)); + AddSimulate(stmt->PrepareId()); + + HInstruction* array = AddInstruction( + new(zone()) HForInCacheArray( + enumerable, + map, + DescriptorArray::kEnumCacheBridgeCacheIndex)); + + HInstruction* array_length = AddInstruction( + new(zone()) HFixedArrayBaseLength(array)); + + HInstruction* start_index = AddInstruction(new(zone()) HConstant( + Handle<Object>(Smi::FromInt(0)), Representation::Integer32())); + + Push(map); + Push(array); + Push(array_length); + Push(start_index); + + HInstruction* index_cache = AddInstruction( + new(zone()) HForInCacheArray( + enumerable, + map, + DescriptorArray::kEnumCacheBridgeIndicesCacheIndex)); + HForInCacheArray::cast(array)->set_index_cache( + HForInCacheArray::cast(index_cache)); + + PreProcessOsrEntry(stmt); + HBasicBlock* loop_entry = CreateLoopHeaderBlock(); + current_block()->Goto(loop_entry); + set_current_block(loop_entry); + + HValue* index = environment()->ExpressionStackAt(0); + HValue* limit = environment()->ExpressionStackAt(1); + + // Check that we still have more keys. + HCompareIDAndBranch* compare_index = + new(zone()) HCompareIDAndBranch(index, limit, Token::LT); + compare_index->SetInputRepresentation(Representation::Integer32()); + + HBasicBlock* loop_body = graph()->CreateBasicBlock(); + HBasicBlock* loop_successor = graph()->CreateBasicBlock(); + + compare_index->SetSuccessorAt(0, loop_body); + compare_index->SetSuccessorAt(1, loop_successor); + current_block()->Finish(compare_index); + + set_current_block(loop_successor); + Drop(5); + + set_current_block(loop_body); + + HValue* key = AddInstruction( + new(zone()) HLoadKeyedFastElement( + environment()->ExpressionStackAt(2), // Enum cache. + environment()->ExpressionStackAt(0), // Iteration index. + HLoadKeyedFastElement::OMIT_HOLE_CHECK)); + + // Check if the expected map still matches that of the enumerable. + // If not just deoptimize. + AddInstruction(new(zone()) HCheckMapValue( + environment()->ExpressionStackAt(4), + environment()->ExpressionStackAt(3))); + + Bind(each_var, key); + + BreakAndContinueInfo break_info(stmt, 5); + CHECK_BAILOUT(VisitLoopBody(stmt, loop_entry, &break_info)); + + HBasicBlock* body_exit = + JoinContinue(stmt, current_block(), break_info.continue_block()); + + if (body_exit != NULL) { + set_current_block(body_exit); + + HValue* current_index = Pop(); + PushAndAdd( + new(zone()) HAdd(context, current_index, graph()->GetConstant1())); + + body_exit = current_block(); + } + + HBasicBlock* loop_exit = CreateLoop(stmt, + loop_entry, + body_exit, + loop_successor, + break_info.break_block()); + + set_current_block(loop_exit); } @@ -3437,19 +3586,35 @@ void HGraphBuilder::VisitRegExpLiteral(RegExpLiteral* expr) { } -// Determines whether the given object literal boilerplate satisfies all -// limits to be considered for fast deep-copying and computes the total +// Determines whether the given array or object literal boilerplate satisfies +// all limits to be considered for fast deep-copying and computes the total // size of all objects that are part of the graph. 
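Taken together, the VisitForInStatement code above compiles a fast for-in into a counted loop over the enum cache: index and limit live on the expression stack, the key is loaded with the hole check omitted, a map check guards against shape changes, and (once HLoadKeyedGeneric::Canonicalize kicks in) property values are read by cached field index. A much-simplified standalone model of the resulting loop (a toy object model, not V8's):

#include <cstdio>
#include <string>
#include <vector>

// A toy "hidden class": enumeration cache of names plus, per name, the
// in-object field index (the index cache introduced in this commit).
struct Map {
  std::vector<std::string> enum_cache;  // HForInCacheArray (names)
  std::vector<int> index_cache;         // HForInCacheArray (indices)
};

struct Obj {
  const Map* map;
  std::vector<int> fields;              // in-object property storage
};

int main() {
  Map m{{"x", "y"}, {0, 1}};
  Obj obj{&m, {10, 20}};

  const Map* map = obj.map;             // captured by HForInPrepareMap
  const std::vector<std::string>& array = m.enum_cache;
  size_t limit = array.size();          // HFixedArrayBaseLength
  size_t index = 0;                     // start index

  while (index < limit) {          // HCompareIDAndBranch(index, limit, LT)
    // HLoadKeyedFastElement with OMIT_HOLE_CHECK: the cache has no holes.
    const std::string& key = array[index];
    // HCheckMapValue: if the object's shape changed mid-loop, the cached
    // indices are stale and the real code deoptimizes to the generic path.
    if (obj.map != map) { std::printf("deopt\n"); break; }
    // HLoadFieldByIndex (via HLoadKeyedGeneric::Canonicalize): a direct
    // indexed field load instead of a generic obj[key] lookup.
    int value = obj.fields[m.index_cache[index]];
    std::printf("%s = %d\n", key.c_str(), value);
    index++;                            // HAdd at the back edge
  }
}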
-static bool IsFastObjectLiteral(Handle<JSObject> boilerplate, - int max_depth, - int* max_properties, - int* total_size) { - if (max_depth <= 0) return false; +static bool IsFastLiteral(Handle<JSObject> boilerplate, + int max_depth, + int* max_properties, + int* total_size) { + ASSERT(max_depth >= 0 && *max_properties >= 0); + if (max_depth == 0) return false; Handle<FixedArrayBase> elements(boilerplate->elements()); if (elements->length() > 0 && - elements->map() != HEAP->fixed_cow_array_map()) { - return false; + elements->map() != boilerplate->GetHeap()->fixed_cow_array_map()) { + if (!boilerplate->HasFastElements()) return false; + int length = elements->length(); + for (int i = 0; i < length; i++) { + if ((*max_properties)-- == 0) return false; + Handle<Object> value = JSObject::GetElement(boilerplate, i); + if (value->IsJSObject()) { + Handle<JSObject> value_object = Handle<JSObject>::cast(value); + if (!IsFastLiteral(value_object, + max_depth - 1, + max_properties, + total_size)) { + return false; + } + } + } + *total_size += FixedArray::SizeFor(length); } Handle<FixedArray> properties(boilerplate->properties()); @@ -3458,14 +3623,14 @@ static bool IsFastObjectLiteral(Handle<JSObject> boilerplate, } else { int nof = boilerplate->map()->inobject_properties(); for (int i = 0; i < nof; i++) { - if ((*max_properties)-- <= 0) return false; + if ((*max_properties)-- == 0) return false; Handle<Object> value(boilerplate->InObjectPropertyAt(i)); if (value->IsJSObject()) { Handle<JSObject> value_object = Handle<JSObject>::cast(value); - if (!IsFastObjectLiteral(value_object, - max_depth - 1, - max_properties, - total_size)) { + if (!IsFastLiteral(value_object, + max_depth - 1, + max_properties, + total_size)) { return false; } } @@ -3487,26 +3652,26 @@ void HGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) { // Check whether to use fast or slow deep-copying for boilerplate. 
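// (Both literal kinds now share one predicate: IsFastLiteral() above also
// walks the elements backing store, decrementing *max_properties per value
// and accumulating *total_size so HFastLiteral can allocate the whole
// object graph in one chunk.)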
int total_size = 0; - int max_properties = HObjectLiteralFast::kMaxObjectLiteralProperties; + int max_properties = HFastLiteral::kMaxLiteralProperties; Handle<Object> boilerplate(closure->literals()->get(expr->literal_index())); if (boilerplate->IsJSObject() && - IsFastObjectLiteral(Handle<JSObject>::cast(boilerplate), - HObjectLiteralFast::kMaxObjectLiteralDepth, - &max_properties, - &total_size)) { + IsFastLiteral(Handle<JSObject>::cast(boilerplate), + HFastLiteral::kMaxLiteralDepth, + &max_properties, + &total_size)) { Handle<JSObject> boilerplate_object = Handle<JSObject>::cast(boilerplate); - literal = new(zone()) HObjectLiteralFast(context, - boilerplate_object, - total_size, - expr->literal_index(), - expr->depth()); + literal = new(zone()) HFastLiteral(context, + boilerplate_object, + total_size, + expr->literal_index(), + expr->depth()); } else { - literal = new(zone()) HObjectLiteralGeneric(context, - expr->constant_properties(), - expr->fast_elements(), - expr->literal_index(), - expr->depth(), - expr->has_function()); + literal = new(zone()) HObjectLiteral(context, + expr->constant_properties(), + expr->fast_elements(), + expr->literal_index(), + expr->depth(), + expr->has_function()); } // The object is expected in the bailout environment during computation @@ -3577,6 +3742,7 @@ void HGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) { ZoneList<Expression*>* subexprs = expr->values(); int length = subexprs->length(); HValue* context = environment()->LookupContext(); + HInstruction* literal; Handle<FixedArray> literals(environment()->closure()->literals()); Handle<Object> raw_boilerplate(literals->get(expr->literal_index())); @@ -3598,12 +3764,25 @@ void HGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) { ElementsKind boilerplate_elements_kind = Handle<JSObject>::cast(boilerplate)->GetElementsKind(); - HArrayLiteral* literal = new(zone()) HArrayLiteral( - context, - boilerplate, - length, - expr->literal_index(), - expr->depth()); + // Check whether to use fast or slow deep-copying for boilerplate. + int total_size = 0; + int max_properties = HFastLiteral::kMaxLiteralProperties; + if (IsFastLiteral(boilerplate, + HFastLiteral::kMaxLiteralDepth, + &max_properties, + &total_size)) { + literal = new(zone()) HFastLiteral(context, + boilerplate, + total_size, + expr->literal_index(), + expr->depth()); + } else { + literal = new(zone()) HArrayLiteral(context, + boilerplate, + length, + expr->literal_index(), + expr->depth()); + } // The array is expected in the bailout environment during computation // of the property values and is the value of the entire expression. 
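// Annotation: with this change an array literal whose boilerplate passes
// IsFastLiteral() is deep-copied inline via HFastLiteral, exactly like the
// object literal case above; anything too deep, too large, or without fast
// elements still goes through HArrayLiteral's runtime call.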
@@ -4454,7 +4633,7 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object, Handle<Map> map = maps->at(i); ASSERT(map->IsMap()); if (!transition_target.at(i).is_null()) { - object = AddInstruction(new(zone()) HTransitionElementsKind( + AddInstruction(new(zone()) HTransitionElementsKind( object, map, transition_target.at(i))); } else { type_todo[map->elements_kind()] = true; @@ -4917,7 +5096,7 @@ bool HGraphBuilder::TryInline(Call* expr, bool drop_extra) { TraceInline(target, caller, "target not inlineable"); return false; } - if (target_shared->dont_inline() || target_shared->dont_crankshaft()) { + if (target_shared->dont_inline() || target_shared->dont_optimize()) { TraceInline(target, caller, "target contains unsupported syntax [early]"); return false; } @@ -4979,7 +5158,7 @@ bool HGraphBuilder::TryInline(Call* expr, bool drop_extra) { if (target_info.isolate()->has_pending_exception()) { // Parse or scope error, never optimize this function. SetStackOverflow(); - target_shared->DisableOptimization(*target); + target_shared->DisableOptimization(); } TraceInline(target, caller, "parse failure"); return false; @@ -5092,7 +5271,7 @@ bool HGraphBuilder::TryInline(Call* expr, bool drop_extra) { // Bail out if the inline function did, as we cannot residualize a call // instead. TraceInline(target, caller, "inline graph construction failed"); - target_shared->DisableOptimization(*target); + target_shared->DisableOptimization(); inline_bailout_ = true; delete target_state; return true; @@ -5173,7 +5352,6 @@ bool HGraphBuilder::TryInlineBuiltinFunctionCall(Call* expr, bool drop_extra) { BuiltinFunctionId id = expr->target()->shared()->builtin_function_id(); switch (id) { case kMathRound: - case kMathFloor: case kMathAbs: case kMathSqrt: case kMathLog: @@ -5311,32 +5489,43 @@ bool HGraphBuilder::TryInlineBuiltinMethodCall(Call* expr, AddCheckConstantFunction(expr, receiver, receiver_map, true); HValue* right = Pop(); HValue* left = Pop(); - // Do not inline if the return representation is not certain. - if (!left->representation().Equals(right->representation())) { - Push(left); - Push(right); - return false; + Pop(); // Pop receiver. + + HValue* left_operand = left; + HValue* right_operand = right; + + // If we do not have two integers, we convert to double for comparison. + if (!left->representation().IsInteger32() || + !right->representation().IsInteger32()) { + if (!left->representation().IsDouble()) { + HChange* left_convert = new(zone()) HChange( + left, + Representation::Double(), + false, // Do not truncate when converting to double. + true); // Deoptimize for undefined. + left_convert->SetFlag(HValue::kBailoutOnMinusZero); + left_operand = AddInstruction(left_convert); + } + if (!right->representation().IsDouble()) { + HChange* right_convert = new(zone()) HChange( + right, + Representation::Double(), + false, // Do not truncate when converting to double. + true); // Deoptimize for undefined. + right_convert->SetFlag(HValue::kBailoutOnMinusZero); + right_operand = AddInstruction(right_convert); + } } - Pop(); // Pop receiver. + ASSERT(left_operand->representation().Equals( + right_operand->representation())); + ASSERT(!left_operand->representation().IsTagged()); + Token::Value op = (id == kMathMin) ? 
Token::LT : Token::GT; - HCompareIDAndBranch* compare = NULL; - - if (left->representation().IsTagged()) { - HChange* left_cvt = - new(zone()) HChange(left, Representation::Double(), false, true); - left_cvt->SetFlag(HValue::kBailoutOnMinusZero); - AddInstruction(left_cvt); - HChange* right_cvt = - new(zone()) HChange(right, Representation::Double(), false, true); - right_cvt->SetFlag(HValue::kBailoutOnMinusZero); - AddInstruction(right_cvt); - compare = new(zone()) HCompareIDAndBranch(left_cvt, right_cvt, op); - compare->SetInputRepresentation(Representation::Double()); - } else { - compare = new(zone()) HCompareIDAndBranch(left, right, op); - compare->SetInputRepresentation(left->representation()); - } + + HCompareIDAndBranch* compare = + new(zone()) HCompareIDAndBranch(left_operand, right_operand, op); + compare->SetInputRepresentation(left_operand->representation()); HBasicBlock* return_left = graph()->CreateBasicBlock(); HBasicBlock* return_right = graph()->CreateBasicBlock(); @@ -6541,26 +6730,81 @@ void HGraphBuilder::VisitThisFunction(ThisFunction* expr) { void HGraphBuilder::VisitVariableDeclaration(VariableDeclaration* decl) { - HandleVariableDeclaration(decl->proxy(), decl->mode(), decl->fun()); + UNREACHABLE(); +} + +void HGraphBuilder::VisitDeclarations(ZoneList<Declaration*>* declarations) { + int length = declarations->length(); + int global_count = 0; + for (int i = 0; i < declarations->length(); i++) { + VariableDeclaration* decl = declarations->at(i)->AsVariableDeclaration(); + if (decl == NULL) continue; + HandleVariableDeclaration(decl->proxy(), + decl->mode(), + decl->fun(), + &global_count); + } + + // Batch declare global functions and variables. + if (global_count > 0) { + Handle<FixedArray> array = + isolate()->factory()->NewFixedArray(2 * global_count, TENURED); + for (int j = 0, i = 0; i < length; i++) { + VariableDeclaration* decl = declarations->at(i)->AsVariableDeclaration(); + if (decl == NULL) continue; + Variable* var = decl->proxy()->var(); + + if (var->IsUnallocated()) { + array->set(j++, *(var->name())); + if (decl->fun() == NULL) { + if (var->binding_needs_init()) { + // In case this binding needs initialization use the hole. + array->set_the_hole(j++); + } else { + array->set_undefined(j++); + } + } else { + Handle<SharedFunctionInfo> function = + Compiler::BuildFunctionInfo(decl->fun(), info()->script()); + // Check for stack-overflow exception. 
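// (Compiler::BuildFunctionInfo signals a parser stack overflow by
// returning a null handle; the null check below converts that into
// SetStackOverflow() on the graph builder rather than a crash.)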
+ if (function.is_null()) { + SetStackOverflow(); + return; + } + array->set(j++, *function); + } + } + } + int flags = DeclareGlobalsEvalFlag::encode(info()->is_eval()) | + DeclareGlobalsNativeFlag::encode(info()->is_native()) | + DeclareGlobalsLanguageMode::encode(info()->language_mode()); + HInstruction* result = + new(zone()) HDeclareGlobals(environment()->LookupContext(), + array, + flags); + AddInstruction(result); + } } void HGraphBuilder::HandleVariableDeclaration(VariableProxy* proxy, VariableMode mode, - FunctionLiteral* function) { + FunctionLiteral* function, + int* global_count) { Variable* var = proxy->var(); bool binding_needs_init = (mode == CONST || mode == CONST_HARMONY || mode == LET); switch (var->location()) { case Variable::UNALLOCATED: - return Bailout("unsupported global declaration"); + ++(*global_count); + return; case Variable::PARAMETER: case Variable::LOCAL: case Variable::CONTEXT: if (binding_needs_init || function != NULL) { HValue* value = NULL; if (function != NULL) { - VisitForValue(function); + CHECK_ALIVE(VisitForValue(function)); value = Pop(); } else { value = graph()->GetConstantHole(); @@ -7211,9 +7455,8 @@ bool HEnvironment::HasExpressionAt(int index) const { bool HEnvironment::ExpressionStackIsEmpty() const { - int first_expression = parameter_count() + specials_count() + local_count(); - ASSERT(length() >= first_expression); - return length() == first_expression; + ASSERT(length() >= first_expression_index()); + return length() == first_expression_index(); } @@ -7501,7 +7744,7 @@ void HTracer::TraceLiveRange(LiveRange* range, const char* type) { PrintIndent(); trace_.Add("%d %s", range->id(), type); if (range->HasRegisterAssigned()) { - LOperand* op = range->CreateAssignedOperand(); + LOperand* op = range->CreateAssignedOperand(ZONE); int assigned_reg = op->index(); if (op->IsDoubleRegister()) { trace_.Add(" \"%s\"", diff --git a/deps/v8/src/hydrogen.h b/deps/v8/src/hydrogen.h index bbd4841f7a..65aa346cbe 100644 --- a/deps/v8/src/hydrogen.h +++ b/deps/v8/src/hydrogen.h @@ -399,6 +399,10 @@ class HEnvironment: public ZoneObject { return i >= parameter_count() && i < parameter_count() + specials_count(); } + int first_expression_index() const { + return parameter_count() + specials_count() + local_count(); + } + void Bind(Variable* variable, HValue* value) { Bind(IndexFor(variable), value); } @@ -705,8 +709,12 @@ class HGraphBuilder: public AstVisitor { // can have a separate lifetime. class BreakAndContinueInfo BASE_EMBEDDED { public: - explicit BreakAndContinueInfo(BreakableStatement* target) - : target_(target), break_block_(NULL), continue_block_(NULL) { + explicit BreakAndContinueInfo(BreakableStatement* target, + int drop_extra = 0) + : target_(target), + break_block_(NULL), + continue_block_(NULL), + drop_extra_(drop_extra) { } BreakableStatement* target() { return target_; } @@ -714,11 +722,13 @@ class HGraphBuilder: public AstVisitor { void set_break_block(HBasicBlock* block) { break_block_ = block; } HBasicBlock* continue_block() { return continue_block_; } void set_continue_block(HBasicBlock* block) { continue_block_ = block; } + int drop_extra() { return drop_extra_; } private: BreakableStatement* target_; HBasicBlock* break_block_; HBasicBlock* continue_block_; + int drop_extra_; }; // A helper class to maintain a stack of current BreakAndContinueInfo @@ -737,7 +747,7 @@ class HGraphBuilder: public AstVisitor { BreakAndContinueScope* next() { return next_; } // Search the break stack for a break or continue target. 
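// The drop_extra out parameter added below accumulates
// BreakAndContinueInfo::drop_extra() while the search unwinds enclosing
// scopes, so callers can Drop() values that constructs like the new
// fast-case for-in keep on the expression stack.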
- HBasicBlock* Get(BreakableStatement* stmt, BreakType type); + HBasicBlock* Get(BreakableStatement* stmt, BreakType type, int* drop_extra); private: BreakAndContinueInfo* info_; @@ -780,6 +790,8 @@ class HGraphBuilder: public AstVisitor { FunctionState* function_state() const { return function_state_; } + void VisitDeclarations(ZoneList<Declaration*>* declarations); + private: // Type of a member function that generates inline code for a native function. typedef void (HGraphBuilder::*InlineFunctionGenerator)(CallRuntime* call); @@ -841,7 +853,8 @@ class HGraphBuilder: public AstVisitor { void HandleVariableDeclaration(VariableProxy* proxy, VariableMode mode, - FunctionLiteral* function); + FunctionLiteral* function, + int* global_count); void VisitDelete(UnaryOperation* expr); void VisitVoid(UnaryOperation* expr); diff --git a/deps/v8/src/ia32/assembler-ia32.cc b/deps/v8/src/ia32/assembler-ia32.cc index bb050b63f9..a42f6324e3 100644 --- a/deps/v8/src/ia32/assembler-ia32.cc +++ b/deps/v8/src/ia32/assembler-ia32.cc @@ -32,7 +32,7 @@ // The original source code covered by the above license above has been modified // significantly by Google Inc. -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. #include "v8.h" @@ -575,7 +575,7 @@ void Assembler::leave() { void Assembler::mov_b(Register dst, const Operand& src) { - ASSERT(dst.code() < 4); + CHECK(dst.is_byte_register()); EnsureSpace ensure_space(this); EMIT(0x8A); emit_operand(dst, src); @@ -591,7 +591,7 @@ void Assembler::mov_b(const Operand& dst, int8_t imm8) { void Assembler::mov_b(const Operand& dst, Register src) { - ASSERT(src.code() < 4); + CHECK(src.is_byte_register()); EnsureSpace ensure_space(this); EMIT(0x88); emit_operand(src, dst); @@ -829,7 +829,7 @@ void Assembler::cmpb(const Operand& op, int8_t imm8) { void Assembler::cmpb(const Operand& op, Register reg) { - ASSERT(reg.is_byte_register()); + CHECK(reg.is_byte_register()); EnsureSpace ensure_space(this); EMIT(0x38); emit_operand(reg, op); @@ -837,7 +837,7 @@ void Assembler::cmpb(const Operand& op, Register reg) { void Assembler::cmpb(Register reg, const Operand& op) { - ASSERT(reg.is_byte_register()); + CHECK(reg.is_byte_register()); EnsureSpace ensure_space(this); EMIT(0x3A); emit_operand(reg, op); @@ -901,6 +901,7 @@ void Assembler::cmpw_ax(const Operand& op) { void Assembler::dec_b(Register dst) { + CHECK(dst.is_byte_register()); EnsureSpace ensure_space(this); EMIT(0xFE); EMIT(0xC8 | dst.code()); @@ -1174,7 +1175,9 @@ void Assembler::test(Register reg, const Immediate& imm) { EnsureSpace ensure_space(this); // Only use test against byte for registers that have a byte // variant: eax, ebx, ecx, and edx. 
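// (Only eax, ebx, ecx, and edx have 8-bit forms on ia32: al, bl, cl, dl.
// The patch replaces the raw "code() < 4" tests with is_byte_register()
// and upgrades ASSERTs to CHECKs so a wrong register fails loudly even in
// release builds.)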
- if (imm.rmode_ == RelocInfo::NONE && is_uint8(imm.x_) && reg.code() < 4) { + if (imm.rmode_ == RelocInfo::NONE && + is_uint8(imm.x_) && + reg.is_byte_register()) { uint8_t imm8 = imm.x_; if (reg.is(eax)) { EMIT(0xA8); @@ -1204,6 +1207,7 @@ void Assembler::test(Register reg, const Operand& op) { void Assembler::test_b(Register reg, const Operand& op) { + CHECK(reg.is_byte_register()); EnsureSpace ensure_space(this); EMIT(0x84); emit_operand(reg, op); @@ -1219,7 +1223,7 @@ void Assembler::test(const Operand& op, const Immediate& imm) { void Assembler::test_b(const Operand& op, uint8_t imm8) { - if (op.is_reg_only() && op.reg().code() >= 4) { + if (op.is_reg_only() && !op.reg().is_byte_register()) { test(op, Immediate(imm8)); return; } diff --git a/deps/v8/src/ia32/builtins-ia32.cc b/deps/v8/src/ia32/builtins-ia32.cc index efa3456d8e..fdf21e5429 100644 --- a/deps/v8/src/ia32/builtins-ia32.cc +++ b/deps/v8/src/ia32/builtins-ia32.cc @@ -1088,7 +1088,7 @@ static void ArrayNativeCode(MacroAssembler* masm, bool construct_call, Label* call_generic_code) { Label argc_one_or_more, argc_two_or_more, prepare_generic_code_call, - empty_array, not_empty_array; + empty_array, not_empty_array, finish, cant_transition_map, not_double; // Push the constructor and argc. No need to tag argc as a smi, as there will // be no garbage collection with this on the stack. @@ -1247,6 +1247,7 @@ static void ArrayNativeCode(MacroAssembler* masm, // esp[8]: constructor (only if construct_call) // esp[12]: return address // esp[16]: last argument + __ bind(&finish); __ mov(ecx, Operand(esp, last_arg_offset - kPointerSize)); __ pop(eax); __ pop(ebx); @@ -1255,9 +1256,43 @@ static void ArrayNativeCode(MacroAssembler* masm, __ jmp(ecx); __ bind(&has_non_smi_element); + // Double values are handled by the runtime. + __ CheckMap(eax, + masm->isolate()->factory()->heap_number_map(), + ¬_double, + DONT_DO_SMI_CHECK); + __ bind(&cant_transition_map); // Throw away the array that's only been partially constructed. __ pop(eax); __ UndoAllocationInNewSpace(eax); + __ jmp(&prepare_generic_code_call); + + __ bind(¬_double); + // Transition FAST_SMI_ONLY_ELEMENTS to FAST_ELEMENTS. + __ mov(ebx, Operand(esp, 0)); + __ mov(edi, FieldOperand(ebx, HeapObject::kMapOffset)); + __ LoadTransitionedArrayMapConditional( + FAST_SMI_ONLY_ELEMENTS, + FAST_ELEMENTS, + edi, + eax, + &cant_transition_map); + __ mov(FieldOperand(ebx, HeapObject::kMapOffset), edi); + __ RecordWriteField(ebx, HeapObject::kMapOffset, edi, eax, + kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK); + + // Prepare to re-enter the loop + __ lea(edi, Operand(esp, last_arg_offset)); + + // Finish the array initialization loop. + Label loop2; + __ bind(&loop2); + __ mov(eax, Operand(edi, ecx, times_pointer_size, 0)); + __ mov(Operand(edx, 0), eax); + __ add(edx, Immediate(kPointerSize)); + __ dec(ecx); + __ j(greater_equal, &loop2); + __ jmp(&finish); // Restore argc and constructor before running the generic code. __ bind(&prepare_generic_code_call); @@ -1659,8 +1694,9 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) { __ j(not_equal, &skip, Label::kNear); __ ret(0); - // If we decide not to perform on-stack replacement we perform a - // stack guard check to enable interrupts. + // Insert a stack guard check so that if we decide not to perform + // on-stack replacement right away, the function calling this stub can + // still be interrupted. 
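// (When OSR is declined, execution falls through to the ordinary
// stack-limit compare and StackCheckStub call below, so a pending
// interrupt is still serviced.)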
__ bind(&stack_check); Label ok; ExternalReference stack_limit = diff --git a/deps/v8/src/ia32/code-stubs-ia32.cc b/deps/v8/src/ia32/code-stubs-ia32.cc index b3a0b9538e..2d078cdc06 100644 --- a/deps/v8/src/ia32/code-stubs-ia32.cc +++ b/deps/v8/src/ia32/code-stubs-ia32.cc @@ -3922,7 +3922,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { __ Throw(eax); __ bind(&throw_termination_exception); - __ ThrowUncatchable(TERMINATION, eax); + __ ThrowUncatchable(eax); __ bind(&failure); // For failure to match, return null. @@ -4573,6 +4573,11 @@ void StackCheckStub::Generate(MacroAssembler* masm) { } +void InterruptStub::Generate(MacroAssembler* masm) { + __ TailCallRuntime(Runtime::kInterrupt, 0, 1); +} + + static void GenerateRecordCallTarget(MacroAssembler* masm) { // Cache the called function in a global property cell. Cache states // are uninitialized, monomorphic (indicated by a JSFunction), and @@ -4780,11 +4785,6 @@ void CEntryStub::GenerateAheadOfTime() { } -void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) { - __ Throw(eax); -} - - void CEntryStub::GenerateCore(MacroAssembler* masm, Label* throw_normal_exception, Label* throw_termination_exception, @@ -4903,12 +4903,6 @@ void CEntryStub::GenerateCore(MacroAssembler* masm, } -void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm, - UncatchableExceptionType type) { - __ ThrowUncatchable(type, eax); -} - - void CEntryStub::Generate(MacroAssembler* masm) { // eax: number of arguments including receiver // ebx: pointer to C function (C callee-saved) @@ -4962,13 +4956,24 @@ void CEntryStub::Generate(MacroAssembler* masm) { true); __ bind(&throw_out_of_memory_exception); - GenerateThrowUncatchable(masm, OUT_OF_MEMORY); + // Set external caught exception to false. + Isolate* isolate = masm->isolate(); + ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress, + isolate); + __ mov(Operand::StaticVariable(external_caught), Immediate(false)); + + // Set pending exception and eax to out of memory exception. + ExternalReference pending_exception(Isolate::kPendingExceptionAddress, + isolate); + __ mov(eax, reinterpret_cast<int32_t>(Failure::OutOfMemoryException())); + __ mov(Operand::StaticVariable(pending_exception), eax); + // Fall through to the next label. __ bind(&throw_termination_exception); - GenerateThrowUncatchable(masm, TERMINATION); + __ ThrowUncatchable(eax); __ bind(&throw_normal_exception); - GenerateThrowTOS(masm); + __ Throw(eax); } @@ -7041,11 +7046,13 @@ struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = { // KeyedStoreIC::GenerateGeneric. { ebx, edx, ecx, EMIT_REMEMBERED_SET}, // KeyedStoreStubCompiler::GenerateStoreFastElement. - { edi, edx, ecx, EMIT_REMEMBERED_SET}, + { edi, ebx, ecx, EMIT_REMEMBERED_SET}, + { edx, edi, ebx, EMIT_REMEMBERED_SET}, // ElementsTransitionGenerator::GenerateSmiOnlyToObject // and ElementsTransitionGenerator::GenerateSmiOnlyToDouble // and ElementsTransitionGenerator::GenerateDoubleToObject { edx, ebx, edi, EMIT_REMEMBERED_SET}, + { edx, ebx, edi, OMIT_REMEMBERED_SET}, // ElementsTransitionGenerator::GenerateDoubleToObject { eax, edx, esi, EMIT_REMEMBERED_SET}, { edx, eax, edi, EMIT_REMEMBERED_SET}, diff --git a/deps/v8/src/ia32/codegen-ia32.cc b/deps/v8/src/ia32/codegen-ia32.cc index e5ca02c473..3e085a245d 100644 --- a/deps/v8/src/ia32/codegen-ia32.cc +++ b/deps/v8/src/ia32/codegen-ia32.cc @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. 
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -301,11 +301,17 @@ void ElementsTransitionGenerator::GenerateSmiOnlyToDouble( // -- edx : receiver // -- esp[0] : return address // ----------------------------------- - Label loop, entry, convert_hole, gc_required; + Label loop, entry, convert_hole, gc_required, only_change_map; + + // Check for empty arrays, which only require a map transition and no changes + // to the backing store. + __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset)); + __ cmp(edi, Immediate(masm->isolate()->factory()->empty_fixed_array())); + __ j(equal, &only_change_map); + __ push(eax); __ push(ebx); - __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset)); __ mov(edi, FieldOperand(edi, FixedArray::kLengthOffset)); // Allocate new FixedDoubleArray. @@ -399,6 +405,11 @@ void ElementsTransitionGenerator::GenerateSmiOnlyToDouble( __ pop(ebx); __ pop(eax); + + // Restore esi. + __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); + + __ bind(&only_change_map); // eax: value // ebx: target map // Set transitioned map. @@ -408,10 +419,8 @@ void ElementsTransitionGenerator::GenerateSmiOnlyToDouble( ebx, edi, kDontSaveFPRegs, - EMIT_REMEMBERED_SET, + OMIT_REMEMBERED_SET, OMIT_SMI_CHECK); - // Restore esi. - __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); } @@ -424,12 +433,18 @@ void ElementsTransitionGenerator::GenerateDoubleToObject( // -- edx : receiver // -- esp[0] : return address // ----------------------------------- - Label loop, entry, convert_hole, gc_required; + Label loop, entry, convert_hole, gc_required, only_change_map, success; + + // Check for empty arrays, which only require a map transition and no changes + // to the backing store. + __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset)); + __ cmp(edi, Immediate(masm->isolate()->factory()->empty_fixed_array())); + __ j(equal, &only_change_map); + __ push(eax); __ push(edx); __ push(ebx); - __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset)); __ mov(ebx, FieldOperand(edi, FixedDoubleArray::kLengthOffset)); // Allocate new FixedArray. @@ -446,6 +461,20 @@ void ElementsTransitionGenerator::GenerateDoubleToObject( __ jmp(&entry); + // ebx: target map + // edx: receiver + // Set transitioned map. + __ bind(&only_change_map); + __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx); + __ RecordWriteField(edx, + HeapObject::kMapOffset, + ebx, + edi, + kDontSaveFPRegs, + OMIT_REMEMBERED_SET, + OMIT_SMI_CHECK); + __ jmp(&success); + // Call into runtime if GC is required. __ bind(&gc_required); __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); @@ -507,7 +536,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject( ebx, edi, kDontSaveFPRegs, - EMIT_REMEMBERED_SET, + OMIT_REMEMBERED_SET, OMIT_SMI_CHECK); // Replace receiver's backing store with newly created and filled FixedArray. __ mov(FieldOperand(edx, JSObject::kElementsOffset), eax); @@ -522,6 +551,8 @@ void ElementsTransitionGenerator::GenerateDoubleToObject( // Restore registers. 
__ pop(eax); __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); + + __ bind(&success); } diff --git a/deps/v8/src/ia32/deoptimizer-ia32.cc b/deps/v8/src/ia32/deoptimizer-ia32.cc index 14f26757e9..0c552d7d01 100644 --- a/deps/v8/src/ia32/deoptimizer-ia32.cc +++ b/deps/v8/src/ia32/deoptimizer-ia32.cc @@ -205,6 +205,15 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) { } +static const byte kJnsInstruction = 0x79; +static const byte kJnsOffset = 0x11; +static const byte kJaeInstruction = 0x73; +static const byte kJaeOffset = 0x07; +static const byte kCallInstruction = 0xe8; +static const byte kNopByteOne = 0x66; +static const byte kNopByteTwo = 0x90; + + void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code, Address pc_after, Code* check_code, @@ -228,11 +237,17 @@ void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code, // call <on-stack replacment> // test eax, <loop nesting depth> // ok: - ASSERT(*(call_target_address - 3) == 0x73 && // jae - *(call_target_address - 2) == 0x07 && // offset - *(call_target_address - 1) == 0xe8); // call - *(call_target_address - 3) = 0x66; // 2 byte nop part 1 - *(call_target_address - 2) = 0x90; // 2 byte nop part 2 + + if (FLAG_count_based_interrupts) { + ASSERT(*(call_target_address - 3) == kJnsInstruction); + ASSERT(*(call_target_address - 2) == kJnsOffset); + } else { + ASSERT(*(call_target_address - 3) == kJaeInstruction); + ASSERT(*(call_target_address - 2) == kJaeOffset); + } + ASSERT(*(call_target_address - 1) == kCallInstruction); + *(call_target_address - 3) = kNopByteOne; + *(call_target_address - 2) = kNopByteTwo; Assembler::set_target_address_at(call_target_address, replacement_code->entry()); @@ -248,13 +263,19 @@ void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code, Address call_target_address = pc_after - kIntSize; ASSERT(replacement_code->entry() == Assembler::target_address_at(call_target_address)); + // Replace the nops from patching (Deoptimizer::PatchStackCheckCode) to // restore the conditional branch. - ASSERT(*(call_target_address - 3) == 0x66 && // 2 byte nop part 1 - *(call_target_address - 2) == 0x90 && // 2 byte nop part 2 - *(call_target_address - 1) == 0xe8); // call - *(call_target_address - 3) = 0x73; // jae - *(call_target_address - 2) = 0x07; // offset + ASSERT(*(call_target_address - 3) == kNopByteOne && + *(call_target_address - 2) == kNopByteTwo && + *(call_target_address - 1) == kCallInstruction); + if (FLAG_count_based_interrupts) { + *(call_target_address - 3) = kJnsInstruction; + *(call_target_address - 2) = kJnsOffset; + } else { + *(call_target_address - 3) = kJaeInstruction; + *(call_target_address - 2) = kJaeOffset; + } Assembler::set_target_address_at(call_target_address, check_code->entry()); diff --git a/deps/v8/src/ia32/full-codegen-ia32.cc b/deps/v8/src/ia32/full-codegen-ia32.cc index 7bb4cffad0..469ead9510 100644 --- a/deps/v8/src/ia32/full-codegen-ia32.cc +++ b/deps/v8/src/ia32/full-codegen-ia32.cc @@ -113,12 +113,12 @@ class JumpPatchSite BASE_EMBEDDED { // // The function builds a JS frame. Please see JavaScriptFrameConstants in // frames-ia32.h for its layout. 
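// Annotation, not part of the patch: the rewritten Generate() below seeds
// a JSGlobalPropertyCell with FLAG_interrupt_budget. A rough sketch of the
// countdown that EmitStackCheck() and EmitReturnSequence() then emit:
//
//   counter -= weight;                  // weight ~ min(127, max(1, distance / 100))
//   if (counter < 0) {                  // the emitted "jns ok" falls through
//     InterruptStub();                  // replaces the plain stack-limit poll
//     counter = FLAG_interrupt_budget;  // reset the countdown
//   }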
-void FullCodeGenerator::Generate(CompilationInfo* info) { - ASSERT(info_ == NULL); - info_ = info; - scope_ = info->scope(); +void FullCodeGenerator::Generate() { + CompilationInfo* info = info_; handler_table_ = isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED); + profiling_counter_ = isolate()->factory()->NewJSGlobalPropertyCell( + Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget))); SetFunctionPosition(function()); Comment cmnt(masm_, "[ function compiled by full code generator"); @@ -132,7 +132,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info) { // We can optionally optimize based on counters rather than statistical // sampling. if (info->ShouldSelfOptimize()) { - if (FLAG_trace_opt) { + if (FLAG_trace_opt_verbose) { PrintF("[adding self-optimization header to %s]\n", *info->function()->debug_name()->ToCString()); } @@ -323,15 +323,34 @@ void FullCodeGenerator::ClearAccumulator() { } -void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt) { +void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt, + Label* back_edge_target) { Comment cmnt(masm_, "[ Stack check"); Label ok; - ExternalReference stack_limit = - ExternalReference::address_of_stack_limit(isolate()); - __ cmp(esp, Operand::StaticVariable(stack_limit)); - __ j(above_equal, &ok, Label::kNear); - StackCheckStub stub; - __ CallStub(&stub); + + if (FLAG_count_based_interrupts) { + int weight = 1; + if (FLAG_weighted_back_edges) { + ASSERT(back_edge_target->is_bound()); + int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target); + weight = Min(127, Max(1, distance / 100)); + } + __ sub(Operand::Cell(profiling_counter_), Immediate(Smi::FromInt(weight))); + __ j(positive, &ok, Label::kNear); + InterruptStub stub; + __ CallStub(&stub); + } else { + // Count based interrupts happen often enough when they are enabled + // that the additional stack checks are not necessary (they would + // only check for interrupts). + ExternalReference stack_limit = + ExternalReference::address_of_stack_limit(isolate()); + __ cmp(esp, Operand::StaticVariable(stack_limit)); + __ j(above_equal, &ok, Label::kNear); + StackCheckStub stub; + __ CallStub(&stub); + } + // Record a mapping of this PC offset to the OSR id. This is used to find // the AST id from the unoptimized code in order to use it as a key into // the deoptimization input data found in the optimized code. @@ -344,6 +363,12 @@ void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt) { ASSERT(loop_depth() > 0); __ test(eax, Immediate(Min(loop_depth(), Code::kMaxLoopNestingMarker))); + if (FLAG_count_based_interrupts) { + // Reset the countdown. + __ mov(Operand::Cell(profiling_counter_), + Immediate(Smi::FromInt(FLAG_interrupt_budget))); + } + __ bind(&ok); PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS); // Record a mapping of the OSR id to this PC. This is used if the OSR @@ -364,6 +389,26 @@ void FullCodeGenerator::EmitReturnSequence() { __ push(eax); __ CallRuntime(Runtime::kTraceExit, 1); } + if (FLAG_interrupt_at_exit) { + // Pretend that the exit is a backwards jump to the entry. + int weight = 1; + if (FLAG_weighted_back_edges) { + int distance = masm_->pc_offset(); + weight = Min(127, Max(1, distance / 100)); + } + __ sub(Operand::Cell(profiling_counter_), + Immediate(Smi::FromInt(weight))); + Label ok; + __ j(positive, &ok, Label::kNear); + __ push(eax); + InterruptStub stub; + __ CallStub(&stub); + __ pop(eax); + // Reset the countdown. 
+ __ mov(Operand::Cell(profiling_counter_), + Immediate(Smi::FromInt(FLAG_interrupt_budget))); + __ bind(&ok); + } #ifdef DEBUG // Add a label for checking the size of the code used for returning. Label check_exit_codesize; @@ -851,7 +896,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) { // Record position before stub call for type feedback. SetSourcePosition(clause->position()); Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT); - __ call(ic, RelocInfo::CODE_TARGET, clause->CompareId()); + CallIC(ic, RelocInfo::CODE_TARGET, clause->CompareId()); patch_site.EmitPatchInfo(); __ test(eax, eax); __ j(not_equal, &next_test); @@ -900,6 +945,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) { __ cmp(eax, isolate()->factory()->null_value()); __ j(equal, &exit); + PrepareForBailoutForId(stmt->PrepareId(), TOS_REG); + // Convert the object to a JS object. Label convert, done_convert; __ JumpIfSmi(eax, &convert, Label::kNear); @@ -912,7 +959,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) { __ push(eax); // Check for proxies. - Label call_runtime; + Label call_runtime, use_cache, fixed_array; STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE); __ CmpObjectType(eax, LAST_JS_PROXY_TYPE, ecx); __ j(below_equal, &call_runtime); @@ -921,61 +968,19 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) { // the JSObject::IsSimpleEnum cache validity checks. If we cannot // guarantee cache validity, call the runtime system to check cache // validity or get the property names in a fixed array. - Label next; - __ mov(ecx, eax); - __ bind(&next); - - // Check that there are no elements. Register ecx contains the - // current JS object we've reached through the prototype chain. - __ cmp(FieldOperand(ecx, JSObject::kElementsOffset), - isolate()->factory()->empty_fixed_array()); - __ j(not_equal, &call_runtime); - - // Check that instance descriptors are not empty so that we can - // check for an enum cache. Leave the map in ebx for the subsequent - // prototype load. - __ mov(ebx, FieldOperand(ecx, HeapObject::kMapOffset)); - __ mov(edx, FieldOperand(ebx, Map::kInstanceDescriptorsOrBitField3Offset)); - __ JumpIfSmi(edx, &call_runtime); - - // Check that there is an enum cache in the non-empty instance - // descriptors (edx). This is the case if the next enumeration - // index field does not contain a smi. - __ mov(edx, FieldOperand(edx, DescriptorArray::kEnumerationIndexOffset)); - __ JumpIfSmi(edx, &call_runtime); - - // For all objects but the receiver, check that the cache is empty. - Label check_prototype; - __ cmp(ecx, eax); - __ j(equal, &check_prototype, Label::kNear); - __ mov(edx, FieldOperand(edx, DescriptorArray::kEnumCacheBridgeCacheOffset)); - __ cmp(edx, isolate()->factory()->empty_fixed_array()); - __ j(not_equal, &call_runtime); - - // Load the prototype from the map and loop if non-null. - __ bind(&check_prototype); - __ mov(ecx, FieldOperand(ebx, Map::kPrototypeOffset)); - __ cmp(ecx, isolate()->factory()->null_value()); - __ j(not_equal, &next); + __ CheckEnumCache(&call_runtime); - // The enum cache is valid. Load the map of the object being - // iterated over and use the cache for the iteration. - Label use_cache; __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset)); __ jmp(&use_cache, Label::kNear); // Get the set of properties to enumerate. __ bind(&call_runtime); - __ push(eax); // Duplicate the enumerable object on the stack. 
+ __ push(eax); __ CallRuntime(Runtime::kGetPropertyNamesFast, 1); - - // If we got a map from the runtime call, we can do a fast - // modification check. Otherwise, we got a fixed array, and we have - // to do a slow check. - Label fixed_array; __ cmp(FieldOperand(eax, HeapObject::kMapOffset), isolate()->factory()->meta_map()); - __ j(not_equal, &fixed_array, Label::kNear); + __ j(not_equal, &fixed_array); + // We got a map in register eax. Get the enumeration cache from it. __ bind(&use_cache); @@ -1008,6 +1013,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) { __ push(Immediate(Smi::FromInt(0))); // Initial index. // Generate code for doing the condition check. + PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS); __ bind(&loop); __ mov(eax, Operand(esp, 0 * kPointerSize)); // Get the current index. __ cmp(eax, Operand(esp, 1 * kPointerSize)); // Compare to the array length. @@ -1050,7 +1056,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) { __ mov(result_register(), ebx); // Perform the assignment as if via '='. { EffectContext context(this); - EmitAssignment(stmt->each(), stmt->AssignmentId()); + EmitAssignment(stmt->each()); } // Generate code for the body of the loop. @@ -1061,7 +1067,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) { __ bind(loop_statement.continue_label()); __ add(Operand(esp, 0 * kPointerSize), Immediate(Smi::FromInt(1))); - EmitStackCheck(stmt); + EmitStackCheck(stmt, &loop); __ jmp(&loop); // Remove the pointers stored on the stack. @@ -1069,6 +1075,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) { __ add(esp, Immediate(5 * kPointerSize)); // Exit and decrement the loop depth. + PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS); __ bind(&exit); decrement_loop_depth(); } @@ -1164,7 +1171,7 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var, RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF) ? RelocInfo::CODE_TARGET : RelocInfo::CODE_TARGET_CONTEXT; - __ call(ic, mode); + CallIC(ic, mode); } @@ -1245,7 +1252,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) { __ mov(eax, GlobalObjectOperand()); __ mov(ecx, var->name()); Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize(); - __ call(ic, RelocInfo::CODE_TARGET_CONTEXT); + CallIC(ic, RelocInfo::CODE_TARGET_CONTEXT); context()->Plug(eax); break; } @@ -1445,7 +1452,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) { Handle<Code> ic = is_classic_mode() ? 
isolate()->builtins()->StoreIC_Initialize() : isolate()->builtins()->StoreIC_Initialize_Strict(); - __ call(ic, RelocInfo::CODE_TARGET, key->id()); + CallIC(ic, RelocInfo::CODE_TARGET, key->id()); PrepareForBailoutForId(key->id(), NO_REGISTERS); } else { VisitForEffect(value); @@ -1472,7 +1479,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) { Smi::FromInt(1) : Smi::FromInt(0))); VisitForStackValue(value); - __ CallRuntime(Runtime::kDefineAccessor, 4); + __ push(Immediate(Smi::FromInt(NONE))); + __ CallRuntime(Runtime::kDefineOrRedefineAccessorProperty, 5); break; default: UNREACHABLE(); } @@ -1709,14 +1717,14 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) { ASSERT(!key->handle()->IsSmi()); __ mov(ecx, Immediate(key->handle())); Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize(); - __ call(ic, RelocInfo::CODE_TARGET, prop->id()); + CallIC(ic, RelocInfo::CODE_TARGET, prop->id()); } void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) { SetSourcePosition(prop->position()); Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize(); - __ call(ic, RelocInfo::CODE_TARGET, prop->id()); + CallIC(ic, RelocInfo::CODE_TARGET, prop->id()); } @@ -1737,7 +1745,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr, __ bind(&stub_call); __ mov(eax, ecx); BinaryOpStub stub(op, mode); - __ call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id()); + CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id()); patch_site.EmitPatchInfo(); __ jmp(&done, Label::kNear); @@ -1822,13 +1830,13 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, __ pop(edx); BinaryOpStub stub(op, mode); JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code. - __ call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id()); + CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id()); patch_site.EmitPatchInfo(); context()->Plug(eax); } -void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) { +void FullCodeGenerator::EmitAssignment(Expression* expr) { // Invalid left-hand sides are rewritten to have a 'throw // ReferenceError' on the left-hand side. if (!expr->IsValidLeftHandSide()) { @@ -1863,7 +1871,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) { Handle<Code> ic = is_classic_mode() ? isolate()->builtins()->StoreIC_Initialize() : isolate()->builtins()->StoreIC_Initialize_Strict(); - __ call(ic); + CallIC(ic); break; } case KEYED_PROPERTY: { @@ -1876,11 +1884,10 @@ void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) { Handle<Code> ic = is_classic_mode() ? isolate()->builtins()->KeyedStoreIC_Initialize() : isolate()->builtins()->KeyedStoreIC_Initialize_Strict(); - __ call(ic); + CallIC(ic); break; } } - PrepareForBailoutForId(bailout_ast_id, TOS_REG); context()->Plug(eax); } @@ -1894,7 +1901,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Handle<Code> ic = is_classic_mode() ? isolate()->builtins()->StoreIC_Initialize() : isolate()->builtins()->StoreIC_Initialize_Strict(); - __ call(ic, RelocInfo::CODE_TARGET_CONTEXT); + CallIC(ic, RelocInfo::CODE_TARGET_CONTEXT); } else if (op == Token::INIT_CONST) { // Const initializers need a write barrier. @@ -2003,7 +2010,7 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) { Handle<Code> ic = is_classic_mode() ? 
isolate()->builtins()->StoreIC_Initialize() : isolate()->builtins()->StoreIC_Initialize_Strict(); - __ call(ic, RelocInfo::CODE_TARGET, expr->id()); + CallIC(ic, RelocInfo::CODE_TARGET, expr->id()); // If the assignment ends an initialization block, revert to fast case. if (expr->ends_initialization_block()) { @@ -2043,7 +2050,7 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) { Handle<Code> ic = is_classic_mode() ? isolate()->builtins()->KeyedStoreIC_Initialize() : isolate()->builtins()->KeyedStoreIC_Initialize_Strict(); - __ call(ic, RelocInfo::CODE_TARGET, expr->id()); + CallIC(ic, RelocInfo::CODE_TARGET, expr->id()); // If the assignment ends an initialization block, revert to fast case. if (expr->ends_initialization_block()) { @@ -2077,6 +2084,16 @@ void FullCodeGenerator::VisitProperty(Property* expr) { } +void FullCodeGenerator::CallIC(Handle<Code> code, + RelocInfo::Mode rmode, + unsigned ast_id) { + ic_total_count_++; + __ call(code, rmode, ast_id); +} + + + + void FullCodeGenerator::EmitCallWithIC(Call* expr, Handle<Object> name, RelocInfo::Mode mode) { @@ -2093,7 +2110,7 @@ void FullCodeGenerator::EmitCallWithIC(Call* expr, SetSourcePosition(expr->position()); Handle<Code> ic = isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode); - __ call(ic, mode, expr->id()); + CallIC(ic, mode, expr->id()); RecordJSReturnSite(expr); // Restore context register. __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); @@ -2125,7 +2142,7 @@ void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr, Handle<Code> ic = isolate()->stub_cache()->ComputeKeyedCallInitialize(arg_count); __ mov(ecx, Operand(esp, (arg_count + 1) * kPointerSize)); // Key. - __ call(ic, RelocInfo::CODE_TARGET, expr->id()); + CallIC(ic, RelocInfo::CODE_TARGET, expr->id()); RecordJSReturnSite(expr); // Restore context register. __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); @@ -3712,7 +3729,7 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) { RelocInfo::Mode mode = RelocInfo::CODE_TARGET; Handle<Code> ic = isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode); - __ call(ic, mode, expr->id()); + CallIC(ic, mode, expr->id()); // Restore context register. __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); } else { @@ -3870,7 +3887,7 @@ void FullCodeGenerator::EmitUnaryOperation(UnaryOperation* expr, // accumulator register eax. VisitForAccumulatorValue(expr->expression()); SetSourcePosition(expr->position()); - __ call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id()); + CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id()); context()->Plug(eax); } @@ -3990,7 +4007,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) { __ mov(edx, eax); __ mov(eax, Immediate(Smi::FromInt(1))); BinaryOpStub stub(expr->binary_op(), NO_OVERWRITE); - __ call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountId()); + CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountId()); patch_site.EmitPatchInfo(); __ bind(&done); @@ -4024,7 +4041,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) { Handle<Code> ic = is_classic_mode() ? 
isolate()->builtins()->StoreIC_Initialize() : isolate()->builtins()->StoreIC_Initialize_Strict(); - __ call(ic, RelocInfo::CODE_TARGET, expr->id()); + CallIC(ic, RelocInfo::CODE_TARGET, expr->id()); PrepareForBailoutForId(expr->AssignmentId(), TOS_REG); if (expr->is_postfix()) { if (!context()->IsEffect()) { @@ -4041,7 +4058,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) { Handle<Code> ic = is_classic_mode() ? isolate()->builtins()->KeyedStoreIC_Initialize() : isolate()->builtins()->KeyedStoreIC_Initialize_Strict(); - __ call(ic, RelocInfo::CODE_TARGET, expr->id()); + CallIC(ic, RelocInfo::CODE_TARGET, expr->id()); PrepareForBailoutForId(expr->AssignmentId(), TOS_REG); if (expr->is_postfix()) { // Result is on the stack @@ -4069,7 +4086,7 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) { Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize(); // Use a regular load, not a contextual load, to avoid a reference // error. - __ call(ic); + CallIC(ic); PrepareForBailout(expr, TOS_REG); context()->Plug(eax); } else if (proxy != NULL && proxy->var()->IsLookupSlot()) { @@ -4249,7 +4266,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) { // Record position and call the compare IC. SetSourcePosition(expr->position()); Handle<Code> ic = CompareIC::GetUninitialized(op); - __ call(ic, RelocInfo::CODE_TARGET, expr->id()); + CallIC(ic, RelocInfo::CODE_TARGET, expr->id()); patch_site.EmitPatchInfo(); PrepareForBailoutBeforeSplit(expr, true, if_true, if_false); diff --git a/deps/v8/src/ia32/ic-ia32.cc b/deps/v8/src/ia32/ic-ia32.cc index dbb0554ac0..33f247a349 100644 --- a/deps/v8/src/ia32/ic-ia32.cc +++ b/deps/v8/src/ia32/ic-ia32.cc @@ -1639,6 +1639,9 @@ void KeyedStoreIC::GenerateTransitionElementsSmiToDouble(MacroAssembler* masm) { __ pop(ebx); __ push(edx); __ push(ebx); // return address + // Leaving the code managed by the register allocator and return to the + // convention of using esi as context register. + __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); __ TailCallRuntime(Runtime::kTransitionElementsSmiToDouble, 1, 1); } @@ -1662,6 +1665,9 @@ void KeyedStoreIC::GenerateTransitionElementsDoubleToObject( __ pop(ebx); __ push(edx); __ push(ebx); // return address + // Leaving the code managed by the register allocator and return to the + // convention of using esi as context register. + __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); __ TailCallRuntime(Runtime::kTransitionElementsDoubleToObject, 1, 1); } diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.cc b/deps/v8/src/ia32/lithium-codegen-ia32.cc index 5a276f4527..fec330849a 100644 --- a/deps/v8/src/ia32/lithium-codegen-ia32.cc +++ b/deps/v8/src/ia32/lithium-codegen-ia32.cc @@ -1868,11 +1868,10 @@ void LCodeGen::EmitClassOfTest(Label* is_true, // Faster code path to avoid two compares: subtract lower bound from the // actual type and do a signed compare with the width of the type range. 
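// (The movzx_b below matters: Map::kInstanceTypeOffset is a byte-wide
// field, so zero-extending keeps the following full-width sub and cmp from
// seeing neighboring map bytes in the upper bits of temp2.)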
__ mov(temp, FieldOperand(input, HeapObject::kMapOffset)); - __ mov(temp2, FieldOperand(temp, Map::kInstanceTypeOffset)); + __ movzx_b(temp2, FieldOperand(temp, Map::kInstanceTypeOffset)); __ sub(Operand(temp2), Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); - __ cmpb(Operand(temp2), - static_cast<int8_t>(LAST_NONCALLABLE_SPEC_OBJECT_TYPE - - FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); + __ cmp(Operand(temp2), Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE - + FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); __ j(above, is_false); } @@ -2690,6 +2689,15 @@ void LCodeGen::DoOuterContext(LOuterContext* instr) { } +void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) { + ASSERT(ToRegister(instr->InputAt(0)).is(esi)); + __ push(esi); // The context is the first argument. + __ push(Immediate(instr->hydrogen()->pairs())); + __ push(Immediate(Smi::FromInt(instr->hydrogen()->flags()))); + CallRuntime(Runtime::kDeclareGlobals, 3, instr); +} + + void LCodeGen::DoGlobalObject(LGlobalObject* instr) { Register context = ToRegister(instr->context()); Register result = ToRegister(instr->result()); @@ -4070,7 +4078,7 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) { } else { __ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset)); __ and_(temp, mask); - __ cmpb(Operand(temp), tag); + __ cmp(temp, tag); DeoptimizeIf(not_equal, instr->environment()); } } @@ -4251,26 +4259,35 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object, __ Assert(equal, "Unexpected object literal boilerplate"); } + // Only elements backing stores for non-COW arrays need to be copied. + Handle<FixedArrayBase> elements(object->elements()); + bool has_elements = elements->length() > 0 && + elements->map() != isolate()->heap()->fixed_cow_array_map(); + // Increase the offset so that subsequent objects end up right after - // this one. - int current_offset = *offset; - int size = object->map()->instance_size(); - *offset += size; + // this object and its backing store. + int object_offset = *offset; + int object_size = object->map()->instance_size(); + int elements_offset = *offset + object_size; + int elements_size = has_elements ? elements->Size() : 0; + *offset += object_size + elements_size; // Copy object header. ASSERT(object->properties()->length() == 0); - ASSERT(object->elements()->length() == 0 || - object->elements()->map() == isolate()->heap()->fixed_cow_array_map()); int inobject_properties = object->map()->inobject_properties(); - int header_size = size - inobject_properties * kPointerSize; + int header_size = object_size - inobject_properties * kPointerSize; for (int i = 0; i < header_size; i += kPointerSize) { - __ mov(ecx, FieldOperand(source, i)); - __ mov(FieldOperand(result, current_offset + i), ecx); + if (has_elements && i == JSObject::kElementsOffset) { + __ lea(ecx, Operand(result, elements_offset)); + } else { + __ mov(ecx, FieldOperand(source, i)); + } + __ mov(FieldOperand(result, object_offset + i), ecx); } // Copy in-object properties. for (int i = 0; i < inobject_properties; i++) { - int total_offset = current_offset + object->GetInObjectPropertyOffset(i); + int total_offset = object_offset + object->GetInObjectPropertyOffset(i); Handle<Object> value = Handle<Object>(object->InObjectPropertyAt(i)); if (value->IsJSObject()) { Handle<JSObject> value_object = Handle<JSObject>::cast(value); @@ -4285,10 +4302,40 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object, __ mov(FieldOperand(result, total_offset), Immediate(value)); } } + + // Copy elements backing store header. 
+ ASSERT(!has_elements || elements->IsFixedArray()); + if (has_elements) { + __ LoadHeapObject(source, elements); + for (int i = 0; i < FixedArray::kHeaderSize; i += kPointerSize) { + __ mov(ecx, FieldOperand(source, i)); + __ mov(FieldOperand(result, elements_offset + i), ecx); + } + } + + // Copy elements backing store content. + ASSERT(!has_elements || elements->IsFixedArray()); + int elements_length = has_elements ? elements->length() : 0; + for (int i = 0; i < elements_length; i++) { + int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i); + Handle<Object> value = JSObject::GetElement(object, i); + if (value->IsJSObject()) { + Handle<JSObject> value_object = Handle<JSObject>::cast(value); + __ lea(ecx, Operand(result, *offset)); + __ mov(FieldOperand(result, total_offset), ecx); + __ LoadHeapObject(source, value_object); + EmitDeepCopy(value_object, result, source, offset); + } else if (value->IsHeapObject()) { + __ LoadHeapObject(ecx, Handle<HeapObject>::cast(value)); + __ mov(FieldOperand(result, total_offset), ecx); + } else { + __ mov(FieldOperand(result, total_offset), Immediate(value)); + } + } } -void LCodeGen::DoObjectLiteralFast(LObjectLiteralFast* instr) { +void LCodeGen::DoFastLiteral(LFastLiteral* instr) { ASSERT(ToRegister(instr->context()).is(esi)); int size = instr->hydrogen()->total_size(); @@ -4310,14 +4357,14 @@ void LCodeGen::DoObjectLiteralFast(LObjectLiteralFast* instr) { } -void LCodeGen::DoObjectLiteralGeneric(LObjectLiteralGeneric* instr) { +void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) { ASSERT(ToRegister(instr->context()).is(esi)); + Handle<FixedArray> literals(instr->environment()->closure()->literals()); Handle<FixedArray> constant_properties = instr->hydrogen()->constant_properties(); // Set up the parameters to the stub/runtime call. - __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset)); - __ push(FieldOperand(eax, JSFunction::kLiteralsOffset)); + __ PushHeapObject(literals); __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index()))); __ push(Immediate(constant_properties)); int flags = instr->hydrogen()->fast_elements() @@ -4414,7 +4461,7 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) { __ push(Immediate(shared_info)); CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); } else { - __ push(Operand(ebp, StandardFrameConstants::kContextOffset)); + __ push(esi); __ push(Immediate(shared_info)); __ push(Immediate(pretenure ? factory()->true_value() @@ -4682,6 +4729,84 @@ void LCodeGen::DoIn(LIn* instr) { } +void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) { + __ cmp(eax, isolate()->factory()->undefined_value()); + DeoptimizeIf(equal, instr->environment()); + + __ cmp(eax, isolate()->factory()->null_value()); + DeoptimizeIf(equal, instr->environment()); + + __ test(eax, Immediate(kSmiTagMask)); + DeoptimizeIf(zero, instr->environment()); + + STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE); + __ CmpObjectType(eax, LAST_JS_PROXY_TYPE, ecx); + DeoptimizeIf(below_equal, instr->environment()); + + Label use_cache, call_runtime; + __ CheckEnumCache(&call_runtime); + + __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset)); + __ jmp(&use_cache, Label::kNear); + + // Get the set of properties to enumerate. 
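// (Same protocol as the full code generator's for-in above: the runtime
// call returns either a map, meaning the enum cache is usable, or a fixed
// array of names. The optimized path below simply deoptimizes in the
// fixed-array case instead of carrying a slow loop.)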
+ __ bind(&call_runtime); + __ push(eax); + CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr); + + __ cmp(FieldOperand(eax, HeapObject::kMapOffset), + isolate()->factory()->meta_map()); + DeoptimizeIf(not_equal, instr->environment()); + __ bind(&use_cache); +} + + +void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) { + Register map = ToRegister(instr->map()); + Register result = ToRegister(instr->result()); + __ LoadInstanceDescriptors(map, result); + __ mov(result, + FieldOperand(result, DescriptorArray::kEnumerationIndexOffset)); + __ mov(result, + FieldOperand(result, FixedArray::SizeFor(instr->idx()))); + __ test(result, result); + DeoptimizeIf(equal, instr->environment()); +} + + +void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) { + Register object = ToRegister(instr->value()); + __ cmp(ToRegister(instr->map()), + FieldOperand(object, HeapObject::kMapOffset)); + DeoptimizeIf(not_equal, instr->environment()); +} + + +void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) { + Register object = ToRegister(instr->object()); + Register index = ToRegister(instr->index()); + + Label out_of_object, done; + __ cmp(index, Immediate(0)); + __ j(less, &out_of_object); + __ mov(object, FieldOperand(object, + index, + times_half_pointer_size, + JSObject::kHeaderSize)); + __ jmp(&done, Label::kNear); + + __ bind(&out_of_object); + __ mov(object, FieldOperand(object, JSObject::kPropertiesOffset)); + __ neg(index); + // Index is now equal to out of object property index plus 1. + __ mov(object, FieldOperand(object, + index, + times_half_pointer_size, + FixedArray::kHeaderSize - kPointerSize)); + __ bind(&done); +} + + #undef __ } } // namespace v8::internal diff --git a/deps/v8/src/ia32/lithium-ia32.cc b/deps/v8/src/ia32/lithium-ia32.cc index 60f105014b..120ab14f80 100644 --- a/deps/v8/src/ia32/lithium-ia32.cc +++ b/deps/v8/src/ia32/lithium-ia32.cc @@ -1150,6 +1150,12 @@ LInstruction* LChunkBuilder::DoOuterContext(HOuterContext* instr) { } +LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) { + LOperand* context = UseFixed(instr->context(), esi); + return MarkAsCall(new(zone()) LDeclareGlobals(context), instr); +} + + LInstruction* LChunkBuilder::DoGlobalObject(HGlobalObject* instr) { LOperand* context = UseRegisterAtStart(instr->value()); return DefineAsRegister(new(zone()) LGlobalObject(context)); @@ -2206,25 +2212,24 @@ LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) { } -LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) { +LInstruction* LChunkBuilder::DoFastLiteral(HFastLiteral* instr) { LOperand* context = UseFixed(instr->context(), esi); return MarkAsCall( - DefineFixed(new(zone()) LArrayLiteral(context), eax), instr); + DefineFixed(new(zone()) LFastLiteral(context), eax), instr); } -LInstruction* LChunkBuilder::DoObjectLiteralFast(HObjectLiteralFast* instr) { +LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) { LOperand* context = UseFixed(instr->context(), esi); return MarkAsCall( - DefineFixed(new(zone()) LObjectLiteralFast(context), eax), instr); + DefineFixed(new(zone()) LArrayLiteral(context), eax), instr); } -LInstruction* LChunkBuilder::DoObjectLiteralGeneric( - HObjectLiteralGeneric* instr) { +LInstruction* LChunkBuilder::DoObjectLiteral(HObjectLiteral* instr) { LOperand* context = UseFixed(instr->context(), esi); return MarkAsCall( - DefineFixed(new(zone()) LObjectLiteralGeneric(context), eax), instr); + DefineFixed(new(zone()) LObjectLiteral(context), eax), instr); } @@ -2403,6 +2408,35 @@ 
LInstruction* LChunkBuilder::DoIn(HIn* instr) { } +LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) { + LOperand* context = UseFixed(instr->context(), esi); + LOperand* object = UseFixed(instr->enumerable(), eax); + LForInPrepareMap* result = new(zone()) LForInPrepareMap(context, object); + return MarkAsCall(DefineFixed(result, eax), instr, CAN_DEOPTIMIZE_EAGERLY); +} + + +LInstruction* LChunkBuilder::DoForInCacheArray(HForInCacheArray* instr) { + LOperand* map = UseRegister(instr->map()); + return AssignEnvironment(DefineAsRegister( + new(zone()) LForInCacheArray(map))); +} + + +LInstruction* LChunkBuilder::DoCheckMapValue(HCheckMapValue* instr) { + LOperand* value = UseRegisterAtStart(instr->value()); + LOperand* map = UseRegisterAtStart(instr->map()); + return AssignEnvironment(new(zone()) LCheckMapValue(value, map)); +} + + +LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) { + LOperand* object = UseRegister(instr->object()); + LOperand* index = UseTempRegister(instr->index()); + return DefineSameAsFirst(new(zone()) LLoadFieldByIndex(object, index)); +} + + } } // namespace v8::internal #endif // V8_TARGET_ARCH_IA32 diff --git a/deps/v8/src/ia32/lithium-ia32.h b/deps/v8/src/ia32/lithium-ia32.h index 825aad18e5..b879fc13cc 100644 --- a/deps/v8/src/ia32/lithium-ia32.h +++ b/deps/v8/src/ia32/lithium-ia32.h @@ -81,11 +81,13 @@ class LCodeGen; V(ConstantI) \ V(ConstantT) \ V(Context) \ + V(DeclareGlobals) \ V(DeleteProperty) \ V(Deoptimize) \ V(DivI) \ V(DoubleToI) \ V(ElementsKind) \ + V(FastLiteral) \ V(FixedArrayBaseLength) \ V(FunctionLiteral) \ V(GetCachedArrayIndex) \ @@ -129,8 +131,7 @@ class LCodeGen; V(NumberTagD) \ V(NumberTagI) \ V(NumberUntagD) \ - V(ObjectLiteralFast) \ - V(ObjectLiteralGeneric) \ + V(ObjectLiteral) \ V(OsrEntry) \ V(OuterContext) \ V(Parameter) \ @@ -166,7 +167,11 @@ class LCodeGen; V(TypeofIsAndBranch) \ V(UnaryMathOperation) \ V(UnknownOSRValue) \ - V(ValueOf) + V(ValueOf) \ + V(ForInPrepareMap) \ + V(ForInCacheArray) \ + V(CheckMapValue) \ + V(LoadFieldByIndex) #define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \ @@ -1385,6 +1390,17 @@ class LOuterContext: public LTemplateInstruction<1, 1, 0> { }; +class LDeclareGlobals: public LTemplateInstruction<0, 1, 0> { + public: + explicit LDeclareGlobals(LOperand* context) { + inputs_[0] = context; + } + + DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals, "declare-globals") + DECLARE_HYDROGEN_ACCESSOR(DeclareGlobals) +}; + + class LGlobalObject: public LTemplateInstruction<1, 1, 0> { public: explicit LGlobalObject(LOperand* context) { @@ -1979,42 +1995,42 @@ class LCheckNonSmi: public LTemplateInstruction<0, 1, 0> { }; -class LArrayLiteral: public LTemplateInstruction<1, 1, 0> { +class LFastLiteral: public LTemplateInstruction<1, 1, 0> { public: - explicit LArrayLiteral(LOperand* context) { + explicit LFastLiteral(LOperand* context) { inputs_[0] = context; } LOperand* context() { return inputs_[0]; } - DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral, "array-literal") - DECLARE_HYDROGEN_ACCESSOR(ArrayLiteral) + DECLARE_CONCRETE_INSTRUCTION(FastLiteral, "fast-literal") + DECLARE_HYDROGEN_ACCESSOR(FastLiteral) }; -class LObjectLiteralFast: public LTemplateInstruction<1, 1, 0> { +class LArrayLiteral: public LTemplateInstruction<1, 1, 0> { public: - explicit LObjectLiteralFast(LOperand* context) { + explicit LArrayLiteral(LOperand* context) { inputs_[0] = context; } LOperand* context() { return inputs_[0]; } - DECLARE_CONCRETE_INSTRUCTION(ObjectLiteralFast, "object-literal-fast") - 
DECLARE_HYDROGEN_ACCESSOR(ObjectLiteralFast) + DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral, "array-literal") + DECLARE_HYDROGEN_ACCESSOR(ArrayLiteral) }; -class LObjectLiteralGeneric: public LTemplateInstruction<1, 1, 0> { +class LObjectLiteral: public LTemplateInstruction<1, 1, 0> { public: - explicit LObjectLiteralGeneric(LOperand* context) { + explicit LObjectLiteral(LOperand* context) { inputs_[0] = context; } LOperand* context() { return inputs_[0]; } - DECLARE_CONCRETE_INSTRUCTION(ObjectLiteralGeneric, "object-literal-generic") - DECLARE_HYDROGEN_ACCESSOR(ObjectLiteralGeneric) + DECLARE_CONCRETE_INSTRUCTION(ObjectLiteral, "object-literal") + DECLARE_HYDROGEN_ACCESSOR(ObjectLiteral) }; @@ -2156,6 +2172,64 @@ class LIn: public LTemplateInstruction<1, 3, 0> { }; +class LForInPrepareMap: public LTemplateInstruction<1, 2, 0> { + public: + LForInPrepareMap(LOperand* context, LOperand* object) { + inputs_[0] = context; + inputs_[1] = object; + } + + LOperand* context() { return inputs_[0]; } + LOperand* object() { return inputs_[1]; } + + DECLARE_CONCRETE_INSTRUCTION(ForInPrepareMap, "for-in-prepare-map") +}; + + +class LForInCacheArray: public LTemplateInstruction<1, 1, 0> { + public: + explicit LForInCacheArray(LOperand* map) { + inputs_[0] = map; + } + + LOperand* map() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(ForInCacheArray, "for-in-cache-array") + + int idx() { + return HForInCacheArray::cast(this->hydrogen_value())->idx(); + } +}; + + +class LCheckMapValue: public LTemplateInstruction<0, 2, 0> { + public: + LCheckMapValue(LOperand* value, LOperand* map) { + inputs_[0] = value; + inputs_[1] = map; + } + + LOperand* value() { return inputs_[0]; } + LOperand* map() { return inputs_[1]; } + + DECLARE_CONCRETE_INSTRUCTION(CheckMapValue, "check-map-value") +}; + + +class LLoadFieldByIndex: public LTemplateInstruction<1, 2, 0> { + public: + LLoadFieldByIndex(LOperand* object, LOperand* index) { + inputs_[0] = object; + inputs_[1] = index; + } + + LOperand* object() { return inputs_[0]; } + LOperand* index() { return inputs_[1]; } + + DECLARE_CONCRETE_INSTRUCTION(LoadFieldByIndex, "load-field-by-index") +}; + + class LChunkBuilder; class LChunk: public ZoneObject { public: diff --git a/deps/v8/src/ia32/macro-assembler-ia32.cc b/deps/v8/src/ia32/macro-assembler-ia32.cc index 9986c3ed86..60e38a6c13 100644 --- a/deps/v8/src/ia32/macro-assembler-ia32.cc +++ b/deps/v8/src/ia32/macro-assembler-ia32.cc @@ -862,8 +862,7 @@ void MacroAssembler::Throw(Register value) { } -void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type, - Register value) { +void MacroAssembler::ThrowUncatchable(Register value) { // Adjust this code if not the case. STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize); STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); @@ -873,21 +872,9 @@ void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type, STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize); // The exception is expected in eax. - if (type == OUT_OF_MEMORY) { - // Set external caught exception to false. - ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress, - isolate()); - mov(Operand::StaticVariable(external_caught), Immediate(false)); - - // Set pending exception and eax to out of memory exception. 
- ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
- isolate());
- mov(eax, reinterpret_cast<int32_t>(Failure::OutOfMemoryException()));
- mov(Operand::StaticVariable(pending_exception), eax);
- } else if (!value.is(eax)) {
+ if (!value.is(eax)) {
 mov(eax, value);
 }
- // Drop the stack pointer to the top of the top stack handler.
 ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
 mov(esp, Operand::StaticVariable(handler_address));
@@ -2789,6 +2776,46 @@ void MacroAssembler::EnsureNotWhite(
 bind(&done);
 }
+
+void MacroAssembler::CheckEnumCache(Label* call_runtime) {
+ Label next;
+ mov(ecx, eax);
+ bind(&next);
+
+ // Check that there are no elements. Register ecx contains the
+ // current JS object we've reached through the prototype chain.
+ cmp(FieldOperand(ecx, JSObject::kElementsOffset),
+ isolate()->factory()->empty_fixed_array());
+ j(not_equal, call_runtime);
+
+ // Check that instance descriptors are not empty so that we can
+ // check for an enum cache. Leave the map in ebx for the subsequent
+ // prototype load.
+ mov(ebx, FieldOperand(ecx, HeapObject::kMapOffset));
+ mov(edx, FieldOperand(ebx, Map::kInstanceDescriptorsOrBitField3Offset));
+ JumpIfSmi(edx, call_runtime);
+
+ // Check that there is an enum cache in the non-empty instance
+ // descriptors (edx). This is the case if the next enumeration
+ // index field does not contain a smi.
+ mov(edx, FieldOperand(edx, DescriptorArray::kEnumerationIndexOffset));
+ JumpIfSmi(edx, call_runtime);
+
+ // For all objects but the receiver, check that the cache is empty.
+ Label check_prototype;
+ cmp(ecx, eax);
+ j(equal, &check_prototype, Label::kNear);
+ mov(edx, FieldOperand(edx, DescriptorArray::kEnumCacheBridgeCacheOffset));
+ cmp(edx, isolate()->factory()->empty_fixed_array());
+ j(not_equal, call_runtime);
+
+ // Load the prototype from the map and loop if non-null.
+ bind(&check_prototype);
+ mov(ecx, FieldOperand(ebx, Map::kPrototypeOffset));
+ cmp(ecx, isolate()->factory()->null_value());
+ j(not_equal, &next);
+}
+
 } } // namespace v8::internal
 #endif // V8_TARGET_ARCH_IA32
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.h b/deps/v8/src/ia32/macro-assembler-ia32.h
index b06d801b64..66d1ce7d38 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.h
+++ b/deps/v8/src/ia32/macro-assembler-ia32.h
@@ -496,10 +496,11 @@ class MacroAssembler: public Assembler {
 // Unlink the stack handler on top of the stack from the try handler chain.
 void PopTryHandler();
- // Activate the top handler in the try hander chain.
+ // Throw to the top handler in the try handler chain.
 void Throw(Register value);
- void ThrowUncatchable(UncatchableExceptionType type, Register value);
+ // Throw past all JS frames to the top JS entry frame.
+ void ThrowUncatchable(Register value);
// ---------------------------------------------------------------------------
// Inline caching support
@@ -828,6 +829,10 @@ class MacroAssembler: public Assembler {
 void EnterFrame(StackFrame::Type type);
 void LeaveFrame(StackFrame::Type type);
+ // Expects object in eax and returns map with validated enum cache
+ // in eax. Assumes that any other register can be used as a scratch.
+ void CheckEnumCache(Label* call_runtime); + private: bool generating_stub_; bool allow_stub_calls_; diff --git a/deps/v8/src/ia32/stub-cache-ia32.cc b/deps/v8/src/ia32/stub-cache-ia32.cc index 9717869b5e..56484af309 100644 --- a/deps/v8/src/ia32/stub-cache-ia32.cc +++ b/deps/v8/src/ia32/stub-cache-ia32.cc @@ -2591,7 +2591,7 @@ Handle<Code> KeyedStoreStubCompiler::CompileStoreElement( ElementsKind elements_kind = receiver_map->elements_kind(); bool is_jsarray = receiver_map->instance_type() == JS_ARRAY_TYPE; Handle<Code> stub = - KeyedStoreElementStub(is_jsarray, elements_kind).GetCode(); + KeyedStoreElementStub(is_jsarray, elements_kind, grow_mode_).GetCode(); __ DispatchMap(edx, receiver_map, stub, DO_SMI_CHECK); @@ -3718,14 +3718,16 @@ void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement( void KeyedStoreStubCompiler::GenerateStoreFastElement( MacroAssembler* masm, bool is_js_array, - ElementsKind elements_kind) { + ElementsKind elements_kind, + KeyedAccessGrowMode grow_mode) { // ----------- S t a t e ------------- // -- eax : value // -- ecx : key // -- edx : receiver // -- esp[0] : return address // ----------------------------------- - Label miss_force_generic, transition_elements_kind; + Label miss_force_generic, grow, slow, transition_elements_kind; + Label check_capacity, prepare_slow, finish_store, commit_backing_store; // This stub is meant to be tail-jumped to, the receiver must already // have been verified by the caller to not be a smi. @@ -3733,24 +3735,32 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement( // Check that the key is a smi. __ JumpIfNotSmi(ecx, &miss_force_generic); + if (elements_kind == FAST_SMI_ONLY_ELEMENTS) { + __ JumpIfNotSmi(eax, &transition_elements_kind); + } + // Get the elements array and make sure it is a fast element array, not 'cow'. __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset)); - __ cmp(FieldOperand(edi, HeapObject::kMapOffset), - Immediate(masm->isolate()->factory()->fixed_array_map())); - __ j(not_equal, &miss_force_generic); - if (is_js_array) { // Check that the key is within bounds. __ cmp(ecx, FieldOperand(edx, JSArray::kLengthOffset)); // smis. - __ j(above_equal, &miss_force_generic); + if (grow_mode == ALLOW_JSARRAY_GROWTH) { + __ j(above_equal, &grow); + } else { + __ j(above_equal, &miss_force_generic); + } } else { // Check that the key is within bounds. __ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset)); // smis. __ j(above_equal, &miss_force_generic); } + __ cmp(FieldOperand(edi, HeapObject::kMapOffset), + Immediate(masm->isolate()->factory()->fixed_array_map())); + __ j(not_equal, &miss_force_generic); + + __ bind(&finish_store); if (elements_kind == FAST_SMI_ONLY_ELEMENTS) { - __ JumpIfNotSmi(eax, &transition_elements_kind); // ecx is a smi, use times_half_pointer_size instead of // times_pointer_size __ mov(FieldOperand(edi, @@ -3768,8 +3778,8 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement( FixedArray::kHeaderSize)); __ mov(Operand(ecx, 0), eax); // Make sure to preserve the value in register eax. - __ mov(edx, eax); - __ RecordWrite(edi, ecx, edx, kDontSaveFPRegs); + __ mov(ebx, eax); + __ RecordWrite(edi, ecx, ebx, kDontSaveFPRegs); } // Done. @@ -3785,19 +3795,94 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement( __ bind(&transition_elements_kind); Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss(); __ jmp(ic_miss, RelocInfo::CODE_TARGET); + + if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) { + // Handle transition requiring the array to grow. 
+ __ bind(&grow); + + // Make sure the array is only growing by a single element, anything else + // must be handled by the runtime. Flags are already set by previous + // compare. + __ j(not_equal, &miss_force_generic); + + // Check for the empty array, and preallocate a small backing store if + // possible. + __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset)); + __ cmp(edi, Immediate(masm->isolate()->factory()->empty_fixed_array())); + __ j(not_equal, &check_capacity); + + int size = FixedArray::SizeFor(JSArray::kPreallocatedArrayElements); + __ AllocateInNewSpace(size, edi, ebx, ecx, &prepare_slow, TAG_OBJECT); + // Restore the key, which is known to be the array length. + + // eax: value + // ecx: key + // edx: receiver + // edi: elements + // Make sure that the backing store can hold additional elements. + __ mov(FieldOperand(edi, JSObject::kMapOffset), + Immediate(masm->isolate()->factory()->fixed_array_map())); + __ mov(FieldOperand(edi, FixedArray::kLengthOffset), + Immediate(Smi::FromInt(JSArray::kPreallocatedArrayElements))); + __ mov(ebx, Immediate(masm->isolate()->factory()->the_hole_value())); + for (int i = 1; i < JSArray::kPreallocatedArrayElements; ++i) { + __ mov(FieldOperand(edi, FixedArray::SizeFor(i)), ebx); + } + + // Store the element at index zero. + __ mov(FieldOperand(edi, FixedArray::SizeFor(0)), eax); + + // Install the new backing store in the JSArray. + __ mov(FieldOperand(edx, JSObject::kElementsOffset), edi); + __ RecordWriteField(edx, JSObject::kElementsOffset, edi, ebx, + kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); + + // Increment the length of the array. + __ mov(FieldOperand(edx, JSArray::kLengthOffset), + Immediate(Smi::FromInt(1))); + __ ret(0); + + __ bind(&check_capacity); + __ cmp(FieldOperand(edi, HeapObject::kMapOffset), + Immediate(masm->isolate()->factory()->fixed_cow_array_map())); + __ j(equal, &miss_force_generic); + + // eax: value + // ecx: key + // edx: receiver + // edi: elements + // Make sure that the backing store can hold additional elements. + __ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset)); + __ j(above_equal, &slow); + + // Grow the array and finish the store. + __ add(FieldOperand(edx, JSArray::kLengthOffset), + Immediate(Smi::FromInt(1))); + __ jmp(&finish_store); + + __ bind(&prepare_slow); + // Restore the key, which is known to be the array length. + __ mov(ecx, Immediate(0)); + + __ bind(&slow); + Handle<Code> ic_slow = masm->isolate()->builtins()->KeyedStoreIC_Slow(); + __ jmp(ic_slow, RelocInfo::CODE_TARGET); + } } void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( MacroAssembler* masm, - bool is_js_array) { + bool is_js_array, + KeyedAccessGrowMode grow_mode) { // ----------- S t a t e ------------- // -- eax : value // -- ecx : key // -- edx : receiver // -- esp[0] : return address // ----------------------------------- - Label miss_force_generic, transition_elements_kind; + Label miss_force_generic, transition_elements_kind, grow, slow; + Label check_capacity, prepare_slow, finish_store, commit_backing_store; // This stub is meant to be tail-jumped to, the receiver must already // have been verified by the caller to not be a smi. @@ -3812,19 +3897,20 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( if (is_js_array) { // Check that the key is within bounds. __ cmp(ecx, FieldOperand(edx, JSArray::kLengthOffset)); // smis. 
+ if (grow_mode == ALLOW_JSARRAY_GROWTH) {
+ __ j(above_equal, &grow);
+ } else {
+ __ j(above_equal, &miss_force_generic);
+ }
 } else {
 // Check that the key is within bounds.
 __ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset)); // smis.
+ __ j(above_equal, &miss_force_generic);
 }
- __ j(above_equal, &miss_force_generic);
- __ StoreNumberToDoubleElements(eax,
- edi,
- ecx,
- edx,
- xmm0,
- &transition_elements_kind,
- true);
+ __ bind(&finish_store);
+ __ StoreNumberToDoubleElements(eax, edi, ecx, edx, xmm0,
+ &transition_elements_kind, true);
 __ ret(0);
 // Handle store cache miss, replacing the ic with the generic stub.
@@ -3837,6 +3923,79 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
 __ bind(&transition_elements_kind);
 Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
 __ jmp(ic_miss, RelocInfo::CODE_TARGET);
+
+ if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
+ // Handle transition requiring the array to grow.
+ __ bind(&grow);
+
+ // Make sure the array is only growing by a single element, anything else
+ // must be handled by the runtime. Flags are already set by previous
+ // compare.
+ __ j(not_equal, &miss_force_generic);
+
+ // Transition on values that can't be stored in a FixedDoubleArray.
+ Label value_is_smi;
+ __ JumpIfSmi(eax, &value_is_smi);
+ __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
+ Immediate(Handle<Map>(masm->isolate()->heap()->heap_number_map())));
+ __ j(not_equal, &transition_elements_kind);
+ __ bind(&value_is_smi);
+
+ // Check for the empty array, and preallocate a small backing store if
+ // possible.
+ __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
+ __ cmp(edi, Immediate(masm->isolate()->factory()->empty_fixed_array()));
+ __ j(not_equal, &check_capacity);
+
+ int size = FixedDoubleArray::SizeFor(JSArray::kPreallocatedArrayElements);
+ __ AllocateInNewSpace(size, edi, ebx, ecx, &prepare_slow, TAG_OBJECT);
+ // Restore the key, which is known to be the array length.
+ __ mov(ecx, Immediate(0));
+
+ // eax: value
+ // ecx: key
+ // edx: receiver
+ // edi: elements
+ // Initialize the new FixedDoubleArray. Leave elements uninitialized for
+ // efficiency; they are guaranteed to be initialized before use.
+ __ mov(FieldOperand(edi, JSObject::kMapOffset),
+ Immediate(masm->isolate()->factory()->fixed_double_array_map()));
+ __ mov(FieldOperand(edi, FixedDoubleArray::kLengthOffset),
+ Immediate(Smi::FromInt(JSArray::kPreallocatedArrayElements)));
+
+ // Install the new backing store in the JSArray.
+ __ mov(FieldOperand(edx, JSObject::kElementsOffset), edi);
+ __ RecordWriteField(edx, JSObject::kElementsOffset, edi, ebx,
+ kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+
+ // Increment the length of the array.
+ __ add(FieldOperand(edx, JSArray::kLengthOffset),
+ Immediate(Smi::FromInt(1)));
+ __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
+ __ jmp(&finish_store);
+
+ __ bind(&check_capacity);
+ // eax: value
+ // ecx: key
+ // edx: receiver
+ // edi: elements
+ // Make sure that the backing store can hold additional elements.
+ __ cmp(ecx, FieldOperand(edi, FixedDoubleArray::kLengthOffset));
+ __ j(above_equal, &slow);
+
+ // Grow the array and finish the store.
+ __ add(FieldOperand(edx, JSArray::kLengthOffset),
+ Immediate(Smi::FromInt(1)));
+ __ jmp(&finish_store);
+
+ __ bind(&prepare_slow);
+ // Restore the key, which is known to be the array length.
+ __ mov(ecx, Immediate(0)); + + __ bind(&slow); + Handle<Code> ic_slow = masm->isolate()->builtins()->KeyedStoreIC_Slow(); + __ jmp(ic_slow, RelocInfo::CODE_TARGET); + } } diff --git a/deps/v8/src/ic-inl.h b/deps/v8/src/ic-inl.h index 4daf944434..6a86921a41 100644 --- a/deps/v8/src/ic-inl.h +++ b/deps/v8/src/ic-inl.h @@ -79,19 +79,20 @@ Code* IC::GetTargetAtAddress(Address address) { void IC::SetTargetAtAddress(Address address, Code* target) { ASSERT(target->is_inline_cache_stub() || target->is_compare_ic_stub()); + Code* old_target = GetTargetAtAddress(address); #ifdef DEBUG // STORE_IC and KEYED_STORE_IC use Code::extra_ic_state() to mark // ICs as strict mode. The strict-ness of the IC must be preserved. - Code* old_target = GetTargetAtAddress(address); if (old_target->kind() == Code::STORE_IC || old_target->kind() == Code::KEYED_STORE_IC) { - ASSERT(old_target->extra_ic_state() == target->extra_ic_state()); + ASSERT(Code::GetStrictMode(old_target->extra_ic_state()) == + Code::GetStrictMode(target->extra_ic_state())); } #endif Assembler::set_target_address_at(address, target->instruction_start()); target->GetHeap()->incremental_marking()->RecordCodeTargetPatch(address, target); - PostPatching(); + PostPatching(address, target, old_target); } diff --git a/deps/v8/src/ic.cc b/deps/v8/src/ic.cc index 9846984dd3..642a9e2731 100644 --- a/deps/v8/src/ic.cc +++ b/deps/v8/src/ic.cc @@ -81,9 +81,13 @@ void IC::TraceIC(const char* type, } } JavaScriptFrame::PrintTop(stdout, false, true); - PrintF(" (%c->%c)", + bool new_can_grow = + Code::GetKeyedAccessGrowMode(new_target->extra_ic_state()) == + ALLOW_JSARRAY_GROWTH; + PrintF(" (%c->%c%s)", TransitionMarkFromState(old_state), - TransitionMarkFromState(new_state)); + TransitionMarkFromState(new_state), + new_can_grow ? 
".GROW" : ""); name->Print(); PrintF("]\n"); } @@ -292,7 +296,32 @@ Failure* IC::ReferenceError(const char* type, Handle<String> name) { } -void IC::PostPatching() { +void IC::PostPatching(Address address, Code* target, Code* old_target) { + if (FLAG_type_info_threshold > 0) { + if (old_target->is_inline_cache_stub() && + target->is_inline_cache_stub()) { + State old_state = old_target->ic_state(); + State new_state = target->ic_state(); + bool was_uninitialized = + old_state == UNINITIALIZED || old_state == PREMONOMORPHIC; + bool is_uninitialized = + new_state == UNINITIALIZED || new_state == PREMONOMORPHIC; + int delta = 0; + if (was_uninitialized && !is_uninitialized) { + delta = 1; + } else if (!was_uninitialized && is_uninitialized) { + delta = -1; + } + if (delta != 0) { + Code* host = target->GetHeap()->isolate()-> + inner_pointer_to_code_cache()->GetCacheEntry(address)->code; + TypeFeedbackInfo* info = + TypeFeedbackInfo::cast(host->type_feedback_info()); + info->set_ic_with_typeinfo_count( + info->ic_with_typeinfo_count() + delta); + } + } + } if (FLAG_watch_ic_patching) { Isolate::Current()->runtime_profiler()->NotifyICChanged(); // We do not want to optimize until the ICs have settled down, @@ -309,7 +338,9 @@ void IC::PostPatching() { if (raw_frame->is_java_script()) { JSFunction* function = JSFunction::cast(JavaScriptFrame::cast(raw_frame)->function()); - function->shared()->set_profiler_ticks(0); + if (function->IsOptimized()) continue; + SharedFunctionInfo* shared = function->shared(); + shared->set_profiler_ticks(0); } it.Advance(); } @@ -375,7 +406,7 @@ void LoadIC::Clear(Address address, Code* target) { void StoreIC::Clear(Address address, Code* target) { if (target->ic_state() == UNINITIALIZED) return; SetTargetAtAddress(address, - (target->extra_ic_state() == kStrictMode) + (Code::GetStrictMode(target->extra_ic_state()) == kStrictMode) ? initialize_stub_strict() : initialize_stub()); } @@ -384,7 +415,7 @@ void StoreIC::Clear(Address address, Code* target) { void KeyedStoreIC::Clear(Address address, Code* target) { if (target->ic_state() == UNINITIALIZED) return; SetTargetAtAddress(address, - (target->extra_ic_state() == kStrictMode) + (Code::GetStrictMode(target->extra_ic_state()) == kStrictMode) ? 
initialize_stub_strict() : initialize_stub()); } @@ -996,19 +1027,22 @@ void LoadIC::UpdateCaches(LookupResult* lookup, Handle<Code> KeyedLoadIC::GetElementStubWithoutMapCheck( bool is_js_array, - ElementsKind elements_kind) { + ElementsKind elements_kind, + KeyedAccessGrowMode grow_mode) { + ASSERT(grow_mode == DO_NOT_ALLOW_JSARRAY_GROWTH); return KeyedLoadElementStub(elements_kind).GetCode(); } Handle<Code> KeyedLoadIC::ComputePolymorphicStub( MapHandleList* receiver_maps, - StrictModeFlag strict_mode) { + StrictModeFlag strict_mode, + KeyedAccessGrowMode growth_mode) { CodeHandleList handler_ics(receiver_maps->length()); for (int i = 0; i < receiver_maps->length(); ++i) { Handle<Map> receiver_map = receiver_maps->at(i); Handle<Code> cached_stub = ComputeMonomorphicStubWithoutMapCheck( - receiver_map, strict_mode); + receiver_map, strict_mode, growth_mode); handler_ics.Add(cached_stub); } KeyedLoadStubCompiler compiler(isolate()); @@ -1493,12 +1527,9 @@ Handle<Code> KeyedIC::ComputeStub(Handle<JSObject> receiver, StrictModeFlag strict_mode, Handle<Code> generic_stub) { State ic_state = target()->ic_state(); - if ((ic_state == UNINITIALIZED || ic_state == PREMONOMORPHIC) && - !IsTransitionStubKind(stub_kind)) { - return ComputeMonomorphicStub( - receiver, stub_kind, strict_mode, generic_stub); - } - ASSERT(target() != *generic_stub); + KeyedAccessGrowMode grow_mode = IsGrowStubKind(stub_kind) + ? ALLOW_JSARRAY_GROWTH + : DO_NOT_ALLOW_JSARRAY_GROWTH; // Don't handle megamorphic property accesses for INTERCEPTORS or CALLBACKS // via megamorphic stubs, since they don't have a map in their relocation info @@ -1508,15 +1539,39 @@ Handle<Code> KeyedIC::ComputeStub(Handle<JSObject> receiver, return generic_stub; } - // Determine the list of receiver maps that this call site has seen, - // adding the map that was just encountered. + bool monomorphic = false; MapHandleList target_receiver_maps; - Handle<Map> receiver_map(receiver->map()); - if (ic_state == UNINITIALIZED || ic_state == PREMONOMORPHIC) { - target_receiver_maps.Add(receiver_map); - } else { + if (ic_state != UNINITIALIZED && ic_state != PREMONOMORPHIC) { GetReceiverMapsForStub(Handle<Code>(target()), &target_receiver_maps); } + if (!IsTransitionStubKind(stub_kind)) { + if (ic_state == UNINITIALIZED || ic_state == PREMONOMORPHIC) { + monomorphic = true; + } else { + if (ic_state == MONOMORPHIC) { + // The first time a receiver is seen that is a transitioned version of + // the previous monomorphic receiver type, assume the new ElementsKind + // is the monomorphic type. This benefits global arrays that only + // transition once, and all call sites accessing them are faster if they + // remain monomorphic. If this optimistic assumption is not true, the IC + // will miss again and it will become polymorphic and support both the + // untransitioned and transitioned maps. + monomorphic = IsMoreGeneralElementsKindTransition( + target_receiver_maps.at(0)->elements_kind(), + receiver->GetElementsKind()); + } + } + } + + if (monomorphic) { + return ComputeMonomorphicStub( + receiver, stub_kind, strict_mode, generic_stub); + } + ASSERT(target() != *generic_stub); + + // Determine the list of receiver maps that this call site has seen, + // adding the map that was just encountered. 
+ Handle<Map> receiver_map(receiver->map()); bool map_added = AddOneReceiverMapIfMissing(&target_receiver_maps, receiver_map); if (IsTransitionStubKind(stub_kind)) { @@ -1537,14 +1592,21 @@ Handle<Code> KeyedIC::ComputeStub(Handle<JSObject> receiver, return generic_stub; } + if ((Code::GetKeyedAccessGrowMode(target()->extra_ic_state()) == + ALLOW_JSARRAY_GROWTH)) { + grow_mode = ALLOW_JSARRAY_GROWTH; + } + Handle<PolymorphicCodeCache> cache = isolate()->factory()->polymorphic_code_cache(); - Code::Flags flags = Code::ComputeFlags(kind(), MEGAMORPHIC, strict_mode); + Code::ExtraICState extra_state = Code::ComputeExtraICState(grow_mode, + strict_mode); + Code::Flags flags = Code::ComputeFlags(kind(), MEGAMORPHIC, extra_state); Handle<Object> probe = cache->Lookup(&target_receiver_maps, flags); if (probe->IsCode()) return Handle<Code>::cast(probe); Handle<Code> stub = - ComputePolymorphicStub(&target_receiver_maps, strict_mode); + ComputePolymorphicStub(&target_receiver_maps, strict_mode, grow_mode); PolymorphicCodeCache::Update(cache, &target_receiver_maps, flags, stub); return stub; } @@ -1552,7 +1614,8 @@ Handle<Code> KeyedIC::ComputeStub(Handle<JSObject> receiver, Handle<Code> KeyedIC::ComputeMonomorphicStubWithoutMapCheck( Handle<Map> receiver_map, - StrictModeFlag strict_mode) { + StrictModeFlag strict_mode, + KeyedAccessGrowMode grow_mode) { if ((receiver_map->instance_type() & kNotStringTag) == 0) { ASSERT(!string_stub().is_null()); return string_stub(); @@ -1564,7 +1627,8 @@ Handle<Code> KeyedIC::ComputeMonomorphicStubWithoutMapCheck( receiver_map->has_external_array_elements()); bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE; return GetElementStubWithoutMapCheck(is_js_array, - receiver_map->elements_kind()); + receiver_map->elements_kind(), + grow_mode); } } @@ -1591,9 +1655,12 @@ Handle<Map> KeyedIC::ComputeTransitionedMap(Handle<JSObject> receiver, switch (stub_kind) { case KeyedIC::STORE_TRANSITION_SMI_TO_OBJECT: case KeyedIC::STORE_TRANSITION_DOUBLE_TO_OBJECT: + case KeyedIC::STORE_AND_GROW_TRANSITION_SMI_TO_OBJECT: + case KeyedIC::STORE_AND_GROW_TRANSITION_DOUBLE_TO_OBJECT: return JSObject::GetElementsTransitionMap(receiver, FAST_ELEMENTS); break; case KeyedIC::STORE_TRANSITION_SMI_TO_DOUBLE: + case KeyedIC::STORE_AND_GROW_TRANSITION_SMI_TO_DOUBLE: return JSObject::GetElementsTransitionMap(receiver, FAST_DOUBLE_ELEMENTS); break; default: @@ -1605,13 +1672,16 @@ Handle<Map> KeyedIC::ComputeTransitionedMap(Handle<JSObject> receiver, Handle<Code> KeyedStoreIC::GetElementStubWithoutMapCheck( bool is_js_array, - ElementsKind elements_kind) { - return KeyedStoreElementStub(is_js_array, elements_kind).GetCode(); + ElementsKind elements_kind, + KeyedAccessGrowMode grow_mode) { + return KeyedStoreElementStub(is_js_array, elements_kind, grow_mode).GetCode(); } -Handle<Code> KeyedStoreIC::ComputePolymorphicStub(MapHandleList* receiver_maps, - StrictModeFlag strict_mode) { +Handle<Code> KeyedStoreIC::ComputePolymorphicStub( + MapHandleList* receiver_maps, + StrictModeFlag strict_mode, + KeyedAccessGrowMode grow_mode) { // Collect MONOMORPHIC stubs for all target_receiver_maps. 
CodeHandleList handler_ics(receiver_maps->length()); MapHandleList transitioned_maps(receiver_maps->length()); @@ -1625,16 +1695,17 @@ Handle<Code> KeyedStoreIC::ComputePolymorphicStub(MapHandleList* receiver_maps, receiver_map->elements_kind(), // original elements_kind transitioned_map->elements_kind(), receiver_map->instance_type() == JS_ARRAY_TYPE, // is_js_array - strict_mode).GetCode(); + strict_mode, grow_mode).GetCode(); } else { cached_stub = ComputeMonomorphicStubWithoutMapCheck(receiver_map, - strict_mode); + strict_mode, + grow_mode); } ASSERT(!cached_stub.is_null()); handler_ics.Add(cached_stub); transitioned_maps.Add(transitioned_map); } - KeyedStoreStubCompiler compiler(isolate(), strict_mode); + KeyedStoreStubCompiler compiler(isolate(), strict_mode, grow_mode); Handle<Code> code = compiler.CompileStorePolymorphic( receiver_maps, &handler_ics, &transitioned_maps); isolate()->counters()->keyed_store_polymorphic_stubs()->Increment(); @@ -1644,6 +1715,48 @@ Handle<Code> KeyedStoreIC::ComputePolymorphicStub(MapHandleList* receiver_maps, } +KeyedIC::StubKind KeyedStoreIC::GetStubKind(Handle<JSObject> receiver, + Handle<Object> key, + Handle<Object> value) { + ASSERT(key->IsSmi()); + int index = Smi::cast(*key)->value(); + bool allow_growth = receiver->IsJSArray() && + JSArray::cast(*receiver)->length()->IsSmi() && + index >= Smi::cast(JSArray::cast(*receiver)->length())->value(); + + if (allow_growth) { + // Handle growing array in stub if necessary. + if (receiver->HasFastSmiOnlyElements()) { + if (value->IsHeapNumber()) { + return STORE_AND_GROW_TRANSITION_SMI_TO_DOUBLE; + } + if (value->IsHeapObject()) { + return STORE_AND_GROW_TRANSITION_SMI_TO_OBJECT; + } + } else if (receiver->HasFastDoubleElements()) { + if (!value->IsSmi() && !value->IsHeapNumber()) { + return STORE_AND_GROW_TRANSITION_DOUBLE_TO_OBJECT; + } + } + return STORE_AND_GROW_NO_TRANSITION; + } else { + // Handle only in-bounds elements accesses. 
+ if (receiver->HasFastSmiOnlyElements()) { + if (value->IsHeapNumber()) { + return STORE_TRANSITION_SMI_TO_DOUBLE; + } else if (value->IsHeapObject()) { + return STORE_TRANSITION_SMI_TO_OBJECT; + } + } else if (receiver->HasFastDoubleElements()) { + if (!value->IsSmi() && !value->IsHeapNumber()) { + return STORE_TRANSITION_DOUBLE_TO_OBJECT; + } + } + return STORE_NO_TRANSITION; + } +} + + MaybeObject* KeyedStoreIC::Store(State state, StrictModeFlag strict_mode, Handle<Object> object, @@ -1706,18 +1819,7 @@ MaybeObject* KeyedStoreIC::Store(State state, stub = non_strict_arguments_stub(); } else if (!force_generic) { if (key->IsSmi() && (target() != *non_strict_arguments_stub())) { - StubKind stub_kind = STORE_NO_TRANSITION; - if (receiver->GetElementsKind() == FAST_SMI_ONLY_ELEMENTS) { - if (value->IsHeapNumber()) { - stub_kind = STORE_TRANSITION_SMI_TO_DOUBLE; - } else if (value->IsHeapObject()) { - stub_kind = STORE_TRANSITION_SMI_TO_OBJECT; - } - } else if (receiver->GetElementsKind() == FAST_DOUBLE_ELEMENTS) { - if (!value->IsSmi() && !value->IsHeapNumber()) { - stub_kind = STORE_TRANSITION_DOUBLE_TO_OBJECT; - } - } + StubKind stub_kind = GetStubKind(receiver, key, value); stub = ComputeStub(receiver, stub_kind, strict_mode, stub); } } else { @@ -1900,7 +2002,7 @@ RUNTIME_FUNCTION(MaybeObject*, StoreIC_Miss) { IC::State state = IC::StateFrom(ic.target(), args[0], args[1]); Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state(); return ic.Store(state, - static_cast<StrictModeFlag>(extra_ic_state & kStrictMode), + Code::GetStrictMode(extra_ic_state), args.at<Object>(0), args.at<String>(1), args.at<Object>(2)); @@ -1976,7 +2078,7 @@ RUNTIME_FUNCTION(MaybeObject*, KeyedStoreIC_Miss) { IC::State state = IC::StateFrom(ic.target(), args[0], args[1]); Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state(); return ic.Store(state, - static_cast<StrictModeFlag>(extra_ic_state & kStrictMode), + Code::GetStrictMode(extra_ic_state), args.at<Object>(0), args.at<Object>(1), args.at<Object>(2), @@ -1992,8 +2094,7 @@ RUNTIME_FUNCTION(MaybeObject*, KeyedStoreIC_Slow) { Handle<Object> object = args.at<Object>(0); Handle<Object> key = args.at<Object>(1); Handle<Object> value = args.at<Object>(2); - StrictModeFlag strict_mode = - static_cast<StrictModeFlag>(extra_ic_state & kStrictMode); + StrictModeFlag strict_mode = Code::GetStrictMode(extra_ic_state); return Runtime::SetObjectProperty(isolate, object, key, @@ -2010,7 +2111,7 @@ RUNTIME_FUNCTION(MaybeObject*, KeyedStoreIC_MissForceGeneric) { IC::State state = IC::StateFrom(ic.target(), args[0], args[1]); Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state(); return ic.Store(state, - static_cast<StrictModeFlag>(extra_ic_state & kStrictMode), + Code::GetStrictMode(extra_ic_state), args.at<Object>(0), args.at<Object>(1), args.at<Object>(2), diff --git a/deps/v8/src/ic.h b/deps/v8/src/ic.h index d2c98c0869..56625525d4 100644 --- a/deps/v8/src/ic.h +++ b/deps/v8/src/ic.h @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -165,7 +165,7 @@ class IC { // Access the target code for the given IC address. 
static inline Code* GetTargetAtAddress(Address address); static inline void SetTargetAtAddress(Address address, Code* target); - static void PostPatching(); + static void PostPatching(Address address, Code* target, Code* old_target); private: // Frame pointer for the frame that uses (calls) the IC. @@ -377,14 +377,48 @@ class KeyedIC: public IC { STORE_NO_TRANSITION, STORE_TRANSITION_SMI_TO_OBJECT, STORE_TRANSITION_SMI_TO_DOUBLE, - STORE_TRANSITION_DOUBLE_TO_OBJECT + STORE_TRANSITION_DOUBLE_TO_OBJECT, + STORE_AND_GROW_NO_TRANSITION, + STORE_AND_GROW_TRANSITION_SMI_TO_OBJECT, + STORE_AND_GROW_TRANSITION_SMI_TO_DOUBLE, + STORE_AND_GROW_TRANSITION_DOUBLE_TO_OBJECT }; + + static const int kGrowICDelta = STORE_AND_GROW_NO_TRANSITION - + STORE_NO_TRANSITION; + STATIC_ASSERT(kGrowICDelta == + STORE_AND_GROW_TRANSITION_SMI_TO_OBJECT - + STORE_TRANSITION_SMI_TO_OBJECT); + STATIC_ASSERT(kGrowICDelta == + STORE_AND_GROW_TRANSITION_SMI_TO_DOUBLE - + STORE_TRANSITION_SMI_TO_DOUBLE); + STATIC_ASSERT(kGrowICDelta == + STORE_AND_GROW_TRANSITION_DOUBLE_TO_OBJECT - + STORE_TRANSITION_DOUBLE_TO_OBJECT); + explicit KeyedIC(Isolate* isolate) : IC(NO_EXTRA_FRAME, isolate) {} virtual ~KeyedIC() {} + static inline KeyedAccessGrowMode GetGrowModeFromStubKind( + StubKind stub_kind) { + return (stub_kind >= STORE_AND_GROW_NO_TRANSITION) + ? ALLOW_JSARRAY_GROWTH + : DO_NOT_ALLOW_JSARRAY_GROWTH; + } + + static inline StubKind GetGrowStubKind(StubKind stub_kind) { + ASSERT(stub_kind != LOAD); + if (stub_kind < STORE_AND_GROW_NO_TRANSITION) { + stub_kind = static_cast<StubKind>(static_cast<int>(stub_kind) + + kGrowICDelta); + } + return stub_kind; + } + virtual Handle<Code> GetElementStubWithoutMapCheck( bool is_js_array, - ElementsKind elements_kind) = 0; + ElementsKind elements_kind, + KeyedAccessGrowMode grow_mode) = 0; protected: virtual Handle<Code> string_stub() { @@ -398,12 +432,15 @@ class KeyedIC: public IC { StrictModeFlag strict_mode, Handle<Code> default_stub); - virtual Handle<Code> ComputePolymorphicStub(MapHandleList* receiver_maps, - StrictModeFlag strict_mode) = 0; + virtual Handle<Code> ComputePolymorphicStub( + MapHandleList* receiver_maps, + StrictModeFlag strict_mode, + KeyedAccessGrowMode grow_mode) = 0; Handle<Code> ComputeMonomorphicStubWithoutMapCheck( Handle<Map> receiver_map, - StrictModeFlag strict_mode); + StrictModeFlag strict_mode, + KeyedAccessGrowMode grow_mode); private: void GetReceiverMapsForStub(Handle<Code> stub, MapHandleList* result); @@ -417,7 +454,12 @@ class KeyedIC: public IC { StubKind stub_kind); static bool IsTransitionStubKind(StubKind stub_kind) { - return stub_kind > STORE_NO_TRANSITION; + return stub_kind > STORE_NO_TRANSITION && + stub_kind != STORE_AND_GROW_NO_TRANSITION; + } + + static bool IsGrowStubKind(StubKind stub_kind) { + return stub_kind >= STORE_AND_GROW_NO_TRANSITION; } }; @@ -456,7 +498,8 @@ class KeyedLoadIC: public KeyedIC { virtual Handle<Code> GetElementStubWithoutMapCheck( bool is_js_array, - ElementsKind elements_kind); + ElementsKind elements_kind, + KeyedAccessGrowMode grow_mode); virtual bool IsGeneric() const { return target() == *generic_stub(); @@ -466,7 +509,8 @@ class KeyedLoadIC: public KeyedIC { virtual Code::Kind kind() const { return Code::KEYED_LOAD_IC; } virtual Handle<Code> ComputePolymorphicStub(MapHandleList* receiver_maps, - StrictModeFlag strict_mode); + StrictModeFlag strict_mode, + KeyedAccessGrowMode grow_mode); virtual Handle<Code> string_stub() { return isolate()->builtins()->KeyedLoadIC_String(); @@ -540,8 +584,8 @@ class 
StoreIC: public IC { void set_target(Code* code) { // Strict mode must be preserved across IC patching. - ASSERT((code->extra_ic_state() & kStrictMode) == - (target()->extra_ic_state() & kStrictMode)); + ASSERT(Code::GetStrictMode(code->extra_ic_state()) == + Code::GetStrictMode(target()->extra_ic_state())); IC::set_target(code); } @@ -603,7 +647,8 @@ class KeyedStoreIC: public KeyedIC { virtual Handle<Code> GetElementStubWithoutMapCheck( bool is_js_array, - ElementsKind elements_kind); + ElementsKind elements_kind, + KeyedAccessGrowMode grow_mode); virtual bool IsGeneric() const { return target() == *generic_stub() || @@ -614,7 +659,8 @@ class KeyedStoreIC: public KeyedIC { virtual Code::Kind kind() const { return Code::KEYED_STORE_IC; } virtual Handle<Code> ComputePolymorphicStub(MapHandleList* receiver_maps, - StrictModeFlag strict_mode); + StrictModeFlag strict_mode, + KeyedAccessGrowMode grow_mode); private: // Update the inline cache. @@ -627,8 +673,8 @@ class KeyedStoreIC: public KeyedIC { void set_target(Code* code) { // Strict mode must be preserved across IC patching. - ASSERT((code->extra_ic_state() & kStrictMode) == - (target()->extra_ic_state() & kStrictMode)); + ASSERT(Code::GetStrictMode(code->extra_ic_state()) == + Code::GetStrictMode(target()->extra_ic_state())); IC::set_target(code); } @@ -659,6 +705,10 @@ class KeyedStoreIC: public KeyedIC { static void Clear(Address address, Code* target); + StubKind GetStubKind(Handle<JSObject> receiver, + Handle<Object> key, + Handle<Object> value); + friend class IC; }; diff --git a/deps/v8/src/isolate.cc b/deps/v8/src/isolate.cc index 96c45b1360..128136faa0 100644 --- a/deps/v8/src/isolate.cc +++ b/deps/v8/src/isolate.cc @@ -775,10 +775,12 @@ void Isolate::ReportFailedAccessCheck(JSObject* receiver, v8::AccessType type) { HandleScope scope; Handle<JSObject> receiver_handle(receiver); Handle<Object> data(AccessCheckInfo::cast(data_obj)->data()); - thread_local_top()->failed_access_check_callback_( - v8::Utils::ToLocal(receiver_handle), - type, - v8::Utils::ToLocal(data)); + { VMState state(this, EXTERNAL); + thread_local_top()->failed_access_check_callback_( + v8::Utils::ToLocal(receiver_handle), + type, + v8::Utils::ToLocal(data)); + } } diff --git a/deps/v8/src/isolate.h b/deps/v8/src/isolate.h index 5612630c04..80edee9c7b 100644 --- a/deps/v8/src/isolate.h +++ b/deps/v8/src/isolate.h @@ -333,8 +333,6 @@ class HashMap; typedef List<HeapObject*, PreallocatedStorage> DebugObjectCache; #define ISOLATE_INIT_LIST(V) \ - /* AssertNoZoneAllocation state. */ \ - V(bool, zone_allow_allocation, true) \ /* SerializerDeserializer state. */ \ V(int, serialize_partial_snapshot_cache_length, 0) \ /* Assembler state. 
*/ \ diff --git a/deps/v8/src/jsregexp.cc b/deps/v8/src/jsregexp.cc index 18b86bafee..82b495819e 100644 --- a/deps/v8/src/jsregexp.cc +++ b/deps/v8/src/jsregexp.cc @@ -3597,22 +3597,20 @@ void RegExpEngine::DotPrint(const char* label, // ------------------------------------------------------------------- // Tree to graph conversion -static const int kSpaceRangeCount = 20; -static const int kSpaceRangeAsciiCount = 4; -static const uc16 kSpaceRanges[kSpaceRangeCount] = { 0x0009, 0x000D, 0x0020, - 0x0020, 0x00A0, 0x00A0, 0x1680, 0x1680, 0x180E, 0x180E, 0x2000, 0x200A, - 0x2028, 0x2029, 0x202F, 0x202F, 0x205F, 0x205F, 0x3000, 0x3000 }; - -static const int kWordRangeCount = 8; -static const uc16 kWordRanges[kWordRangeCount] = { '0', '9', 'A', 'Z', '_', - '_', 'a', 'z' }; - -static const int kDigitRangeCount = 2; -static const uc16 kDigitRanges[kDigitRangeCount] = { '0', '9' }; - -static const int kLineTerminatorRangeCount = 6; -static const uc16 kLineTerminatorRanges[kLineTerminatorRangeCount] = { 0x000A, - 0x000A, 0x000D, 0x000D, 0x2028, 0x2029 }; +static const uc16 kSpaceRanges[] = { 0x0009, 0x000D, 0x0020, 0x0020, 0x00A0, + 0x00A0, 0x1680, 0x1680, 0x180E, 0x180E, 0x2000, 0x200A, 0x2028, 0x2029, + 0x202F, 0x202F, 0x205F, 0x205F, 0x3000, 0x3000, 0xFEFF, 0xFEFF }; +static const int kSpaceRangeCount = ARRAY_SIZE(kSpaceRanges); + +static const uc16 kWordRanges[] = { '0', '9', 'A', 'Z', '_', '_', 'a', 'z' }; +static const int kWordRangeCount = ARRAY_SIZE(kWordRanges); + +static const uc16 kDigitRanges[] = { '0', '9' }; +static const int kDigitRangeCount = ARRAY_SIZE(kDigitRanges); + +static const uc16 kLineTerminatorRanges[] = { 0x000A, 0x000A, 0x000D, 0x000D, + 0x2028, 0x2029 }; +static const int kLineTerminatorRangeCount = ARRAY_SIZE(kLineTerminatorRanges); RegExpNode* RegExpAtom::ToNode(RegExpCompiler* compiler, RegExpNode* on_success) { diff --git a/deps/v8/src/lithium-allocator.cc b/deps/v8/src/lithium-allocator.cc index 2cab9a7665..20003f05cb 100644 --- a/deps/v8/src/lithium-allocator.cc +++ b/deps/v8/src/lithium-allocator.cc @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. 
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -110,9 +110,9 @@ bool UsePosition::RegisterIsBeneficial() const { } -void UseInterval::SplitAt(LifetimePosition pos) { +void UseInterval::SplitAt(LifetimePosition pos, Zone* zone) { ASSERT(Contains(pos) && pos.Value() != start().Value()); - UseInterval* after = new UseInterval(pos, end_); + UseInterval* after = new(zone) UseInterval(pos, end_); after->next_ = next_; next_ = after; end_ = pos; @@ -149,7 +149,7 @@ bool LiveRange::HasOverlap(UseInterval* target) const { #endif -LiveRange::LiveRange(int id) +LiveRange::LiveRange(int id, Zone* zone) : id_(id), spilled_(false), is_double_(false), @@ -161,24 +161,26 @@ LiveRange::LiveRange(int id) next_(NULL), current_interval_(NULL), last_processed_use_(NULL), - spill_operand_(new LOperand()), + spill_operand_(new(zone) LOperand()), spill_start_index_(kMaxInt) { } -void LiveRange::set_assigned_register(int reg, RegisterKind register_kind) { +void LiveRange::set_assigned_register(int reg, + RegisterKind register_kind, + Zone* zone) { ASSERT(!HasRegisterAssigned() && !IsSpilled()); assigned_register_ = reg; is_double_ = (register_kind == DOUBLE_REGISTERS); - ConvertOperands(); + ConvertOperands(zone); } -void LiveRange::MakeSpilled() { +void LiveRange::MakeSpilled(Zone* zone) { ASSERT(!IsSpilled()); ASSERT(TopLevel()->HasAllocatedSpillOperand()); spilled_ = true; assigned_register_ = kInvalidAssignment; - ConvertOperands(); + ConvertOperands(zone); } @@ -246,7 +248,7 @@ UsePosition* LiveRange::FirstPosWithHint() const { } -LOperand* LiveRange::CreateAssignedOperand() { +LOperand* LiveRange::CreateAssignedOperand(Zone* zone) { LOperand* op = NULL; if (HasRegisterAssigned()) { ASSERT(!IsSpilled()); @@ -260,7 +262,7 @@ LOperand* LiveRange::CreateAssignedOperand() { op = TopLevel()->GetSpillOperand(); ASSERT(!op->IsUnallocated()); } else { - LUnallocated* unalloc = new LUnallocated(LUnallocated::NONE); + LUnallocated* unalloc = new(zone) LUnallocated(LUnallocated::NONE); unalloc->set_virtual_register(id_); op = unalloc; } @@ -292,7 +294,9 @@ void LiveRange::AdvanceLastProcessedMarker( } -void LiveRange::SplitAt(LifetimePosition position, LiveRange* result) { +void LiveRange::SplitAt(LifetimePosition position, + LiveRange* result, + Zone* zone) { ASSERT(Start().Value() < position.Value()); ASSERT(result->IsEmpty()); // Find the last interval that ends before the position. 
If the @@ -311,7 +315,7 @@ void LiveRange::SplitAt(LifetimePosition position, LiveRange* result) { while (current != NULL) { if (current->Contains(position)) { - current->SplitAt(position); + current->SplitAt(position, zone); break; } UseInterval* next = current->next(); @@ -404,7 +408,9 @@ void LiveRange::ShortenTo(LifetimePosition start) { } -void LiveRange::EnsureInterval(LifetimePosition start, LifetimePosition end) { +void LiveRange::EnsureInterval(LifetimePosition start, + LifetimePosition end, + Zone* zone) { LAllocator::TraceAlloc("Ensure live range %d in interval [%d %d[\n", id_, start.Value(), @@ -418,7 +424,7 @@ void LiveRange::EnsureInterval(LifetimePosition start, LifetimePosition end) { first_interval_ = first_interval_->next(); } - UseInterval* new_interval = new UseInterval(start, new_end); + UseInterval* new_interval = new(zone) UseInterval(start, new_end); new_interval->next_ = first_interval_; first_interval_ = new_interval; if (new_interval->next() == NULL) { @@ -427,20 +433,22 @@ void LiveRange::EnsureInterval(LifetimePosition start, LifetimePosition end) { } -void LiveRange::AddUseInterval(LifetimePosition start, LifetimePosition end) { +void LiveRange::AddUseInterval(LifetimePosition start, + LifetimePosition end, + Zone* zone) { LAllocator::TraceAlloc("Add to live range %d interval [%d %d[\n", id_, start.Value(), end.Value()); if (first_interval_ == NULL) { - UseInterval* interval = new UseInterval(start, end); + UseInterval* interval = new(zone) UseInterval(start, end); first_interval_ = interval; last_interval_ = interval; } else { if (end.Value() == first_interval_->start().Value()) { first_interval_->set_start(start); } else if (end.Value() < first_interval_->start().Value()) { - UseInterval* interval = new UseInterval(start, end); + UseInterval* interval = new(zone) UseInterval(start, end); interval->set_next(first_interval_); first_interval_ = interval; } else { @@ -456,11 +464,12 @@ void LiveRange::AddUseInterval(LifetimePosition start, LifetimePosition end) { UsePosition* LiveRange::AddUsePosition(LifetimePosition pos, - LOperand* operand) { + LOperand* operand, + Zone* zone) { LAllocator::TraceAlloc("Add to live range %d use position %d\n", id_, pos.Value()); - UsePosition* use_pos = new UsePosition(pos, operand); + UsePosition* use_pos = new(zone) UsePosition(pos, operand); UsePosition* prev = NULL; UsePosition* current = first_pos_; while (current != NULL && current->pos().Value() < pos.Value()) { @@ -480,8 +489,8 @@ UsePosition* LiveRange::AddUsePosition(LifetimePosition pos, } -void LiveRange::ConvertOperands() { - LOperand* op = CreateAssignedOperand(); +void LiveRange::ConvertOperands(Zone* zone) { + LOperand* op = CreateAssignedOperand(zone); UsePosition* use_pos = first_pos(); while (use_pos != NULL) { ASSERT(Start().Value() <= use_pos->pos().Value() && @@ -545,8 +554,8 @@ LifetimePosition LiveRange::FirstIntersection(LiveRange* other) { LAllocator::LAllocator(int num_values, HGraph* graph) - : chunk_(NULL), - allocation_ok_(true), + : zone_(graph->zone()), + chunk_(NULL), live_in_sets_(graph->blocks()->length()), live_ranges_(num_values * 2), fixed_live_ranges_(NULL), @@ -560,7 +569,8 @@ LAllocator::LAllocator(int num_values, HGraph* graph) mode_(GENERAL_REGISTERS), num_registers_(-1), graph_(graph), - has_osr_entry_(false) {} + has_osr_entry_(false), + allocation_ok_(true) { } void LAllocator::InitializeLivenessAnalysis() { @@ -574,7 +584,7 @@ void LAllocator::InitializeLivenessAnalysis() { BitVector* LAllocator::ComputeLiveOut(HBasicBlock* block) 
{ // Compute live out for the given block, except not including backward // successor edges. - BitVector* live_out = new BitVector(next_virtual_register_); + BitVector* live_out = new(zone_) BitVector(next_virtual_register_, zone_); // Process all successor blocks. for (HSuccessorIterator it(block->end()); !it.Done(); it.Advance()) { @@ -612,7 +622,7 @@ void LAllocator::AddInitialIntervals(HBasicBlock* block, while (!iterator.Done()) { int operand_index = iterator.Current(); LiveRange* range = LiveRangeFor(operand_index); - range->AddUseInterval(start, end); + range->AddUseInterval(start, end, zone_); iterator.Advance(); } } @@ -654,9 +664,9 @@ LiveRange* LAllocator::FixedLiveRangeFor(int index) { ASSERT(index < Register::kNumAllocatableRegisters); LiveRange* result = fixed_live_ranges_[index]; if (result == NULL) { - result = new LiveRange(FixedLiveRangeID(index)); + result = new(zone_) LiveRange(FixedLiveRangeID(index), zone_); ASSERT(result->IsFixed()); - result->set_assigned_register(index, GENERAL_REGISTERS); + result->set_assigned_register(index, GENERAL_REGISTERS, zone_); fixed_live_ranges_[index] = result; } return result; @@ -667,9 +677,9 @@ LiveRange* LAllocator::FixedDoubleLiveRangeFor(int index) { ASSERT(index < DoubleRegister::kNumAllocatableRegisters); LiveRange* result = fixed_double_live_ranges_[index]; if (result == NULL) { - result = new LiveRange(FixedDoubleLiveRangeID(index)); + result = new(zone_) LiveRange(FixedDoubleLiveRangeID(index), zone_); ASSERT(result->IsFixed()); - result->set_assigned_register(index, DOUBLE_REGISTERS); + result->set_assigned_register(index, DOUBLE_REGISTERS, zone_); fixed_double_live_ranges_[index] = result; } return result; @@ -682,7 +692,7 @@ LiveRange* LAllocator::LiveRangeFor(int index) { } LiveRange* result = live_ranges_[index]; if (result == NULL) { - result = new LiveRange(index); + result = new(zone_) LiveRange(index, zone_); live_ranges_[index] = result; } return result; @@ -728,15 +738,15 @@ void LAllocator::Define(LifetimePosition position, if (range->IsEmpty() || range->Start().Value() > position.Value()) { // Can happen if there is a definition without use. 
- range->AddUseInterval(position, position.NextInstruction()); - range->AddUsePosition(position.NextInstruction(), NULL); + range->AddUseInterval(position, position.NextInstruction(), zone_); + range->AddUsePosition(position.NextInstruction(), NULL, zone_); } else { range->ShortenTo(position); } if (operand->IsUnallocated()) { LUnallocated* unalloc_operand = LUnallocated::cast(operand); - range->AddUsePosition(position, unalloc_operand)->set_hint(hint); + range->AddUsePosition(position, unalloc_operand, zone_)->set_hint(hint); } } @@ -749,9 +759,9 @@ void LAllocator::Use(LifetimePosition block_start, if (range == NULL) return; if (operand->IsUnallocated()) { LUnallocated* unalloc_operand = LUnallocated::cast(operand); - range->AddUsePosition(position, unalloc_operand)->set_hint(hint); + range->AddUsePosition(position, unalloc_operand, zone_)->set_hint(hint); } - range->AddUseInterval(block_start, position); + range->AddUseInterval(block_start, position, zone_); } @@ -860,7 +870,8 @@ void LAllocator::MeetConstraintsBetween(LInstruction* first, if (RequiredRegisterKind(input_copy->virtual_register()) == DOUBLE_REGISTERS) { double_artificial_registers_.Add( - cur_input->virtual_register() - first_artificial_register_); + cur_input->virtual_register() - first_artificial_register_, + zone_); } AddConstraintsGapMove(gap_index, input_copy, cur_input); @@ -964,7 +975,8 @@ void LAllocator::ProcessInstructions(HBasicBlock* block, BitVector* live) { output->index() != i) { LiveRange* range = FixedLiveRangeFor(i); range->AddUseInterval(curr_position, - curr_position.InstructionEnd()); + curr_position.InstructionEnd(), + zone_); } } } @@ -975,7 +987,8 @@ void LAllocator::ProcessInstructions(HBasicBlock* block, BitVector* live) { output->index() != i) { LiveRange* range = FixedDoubleLiveRangeFor(i); range->AddUseInterval(curr_position, - curr_position.InstructionEnd()); + curr_position.InstructionEnd(), + zone_); } } } @@ -1023,7 +1036,7 @@ void LAllocator::ResolvePhis(HBasicBlock* block) { const ZoneList<HPhi*>* phis = block->phis(); for (int i = 0; i < phis->length(); ++i) { HPhi* phi = phis->at(i); - LUnallocated* phi_operand = new LUnallocated(LUnallocated::NONE); + LUnallocated* phi_operand = new(zone_) LUnallocated(LUnallocated::NONE); phi_operand->set_virtual_register(phi->id()); for (int j = 0; j < phi->OperandCount(); ++j) { HValue* op = phi->OperandAt(j); @@ -1033,7 +1046,7 @@ void LAllocator::ResolvePhis(HBasicBlock* block) { operand = chunk_->DefineConstantOperand(constant); } else { ASSERT(!op->EmitAtUses()); - LUnallocated* unalloc = new LUnallocated(LUnallocated::ANY); + LUnallocated* unalloc = new(zone_) LUnallocated(LUnallocated::ANY); unalloc->set_virtual_register(op->id()); operand = unalloc; } @@ -1140,8 +1153,8 @@ void LAllocator::ResolveControlFlow(LiveRange* range, if (cur_cover->IsSpilled()) return; ASSERT(pred_cover != NULL && cur_cover != NULL); if (pred_cover != cur_cover) { - LOperand* pred_op = pred_cover->CreateAssignedOperand(); - LOperand* cur_op = cur_cover->CreateAssignedOperand(); + LOperand* pred_op = pred_cover->CreateAssignedOperand(zone_); + LOperand* cur_op = cur_cover->CreateAssignedOperand(zone_); if (!pred_op->Equals(cur_op)) { LGap* gap = NULL; if (block->predecessors()->length() == 1) { @@ -1213,8 +1226,8 @@ void LAllocator::ConnectRanges() { } if (should_insert) { LParallelMove* move = GetConnectingParallelMove(pos); - LOperand* prev_operand = first_range->CreateAssignedOperand(); - LOperand* cur_operand = second_range->CreateAssignedOperand(); + LOperand* 
prev_operand = first_range->CreateAssignedOperand(zone_); + LOperand* cur_operand = second_range->CreateAssignedOperand(zone_); move->AddMove(prev_operand, cur_operand); } } @@ -1317,7 +1330,7 @@ void LAllocator::BuildLiveRanges() { while (!iterator.Done()) { int operand_index = iterator.Current(); LiveRange* range = LiveRangeFor(operand_index); - range->EnsureInterval(start, end); + range->EnsureInterval(start, end, zone_); iterator.Advance(); } @@ -1438,7 +1451,7 @@ void LAllocator::PopulatePointerMaps() { TraceAlloc("Pointer in register for range %d (start at %d) " "at safe point %d\n", cur->id(), cur->Start().Value(), safe_point); - LOperand* operand = cur->CreateAssignedOperand(); + LOperand* operand = cur->CreateAssignedOperand(zone_); ASSERT(!operand->IsStackSlot()); map->RecordPointer(operand); } @@ -1810,7 +1823,7 @@ bool LAllocator::TryAllocateFreeReg(LiveRange* current) { TraceAlloc("Assigning preferred reg %s to live range %d\n", RegisterName(register_index), current->id()); - current->set_assigned_register(register_index, mode_); + current->set_assigned_register(register_index, mode_, zone_); return true; } } @@ -1846,7 +1859,7 @@ bool LAllocator::TryAllocateFreeReg(LiveRange* current) { TraceAlloc("Assigning free reg %s to live range %d\n", RegisterName(reg), current->id()); - current->set_assigned_register(reg, mode_); + current->set_assigned_register(reg, mode_, zone_); return true; } @@ -1936,7 +1949,7 @@ void LAllocator::AllocateBlockedReg(LiveRange* current) { TraceAlloc("Assigning blocked reg %s to live range %d\n", RegisterName(reg), current->id()); - current->set_assigned_register(reg, mode_); + current->set_assigned_register(reg, mode_, zone_); // This register was not free. Thus we need to find and spill // parts of active and inactive live regions that use the same register @@ -2003,7 +2016,7 @@ LiveRange* LAllocator::SplitRangeAt(LiveRange* range, LifetimePosition pos) { LiveRange* result = LiveRangeFor(GetVirtualRegister()); if (!AllocationOk()) return NULL; - range->SplitAt(pos, result); + range->SplitAt(pos, result, zone_); return result; } @@ -2102,7 +2115,7 @@ void LAllocator::Spill(LiveRange* range) { if (op == NULL) op = chunk_->GetNextSpillSlot(mode_ == DOUBLE_REGISTERS); first->SetSpillOperand(op); } - range->MakeSpilled(); + range->MakeSpilled(zone_); } diff --git a/deps/v8/src/lithium-allocator.h b/deps/v8/src/lithium-allocator.h index 43a48cc61b..f5ab055ab3 100644 --- a/deps/v8/src/lithium-allocator.h +++ b/deps/v8/src/lithium-allocator.h @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -216,7 +216,7 @@ class UseInterval: public ZoneObject { // Split this interval at the given position without effecting the // live range that owns it. The interval must contain the position. - void SplitAt(LifetimePosition pos); + void SplitAt(LifetimePosition pos, Zone* zone); // If this interval intersects with other return smallest position // that belongs to both of them. 
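
The lithium-allocator hunks above consistently replace bare `new` with `new(zone)` so that every UseInterval, UsePosition, LiveRange, and BitVector is bump-allocated in the compilation zone and freed wholesale when the zone is torn down, instead of being leaked or individually deleted. A minimal sketch of that placement-new pattern, using illustrative names rather than V8's real classes:

// Illustrative sketch of zone (arena) allocation, as adopted by the
// hunks above: ZoneObject overloads operator new to take a Zone*, so
// "new(zone) Interval(...)" bump-allocates from an arena that is
// released all at once when compilation finishes.
#include <cstddef>
#include <cstdlib>

class Zone {
 public:
  Zone() : position_(buffer_), limit_(buffer_ + sizeof(buffer_)) {}
  void* New(size_t size) {
    size = (size + 7) & ~size_t(7);               // keep allocations aligned
    if (position_ + size > limit_) std::abort();  // real zones grow in segments
    void* result = position_;
    position_ += size;                            // bump pointer, no free list
    return result;
  }
 private:
  char buffer_[1 << 16];
  char* position_;
  char* limit_;
};

class ZoneObject {
 public:
  void* operator new(size_t size, Zone* zone) { return zone->New(size); }
  void operator delete(void*, Zone*) {}  // matching placement delete
};

struct Interval : public ZoneObject {
  Interval(int start, int end) : start(start), end(end) {}
  int start, end;
};

int main() {
  Zone zone;
  Interval* i = new(&zone) Interval(3, 7);  // mirrors new(zone) in the diff
  return i->end - i->start;                 // zone frees everything at once
}
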
@@ -277,7 +277,7 @@ class LiveRange: public ZoneObject { public: static const int kInvalidAssignment = 0x7fffffff; - explicit LiveRange(int id); + LiveRange(int id, Zone* zone); UseInterval* first_interval() const { return first_interval_; } UsePosition* first_pos() const { return first_pos_; } @@ -288,11 +288,13 @@ class LiveRange: public ZoneObject { int id() const { return id_; } bool IsFixed() const { return id_ < 0; } bool IsEmpty() const { return first_interval() == NULL; } - LOperand* CreateAssignedOperand(); + LOperand* CreateAssignedOperand(Zone* zone); int assigned_register() const { return assigned_register_; } int spill_start_index() const { return spill_start_index_; } - void set_assigned_register(int reg, RegisterKind register_kind); - void MakeSpilled(); + void set_assigned_register(int reg, + RegisterKind register_kind, + Zone* zone); + void MakeSpilled(Zone* zone); // Returns use position in this live range that follows both start // and last processed use position. @@ -316,7 +318,7 @@ class LiveRange: public ZoneObject { // the range. // All uses following the given position will be moved from this // live range to the result live range. - void SplitAt(LifetimePosition position, LiveRange* result); + void SplitAt(LifetimePosition position, LiveRange* result, Zone* zone); bool IsDouble() const { return is_double_; } bool HasRegisterAssigned() const { @@ -355,9 +357,15 @@ class LiveRange: public ZoneObject { LifetimePosition FirstIntersection(LiveRange* other); // Add a new interval or a new use position to this live range. - void EnsureInterval(LifetimePosition start, LifetimePosition end); - void AddUseInterval(LifetimePosition start, LifetimePosition end); - UsePosition* AddUsePosition(LifetimePosition pos, LOperand* operand); + void EnsureInterval(LifetimePosition start, + LifetimePosition end, + Zone* zone); + void AddUseInterval(LifetimePosition start, + LifetimePosition end, + Zone* zone); + UsePosition* AddUsePosition(LifetimePosition pos, + LOperand* operand, + Zone* zone); // Shorten the most recently added interval by setting a new start. void ShortenTo(LifetimePosition start); @@ -369,7 +377,7 @@ class LiveRange: public ZoneObject { #endif private: - void ConvertOperands(); + void ConvertOperands(Zone* zone); UseInterval* FirstSearchIntervalForPosition(LifetimePosition position) const; void AdvanceLastProcessedMarker(UseInterval* to_start_of, LifetimePosition but_not_past) const; @@ -400,8 +408,8 @@ class GrowableBitVector BASE_EMBEDDED { return bits_->Contains(value); } - void Add(int value) { - EnsureCapacity(value); + void Add(int value, Zone* zone) { + EnsureCapacity(value, zone); bits_->Add(value); } @@ -412,11 +420,11 @@ class GrowableBitVector BASE_EMBEDDED { return bits_ != NULL && bits_->length() > value; } - void EnsureCapacity(int value) { + void EnsureCapacity(int value, Zone* zone) { if (InBitsRange(value)) return; int new_length = bits_ == NULL ? kInitialLength : bits_->length(); while (new_length <= value) new_length *= 2; - BitVector* new_bits = new BitVector(new_length); + BitVector* new_bits = new(zone) BitVector(new_length, zone); if (bits_ != NULL) new_bits->CopyFrom(*bits_); bits_ = new_bits; } @@ -587,10 +595,9 @@ class LAllocator BASE_EMBEDDED { inline LGap* GapAt(int index); - LChunk* chunk_; + Zone* zone_; - // Indicates success or failure during register allocation. - bool allocation_ok_; + LChunk* chunk_; // During liveness analysis keep a mapping from block id to live_in sets // for blocks already analyzed. 
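GrowableBitVector, earlier in this header, shows the zone discipline in miniature: EnsureCapacity doubles the backing BitVector until the requested bit fits, copies the old bits across, and simply abandons the superseded vector in the zone, since zone memory is only reclaimed wholesale. The growth schedule, pulled out as a standalone sketch (current_length == 0 stands in for the bits_ == NULL case, and initial_length for the class constant kInitialLength):

    // Capacity computation used by GrowableBitVector::EnsureCapacity (sketch).
    int NewLength(int current_length, int value, int initial_length) {
      // Start from the current size (or the initial size when empty) and
      // double until bit index 'value' is in range.
      int new_length = (current_length == 0) ? initial_length : current_length;
      while (new_length <= value) new_length *= 2;
      return new_length;
    }
    // E.g. with initial_length == 16: NewLength(0, 5, 16) == 16 and
    // NewLength(16, 40, 16) == 64.  Geometric growth keeps the total bits
    // copied over a vector's lifetime linear in its final size, so Add()
    // stays amortized O(1).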
@@ -621,6 +628,9 @@ class LAllocator BASE_EMBEDDED { bool has_osr_entry_; + // Indicates success or failure during register allocation. + bool allocation_ok_; + DISALLOW_COPY_AND_ASSIGN(LAllocator); }; diff --git a/deps/v8/src/lithium.h b/deps/v8/src/lithium.h index 4987b5a4cd..474e555fec 100644 --- a/deps/v8/src/lithium.h +++ b/deps/v8/src/lithium.h @@ -453,11 +453,10 @@ class LEnvironment: public ZoneObject { parameter_count_(parameter_count), pc_offset_(-1), values_(value_count), - is_tagged_(value_count), + is_tagged_(value_count, closure->GetHeap()->isolate()->zone()), spilled_registers_(NULL), spilled_double_registers_(NULL), - outer_(outer) { - } + outer_(outer) { } Handle<JSFunction> closure() const { return closure_; } int arguments_stack_height() const { return arguments_stack_height_; } diff --git a/deps/v8/src/macro-assembler.h b/deps/v8/src/macro-assembler.h index 7d4bbbc0ca..11e2217e07 100644 --- a/deps/v8/src/macro-assembler.h +++ b/deps/v8/src/macro-assembler.h @@ -36,13 +36,6 @@ enum InvokeFlag { }; -// Types of uncatchable exceptions. -enum UncatchableExceptionType { - OUT_OF_MEMORY, - TERMINATION -}; - - // Invalid depth in prototype chain. const int kInvalidProtoDepth = -1; diff --git a/deps/v8/src/mark-compact.cc b/deps/v8/src/mark-compact.cc index 1adb74745f..9d83d90fbf 100644 --- a/deps/v8/src/mark-compact.cc +++ b/deps/v8/src/mark-compact.cc @@ -1176,11 +1176,15 @@ class StaticMarkingVisitor : public StaticVisitorBase { Heap* heap = map->GetHeap(); Code* code = reinterpret_cast<Code*>(object); if (FLAG_cleanup_code_caches_at_gc) { - TypeFeedbackCells* type_feedback_cells = code->type_feedback_cells(); - for (int i = 0; i < type_feedback_cells->CellCount(); i++) { - ASSERT(type_feedback_cells->AstId(i)->IsSmi()); - JSGlobalPropertyCell* cell = type_feedback_cells->Cell(i); - cell->set_value(TypeFeedbackCells::RawUninitializedSentinel(heap)); + Object* raw_info = code->type_feedback_info(); + if (raw_info->IsTypeFeedbackInfo()) { + TypeFeedbackCells* type_feedback_cells = + TypeFeedbackInfo::cast(raw_info)->type_feedback_cells(); + for (int i = 0; i < type_feedback_cells->CellCount(); i++) { + ASSERT(type_feedback_cells->AstId(i)->IsSmi()); + JSGlobalPropertyCell* cell = type_feedback_cells->Cell(i); + cell->set_value(TypeFeedbackCells::RawUninitializedSentinel(heap)); + } } } code->CodeIterateBody<StaticMarkingVisitor>(heap); diff --git a/deps/v8/src/math.js b/deps/v8/src/math.js index f4426f4a00..8e735c4a68 100644 --- a/deps/v8/src/math.js +++ b/deps/v8/src/math.js @@ -29,15 +29,15 @@ // Keep reference to original values of some global properties. This // has the added benefit that the code in this file is isolated from // changes to these properties. -const $floor = MathFloor; -const $random = MathRandom; -const $abs = MathAbs; +var $floor = MathFloor; +var $random = MathRandom; +var $abs = MathAbs; // Instance class name can only be set on functions. That is the only // purpose for MathConstructor. function MathConstructor() {} %FunctionSetInstanceClassName(MathConstructor, 'Math'); -const $Math = new MathConstructor(); +var $Math = new MathConstructor(); $Math.__proto__ = $Object.prototype; %SetProperty(global, "Math", $Math, DONT_ENUM); diff --git a/deps/v8/src/messages.js b/deps/v8/src/messages.js index cd4add4bcb..e641133dda 100644 --- a/deps/v8/src/messages.js +++ b/deps/v8/src/messages.js @@ -25,17 +25,16 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- // ------------------------------------------------------------------- // // If this object gets passed to an error constructor the error will // get an accessor for .message that constructs a descriptive error // message on access. -const kAddMessageAccessorsMarker = { }; +var kAddMessageAccessorsMarker = { }; // This will be lazily initialized when first needed (and forcibly // overwritten even though it's const). -const kMessages = 0; +var kMessages = 0; function FormatString(format, message) { var args = %MessageGetArguments(message); @@ -603,7 +602,7 @@ function SourceLocation(script, position, line, column, start, end) { this.end = end; } -const kLineLengthLimit = 78; +var kLineLengthLimit = 78; /** * Restrict source location start and end positions to make the source slice @@ -748,18 +747,18 @@ function DefineOneShotAccessor(obj, name, fun) { // can't rely on 'this' being the same as 'obj'. var hasBeenSet = false; var value; - function getter() { + var getter = function() { if (hasBeenSet) { return value; } hasBeenSet = true; value = fun(obj); return value; - } - function setter(v) { + }; + var setter = function(v) { hasBeenSet = true; value = v; - } + }; %DefineOrRedefineAccessorProperty(obj, name, GETTER, getter, DONT_ENUM); %DefineOrRedefineAccessorProperty(obj, name, SETTER, setter, DONT_ENUM); } @@ -1090,7 +1089,7 @@ function captureStackTrace(obj, cons_opt) { function SetUpError() { // Define special error type constructors. - function DefineError(f) { + var DefineError = function(f) { // Store the error function in both the global object // and the runtime object. The function is fetched // from the runtime object when throwing errors from @@ -1106,7 +1105,7 @@ function SetUpError() { // However, it can't be an instance of the Error object because // it hasn't been properly configured yet. Instead we create a // special not-a-true-error-but-close-enough object. - function ErrorPrototype() {} + var ErrorPrototype = function() {}; %FunctionSetPrototype(ErrorPrototype, $Object.prototype); %FunctionSetInstanceClassName(ErrorPrototype, 'Error'); %FunctionSetPrototype(f, new ErrorPrototype()); @@ -1148,7 +1147,7 @@ function SetUpError() { } }); %SetNativeFlag(f); - } + }; DefineError(function Error() { }); DefineError(function TypeError() { }); @@ -1167,8 +1166,8 @@ $Error.captureStackTrace = captureStackTrace; // Global list of error objects visited during ErrorToString. This is // used to detect cycles in error toString formatting. -const visited_errors = new InternalArray(); -const cyclic_error_marker = new $Object(); +var visited_errors = new InternalArray(); +var cyclic_error_marker = new $Object(); function ErrorToStringDetectCycle(error) { if (!%PushIfAbsent(visited_errors, error)) throw cyclic_error_marker; @@ -1213,4 +1212,4 @@ InstallFunctions($Error.prototype, DONT_ENUM, ['toString', ErrorToString]); // Boilerplate for exceptions for stack overflows. Used from // Isolate::StackOverflow(). 
-const kStackOverflowBoilerplate = MakeRangeError('stack_overflow', []); +var kStackOverflowBoilerplate = MakeRangeError('stack_overflow', []); diff --git a/deps/v8/src/mips/builtins-mips.cc b/deps/v8/src/mips/builtins-mips.cc index 259df211bd..cc11235824 100644 --- a/deps/v8/src/mips/builtins-mips.cc +++ b/deps/v8/src/mips/builtins-mips.cc @@ -321,7 +321,7 @@ static void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code) { Counters* counters = masm->isolate()->counters(); Label argc_one_or_more, argc_two_or_more, not_empty_array, empty_array, - has_non_smi_element; + has_non_smi_element, finish, cant_transition_map, not_double; // Check for array construction with zero arguments or one. __ Branch(&argc_one_or_more, ne, a0, Operand(zero_reg)); @@ -417,14 +417,16 @@ static void ArrayNativeCode(MacroAssembler* masm, __ mov(t3, sp); __ bind(&loop); __ lw(a2, MemOperand(t3)); - __ Addu(t3, t3, kPointerSize); if (FLAG_smi_only_arrays) { __ JumpIfNotSmi(a2, &has_non_smi_element); } + __ Addu(t3, t3, kPointerSize); __ Addu(t1, t1, -kPointerSize); __ sw(a2, MemOperand(t1)); __ bind(&entry); __ Branch(&loop, lt, t0, Operand(t1)); + + __ bind(&finish); __ mov(sp, t3); // Remove caller arguments and receiver from the stack, setup return value and @@ -437,8 +439,39 @@ static void ArrayNativeCode(MacroAssembler* masm, __ Ret(); __ bind(&has_non_smi_element); + // Double values are handled by the runtime. + __ CheckMap( + a2, t5, Heap::kHeapNumberMapRootIndex, ¬_double, DONT_DO_SMI_CHECK); + __ bind(&cant_transition_map); __ UndoAllocationInNewSpace(a3, t0); - __ b(call_generic_code); + __ Branch(call_generic_code); + + __ bind(¬_double); + // Transition FAST_SMI_ONLY_ELEMENTS to FAST_ELEMENTS. + // a3: JSArray + __ lw(a2, FieldMemOperand(a3, HeapObject::kMapOffset)); + __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS, + FAST_ELEMENTS, + a2, + t5, + &cant_transition_map); + __ sw(a2, FieldMemOperand(a3, HeapObject::kMapOffset)); + __ RecordWriteField(a3, + HeapObject::kMapOffset, + a2, + t5, + kRAHasNotBeenSaved, + kDontSaveFPRegs, + EMIT_REMEMBERED_SET, + OMIT_SMI_CHECK); + Label loop2; + __ bind(&loop2); + __ lw(a2, MemOperand(t3)); + __ Addu(t3, t3, kPointerSize); + __ Subu(t1, t1, kPointerSize); + __ sw(a2, MemOperand(t1)); + __ Branch(&loop2, lt, t0, Operand(t1)); + __ Branch(&finish); } diff --git a/deps/v8/src/mips/code-stubs-mips.cc b/deps/v8/src/mips/code-stubs-mips.cc index 852b3c9e66..de6fb953c4 100644 --- a/deps/v8/src/mips/code-stubs-mips.cc +++ b/deps/v8/src/mips/code-stubs-mips.cc @@ -3580,6 +3580,11 @@ void StackCheckStub::Generate(MacroAssembler* masm) { } +void InterruptStub::Generate(MacroAssembler* masm) { + __ TailCallRuntime(Runtime::kInterrupt, 0, 1); +} + + void MathPowStub::Generate(MacroAssembler* masm) { CpuFeatures::Scope fpu_scope(FPU); const Register base = a1; @@ -3832,17 +3837,6 @@ void CEntryStub::GenerateAheadOfTime() { } -void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) { - __ Throw(v0); -} - - -void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm, - UncatchableExceptionType type) { - __ ThrowUncatchable(type, v0); -} - - void CEntryStub::GenerateCore(MacroAssembler* masm, Label* throw_normal_exception, Label* throw_termination_exception, @@ -4033,13 +4027,27 @@ void CEntryStub::Generate(MacroAssembler* masm) { true); __ bind(&throw_out_of_memory_exception); - GenerateThrowUncatchable(masm, OUT_OF_MEMORY); + // Set external caught exception to false. 
+ Isolate* isolate = masm->isolate(); + ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress, + isolate); + __ li(a0, Operand(false, RelocInfo::NONE)); + __ li(a2, Operand(external_caught)); + __ sw(a0, MemOperand(a2)); + + // Set pending exception and v0 to out of memory exception. + Failure* out_of_memory = Failure::OutOfMemoryException(); + __ li(v0, Operand(reinterpret_cast<int32_t>(out_of_memory))); + __ li(a2, Operand(ExternalReference(Isolate::kPendingExceptionAddress, + isolate))); + __ sw(v0, MemOperand(a2)); + // Fall through to the next label. __ bind(&throw_termination_exception); - GenerateThrowUncatchable(masm, TERMINATION); + __ ThrowUncatchable(v0); __ bind(&throw_normal_exception); - GenerateThrowTOS(masm); + __ Throw(v0); } @@ -5133,10 +5141,10 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { Label termination_exception; __ Branch(&termination_exception, eq, v0, Operand(a0)); - __ Throw(v0); // Expects thrown value in v0. + __ Throw(v0); __ bind(&termination_exception); - __ ThrowUncatchable(TERMINATION, v0); // Expects thrown value in v0. + __ ThrowUncatchable(v0); __ bind(&failure); // For failure and exception return null. @@ -6058,25 +6066,23 @@ void SubStringStub::Generate(MacroAssembler* masm) { // Utilize delay slots. SmiUntag doesn't emit a jump, everything else is // safe in this case. - __ UntagAndJumpIfSmi(a2, a2, &runtime); - __ UntagAndJumpIfSmi(a3, a3, &runtime); - + __ UntagAndJumpIfNotSmi(a2, a2, &runtime); + __ UntagAndJumpIfNotSmi(a3, a3, &runtime); // Both a2 and a3 are untagged integers. __ Branch(&runtime, lt, a3, Operand(zero_reg)); // From < 0. - __ subu(a2, t5, a3); - __ Branch(&runtime, gt, a3, Operand(t5)); // Fail if from > to. + __ Branch(&runtime, gt, a3, Operand(a2)); // Fail if from > to. + __ Subu(a2, a2, a3); // Make sure first argument is a string. __ lw(v0, MemOperand(sp, kStringOffset)); - __ Branch(&runtime, eq, v0, Operand(kSmiTagMask)); - + __ JumpIfSmi(v0, &runtime); __ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset)); __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset)); - __ And(t4, v0, Operand(kIsNotStringMask)); + __ And(t0, a1, Operand(kIsNotStringMask)); - __ Branch(&runtime, ne, t4, Operand(zero_reg)); + __ Branch(&runtime, ne, t0, Operand(zero_reg)); // Short-cut for the case of trivial substring. Label return_v0; @@ -7326,11 +7332,13 @@ struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = { { a2, a1, a3, EMIT_REMEMBERED_SET }, { a3, a1, a2, EMIT_REMEMBERED_SET }, // KeyedStoreStubCompiler::GenerateStoreFastElement. 
- { t0, a2, a3, EMIT_REMEMBERED_SET }, + { a3, a2, t0, EMIT_REMEMBERED_SET }, + { a2, a3, t0, EMIT_REMEMBERED_SET }, // ElementsTransitionGenerator::GenerateSmiOnlyToObject // and ElementsTransitionGenerator::GenerateSmiOnlyToDouble // and ElementsTransitionGenerator::GenerateDoubleToObject { a2, a3, t5, EMIT_REMEMBERED_SET }, + { a2, a3, t5, OMIT_REMEMBERED_SET }, // ElementsTransitionGenerator::GenerateDoubleToObject { t2, a2, a0, EMIT_REMEMBERED_SET }, { a2, t2, t5, EMIT_REMEMBERED_SET }, diff --git a/deps/v8/src/mips/codegen-mips.cc b/deps/v8/src/mips/codegen-mips.cc index c48432c702..d7bddaf125 100644 --- a/deps/v8/src/mips/codegen-mips.cc +++ b/deps/v8/src/mips/codegen-mips.cc @@ -89,13 +89,18 @@ void ElementsTransitionGenerator::GenerateSmiOnlyToDouble( // -- a3 : target map, scratch for subsequent call // -- t0 : scratch (elements) // ----------------------------------- - Label loop, entry, convert_hole, gc_required; + Label loop, entry, convert_hole, gc_required, only_change_map, done; bool fpu_supported = CpuFeatures::IsSupported(FPU); - __ push(ra); Register scratch = t6; + // Check for empty arrays, which only require a map transition and no changes + // to the backing store. __ lw(t0, FieldMemOperand(a2, JSObject::kElementsOffset)); + __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex); + __ Branch(&only_change_map, eq, at, Operand(t0)); + + __ push(ra); __ lw(t1, FieldMemOperand(t0, FixedArray::kLengthOffset)); // t0: source FixedArray // t1: number of elements (smi-tagged) @@ -118,7 +123,7 @@ void ElementsTransitionGenerator::GenerateSmiOnlyToDouble( t5, kRAHasBeenSaved, kDontSaveFPRegs, - EMIT_REMEMBERED_SET, + OMIT_REMEMBERED_SET, OMIT_SMI_CHECK); // Replace receiver's backing store with newly created FixedDoubleArray. __ Addu(a3, t2, Operand(kHeapObjectTag)); @@ -149,6 +154,18 @@ void ElementsTransitionGenerator::GenerateSmiOnlyToDouble( __ Branch(&entry); + __ bind(&only_change_map); + __ sw(a3, FieldMemOperand(a2, HeapObject::kMapOffset)); + __ RecordWriteField(a2, + HeapObject::kMapOffset, + a3, + t5, + kRAHasBeenSaved, + kDontSaveFPRegs, + OMIT_REMEMBERED_SET, + OMIT_SMI_CHECK); + __ Branch(&done); + // Call into runtime if GC is required. __ bind(&gc_required); __ pop(ra); @@ -201,6 +218,7 @@ void ElementsTransitionGenerator::GenerateSmiOnlyToDouble( if (!fpu_supported) __ Pop(a1, a0); __ pop(ra); + __ bind(&done); } @@ -214,10 +232,16 @@ void ElementsTransitionGenerator::GenerateDoubleToObject( // -- a3 : target map, scratch for subsequent call // -- t0 : scratch (elements) // ----------------------------------- - Label entry, loop, convert_hole, gc_required; - __ MultiPush(a0.bit() | a1.bit() | a2.bit() | a3.bit() | ra.bit()); + Label entry, loop, convert_hole, gc_required, only_change_map; + // Check for empty arrays, which only require a map transition and no changes + // to the backing store. __ lw(t0, FieldMemOperand(a2, JSObject::kElementsOffset)); + __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex); + __ Branch(&only_change_map, eq, at, Operand(t0)); + + __ MultiPush(a0.bit() | a1.bit() | a2.bit() | a3.bit() | ra.bit()); + __ lw(t1, FieldMemOperand(t0, FixedArray::kLengthOffset)); // t0: source FixedArray // t1: number of elements (smi-tagged) @@ -289,16 +313,6 @@ void ElementsTransitionGenerator::GenerateDoubleToObject( __ Branch(&loop, lt, a3, Operand(t1)); __ MultiPop(a2.bit() | a3.bit() | a0.bit() | a1.bit()); - // Update receiver's map. 
- __ sw(a3, FieldMemOperand(a2, HeapObject::kMapOffset)); - __ RecordWriteField(a2, - HeapObject::kMapOffset, - a3, - t5, - kRAHasBeenSaved, - kDontSaveFPRegs, - EMIT_REMEMBERED_SET, - OMIT_SMI_CHECK); // Replace receiver's backing store with newly created and filled FixedArray. __ sw(t2, FieldMemOperand(a2, JSObject::kElementsOffset)); __ RecordWriteField(a2, @@ -310,6 +324,18 @@ void ElementsTransitionGenerator::GenerateDoubleToObject( EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); __ pop(ra); + + __ bind(&only_change_map); + // Update receiver's map. + __ sw(a3, FieldMemOperand(a2, HeapObject::kMapOffset)); + __ RecordWriteField(a2, + HeapObject::kMapOffset, + a3, + t5, + kRAHasNotBeenSaved, + kDontSaveFPRegs, + OMIT_REMEMBERED_SET, + OMIT_SMI_CHECK); } diff --git a/deps/v8/src/mips/deoptimizer-mips.cc b/deps/v8/src/mips/deoptimizer-mips.cc index 26a406333f..78720f4403 100644 --- a/deps/v8/src/mips/deoptimizer-mips.cc +++ b/deps/v8/src/mips/deoptimizer-mips.cc @@ -36,9 +36,6 @@ namespace v8 { namespace internal { -const int Deoptimizer::table_entry_size_ = 32; - - int Deoptimizer::patch_size() { const int kCallInstructionSizeInWords = 4; return kCallInstructionSizeInWords * Assembler::kInstrSize; @@ -839,32 +836,55 @@ void Deoptimizer::EntryGenerator::Generate() { } +// Maximum size of a table entry generated below. +const int Deoptimizer::table_entry_size_ = 12 * Assembler::kInstrSize; + void Deoptimizer::TableEntryGenerator::GeneratePrologue() { Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm()); // Create a sequence of deoptimization entries. Note that any // registers may be still live. - - Label done; + Label table_start; + __ bind(&table_start); for (int i = 0; i < count(); i++) { - int start = masm()->pc_offset(); - USE(start); + Label start; + __ bind(&start); if (type() != EAGER) { // Emulate ia32 like call by pushing return address to stack. - __ push(ra); + __ addiu(sp, sp, -3 * kPointerSize); + __ sw(ra, MemOperand(sp, 2 * kPointerSize)); + } else { + __ addiu(sp, sp, -2 * kPointerSize); } - __ li(at, Operand(i)); - __ push(at); - __ Branch(&done); + // Using ori makes sure only one instruction is generated. This will work + // as long as the number of deopt entries is below 2^16. + __ ori(at, zero_reg, i); + __ sw(at, MemOperand(sp, kPointerSize)); + __ sw(ra, MemOperand(sp, 0)); + // This branch instruction only jumps over one instruction, and that is + // executed in the delay slot. The result is that execution is linear but + // the ra register is updated. + __ bal(1); + // Jump over the remaining deopt entries (including this one). + // Only include the remaining part of the current entry in the calculation. + const int remaining_entries = (count() - i) * table_entry_size_; + const int cur_size = masm()->SizeOfCodeGeneratedSince(&start); + // ra points to the instruction after the delay slot. Adjust by 4. + __ Addu(at, ra, remaining_entries - cur_size - Assembler::kInstrSize); + __ lw(ra, MemOperand(sp, 0)); + __ jr(at); // Expose delay slot. + __ addiu(sp, sp, kPointerSize); // In delay slot. // Pad the rest of the code. 
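  // Every entry must come out at exactly table_entry_size_ bytes: the
  // deoptimizer locates entry i at the table start plus
  // i * table_entry_size_, so shorter sequences are nop-padded below, and
  // the ASSERT_EQ checks enforce both the per-entry and whole-table sizes.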
- while (table_entry_size_ > (masm()->pc_offset() - start)) { + while (table_entry_size_ > (masm()->SizeOfCodeGeneratedSince(&start))) { __ nop(); } - ASSERT_EQ(table_entry_size_, masm()->pc_offset() - start); + ASSERT_EQ(table_entry_size_, masm()->SizeOfCodeGeneratedSince(&start)); } - __ bind(&done); + + ASSERT_EQ(masm()->SizeOfCodeGeneratedSince(&table_start), + count() * table_entry_size_); } #undef __ diff --git a/deps/v8/src/mips/full-codegen-mips.cc b/deps/v8/src/mips/full-codegen-mips.cc index 201742efec..fd0f487978 100644 --- a/deps/v8/src/mips/full-codegen-mips.cc +++ b/deps/v8/src/mips/full-codegen-mips.cc @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -133,10 +133,8 @@ class JumpPatchSite BASE_EMBEDDED { // // The function builds a JS frame. Please see JavaScriptFrameConstants in // frames-mips.h for its layout. -void FullCodeGenerator::Generate(CompilationInfo* info) { - ASSERT(info_ == NULL); - info_ = info; - scope_ = info->scope(); +void FullCodeGenerator::Generate() { + CompilationInfo* info = info_; handler_table_ = isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED); SetFunctionPosition(function()); @@ -149,6 +147,27 @@ void FullCodeGenerator::Generate(CompilationInfo* info) { } #endif + // We can optionally optimize based on counters rather than statistical + // sampling. + if (info->ShouldSelfOptimize()) { + if (FLAG_trace_opt_verbose) { + PrintF("[adding self-optimization header to %s]\n", + *info->function()->debug_name()->ToCString()); + } + MaybeObject* maybe_cell = isolate()->heap()->AllocateJSGlobalPropertyCell( + Smi::FromInt(Compiler::kCallsUntilPrimitiveOpt)); + JSGlobalPropertyCell* cell; + if (maybe_cell->To(&cell)) { + __ li(a2, Handle<JSGlobalPropertyCell>(cell)); + __ lw(a3, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset)); + __ Subu(a3, a3, Operand(Smi::FromInt(1))); + __ sw(a3, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset)); + Handle<Code> compile_stub( + isolate()->builtins()->builtin(Builtins::kLazyRecompile)); + __ Jump(compile_stub, RelocInfo::CODE_TARGET, eq, a3, Operand(zero_reg)); + } + } + // Strict mode functions and builtins need to replace the receiver // with undefined when called as functions (without an explicit // receiver object). t1 is zero for method calls and non-zero for @@ -274,11 +293,11 @@ void FullCodeGenerator::Generate(CompilationInfo* info) { // For named function expressions, declare the function name as a // constant. if (scope()->is_function_scope() && scope()->function() != NULL) { - int ignored = 0; VariableProxy* proxy = scope()->function(); ASSERT(proxy->var()->mode() == CONST || proxy->var()->mode() == CONST_HARMONY); - EmitDeclaration(proxy, proxy->var()->mode(), NULL, &ignored); + ASSERT(proxy->var()->location() != Variable::UNALLOCATED); + EmitDeclaration(proxy, proxy->var()->mode(), NULL); } VisitDeclarations(scope()->declarations()); } @@ -315,7 +334,8 @@ void FullCodeGenerator::ClearAccumulator() { } -void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt) { +void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt, + Label* back_edge_target) { // The generated code is used in Deoptimizer::PatchStackCheckCodeAt so we need // to make sure it is constant. 
Branch may emit a skip-or-jump sequence // instead of the normal Branch. It seems that the "skip" part of that @@ -716,8 +736,7 @@ void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr, void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy, VariableMode mode, - FunctionLiteral* function, - int* global_count) { + FunctionLiteral* function) { // If it was not possible to allocate the variable at compile time, we // need to "declare" it at runtime to make sure it actually exists in the // local context. @@ -726,7 +745,7 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy, (mode == CONST || mode == CONST_HARMONY || mode == LET); switch (variable->location()) { case Variable::UNALLOCATED: - ++(*global_count); + ++global_count_; break; case Variable::PARAMETER: @@ -814,9 +833,6 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy, } -void FullCodeGenerator::VisitDeclaration(Declaration* decl) { } - - void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) { // Call the runtime to declare the globals. // The context is the first argument. @@ -1098,7 +1114,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) { __ Addu(a0, a0, Operand(Smi::FromInt(1))); __ push(a0); - EmitStackCheck(stmt); + EmitStackCheck(stmt, &loop); __ Branch(&loop); // Remove the pointers stored on the stack. @@ -1516,7 +1532,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) { Smi::FromInt(0))); __ push(a1); VisitForStackValue(value); - __ CallRuntime(Runtime::kDefineAccessor, 4); + __ li(a0, Operand(Smi::FromInt(NONE))); + __ push(a0); + __ CallRuntime(Runtime::kDefineOrRedefineAccessorProperty, 5); break; } } diff --git a/deps/v8/src/mips/lithium-codegen-mips.cc b/deps/v8/src/mips/lithium-codegen-mips.cc index aead65c440..252e8f4478 100644 --- a/deps/v8/src/mips/lithium-codegen-mips.cc +++ b/deps/v8/src/mips/lithium-codegen-mips.cc @@ -2764,6 +2764,15 @@ void LCodeGen::DoOuterContext(LOuterContext* instr) { } +void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) { + __ LoadHeapObject(scratch0(), instr->hydrogen()->pairs()); + __ li(scratch1(), Operand(Smi::FromInt(instr->hydrogen()->flags()))); + // The context is the first argument. + __ Push(cp, scratch0(), scratch1()); + CallRuntime(Runtime::kDeclareGlobals, 3, instr); +} + + void LCodeGen::DoGlobalObject(LGlobalObject* instr) { Register context = ToRegister(instr->context()); Register result = ToRegister(instr->result()); @@ -4272,26 +4281,35 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object, ASSERT(!source.is(a2)); ASSERT(!result.is(a2)); + // Only elements backing stores for non-COW arrays need to be copied. + Handle<FixedArrayBase> elements(object->elements()); + bool has_elements = elements->length() > 0 && + elements->map() != isolate()->heap()->fixed_cow_array_map(); + // Increase the offset so that subsequent objects end up right after - // this one. - int current_offset = *offset; - int size = object->map()->instance_size(); - *offset += size; + // this object and its backing store. + int object_offset = *offset; + int object_size = object->map()->instance_size(); + int elements_offset = *offset + object_size; + int elements_size = has_elements ? elements->Size() : 0; + *offset += object_size + elements_size; // Copy object header. 
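  // Resulting layout within the single allocation: this object's fields
  // occupy [object_offset, object_offset + object_size), its elements
  // backing store (if any) follows immediately at elements_offset, and
  // *offset now points past both, which is where the next nested literal
  // copied by a recursive EmitDeepCopy call will be placed.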
ASSERT(object->properties()->length() == 0); - ASSERT(object->elements()->length() == 0 || - object->elements()->map() == isolate()->heap()->fixed_cow_array_map()); int inobject_properties = object->map()->inobject_properties(); - int header_size = size - inobject_properties * kPointerSize; + int header_size = object_size - inobject_properties * kPointerSize; for (int i = 0; i < header_size; i += kPointerSize) { - __ lw(a2, FieldMemOperand(source, i)); - __ sw(a2, FieldMemOperand(result, current_offset + i)); + if (has_elements && i == JSObject::kElementsOffset) { + __ Addu(a2, result, Operand(elements_offset)); + } else { + __ lw(a2, FieldMemOperand(source, i)); + } + __ sw(a2, FieldMemOperand(result, object_offset + i)); } // Copy in-object properties. for (int i = 0; i < inobject_properties; i++) { - int total_offset = current_offset + object->GetInObjectPropertyOffset(i); + int total_offset = object_offset + object->GetInObjectPropertyOffset(i); Handle<Object> value = Handle<Object>(object->InObjectPropertyAt(i)); if (value->IsJSObject()) { Handle<JSObject> value_object = Handle<JSObject>::cast(value); @@ -4307,10 +4325,42 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object, __ sw(a2, FieldMemOperand(result, total_offset)); } } + + + // Copy elements backing store header. + ASSERT(!has_elements || elements->IsFixedArray()); + if (has_elements) { + __ LoadHeapObject(source, elements); + for (int i = 0; i < FixedArray::kHeaderSize; i += kPointerSize) { + __ lw(a2, FieldMemOperand(source, i)); + __ sw(a2, FieldMemOperand(result, elements_offset + i)); + } + } + + // Copy elements backing store content. + ASSERT(!has_elements || elements->IsFixedArray()); + int elements_length = has_elements ? elements->length() : 0; + for (int i = 0; i < elements_length; i++) { + int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i); + Handle<Object> value = JSObject::GetElement(object, i); + if (value->IsJSObject()) { + Handle<JSObject> value_object = Handle<JSObject>::cast(value); + __ Addu(a2, result, Operand(*offset)); + __ sw(a2, FieldMemOperand(result, total_offset)); + __ LoadHeapObject(source, value_object); + EmitDeepCopy(value_object, result, source, offset); + } else if (value->IsHeapObject()) { + __ LoadHeapObject(a2, Handle<HeapObject>::cast(value)); + __ sw(a2, FieldMemOperand(result, total_offset)); + } else { + __ li(a2, Operand(value)); + __ sw(a2, FieldMemOperand(result, total_offset)); + } + } } -void LCodeGen::DoObjectLiteralFast(LObjectLiteralFast* instr) { +void LCodeGen::DoFastLiteral(LFastLiteral* instr) { int size = instr->hydrogen()->total_size(); // Allocate all objects that are part of the literal in one big @@ -4332,14 +4382,14 @@ void LCodeGen::DoObjectLiteralFast(LObjectLiteralFast* instr) { } -void LCodeGen::DoObjectLiteralGeneric(LObjectLiteralGeneric* instr) { +void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) { ASSERT(ToRegister(instr->result()).is(v0)); - + Handle<FixedArray> literals(instr->environment()->closure()->literals()); Handle<FixedArray> constant_properties = instr->hydrogen()->constant_properties(); - __ lw(t0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); - __ lw(t0, FieldMemOperand(t0, JSFunction::kLiteralsOffset)); + // Set up the parameters to the stub/runtime call. 
+ __ LoadHeapObject(t0, literals); __ li(a3, Operand(Smi::FromInt(instr->hydrogen()->literal_index()))); __ li(a2, Operand(constant_properties)); int flags = instr->hydrogen()->fast_elements() @@ -4348,7 +4398,7 @@ void LCodeGen::DoObjectLiteralGeneric(LObjectLiteralGeneric* instr) { __ li(a1, Operand(Smi::FromInt(flags))); __ Push(t0, a3, a2, a1); - // Pick the right runtime function to call. + // Pick the right runtime function or stub to call. int properties_count = constant_properties->length() / 2; if (instr->hydrogen()->depth() > 1) { CallRuntime(Runtime::kCreateObjectLiteral, 4, instr); diff --git a/deps/v8/src/mips/lithium-mips.cc b/deps/v8/src/mips/lithium-mips.cc index 0bc222339f..c534abcb65 100644 --- a/deps/v8/src/mips/lithium-mips.cc +++ b/deps/v8/src/mips/lithium-mips.cc @@ -1124,6 +1124,11 @@ LInstruction* LChunkBuilder::DoOuterContext(HOuterContext* instr) { } +LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) { + return MarkAsCall(new LDeclareGlobals, instr); +} + + LInstruction* LChunkBuilder::DoGlobalObject(HGlobalObject* instr) { LOperand* context = UseRegisterAtStart(instr->value()); return DefineAsRegister(new LGlobalObject(context)); @@ -2093,19 +2098,18 @@ LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) { } -LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) { - return MarkAsCall(DefineFixed(new LArrayLiteral, v0), instr); +LInstruction* LChunkBuilder::DoFastLiteral(HFastLiteral* instr) { + return MarkAsCall(DefineFixed(new LFastLiteral, v0), instr); } -LInstruction* LChunkBuilder::DoObjectLiteralFast(HObjectLiteralFast* instr) { - return MarkAsCall(DefineFixed(new LObjectLiteralFast, v0), instr); +LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) { + return MarkAsCall(DefineFixed(new LArrayLiteral, v0), instr); } -LInstruction* LChunkBuilder::DoObjectLiteralGeneric( - HObjectLiteralGeneric* instr) { - return MarkAsCall(DefineFixed(new LObjectLiteralGeneric, v0), instr); +LInstruction* LChunkBuilder::DoObjectLiteral(HObjectLiteral* instr) { + return MarkAsCall(DefineFixed(new LObjectLiteral, v0), instr); } diff --git a/deps/v8/src/mips/lithium-mips.h b/deps/v8/src/mips/lithium-mips.h index 0a21649fa8..f4c3c21276 100644 --- a/deps/v8/src/mips/lithium-mips.h +++ b/deps/v8/src/mips/lithium-mips.h @@ -87,11 +87,13 @@ class LCodeGen; V(ConstantI) \ V(ConstantT) \ V(Context) \ + V(DeclareGlobals) \ V(DeleteProperty) \ V(Deoptimize) \ V(DivI) \ V(DoubleToI) \ V(ElementsKind) \ + V(FastLiteral) \ V(FixedArrayBaseLength) \ V(FunctionLiteral) \ V(GetCachedArrayIndex) \ @@ -134,8 +136,7 @@ class LCodeGen; V(NumberTagD) \ V(NumberTagI) \ V(NumberUntagD) \ - V(ObjectLiteralFast) \ - V(ObjectLiteralGeneric) \ + V(ObjectLiteral) \ V(OsrEntry) \ V(OuterContext) \ V(Parameter) \ @@ -1346,6 +1347,13 @@ class LOuterContext: public LTemplateInstruction<1, 1, 0> { }; +class LDeclareGlobals: public LTemplateInstruction<0, 0, 0> { + public: + DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals, "declare-globals") + DECLARE_HYDROGEN_ACCESSOR(DeclareGlobals) +}; + + class LGlobalObject: public LTemplateInstruction<1, 1, 0> { public: explicit LGlobalObject(LOperand* context) { @@ -1909,24 +1917,24 @@ class LClampTToUint8: public LTemplateInstruction<1, 1, 1> { }; -class LArrayLiteral: public LTemplateInstruction<1, 0, 0> { +class LFastLiteral: public LTemplateInstruction<1, 0, 0> { public: - DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral, "array-literal") - DECLARE_HYDROGEN_ACCESSOR(ArrayLiteral) + 
DECLARE_CONCRETE_INSTRUCTION(FastLiteral, "fast-literal")
+  DECLARE_HYDROGEN_ACCESSOR(FastLiteral)
 };


-class LObjectLiteralFast: public LTemplateInstruction<1, 0, 0> {
+class LArrayLiteral: public LTemplateInstruction<1, 0, 0> {
  public:
-  DECLARE_CONCRETE_INSTRUCTION(ObjectLiteralFast, "object-literal-fast")
-  DECLARE_HYDROGEN_ACCESSOR(ObjectLiteralFast)
+  DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral, "array-literal")
+  DECLARE_HYDROGEN_ACCESSOR(ArrayLiteral)
 };


-class LObjectLiteralGeneric: public LTemplateInstruction<1, 0, 0> {
+class LObjectLiteral: public LTemplateInstruction<1, 0, 0> {
  public:
-  DECLARE_CONCRETE_INSTRUCTION(ObjectLiteralGeneric, "object-literal-generic")
-  DECLARE_HYDROGEN_ACCESSOR(ObjectLiteralGeneric)
+  DECLARE_CONCRETE_INSTRUCTION(ObjectLiteral, "object-literal")
+  DECLARE_HYDROGEN_ACCESSOR(ObjectLiteral)
 };
diff --git a/deps/v8/src/mips/macro-assembler-mips.cc b/deps/v8/src/mips/macro-assembler-mips.cc
index f4e043a7b2..7a733bca5b 100644
--- a/deps/v8/src/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/mips/macro-assembler-mips.cc
@@ -2679,8 +2679,7 @@ void MacroAssembler::Throw(Register value) {
 }


-void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type,
-                                      Register value) {
+void MacroAssembler::ThrowUncatchable(Register value) {
   // Adjust this code if not the case.
   STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
@@ -2690,24 +2689,9 @@ void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type,
   STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);

   // The exception is expected in v0.
-  if (type == OUT_OF_MEMORY) {
-    // Set external caught exception to false.
-    ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
-                                      isolate());
-    li(a0, Operand(false, RelocInfo::NONE));
-    li(a2, Operand(external_caught));
-    sw(a0, MemOperand(a2));
-
-    // Set pending exception and v0 to out of memory exception.
-    Failure* out_of_memory = Failure::OutOfMemoryException();
-    li(v0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
-    li(a2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
-                                     isolate())));
-    sw(v0, MemOperand(a2));
-  } else if (!value.is(v0)) {
+  if (!value.is(v0)) {
     mov(v0, value);
   }
-
   // Drop the stack pointer to the top of the top stack handler.
   li(a3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
   lw(sp, MemOperand(a3));
diff --git a/deps/v8/src/mips/macro-assembler-mips.h b/deps/v8/src/mips/macro-assembler-mips.h
index 69b3f9d63a..56a3433b86 100644
--- a/deps/v8/src/mips/macro-assembler-mips.h
+++ b/deps/v8/src/mips/macro-assembler-mips.h
@@ -871,12 +871,12 @@ class MacroAssembler: public Assembler {
   // Must preserve the result register.
   void PopTryHandler();

-  // Passes thrown value (in v0) to the handler of top of the try handler chain.
+  // Passes thrown value to the handler at the top of the try handler chain.
   void Throw(Register value);

   // Propagates an uncatchable exception to the top of the current JS stack's
   // handler chain.
-  void ThrowUncatchable(UncatchableExceptionType type, Register value);
+  void ThrowUncatchable(Register value);

   // Copies a fixed number of fields of heap objects from src to dst.
void CopyFields(Register dst, Register src, RegList temps, int field_count); diff --git a/deps/v8/src/mips/simulator-mips.cc b/deps/v8/src/mips/simulator-mips.cc index a158f045f5..f31ce7ea48 100644 --- a/deps/v8/src/mips/simulator-mips.cc +++ b/deps/v8/src/mips/simulator-mips.cc @@ -1369,9 +1369,9 @@ void Simulator::WriteB(int32_t addr, int8_t value) { // Returns the limit of the stack area to enable checking for stack overflows. uintptr_t Simulator::StackLimit() const { - // Leave a safety margin of 512 bytes to prevent overrunning the stack when + // Leave a safety margin of 1024 bytes to prevent overrunning the stack when // pushing values. - return reinterpret_cast<uintptr_t>(stack_) + 512; + return reinterpret_cast<uintptr_t>(stack_) + 1024; } diff --git a/deps/v8/src/mips/stub-cache-mips.cc b/deps/v8/src/mips/stub-cache-mips.cc index ae563069f8..b9ab2422ae 100644 --- a/deps/v8/src/mips/stub-cache-mips.cc +++ b/deps/v8/src/mips/stub-cache-mips.cc @@ -3058,7 +3058,7 @@ Handle<Code> KeyedStoreStubCompiler::CompileStoreElement( ElementsKind elements_kind = receiver_map->elements_kind(); bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE; Handle<Code> stub = - KeyedStoreElementStub(is_js_array, elements_kind).GetCode(); + KeyedStoreElementStub(is_js_array, elements_kind, grow_mode_).GetCode(); __ DispatchMap(a2, a3, receiver_map, stub, DO_SMI_CHECK); @@ -4168,7 +4168,8 @@ void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement( void KeyedStoreStubCompiler::GenerateStoreFastElement( MacroAssembler* masm, bool is_js_array, - ElementsKind elements_kind) { + ElementsKind elements_kind, + KeyedAccessGrowMode grow_mode) { // ----------- S t a t e ------------- // -- a0 : value // -- a1 : key @@ -4177,15 +4178,17 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement( // -- a3 : scratch // -- a4 : scratch (elements) // ----------------------------------- - Label miss_force_generic, transition_elements_kind; + Label miss_force_generic, transition_elements_kind, grow, slow; + Label finish_store, check_capacity; Register value_reg = a0; Register key_reg = a1; Register receiver_reg = a2; - Register scratch = a3; - Register elements_reg = t0; - Register scratch2 = t1; - Register scratch3 = t2; + Register scratch = t0; + Register elements_reg = a3; + Register length_reg = t1; + Register scratch2 = t2; + Register scratch3 = t3; // This stub is meant to be tail-jumped to, the receiver must already // have been verified by the caller to not be a smi. @@ -4193,26 +4196,35 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement( // Check that the key is a smi. __ JumpIfNotSmi(key_reg, &miss_force_generic); - // Get the elements array and make sure it is a fast element array, not 'cow'. - __ lw(elements_reg, - FieldMemOperand(receiver_reg, JSObject::kElementsOffset)); - __ CheckMap(elements_reg, - scratch, - Heap::kFixedArrayMapRootIndex, - &miss_force_generic, - DONT_DO_SMI_CHECK); + if (elements_kind == FAST_SMI_ONLY_ELEMENTS) { + __ JumpIfNotSmi(value_reg, &transition_elements_kind); + } // Check that the key is within bounds. + __ lw(elements_reg, + FieldMemOperand(receiver_reg, JSObject::kElementsOffset)); if (is_js_array) { __ lw(scratch, FieldMemOperand(receiver_reg, JSArray::kLengthOffset)); } else { __ lw(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset)); } // Compare smis. 
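  // Both key and length are smis: the untagged value shifted left one bit
  // with a zero tag, so the tagged words compare in the same order as the
  // raw indices.  The unsigned 'hs' condition below additionally catches a
  // negative key, whose set sign bit makes it compare as a huge unsigned
  // value.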
- __ Branch(&miss_force_generic, hs, key_reg, Operand(scratch)); + if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) { + __ Branch(&grow, hs, key_reg, Operand(scratch)); + } else { + __ Branch(&miss_force_generic, hs, key_reg, Operand(scratch)); + } + + // Make sure elements is a fast element array, not 'cow'. + __ CheckMap(elements_reg, + scratch, + Heap::kFixedArrayMapRootIndex, + &miss_force_generic, + DONT_DO_SMI_CHECK); + + __ bind(&finish_store); if (elements_kind == FAST_SMI_ONLY_ELEMENTS) { - __ JumpIfNotSmi(value_reg, &transition_elements_kind); __ Addu(scratch, elements_reg, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); @@ -4249,12 +4261,79 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement( __ bind(&transition_elements_kind); Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss(); __ Jump(ic_miss, RelocInfo::CODE_TARGET); + + if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) { + // Grow the array by a single element if possible. + __ bind(&grow); + + // Make sure the array is only growing by a single element, anything else + // must be handled by the runtime. + __ Branch(&miss_force_generic, ne, key_reg, Operand(scratch)); + + // Check for the empty array, and preallocate a small backing store if + // possible. + __ lw(length_reg, + FieldMemOperand(receiver_reg, JSArray::kLengthOffset)); + __ lw(elements_reg, + FieldMemOperand(receiver_reg, JSObject::kElementsOffset)); + __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex); + __ Branch(&check_capacity, ne, elements_reg, Operand(at)); + + int size = FixedArray::SizeFor(JSArray::kPreallocatedArrayElements); + __ AllocateInNewSpace(size, elements_reg, scratch, scratch2, &slow, + TAG_OBJECT); + + __ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex); + __ sw(scratch, FieldMemOperand(elements_reg, JSObject::kMapOffset)); + __ li(scratch, Operand(Smi::FromInt(JSArray::kPreallocatedArrayElements))); + __ sw(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset)); + __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex); + for (int i = 1; i < JSArray::kPreallocatedArrayElements; ++i) { + __ sw(scratch, FieldMemOperand(elements_reg, FixedArray::SizeFor(i))); + } + + // Store the element at index zero. + __ sw(value_reg, FieldMemOperand(elements_reg, FixedArray::SizeFor(0))); + + // Install the new backing store in the JSArray. + __ sw(elements_reg, + FieldMemOperand(receiver_reg, JSObject::kElementsOffset)); + __ RecordWriteField(receiver_reg, JSObject::kElementsOffset, elements_reg, + scratch, kRAHasNotBeenSaved, kDontSaveFPRegs, + EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); + + // Increment the length of the array. + __ li(length_reg, Operand(Smi::FromInt(1))); + __ sw(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset)); + __ Ret(); + + __ bind(&check_capacity); + // Check for cow elements, in general they are not handled by this stub + __ CheckMap(elements_reg, + scratch, + Heap::kFixedCOWArrayMapRootIndex, + &miss_force_generic, + DONT_DO_SMI_CHECK); + + __ lw(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset)); + __ Branch(&slow, hs, length_reg, Operand(scratch)); + + // Grow the array and finish the store. 
+    __ Addu(length_reg, length_reg, Operand(Smi::FromInt(1)));
+    __ sw(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
+    __ jmp(&finish_store);
+
+    __ bind(&slow);
+    Handle<Code> ic_slow = masm->isolate()->builtins()->KeyedStoreIC_Slow();
+    __ Jump(ic_slow, RelocInfo::CODE_TARGET);
+  }
 }


 void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
     MacroAssembler* masm,
-    bool is_js_array) {
+    bool is_js_array,
+    KeyedAccessGrowMode grow_mode) {
   // ----------- S t a t e -------------
   //  -- a0    : value
   //  -- a1    : key
   //  -- a2    : receiver
   //  -- ra    : return address
   //  -- a3    : scratch
   //  -- t0    : scratch (elements_reg)
   //  -- t1    : scratch (mantissa_reg)
   //  -- t2    : scratch (exponent_reg)
   //  -- t3    : scratch4
   // -----------------------------------
-  Label miss_force_generic, transition_elements_kind;
+  Label miss_force_generic, transition_elements_kind, grow, slow;
+  Label finish_store, check_capacity;

   Register value_reg = a0;
   Register key_reg = a1;
   Register receiver_reg = a2;
   Register elements_reg = a3;
   Register scratch1 = t0;
   Register scratch2 = t1;
   Register scratch3 = t2;
   Register scratch4 = t3;
+  Register length_reg = t3;

   // This stub is meant to be tail-jumped to, the receiver must already
   // have been verified by the caller to not be a smi.
@@ -4293,7 +4374,13 @@
   }
   // Compare smis, unsigned compare catches both negative and out-of-bound
   // indexes.
-  __ Branch(&miss_force_generic, hs, key_reg, Operand(scratch1));
+  if (grow_mode == ALLOW_JSARRAY_GROWTH) {
+    __ Branch(&grow, hs, key_reg, Operand(scratch1));
+  } else {
+    __ Branch(&miss_force_generic, hs, key_reg, Operand(scratch1));
+  }
+
+  __ bind(&finish_store);

   __ StoreNumberToDoubleElements(value_reg,
                                  key_reg,
@@ -4317,6 +4404,71 @@
   __ bind(&transition_elements_kind);
   Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
   __ Jump(ic_miss, RelocInfo::CODE_TARGET);
+
+  if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
+    // Grow the array by a single element if possible.
+    __ bind(&grow);
+
+    // Make sure the array is only growing by a single element, anything else
+    // must be handled by the runtime.
+    __ Branch(&miss_force_generic, ne, key_reg, Operand(scratch1));
+
+    // Transition on values that can't be stored in a FixedDoubleArray.
+    Label value_is_smi;
+    __ JumpIfSmi(value_reg, &value_is_smi);
+    __ lw(scratch1, FieldMemOperand(value_reg, HeapObject::kMapOffset));
+    __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+    __ Branch(&transition_elements_kind, ne, scratch1, Operand(at));
+    __ bind(&value_is_smi);
+
+    // Check for the empty array, and preallocate a small backing store if
+    // possible.
+    __ lw(length_reg,
+          FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
+    __ lw(elements_reg,
+          FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
+    __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
+    __ Branch(&check_capacity, ne, elements_reg, Operand(at));
+
+    int size = FixedDoubleArray::SizeFor(JSArray::kPreallocatedArrayElements);
+    __ AllocateInNewSpace(size, elements_reg, scratch1, scratch2, &slow,
+                          TAG_OBJECT);
+
+    // Initialize the new FixedDoubleArray. Leave elements uninitialized for
+    // efficiency; they are guaranteed to be initialized before use.
+ __ LoadRoot(scratch1, Heap::kFixedDoubleArrayMapRootIndex); + __ sw(scratch1, FieldMemOperand(elements_reg, JSObject::kMapOffset)); + __ li(scratch1, Operand(Smi::FromInt(JSArray::kPreallocatedArrayElements))); + __ sw(scratch1, + FieldMemOperand(elements_reg, FixedDoubleArray::kLengthOffset)); + + // Install the new backing store in the JSArray. + __ sw(elements_reg, + FieldMemOperand(receiver_reg, JSObject::kElementsOffset)); + __ RecordWriteField(receiver_reg, JSObject::kElementsOffset, elements_reg, + scratch1, kRAHasNotBeenSaved, kDontSaveFPRegs, + EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); + + // Increment the length of the array. + __ li(length_reg, Operand(Smi::FromInt(1))); + __ sw(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset)); + __ jmp(&finish_store); + + __ bind(&check_capacity); + // Make sure that the backing store can hold additional elements. + __ lw(scratch1, + FieldMemOperand(elements_reg, FixedDoubleArray::kLengthOffset)); + __ Branch(&slow, hs, length_reg, Operand(scratch1)); + + // Grow the array and finish the store. + __ Addu(length_reg, length_reg, Operand(Smi::FromInt(1))); + __ sw(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset)); + __ jmp(&finish_store); + + __ bind(&slow); + Handle<Code> ic_slow = masm->isolate()->builtins()->KeyedStoreIC_Slow(); + __ Jump(ic_slow, RelocInfo::CODE_TARGET); + } } diff --git a/deps/v8/src/mirror-debugger.js b/deps/v8/src/mirror-debugger.js index 0944b719f1..168a12d3aa 100644 --- a/deps/v8/src/mirror-debugger.js +++ b/deps/v8/src/mirror-debugger.js @@ -144,32 +144,32 @@ function inherits(ctor, superCtor) { // Type names of the different mirrors. -const UNDEFINED_TYPE = 'undefined'; -const NULL_TYPE = 'null'; -const BOOLEAN_TYPE = 'boolean'; -const NUMBER_TYPE = 'number'; -const STRING_TYPE = 'string'; -const OBJECT_TYPE = 'object'; -const FUNCTION_TYPE = 'function'; -const REGEXP_TYPE = 'regexp'; -const ERROR_TYPE = 'error'; -const PROPERTY_TYPE = 'property'; -const FRAME_TYPE = 'frame'; -const SCRIPT_TYPE = 'script'; -const CONTEXT_TYPE = 'context'; -const SCOPE_TYPE = 'scope'; +var UNDEFINED_TYPE = 'undefined'; +var NULL_TYPE = 'null'; +var BOOLEAN_TYPE = 'boolean'; +var NUMBER_TYPE = 'number'; +var STRING_TYPE = 'string'; +var OBJECT_TYPE = 'object'; +var FUNCTION_TYPE = 'function'; +var REGEXP_TYPE = 'regexp'; +var ERROR_TYPE = 'error'; +var PROPERTY_TYPE = 'property'; +var FRAME_TYPE = 'frame'; +var SCRIPT_TYPE = 'script'; +var CONTEXT_TYPE = 'context'; +var SCOPE_TYPE = 'scope'; // Maximum length when sending strings through the JSON protocol. -const kMaxProtocolStringLength = 80; +var kMaxProtocolStringLength = 80; // Different kind of properties. -PropertyKind = {}; +var PropertyKind = {}; PropertyKind.Named = 1; PropertyKind.Indexed = 2; // A copy of the PropertyType enum from global.h -PropertyType = {}; +var PropertyType = {}; PropertyType.Normal = 0; PropertyType.Field = 1; PropertyType.ConstantFunction = 2; @@ -183,7 +183,7 @@ PropertyType.NullDescriptor = 9; // Different attributes for a property. -PropertyAttribute = {}; +var PropertyAttribute = {}; PropertyAttribute.None = NONE; PropertyAttribute.ReadOnly = READ_ONLY; PropertyAttribute.DontEnum = DONT_ENUM; @@ -191,12 +191,12 @@ PropertyAttribute.DontDelete = DONT_DELETE; // A copy of the scope types from runtime.cc. 
-ScopeType = { Global: 0,
-              Local: 1,
-              With: 2,
-              Closure: 3,
-              Catch: 4,
-              Block: 5 };
+var ScopeType = { Global: 0,
+                  Local: 1,
+                  With: 2,
+                  Closure: 3,
+                  Catch: 4,
+                  Block: 5 };


 // Mirror hierarchy:
@@ -1237,24 +1237,24 @@ PropertyMirror.prototype.isNative = function() {
 };


-const kFrameDetailsFrameIdIndex = 0;
-const kFrameDetailsReceiverIndex = 1;
-const kFrameDetailsFunctionIndex = 2;
-const kFrameDetailsArgumentCountIndex = 3;
-const kFrameDetailsLocalCountIndex = 4;
-const kFrameDetailsSourcePositionIndex = 5;
-const kFrameDetailsConstructCallIndex = 6;
-const kFrameDetailsAtReturnIndex = 7;
-const kFrameDetailsFlagsIndex = 8;
-const kFrameDetailsFirstDynamicIndex = 9;
+var kFrameDetailsFrameIdIndex = 0;
+var kFrameDetailsReceiverIndex = 1;
+var kFrameDetailsFunctionIndex = 2;
+var kFrameDetailsArgumentCountIndex = 3;
+var kFrameDetailsLocalCountIndex = 4;
+var kFrameDetailsSourcePositionIndex = 5;
+var kFrameDetailsConstructCallIndex = 6;
+var kFrameDetailsAtReturnIndex = 7;
+var kFrameDetailsFlagsIndex = 8;
+var kFrameDetailsFirstDynamicIndex = 9;

-const kFrameDetailsNameIndex = 0;
-const kFrameDetailsValueIndex = 1;
-const kFrameDetailsNameValueSize = 2;
+var kFrameDetailsNameIndex = 0;
+var kFrameDetailsValueIndex = 1;
+var kFrameDetailsNameValueSize = 2;

-const kFrameDetailsFlagDebuggerFrameMask = 1 << 0;
-const kFrameDetailsFlagOptimizedFrameMask = 1 << 1;
-const kFrameDetailsFlagInlinedFrameIndexMask = 7 << 2;
+var kFrameDetailsFlagDebuggerFrameMask = 1 << 0;
+var kFrameDetailsFlagOptimizedFrameMask = 1 << 1;
+var kFrameDetailsFlagInlinedFrameIndexMask = 7 << 2;

 /**
  * Wrapper for the frame details information retrieved from the VM. The frame
@@ -1732,8 +1732,8 @@ FrameMirror.prototype.toText = function(opt_locals) {
 };


-const kScopeDetailsTypeIndex = 0;
-const kScopeDetailsObjectIndex = 1;
+var kScopeDetailsTypeIndex = 0;
+var kScopeDetailsObjectIndex = 1;

 function ScopeDetails(frame, index) {
   this.break_id_ = frame.break_id_;
diff --git a/deps/v8/src/objects-debug.cc b/deps/v8/src/objects-debug.cc
index 3a667a4398..6f009970da 100644
--- a/deps/v8/src/objects-debug.cc
+++ b/deps/v8/src/objects-debug.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -280,7 +280,9 @@ void JSObject::JSObjectVerify() { (map()->inobject_properties() + properties()->length() - map()->NextFreePropertyIndex())); } - ASSERT_EQ((map()->has_fast_elements() || map()->has_fast_smi_only_elements()), + ASSERT_EQ((map()->has_fast_elements() || + map()->has_fast_smi_only_elements() || + (elements() == GetHeap()->empty_fixed_array())), (elements()->map() == GetHeap()->fixed_array_map() || elements()->map() == GetHeap()->fixed_cow_array_map())); ASSERT(map()->has_fast_elements() == HasFastElements()); @@ -324,6 +326,13 @@ void PolymorphicCodeCache::PolymorphicCodeCacheVerify() { } +void TypeFeedbackInfo::TypeFeedbackInfoVerify() { + VerifyObjectField(kIcTotalCountOffset); + VerifyObjectField(kIcWithTypeinfoCountOffset); + VerifyHeapPointer(type_feedback_cells()); +} + + void FixedArray::FixedArrayVerify() { for (int i = 0; i < length(); i++) { Object* e = get(i); diff --git a/deps/v8/src/objects-inl.h b/deps/v8/src/objects-inl.h index a5ea659b60..326c088e04 100644 --- a/deps/v8/src/objects-inl.h +++ b/deps/v8/src/objects-inl.h @@ -1339,11 +1339,12 @@ void JSObject::set_map_and_elements(Map* new_map, } } ASSERT((map()->has_fast_elements() || - map()->has_fast_smi_only_elements()) == + map()->has_fast_smi_only_elements() || + (value == GetHeap()->empty_fixed_array())) == (value->map() == GetHeap()->fixed_array_map() || value->map() == GetHeap()->fixed_cow_array_map())); - ASSERT(map()->has_fast_double_elements() == - value->IsFixedDoubleArray()); + ASSERT((value == GetHeap()->empty_fixed_array()) || + (map()->has_fast_double_elements() == value->IsFixedDoubleArray())); WRITE_FIELD(this, kElementsOffset, value); CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kElementsOffset, value, mode); } @@ -2052,16 +2053,6 @@ void DescriptorArray::Set(int descriptor_number, } -void DescriptorArray::CopyFrom(int index, - DescriptorArray* src, - int src_index, - const WhitenessWitness& witness) { - Descriptor desc; - src->Get(src_index, &desc); - Set(index, &desc, witness); -} - - void DescriptorArray::NoIncrementalWriteBarrierSwapDescriptors( int first, int second) { NoIncrementalWriteBarrierSwap(this, ToKeyIndex(first), ToKeyIndex(second)); @@ -3715,8 +3706,9 @@ BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, kNameShouldPrintAsAnonymous) BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, bound, kBoundFunction) BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_anonymous, kIsAnonymous) -BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, dont_crankshaft, - kDontCrankshaft) +BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_function, kIsFunction) +BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, dont_optimize, + kDontOptimize) BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, dont_inline, kDontInline) ACCESSORS(CodeCache, default_cache, FixedArray, kDefaultCacheOffset) @@ -3946,13 +3938,17 @@ MaybeObject* JSFunction::set_initial_map_and_cache_transitions( Map* new_double_map = NULL; if (!maybe_map->To<Map>(&new_double_map)) return maybe_map; new_double_map->set_elements_kind(FAST_DOUBLE_ELEMENTS); - initial_map->AddElementsTransition(FAST_DOUBLE_ELEMENTS, new_double_map); + maybe_map = initial_map->AddElementsTransition(FAST_DOUBLE_ELEMENTS, + new_double_map); + if (maybe_map->IsFailure()) return maybe_map; maybe_map = new_double_map->CopyDropTransitions(); Map* new_object_map = NULL; if (!maybe_map->To<Map>(&new_object_map)) 
return maybe_map; new_object_map->set_elements_kind(FAST_ELEMENTS); - new_double_map->AddElementsTransition(FAST_ELEMENTS, new_object_map); + maybe_map = new_double_map->AddElementsTransition(FAST_ELEMENTS, + new_object_map); + if (maybe_map->IsFailure()) return maybe_map; global_context->set_smi_js_array_map(initial_map); global_context->set_double_js_array_map(new_double_map); @@ -4127,8 +4123,7 @@ INT_ACCESSORS(Code, instruction_size, kInstructionSizeOffset) ACCESSORS(Code, relocation_info, ByteArray, kRelocationInfoOffset) ACCESSORS(Code, handler_table, FixedArray, kHandlerTableOffset) ACCESSORS(Code, deoptimization_data, FixedArray, kDeoptimizationDataOffset) -ACCESSORS(Code, type_feedback_cells, TypeFeedbackCells, - kTypeFeedbackCellsOffset) +ACCESSORS(Code, type_feedback_info, Object, kTypeFeedbackInfoOffset) ACCESSORS(Code, gc_metadata, Object, kGCMetadataOffset) @@ -4804,6 +4799,13 @@ Object* TypeFeedbackCells::RawUninitializedSentinel(Heap* heap) { } +SMI_ACCESSORS(TypeFeedbackInfo, ic_total_count, kIcTotalCountOffset) +SMI_ACCESSORS(TypeFeedbackInfo, ic_with_typeinfo_count, + kIcWithTypeinfoCountOffset) +ACCESSORS(TypeFeedbackInfo, type_feedback_cells, TypeFeedbackCells, + kTypeFeedbackCellsOffset) + + Relocatable::Relocatable(Isolate* isolate) { ASSERT(isolate == Isolate::Current()); isolate_ = isolate; diff --git a/deps/v8/src/objects-printer.cc b/deps/v8/src/objects-printer.cc index 67adad6919..d6e8920838 100644 --- a/deps/v8/src/objects-printer.cc +++ b/deps/v8/src/objects-printer.cc @@ -554,6 +554,15 @@ void PolymorphicCodeCache::PolymorphicCodeCachePrint(FILE* out) { } +void TypeFeedbackInfo::TypeFeedbackInfoPrint(FILE* out) { + HeapObject::PrintHeader(out, "TypeFeedbackInfo"); + PrintF(out, "\n - ic_total_count: %d, ic_with_typeinfo_count: %d", + ic_total_count(), ic_with_typeinfo_count()); + PrintF(out, "\n - type_feedback_cells: "); + type_feedback_cells()->FixedArrayPrint(out); +} + + void FixedArray::FixedArrayPrint(FILE* out) { HeapObject::PrintHeader(out, "FixedArray"); PrintF(out, " - length: %d", length()); diff --git a/deps/v8/src/objects-visiting-inl.h b/deps/v8/src/objects-visiting-inl.h index 880b44b50e..627d1bc2ef 100644 --- a/deps/v8/src/objects-visiting-inl.h +++ b/deps/v8/src/objects-visiting-inl.h @@ -109,7 +109,7 @@ void Code::CodeIterateBody(ObjectVisitor* v) { IteratePointer(v, kRelocationInfoOffset); IteratePointer(v, kHandlerTableOffset); IteratePointer(v, kDeoptimizationDataOffset); - IteratePointer(v, kTypeFeedbackCellsOffset); + IteratePointer(v, kTypeFeedbackInfoOffset); RelocIterator it(this, mode_mask); for (; !it.done(); it.next()) { @@ -141,7 +141,7 @@ void Code::CodeIterateBody(Heap* heap) { reinterpret_cast<Object**>(this->address() + kDeoptimizationDataOffset)); StaticVisitor::VisitPointer( heap, - reinterpret_cast<Object**>(this->address() + kTypeFeedbackCellsOffset)); + reinterpret_cast<Object**>(this->address() + kTypeFeedbackInfoOffset)); RelocIterator it(this, mode_mask); for (; !it.done(); it.next()) { diff --git a/deps/v8/src/objects.cc b/deps/v8/src/objects.cc index 284b631b13..85ba646005 100644 --- a/deps/v8/src/objects.cc +++ b/deps/v8/src/objects.cc @@ -1756,7 +1756,7 @@ MaybeObject* JSObject::AddProperty(String* name, Heap* heap = GetHeap(); if (!map_of_this->is_extensible()) { if (strict_mode == kNonStrictMode) { - return heap->undefined_value(); + return value; } else { Handle<Object> args[1] = {Handle<String>(name)}; return heap->isolate()->Throw( @@ -3379,12 +3379,10 @@ MaybeObject* 
JSObject::NormalizeProperties(PropertyNormalizationMode mode, } else { property_count += 2; // Make space for two more properties. } - Object* obj; - { MaybeObject* maybe_obj = - StringDictionary::Allocate(property_count); - if (!maybe_obj->ToObject(&obj)) return maybe_obj; + StringDictionary* dictionary; + { MaybeObject* maybe_dictionary = StringDictionary::Allocate(property_count); + if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary; } - StringDictionary* dictionary = StringDictionary::cast(obj); DescriptorArray* descs = map_of_this->instance_descriptors(); for (int i = 0; i < descs->number_of_descriptors(); i++) { @@ -3394,36 +3392,31 @@ MaybeObject* JSObject::NormalizeProperties(PropertyNormalizationMode mode, PropertyDetails d = PropertyDetails(details.attributes(), NORMAL, details.index()); Object* value = descs->GetConstantFunction(i); - Object* result; - { MaybeObject* maybe_result = - dictionary->Add(descs->GetKey(i), value, d); - if (!maybe_result->ToObject(&result)) return maybe_result; - } - dictionary = StringDictionary::cast(result); + MaybeObject* maybe_dictionary = + dictionary->Add(descs->GetKey(i), value, d); + if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary; break; } case FIELD: { PropertyDetails d = PropertyDetails(details.attributes(), NORMAL, details.index()); Object* value = FastPropertyAt(descs->GetFieldIndex(i)); - Object* result; - { MaybeObject* maybe_result = - dictionary->Add(descs->GetKey(i), value, d); - if (!maybe_result->ToObject(&result)) return maybe_result; - } - dictionary = StringDictionary::cast(result); + MaybeObject* maybe_dictionary = + dictionary->Add(descs->GetKey(i), value, d); + if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary; break; } case CALLBACKS: { - PropertyDetails d = - PropertyDetails(details.attributes(), CALLBACKS, details.index()); + if (!descs->IsProperty(i)) break; Object* value = descs->GetCallbacksObject(i); - Object* result; - { MaybeObject* maybe_result = - dictionary->Add(descs->GetKey(i), value, d); - if (!maybe_result->ToObject(&result)) return maybe_result; + if (value->IsAccessorPair()) { + MaybeObject* maybe_copy = + AccessorPair::cast(value)->CopyWithoutTransitions(); + if (!maybe_copy->To(&value)) return maybe_copy; } - dictionary = StringDictionary::cast(result); + MaybeObject* maybe_dictionary = + dictionary->Add(descs->GetKey(i), value, details); + if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary; break; } case MAP_TRANSITION: @@ -3445,12 +3438,12 @@ MaybeObject* JSObject::NormalizeProperties(PropertyNormalizationMode mode, int index = map_of_this->instance_descriptors()->NextEnumerationIndex(); dictionary->SetNextEnumerationIndex(index); - { MaybeObject* maybe_obj = + Map* new_map; + { MaybeObject* maybe_map = current_heap->isolate()->context()->global_context()-> normalized_map_cache()->Get(this, mode); - if (!maybe_obj->ToObject(&obj)) return maybe_obj; + if (!maybe_map->To(&new_map)) return maybe_map; } - Map* new_map = Map::cast(obj); // We have now successfully allocated all the necessary objects. // Changes can now be made with the guarantee that all of them take effect. @@ -4369,15 +4362,14 @@ void JSObject::LookupCallback(String* name, LookupResult* result) { } -// Search for a getter or setter in an elements dictionary and update its -// attributes. 
Returns either undefined if the element is non-deletable, or the -// getter/setter pair if there is an existing one, or the hole value if the -// element does not exist or is a normal non-getter/setter data element. -static Object* UpdateGetterSetterInDictionary( +// Try to update an accessor in an elements dictionary. Return true if the +// update succeeded, and false otherwise. +static bool UpdateGetterSetterInDictionary( SeededNumberDictionary* dictionary, uint32_t index, - PropertyAttributes attributes, - Heap* heap) { + bool is_getter, + Object* fun, + PropertyAttributes attributes) { int entry = dictionary->FindEntry(index); if (entry != SeededNumberDictionary::kNotFound) { Object* result = dictionary->ValueAt(entry); @@ -4389,108 +4381,116 @@ static Object* UpdateGetterSetterInDictionary( dictionary->DetailsAtPut(entry, PropertyDetails(attributes, CALLBACKS, index)); } - return result; + AccessorPair::cast(result)->set(is_getter, fun); + return true; } } - return heap->the_hole_value(); + return false; } -MaybeObject* JSObject::DefineGetterSetter(String* name, - PropertyAttributes attributes) { - Heap* heap = GetHeap(); - // Make sure that the top context does not change when doing callbacks or - // interceptor calls. - AssertNoContextChange ncc; - - // Try to flatten before operating on the string. - name->TryFlatten(); - - if (!CanSetCallback(name)) { - return heap->undefined_value(); - } - - uint32_t index = 0; - bool is_element = name->AsArrayIndex(&index); - - if (is_element) { - switch (GetElementsKind()) { - case FAST_SMI_ONLY_ELEMENTS: - case FAST_ELEMENTS: - case FAST_DOUBLE_ELEMENTS: - break; - case EXTERNAL_PIXEL_ELEMENTS: - case EXTERNAL_BYTE_ELEMENTS: - case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: - case EXTERNAL_SHORT_ELEMENTS: - case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: - case EXTERNAL_INT_ELEMENTS: - case EXTERNAL_UNSIGNED_INT_ELEMENTS: - case EXTERNAL_FLOAT_ELEMENTS: - case EXTERNAL_DOUBLE_ELEMENTS: - // Ignore getters and setters on pixel and external array - // elements. - return heap->undefined_value(); - case DICTIONARY_ELEMENTS: { - Object* probe = UpdateGetterSetterInDictionary(element_dictionary(), - index, - attributes, - heap); - if (!probe->IsTheHole()) return probe; - // Otherwise allow to override it. - break; +MaybeObject* JSObject::DefineElementAccessor(uint32_t index, + bool is_getter, + Object* fun, + PropertyAttributes attributes) { + switch (GetElementsKind()) { + case FAST_SMI_ONLY_ELEMENTS: + case FAST_ELEMENTS: + case FAST_DOUBLE_ELEMENTS: + break; + case EXTERNAL_PIXEL_ELEMENTS: + case EXTERNAL_BYTE_ELEMENTS: + case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: + case EXTERNAL_SHORT_ELEMENTS: + case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: + case EXTERNAL_INT_ELEMENTS: + case EXTERNAL_UNSIGNED_INT_ELEMENTS: + case EXTERNAL_FLOAT_ELEMENTS: + case EXTERNAL_DOUBLE_ELEMENTS: + // Ignore getters and setters on pixel and external array elements. + return GetHeap()->undefined_value(); + case DICTIONARY_ELEMENTS: + if (UpdateGetterSetterInDictionary(element_dictionary(), + index, + is_getter, + fun, + attributes)) { + return GetHeap()->undefined_value(); } - case NON_STRICT_ARGUMENTS_ELEMENTS: { - // Ascertain whether we have read-only properties or an existing - // getter/setter pair in an arguments elements dictionary backing - // store. - FixedArray* parameter_map = FixedArray::cast(elements()); - uint32_t length = parameter_map->length(); - Object* probe = - index < (length - 2) ? 
parameter_map->get(index + 2) : NULL; - if (probe == NULL || probe->IsTheHole()) { - FixedArray* arguments = FixedArray::cast(parameter_map->get(1)); - if (arguments->IsDictionary()) { - SeededNumberDictionary* dictionary = - SeededNumberDictionary::cast(arguments); - probe = UpdateGetterSetterInDictionary(dictionary, - index, - attributes, - heap); - if (!probe->IsTheHole()) return probe; + break; + case NON_STRICT_ARGUMENTS_ELEMENTS: { + // Ascertain whether we have read-only properties or an existing + // getter/setter pair in an arguments elements dictionary backing + // store. + FixedArray* parameter_map = FixedArray::cast(elements()); + uint32_t length = parameter_map->length(); + Object* probe = + index < (length - 2) ? parameter_map->get(index + 2) : NULL; + if (probe == NULL || probe->IsTheHole()) { + FixedArray* arguments = FixedArray::cast(parameter_map->get(1)); + if (arguments->IsDictionary()) { + SeededNumberDictionary* dictionary = + SeededNumberDictionary::cast(arguments); + if (UpdateGetterSetterInDictionary(dictionary, + index, + is_getter, + fun, + attributes)) { + return GetHeap()->undefined_value(); } } - break; } + break; } - } else { - // Lookup the name. - LookupResult result(heap->isolate()); - LocalLookupRealNamedProperty(name, &result); - if (result.IsFound()) { - // TODO(mstarzinger): We should check for result.IsDontDelete() here once - // we only call into the runtime once to set both getter and setter. - if (result.type() == CALLBACKS) { - Object* obj = result.GetCallbackObject(); - // Need to preserve old getters/setters. - if (obj->IsAccessorPair()) { - // Use set to update attributes. - return SetPropertyCallback(name, obj, attributes); + } + + AccessorPair* accessors; + { MaybeObject* maybe_accessors = GetHeap()->AllocateAccessorPair(); + if (!maybe_accessors->To(&accessors)) return maybe_accessors; + } + accessors->set(is_getter, fun); + + { MaybeObject* maybe_ok = SetElementCallback(index, accessors, attributes); + if (maybe_ok->IsFailure()) return maybe_ok; + } + return GetHeap()->undefined_value(); +} + + +MaybeObject* JSObject::DefinePropertyAccessor(String* name, + bool is_getter, + Object* fun, + PropertyAttributes attributes) { + // Lookup the name. + LookupResult result(GetHeap()->isolate()); + LocalLookupRealNamedProperty(name, &result); + if (result.IsFound()) { + // TODO(mstarzinger): We should check for result.IsDontDelete() here once + // we only call into the runtime once to set both getter and setter. + if (result.type() == CALLBACKS) { + Object* obj = result.GetCallbackObject(); + // Need to preserve old getters/setters. + if (obj->IsAccessorPair()) { + AccessorPair::cast(obj)->set(is_getter, fun); + // Use set to update attributes. 
+ { MaybeObject* maybe_ok = SetPropertyCallback(name, obj, attributes); + if (maybe_ok->IsFailure()) return maybe_ok; } + return GetHeap()->undefined_value(); } } } AccessorPair* accessors; - { MaybeObject* maybe_accessors = heap->AllocateAccessorPair(); - if (!maybe_accessors->To<AccessorPair>(&accessors)) return maybe_accessors; + { MaybeObject* maybe_accessors = GetHeap()->AllocateAccessorPair(); + if (!maybe_accessors->To(&accessors)) return maybe_accessors; } + accessors->set(is_getter, fun); - if (is_element) { - return SetElementCallback(index, accessors, attributes); - } else { - return SetPropertyCallback(name, accessors, attributes); + { MaybeObject* maybe_ok = SetPropertyCallback(name, accessors, attributes); + if (maybe_ok->IsFailure()) return maybe_ok; } + return GetHeap()->undefined_value(); } @@ -4524,19 +4524,15 @@ MaybeObject* JSObject::SetElementCallback(uint32_t index, PropertyDetails details = PropertyDetails(attributes, CALLBACKS); // Normalize elements to make this operation simple. - SeededNumberDictionary* dictionary = NULL; - { Object* result; - MaybeObject* maybe = NormalizeElements(); - if (!maybe->ToObject(&result)) return maybe; - dictionary = SeededNumberDictionary::cast(result); + SeededNumberDictionary* dictionary; + { MaybeObject* maybe_dictionary = NormalizeElements(); + if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary; } ASSERT(HasDictionaryElements() || HasDictionaryArgumentsElements()); // Update the dictionary with the new CALLBACKS property. - { Object* result; - MaybeObject* maybe = dictionary->Set(index, structure, details); - if (!maybe->ToObject(&result)) return maybe; - dictionary = SeededNumberDictionary::cast(result); + { MaybeObject* maybe_dictionary = dictionary->Set(index, structure, details); + if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary; } dictionary->set_requires_slow_elements(); @@ -4548,8 +4544,7 @@ MaybeObject* JSObject::SetElementCallback(uint32_t index, // switch to a direct backing store without the parameter map. This // would allow GC of the context. FixedArray* parameter_map = FixedArray::cast(elements()); - uint32_t length = parameter_map->length(); - if (index < length - 2) { + if (index < static_cast<uint32_t>(parameter_map->length()) - 2) { parameter_map->set(index + 2, GetHeap()->the_hole_value()); } parameter_map->set(1, dictionary); @@ -4557,7 +4552,7 @@ MaybeObject* JSObject::SetElementCallback(uint32_t index, set_elements(dictionary); } - return structure; + return GetHeap()->undefined_value(); } @@ -4571,19 +4566,18 @@ MaybeObject* JSObject::SetPropertyCallback(String* name, < DescriptorArray::kMaxNumberOfDescriptors); // Normalize object to make this operation simple. - Object* ok; { MaybeObject* maybe_ok = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0); - if (!maybe_ok->ToObject(&ok)) return maybe_ok; + if (maybe_ok->IsFailure()) return maybe_ok; } // For the global object allocate a new map to invalidate the global inline // caches which have a global property cell reference directly in the code. if (IsGlobalObject()) { - Object* new_map; + Map* new_map; { MaybeObject* maybe_new_map = map()->CopyDropDescriptors(); - if (!maybe_new_map->ToObject(&new_map)) return maybe_new_map; + if (!maybe_new_map->To(&new_map)) return maybe_new_map; } - set_map(Map::cast(new_map)); + set_map(new_map); // When running crankshaft, changing the map is not enough. We // need to deoptimize all functions that rely on this global // object. 
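A pattern worth noting across these objects.cc hunks: call sites stop extracting an untyped Object* (ToObject plus a cast) and instead use the typed To(&out) extractor, handing the MaybeObject straight back to the caller whenever IsFailure() reports an allocation failure. The sketch below illustrates that failure-propagation idiom in isolation; it is a minimal stand-in, not V8's real API, and Maybe, Dict, AllocateDict, and NormalizeProperties here are invented for illustration.

#include <cstdio>

// Invented stand-ins for V8's MaybeObject / StringDictionary: a fallible
// allocation either yields a valid pointer or a failure sentinel that the
// caller must hand back up the stack (typically so a GC can be retried).
struct Dict { int size; };

struct Maybe {
  Dict* value;      // meaningful only when is_failure is false
  bool is_failure;
  bool IsFailure() const { return is_failure; }
  // Typed extraction, loosely mirroring MaybeObject::To<T>(): on failure it
  // returns false so the caller can propagate the Maybe unchanged.
  bool To(Dict** out) const {
    if (is_failure) return false;
    *out = value;
    return true;
  }
};

Maybe AllocateDict(int size) {
  if (size > 1024) return Maybe{nullptr, true};  // simulated failed allocation
  return Maybe{new Dict{size}, false};
}

// The shape this commit refactors toward: extract a typed value or return
// the failure immediately, instead of casting an unchecked Object*.
Maybe NormalizeProperties(int property_count) {
  Dict* dictionary;
  { Maybe maybe_dictionary = AllocateDict(property_count);
    if (!maybe_dictionary.To(&dictionary)) return maybe_dictionary;
  }
  dictionary->size += 2;  // ... use the dictionary ...
  return Maybe{dictionary, false};
}

int main() {
  Maybe ok = NormalizeProperties(8);
  std::printf("ok: failure=%d size=%d\n", ok.IsFailure(), ok.value->size);
  delete ok.value;
  Maybe failed = NormalizeProperties(4096);
  std::printf("failed: failure=%d\n", failed.IsFailure());
  return 0;
}

The braces around each extraction mirror the V8 style in the hunks above and keep each intermediate Maybe tightly scoped.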
@@ -4591,17 +4585,15 @@ MaybeObject* JSObject::SetPropertyCallback(String* name, } // Update the dictionary with the new CALLBACKS property. - Object* result; - { MaybeObject* maybe_result = SetNormalizedProperty(name, structure, details); - if (!maybe_result->ToObject(&result)) return maybe_result; + { MaybeObject* maybe_ok = SetNormalizedProperty(name, structure, details); + if (maybe_ok->IsFailure()) return maybe_ok; } if (convert_back_to_fast) { - { MaybeObject* maybe_ok = TransformToFastProperties(0); - if (!maybe_ok->ToObject(&ok)) return maybe_ok; - } + MaybeObject* maybe_ok = TransformToFastProperties(0); + if (maybe_ok->IsFailure()) return maybe_ok; } - return result; + return GetHeap()->undefined_value(); } MaybeObject* JSObject::DefineAccessor(String* name, @@ -4625,17 +4617,19 @@ MaybeObject* JSObject::DefineAccessor(String* name, fun, attributes); } - Object* accessors; - { MaybeObject* maybe_accessors = DefineGetterSetter(name, attributes); - if (!maybe_accessors->To<Object>(&accessors)) return maybe_accessors; - } - if (accessors->IsUndefined()) return accessors; - if (is_getter) { - AccessorPair::cast(accessors)->set_getter(fun); - } else { - AccessorPair::cast(accessors)->set_setter(fun); - } - return this; + // Make sure that the top context does not change when doing callbacks or + // interceptor calls. + AssertNoContextChange ncc; + + // Try to flatten before operating on the string. + name->TryFlatten(); + + if (!CanSetCallback(name)) return isolate->heap()->undefined_value(); + + uint32_t index = 0; + return name->AsArrayIndex(&index) ? + DefineElementAccessor(index, is_getter, fun, attributes) : + DefinePropertyAccessor(name, is_getter, fun, attributes); } @@ -4698,10 +4692,9 @@ MaybeObject* JSObject::DefineAccessor(AccessorInfo* info) { break; } - Object* ok; { MaybeObject* maybe_ok = SetElementCallback(index, info, info->property_attributes()); - if (!maybe_ok->ToObject(&ok)) return maybe_ok; + if (maybe_ok->IsFailure()) return maybe_ok; } } else { // Lookup the name. @@ -4712,10 +4705,9 @@ MaybeObject* JSObject::DefineAccessor(AccessorInfo* info) { if (result.IsProperty() && (result.IsReadOnly() || result.IsDontDelete())) { return isolate->heap()->undefined_value(); } - Object* ok; { MaybeObject* maybe_ok = SetPropertyCallback(name, info, info->property_attributes()); - if (!maybe_ok->ToObject(&ok)) return maybe_ok; + if (maybe_ok->IsFailure()) return maybe_ok; } } @@ -5713,15 +5705,21 @@ MaybeObject* DescriptorArray::Allocate(int number_of_descriptors) { void DescriptorArray::SetEnumCache(FixedArray* bridge_storage, - FixedArray* new_cache) { + FixedArray* new_cache, + Object* new_index_cache) { ASSERT(bridge_storage->length() >= kEnumCacheBridgeLength); + ASSERT(new_index_cache->IsSmi() || new_index_cache->IsFixedArray()); if (HasEnumCache()) { FixedArray::cast(get(kEnumerationIndexIndex))-> set(kEnumCacheBridgeCacheIndex, new_cache); + FixedArray::cast(get(kEnumerationIndexIndex))-> + set(kEnumCacheBridgeIndicesCacheIndex, new_index_cache); } else { if (IsEmpty()) return; // Do nothing for empty descriptor array. 
    FixedArray::cast(bridge_storage)->
        set(kEnumCacheBridgeCacheIndex, new_cache);
+    FixedArray::cast(bridge_storage)->
+        set(kEnumCacheBridgeIndicesCacheIndex, new_index_cache);
    NoWriteBarrierSet(FixedArray::cast(bridge_storage),
                      kEnumCacheBridgeEnumIndex,
                      get(kEnumerationIndexIndex));
@@ -5735,6 +5733,33 @@ static bool InsertionPointFound(String* key1, String* key2) {
 }


+void DescriptorArray::CopyFrom(Handle<DescriptorArray> dst,
+                               int dst_index,
+                               Handle<DescriptorArray> src,
+                               int src_index,
+                               const WhitenessWitness& witness) {
+  CALL_HEAP_FUNCTION_VOID(dst->GetIsolate(),
+                          dst->CopyFrom(dst_index, *src, src_index, witness));
+}
+
+
+MaybeObject* DescriptorArray::CopyFrom(int dst_index,
+                                       DescriptorArray* src,
+                                       int src_index,
+                                       const WhitenessWitness& witness) {
+  Object* value = src->GetValue(src_index);
+  PropertyDetails details(src->GetDetails(src_index));
+  if (details.type() == CALLBACKS && value->IsAccessorPair()) {
+    MaybeObject* maybe_copy =
+        AccessorPair::cast(value)->CopyWithoutTransitions();
+    if (!maybe_copy->To(&value)) return maybe_copy;
+  }
+  Descriptor desc(src->GetKey(src_index), value, details);
+  Set(dst_index, &desc, witness);
+  return this;
+}
+
+
 MaybeObject* DescriptorArray::CopyInsert(Descriptor* descriptor,
                                          TransitionFlag transition_flag) {
   // Transitions are only kept when inserting another transition.
@@ -5818,7 +5843,9 @@ MaybeObject* DescriptorArray::CopyInsert(Descriptor* descriptor,
     } else {
       if (!(IsNullDescriptor(from_index) ||
            (remove_transitions && IsTransitionOnly(from_index)))) {
-        new_descriptors->CopyFrom(to_index++, this, from_index, witness);
+        MaybeObject* copy_result =
+            new_descriptors->CopyFrom(to_index++, this, from_index, witness);
+        if (copy_result->IsFailure()) return copy_result;
       }
       from_index++;
     }
@@ -5858,7 +5885,9 @@ MaybeObject* DescriptorArray::RemoveTransitions() {
   int next_descriptor = 0;
   for (int i = 0; i < number_of_descriptors(); i++) {
     if (IsProperty(i)) {
-      new_descriptors->CopyFrom(next_descriptor++, this, i, witness);
+      MaybeObject* copy_result =
+          new_descriptors->CopyFrom(next_descriptor++, this, i, witness);
+      if (copy_result->IsFailure()) return copy_result;
     }
   }
   ASSERT(next_descriptor == new_descriptors->number_of_descriptors());
@@ -5971,6 +6000,18 @@ int DescriptorArray::LinearSearch(String* name, int len) {
 }


+MaybeObject* AccessorPair::CopyWithoutTransitions() {
+  Heap* heap = GetHeap();
+  AccessorPair* copy;
+  { MaybeObject* maybe_copy = heap->AllocateAccessorPair();
+    if (!maybe_copy->To(&copy)) return maybe_copy;
+  }
+  copy->set_getter(getter()->IsMap() ? heap->the_hole_value() : getter());
+  copy->set_setter(setter()->IsMap() ? heap->the_hole_value() : setter());
+  return copy;
+}
+
+
 MaybeObject* DeoptimizationInputData::Allocate(int deopt_entry_count,
                                                PretenureFlag pretenure) {
   ASSERT(deopt_entry_count > 0);
@@ -7816,7 +7857,7 @@ void SharedFunctionInfo::EnableDeoptimizationSupport(Code* recompiled) {
 }


-void SharedFunctionInfo::DisableOptimization(JSFunction* function) {
+void SharedFunctionInfo::DisableOptimization() {
   // Disable optimization for the shared function info and mark the
   // code as non-optimizable. The marker on the shared function info
   // is there because we flush non-optimized code thereby losing the
@@ -7833,16 +7874,13 @@ void SharedFunctionInfo::DisableOptimization(JSFunction* function) {
   }
   if (FLAG_trace_opt) {
     PrintF("[disabled optimization for: ");
-    function->PrintName();
-    PrintF(" / %" V8PRIxPTR "]\n", reinterpret_cast<intptr_t>(function));
+    DebugName()->ShortPrint();
+    PrintF("]\n");
   }
 }


 bool SharedFunctionInfo::VerifyBailoutId(int id) {
-  // TODO(srdjan): debugging ARM crashes in hydrogen. OK to disable while
-  // we are always bailing out on ARM.
-  ASSERT(id != AstNode::kNoNumber);
   Code* unoptimized = code();
   DeoptimizationOutputData* data =
@@ -8647,23 +8685,25 @@ MaybeObject* JSObject::SetFastDoubleElementsCapacityAndLength(
   FixedArrayBase* old_elements = elements();
   ElementsKind elements_kind(GetElementsKind());
   AssertNoAllocation no_gc;
-  switch (elements_kind) {
-    case FAST_SMI_ONLY_ELEMENTS:
-    case FAST_ELEMENTS: {
-      elems->Initialize(FixedArray::cast(old_elements));
-      break;
-    }
-    case FAST_DOUBLE_ELEMENTS: {
-      elems->Initialize(FixedDoubleArray::cast(old_elements));
-      break;
-    }
-    case DICTIONARY_ELEMENTS: {
-      elems->Initialize(SeededNumberDictionary::cast(old_elements));
-      break;
+  if (old_elements->length() != 0) {
+    switch (elements_kind) {
+      case FAST_SMI_ONLY_ELEMENTS:
+      case FAST_ELEMENTS: {
+        elems->Initialize(FixedArray::cast(old_elements));
+        break;
+      }
+      case FAST_DOUBLE_ELEMENTS: {
+        elems->Initialize(FixedDoubleArray::cast(old_elements));
+        break;
+      }
+      case DICTIONARY_ELEMENTS: {
+        elems->Initialize(SeededNumberDictionary::cast(old_elements));
+        break;
+      }
+      default:
+        UNREACHABLE();
+        break;
    }
-    default:
-      UNREACHABLE();
-      break;
  }

  if (FLAG_trace_elements_transitions) {
@@ -9511,8 +9551,12 @@ MaybeObject* JSObject::SetDictionaryElement(uint32_t index,
     return SetElementWithCallback(element, index, value, this, strict_mode);
   } else {
     dictionary->UpdateMaxNumberKey(index);
-    // If put fails in strict mode, throw an exception.
-    if (!dictionary->ValueAtPut(entry, value) && strict_mode == kStrictMode) {
+    // If a value has not been initialized we allow writing to it even if it
+    // is read-only (a declared const that has not been initialized).
+    if (!dictionary->DetailsAt(entry).IsReadOnly() ||
+        dictionary->ValueAt(entry)->IsTheHole()) {
+      dictionary->ValueAtPut(entry, value);
+    } else if (strict_mode == kStrictMode) {
       Handle<Object> holder(this);
       Handle<Object> number = isolate->factory()->NewNumberFromUint(index);
       Handle<Object> args[2] = { number, holder };
@@ -9607,13 +9651,14 @@ MUST_USE_RESULT MaybeObject* JSObject::SetFastDoubleElement(
     bool check_prototype) {
   ASSERT(HasFastDoubleElements());

-  FixedDoubleArray* elms = FixedDoubleArray::cast(elements());
-  uint32_t elms_length = static_cast<uint32_t>(elms->length());
+  FixedArrayBase* base_elms = FixedArrayBase::cast(elements());
+  uint32_t elms_length = static_cast<uint32_t>(base_elms->length());

   // If storing to an element that isn't in the array, pass the store request
   // up the prototype chain before storing in the receiver's elements.
   if (check_prototype &&
-      (index >= elms_length || elms->is_the_hole(index))) {
+      (index >= elms_length ||
+       FixedDoubleArray::cast(base_elms)->is_the_hole(index))) {
     bool found;
     MaybeObject* result = SetElementWithCallbackSetterInPrototypes(index,
                                                                    value,
@@ -9648,6 +9693,7 @@ MUST_USE_RESULT MaybeObject* JSObject::SetFastDoubleElement(

   // Check whether there is extra space in the fixed array.
if (index < elms_length) { + FixedDoubleArray* elms = FixedDoubleArray::cast(elements()); elms->set(index, double_value); if (IsJSArray()) { // Update the length of the array if needed. @@ -11720,7 +11766,7 @@ MaybeObject* ExternalUnsignedIntArray::SetValue(uint32_t index, Object* value) { MaybeObject* ExternalFloatArray::SetValue(uint32_t index, Object* value) { - float cast_value = 0; + float cast_value = static_cast<float>(OS::nan_value()); Heap* heap = GetHeap(); if (index < static_cast<uint32_t>(length())) { if (value->IsSmi()) { @@ -11730,7 +11776,7 @@ MaybeObject* ExternalFloatArray::SetValue(uint32_t index, Object* value) { double double_value = HeapNumber::cast(value)->value(); cast_value = static_cast<float>(double_value); } else { - // Clamp undefined to zero (default). All other types have been + // Clamp undefined to NaN (default). All other types have been // converted to a number type further up in the call chain. ASSERT(value->IsUndefined()); } @@ -11741,7 +11787,7 @@ MaybeObject* ExternalFloatArray::SetValue(uint32_t index, Object* value) { MaybeObject* ExternalDoubleArray::SetValue(uint32_t index, Object* value) { - double double_value = 0; + double double_value = OS::nan_value(); Heap* heap = GetHeap(); if (index < static_cast<uint32_t>(length())) { if (value->IsSmi()) { @@ -11750,7 +11796,7 @@ MaybeObject* ExternalDoubleArray::SetValue(uint32_t index, Object* value) { } else if (value->IsHeapNumber()) { double_value = HeapNumber::cast(value)->value(); } else { - // Clamp undefined to zero (default). All other types have been + // Clamp undefined to NaN (default). All other types have been // converted to a number type further up in the call chain. ASSERT(value->IsUndefined()); } @@ -12631,6 +12677,11 @@ MaybeObject* StringDictionary::TransformPropertiesToFastFor( details.index()); descriptors->Set(next_descriptor++, &d, witness); } else if (type == CALLBACKS) { + if (value->IsAccessorPair()) { + MaybeObject* maybe_copy = + AccessorPair::cast(value)->CopyWithoutTransitions(); + if (!maybe_copy->To(&value)) return maybe_copy; + } CallbacksDescriptor d(String::cast(key), value, details.attributes(), diff --git a/deps/v8/src/objects.h b/deps/v8/src/objects.h index bc18bf8fab..d870ccecb2 100644 --- a/deps/v8/src/objects.h +++ b/deps/v8/src/objects.h @@ -168,6 +168,11 @@ enum CompareMapMode { ALLOW_ELEMENT_TRANSITION_MAPS }; +enum KeyedAccessGrowMode { + DO_NOT_ALLOW_JSARRAY_GROWTH, + ALLOW_JSARRAY_GROWTH +}; + const int kElementsKindCount = LAST_ELEMENTS_KIND - FIRST_ELEMENTS_KIND + 1; void PrintElementsKind(FILE* out, ElementsKind kind); @@ -434,7 +439,8 @@ const int kVariableSizeSentinel = 0; V(TYPE_SWITCH_INFO, TypeSwitchInfo, type_switch_info) \ V(SCRIPT, Script, script) \ V(CODE_CACHE, CodeCache, code_cache) \ - V(POLYMORPHIC_CODE_CACHE, PolymorphicCodeCache, polymorphic_code_cache) + V(POLYMORPHIC_CODE_CACHE, PolymorphicCodeCache, polymorphic_code_cache) \ + V(TYPE_FEEDBACK_INFO, TypeFeedbackInfo, type_feedback_info) #ifdef ENABLE_DEBUGGER_SUPPORT #define STRUCT_LIST_DEBUGGER(V) \ @@ -589,6 +595,7 @@ enum InstanceType { SCRIPT_TYPE, CODE_CACHE_TYPE, POLYMORPHIC_CODE_CACHE_TYPE, + TYPE_FEEDBACK_INFO_TYPE, // The following two instance types are only used when ENABLE_DEBUGGER_SUPPORT // is defined. 
However as include/v8.h contain some of the instance type // constants always having them avoids them getting different numbers @@ -923,10 +930,11 @@ class Object : public MaybeObject { JSReceiver* getter); static Handle<Object> GetElement(Handle<Object> object, uint32_t index); - inline MaybeObject* GetElement(uint32_t index); + MUST_USE_RESULT inline MaybeObject* GetElement(uint32_t index); // For use when we know that no exception can be thrown. inline Object* GetElementNoExceptionThrown(uint32_t index); - MaybeObject* GetElementWithReceiver(Object* receiver, uint32_t index); + MUST_USE_RESULT MaybeObject* GetElementWithReceiver(Object* receiver, + uint32_t index); // Return the object's prototype (might be Heap::null_value()). Object* GetPrototype(); @@ -1006,7 +1014,8 @@ class Smi: public Object { void SmiVerify(); #endif - static const int kMinValue = (-1 << (kSmiValueSize - 1)); + static const int kMinValue = + (static_cast<unsigned int>(-1)) << (kSmiValueSize - 1); static const int kMaxValue = -(kMinValue + 1); private: @@ -1598,22 +1607,23 @@ class JSObject: public JSReceiver { MUST_USE_RESULT MaybeObject* DefineAccessor(AccessorInfo* info); // Used from Object::GetProperty(). - MaybeObject* GetPropertyWithFailedAccessCheck( + MUST_USE_RESULT MaybeObject* GetPropertyWithFailedAccessCheck( Object* receiver, LookupResult* result, String* name, PropertyAttributes* attributes); - MaybeObject* GetPropertyWithInterceptor( + MUST_USE_RESULT MaybeObject* GetPropertyWithInterceptor( JSReceiver* receiver, String* name, PropertyAttributes* attributes); - MaybeObject* GetPropertyPostInterceptor( + MUST_USE_RESULT MaybeObject* GetPropertyPostInterceptor( + JSReceiver* receiver, + String* name, + PropertyAttributes* attributes); + MUST_USE_RESULT MaybeObject* GetLocalPropertyPostInterceptor( JSReceiver* receiver, String* name, PropertyAttributes* attributes); - MaybeObject* GetLocalPropertyPostInterceptor(JSReceiver* receiver, - String* name, - PropertyAttributes* attributes); // Returns true if this is an instance of an api function and has // been modified since it was created. May give false positives. @@ -1663,18 +1673,21 @@ class JSObject: public JSReceiver { inline void ValidateSmiOnlyElements(); // Makes sure that this object can contain HeapObject as elements. - inline MaybeObject* EnsureCanContainHeapObjectElements(); + MUST_USE_RESULT inline MaybeObject* EnsureCanContainHeapObjectElements(); // Makes sure that this object can contain the specified elements. - inline MaybeObject* EnsureCanContainElements(Object** elements, - uint32_t count, - EnsureElementsMode mode); - inline MaybeObject* EnsureCanContainElements(FixedArrayBase* elements, - EnsureElementsMode mode); - MaybeObject* EnsureCanContainElements(Arguments* arguments, - uint32_t first_arg, - uint32_t arg_count, - EnsureElementsMode mode); + MUST_USE_RESULT inline MaybeObject* EnsureCanContainElements( + Object** elements, + uint32_t count, + EnsureElementsMode mode); + MUST_USE_RESULT inline MaybeObject* EnsureCanContainElements( + FixedArrayBase* elements, + EnsureElementsMode mode); + MUST_USE_RESULT MaybeObject* EnsureCanContainElements( + Arguments* arguments, + uint32_t first_arg, + uint32_t arg_count, + EnsureElementsMode mode); // Do we want to keep the elements in fast case when increasing the // capacity? @@ -1757,7 +1770,8 @@ class JSObject: public JSReceiver { // Returns the index'th element. // The undefined object if index is out of bounds. 
- MaybeObject* GetElementWithInterceptor(Object* receiver, uint32_t index); + MUST_USE_RESULT MaybeObject* GetElementWithInterceptor(Object* receiver, + uint32_t index); enum SetFastElementsCapacityMode { kAllowSmiOnlyElements, @@ -2064,11 +2078,12 @@ class JSObject: public JSReceiver { Object* structure, uint32_t index, Object* holder); - MaybeObject* SetElementWithCallback(Object* structure, - uint32_t index, - Object* value, - JSObject* holder, - StrictModeFlag strict_mode); + MUST_USE_RESULT MaybeObject* SetElementWithCallback( + Object* structure, + uint32_t index, + Object* value, + JSObject* holder, + StrictModeFlag strict_mode); MUST_USE_RESULT MaybeObject* SetElementWithInterceptor( uint32_t index, Object* value, @@ -2124,10 +2139,16 @@ class JSObject: public JSReceiver { String* name, Object* structure, PropertyAttributes attributes); - MUST_USE_RESULT MaybeObject* DefineGetterSetter( + MUST_USE_RESULT MaybeObject* DefineElementAccessor( + uint32_t index, + bool is_getter, + Object* fun, + PropertyAttributes attributes); + MUST_USE_RESULT MaybeObject* DefinePropertyAccessor( String* name, + bool is_getter, + Object* fun, PropertyAttributes attributes); - void LookupInDescriptor(String* name, LookupResult* result); // Returns the hidden properties backing store object, currently @@ -2135,9 +2156,11 @@ class JSObject: public JSReceiver { // If no hidden properties object has been put on this object, // return undefined, unless create_if_absent is true, in which case // a new dictionary is created, added to this object, and returned. - MaybeObject* GetHiddenPropertiesDictionary(bool create_if_absent); + MUST_USE_RESULT MaybeObject* GetHiddenPropertiesDictionary( + bool create_if_absent); // Updates the existing hidden properties dictionary. - MaybeObject* SetHiddenPropertiesDictionary(StringDictionary* dictionary); + MUST_USE_RESULT MaybeObject* SetHiddenPropertiesDictionary( + StringDictionary* dictionary); DISALLOW_IMPLICIT_CONSTRUCTORS(JSObject); }; @@ -2284,7 +2307,7 @@ class FixedDoubleArray: public FixedArrayBase { // Setter and getter for elements. inline double get_scalar(int index); - inline MaybeObject* get(int index); + MUST_USE_RESULT inline MaybeObject* get(int index); inline void set(int index, double value); inline void set_the_hole(int index); @@ -2398,7 +2421,9 @@ class DescriptorArray: public FixedArray { // Initialize or change the enum cache, // using the supplied storage for the small "bridge". - void SetEnumCache(FixedArray* bridge_storage, FixedArray* new_cache); + void SetEnumCache(FixedArray* bridge_storage, + FixedArray* new_cache, + Object* new_index_cache); // Accessors for fetching instance descriptor at descriptor number. inline String* GetKey(int descriptor_number); @@ -2429,12 +2454,20 @@ class DescriptorArray: public FixedArray { Descriptor* desc, const WhitenessWitness&); - // Transfer complete descriptor from another descriptor array to - // this one. - inline void CopyFrom(int index, - DescriptorArray* src, + // Transfer a complete descriptor from the src descriptor array to the dst + // one, dropping map transitions in CALLBACKS. + static void CopyFrom(Handle<DescriptorArray> dst, + int dst_index, + Handle<DescriptorArray> src, int src_index, - const WhitenessWitness&); + const WhitenessWitness& witness); + + // Transfer a complete descriptor from the src descriptor array to this + // descriptor array, dropping map transitions in CALLBACKS. 
+ MUST_USE_RESULT MaybeObject* CopyFrom(int dst_index, + DescriptorArray* src, + int src_index, + const WhitenessWitness&); // Copy the descriptor array, insert a new descriptor and optionally // remove map transitions. If the descriptor is already present, it is @@ -2494,9 +2527,10 @@ class DescriptorArray: public FixedArray { static const int kFirstIndex = 3; // The length of the "bridge" to the enum cache. - static const int kEnumCacheBridgeLength = 2; + static const int kEnumCacheBridgeLength = 3; static const int kEnumCacheBridgeEnumIndex = 0; static const int kEnumCacheBridgeCacheIndex = 1; + static const int kEnumCacheBridgeIndicesCacheIndex = 2; // Layout description. static const int kBitField3StorageOffset = FixedArray::kHeaderSize; @@ -2897,22 +2931,12 @@ class Dictionary: public HashTable<Shape, Key> { // Returns the value at entry. Object* ValueAt(int entry) { - return this->get(HashTable<Shape, Key>::EntryToIndex(entry)+1); + return this->get(HashTable<Shape, Key>::EntryToIndex(entry) + 1); } // Set the value for entry. - // Returns false if the put wasn't performed due to property being read only. - // Returns true on successful put. - bool ValueAtPut(int entry, Object* value) { - // Check that this value can actually be written. - PropertyDetails details = DetailsAt(entry); - // If a value has not been initilized we allow writing to it even if - // it is read only (a declared const that has not been initialized). - if (details.IsReadOnly() && !ValueAt(entry)->IsTheHole()) { - return false; - } + void ValueAtPut(int entry, Object* value) { this->set(HashTable<Shape, Key>::EntryToIndex(entry) + 1, value); - return true; } // Returns the property details for the property at entry. @@ -3611,7 +3635,7 @@ class ExternalPixelArray: public ExternalArray { // Setter and getter. inline uint8_t get_scalar(int index); - inline MaybeObject* get(int index); + MUST_USE_RESULT inline MaybeObject* get(int index); inline void set(int index, uint8_t value); // This accessor applies the correct conversion from Smi, HeapNumber and @@ -3640,12 +3664,12 @@ class ExternalByteArray: public ExternalArray { public: // Setter and getter. inline int8_t get_scalar(int index); - inline MaybeObject* get(int index); + MUST_USE_RESULT inline MaybeObject* get(int index); inline void set(int index, int8_t value); // This accessor applies the correct conversion from Smi, HeapNumber // and undefined. - MaybeObject* SetValue(uint32_t index, Object* value); + MUST_USE_RESULT MaybeObject* SetValue(uint32_t index, Object* value); // Casting. static inline ExternalByteArray* cast(Object* obj); @@ -3669,12 +3693,12 @@ class ExternalUnsignedByteArray: public ExternalArray { public: // Setter and getter. inline uint8_t get_scalar(int index); - inline MaybeObject* get(int index); + MUST_USE_RESULT inline MaybeObject* get(int index); inline void set(int index, uint8_t value); // This accessor applies the correct conversion from Smi, HeapNumber // and undefined. - MaybeObject* SetValue(uint32_t index, Object* value); + MUST_USE_RESULT MaybeObject* SetValue(uint32_t index, Object* value); // Casting. static inline ExternalUnsignedByteArray* cast(Object* obj); @@ -3698,12 +3722,12 @@ class ExternalShortArray: public ExternalArray { public: // Setter and getter. inline int16_t get_scalar(int index); - inline MaybeObject* get(int index); + MUST_USE_RESULT inline MaybeObject* get(int index); inline void set(int index, int16_t value); // This accessor applies the correct conversion from Smi, HeapNumber // and undefined. 
- MaybeObject* SetValue(uint32_t index, Object* value); + MUST_USE_RESULT MaybeObject* SetValue(uint32_t index, Object* value); // Casting. static inline ExternalShortArray* cast(Object* obj); @@ -3727,12 +3751,12 @@ class ExternalUnsignedShortArray: public ExternalArray { public: // Setter and getter. inline uint16_t get_scalar(int index); - inline MaybeObject* get(int index); + MUST_USE_RESULT inline MaybeObject* get(int index); inline void set(int index, uint16_t value); // This accessor applies the correct conversion from Smi, HeapNumber // and undefined. - MaybeObject* SetValue(uint32_t index, Object* value); + MUST_USE_RESULT MaybeObject* SetValue(uint32_t index, Object* value); // Casting. static inline ExternalUnsignedShortArray* cast(Object* obj); @@ -3756,12 +3780,12 @@ class ExternalIntArray: public ExternalArray { public: // Setter and getter. inline int32_t get_scalar(int index); - inline MaybeObject* get(int index); + MUST_USE_RESULT inline MaybeObject* get(int index); inline void set(int index, int32_t value); // This accessor applies the correct conversion from Smi, HeapNumber // and undefined. - MaybeObject* SetValue(uint32_t index, Object* value); + MUST_USE_RESULT MaybeObject* SetValue(uint32_t index, Object* value); // Casting. static inline ExternalIntArray* cast(Object* obj); @@ -3785,12 +3809,12 @@ class ExternalUnsignedIntArray: public ExternalArray { public: // Setter and getter. inline uint32_t get_scalar(int index); - inline MaybeObject* get(int index); + MUST_USE_RESULT inline MaybeObject* get(int index); inline void set(int index, uint32_t value); // This accessor applies the correct conversion from Smi, HeapNumber // and undefined. - MaybeObject* SetValue(uint32_t index, Object* value); + MUST_USE_RESULT MaybeObject* SetValue(uint32_t index, Object* value); // Casting. static inline ExternalUnsignedIntArray* cast(Object* obj); @@ -3814,12 +3838,12 @@ class ExternalFloatArray: public ExternalArray { public: // Setter and getter. inline float get_scalar(int index); - inline MaybeObject* get(int index); + MUST_USE_RESULT inline MaybeObject* get(int index); inline void set(int index, float value); // This accessor applies the correct conversion from Smi, HeapNumber // and undefined. - MaybeObject* SetValue(uint32_t index, Object* value); + MUST_USE_RESULT MaybeObject* SetValue(uint32_t index, Object* value); // Casting. static inline ExternalFloatArray* cast(Object* obj); @@ -3843,12 +3867,12 @@ class ExternalDoubleArray: public ExternalArray { public: // Setter and getter. inline double get_scalar(int index); - inline MaybeObject* get(int index); + MUST_USE_RESULT inline MaybeObject* get(int index); inline void set(int index, double value); // This accessor applies the correct conversion from Smi, HeapNumber // and undefined. - MaybeObject* SetValue(uint32_t index, Object* value); + MUST_USE_RESULT MaybeObject* SetValue(uint32_t index, Object* value); // Casting. static inline ExternalDoubleArray* cast(Object* obj); @@ -4019,6 +4043,7 @@ class TypeFeedbackCells: public FixedArray { // Forward declaration. class SafepointEntry; +class TypeFeedbackInfo; // Code describes objects with on-the-fly generated machine code. class Code: public HeapObject { @@ -4090,8 +4115,9 @@ class Code: public HeapObject { // [deoptimization_data]: Array containing data for deopt. DECL_ACCESSORS(deoptimization_data, FixedArray) - // [type_feedback_cells]: Array containing cache cells used for type feedback. 
- DECL_ACCESSORS(type_feedback_cells, TypeFeedbackCells) + // [type_feedback_info]: Struct containing type feedback information. + // Will contain either a TypeFeedbackInfo object, or undefined. + DECL_ACCESSORS(type_feedback_info, Object) // [gc_metadata]: Field used to hold GC related metadata. The contents of this // field does not have to be traced during garbage collection since @@ -4216,6 +4242,28 @@ class Code: public HeapObject { // Find the first map in an IC stub. Map* FindFirstMap(); + class ExtraICStateStrictMode: public BitField<StrictModeFlag, 0, 1> {}; + class ExtraICStateKeyedAccessGrowMode: + public BitField<KeyedAccessGrowMode, 1, 1> {}; // NOLINT + + static const int kExtraICStateGrowModeShift = 1; + + static inline StrictModeFlag GetStrictMode(ExtraICState extra_ic_state) { + return ExtraICStateStrictMode::decode(extra_ic_state); + } + + static inline KeyedAccessGrowMode GetKeyedAccessGrowMode( + ExtraICState extra_ic_state) { + return ExtraICStateKeyedAccessGrowMode::decode(extra_ic_state); + } + + static inline ExtraICState ComputeExtraICState( + KeyedAccessGrowMode grow_mode, + StrictModeFlag strict_mode) { + return ExtraICStateKeyedAccessGrowMode::encode(grow_mode) | + ExtraICStateStrictMode::encode(strict_mode); + } + // Flags operations. static inline Flags ComputeFlags( Kind kind, @@ -4320,9 +4368,9 @@ class Code: public HeapObject { static const int kHandlerTableOffset = kRelocationInfoOffset + kPointerSize; static const int kDeoptimizationDataOffset = kHandlerTableOffset + kPointerSize; - static const int kTypeFeedbackCellsOffset = + static const int kTypeFeedbackInfoOffset = kDeoptimizationDataOffset + kPointerSize; - static const int kGCMetadataOffset = kTypeFeedbackCellsOffset + kPointerSize; + static const int kGCMetadataOffset = kTypeFeedbackInfoOffset + kPointerSize; static const int kFlagsOffset = kGCMetadataOffset + kPointerSize; static const int kKindSpecificFlagsOffset = kFlagsOffset + kIntSize; @@ -4711,8 +4759,8 @@ class Map: public HeapObject { // Adds an entry to this map's descriptor array for a transition to // |transitioned_map| when its elements_kind is changed to |elements_kind|. - MaybeObject* AddElementsTransition(ElementsKind elements_kind, - Map* transitioned_map); + MUST_USE_RESULT MaybeObject* AddElementsTransition( + ElementsKind elements_kind, Map* transitioned_map); // Returns the transitioned map for this map with the most generic // elements_kind that's found in |candidates|, or null handle if no match is @@ -4744,7 +4792,8 @@ class Map: public HeapObject { Object* GetPrototypeTransition(Object* prototype); - MaybeObject* PutPrototypeTransition(Object* prototype, Map* map); + MUST_USE_RESULT MaybeObject* PutPrototypeTransition(Object* prototype, + Map* map); static const int kMaxPreAllocatedPropertyFields = 255; @@ -5278,8 +5327,11 @@ class SharedFunctionInfo: public HeapObject { // through the API, which does not change this flag). DECL_BOOLEAN_ACCESSORS(is_anonymous) - // Indicates that the function cannot be crankshafted. - DECL_BOOLEAN_ACCESSORS(dont_crankshaft) + // Is this a function or top-level/eval code. + DECL_BOOLEAN_ACCESSORS(is_function) + + // Indicates that the function cannot be optimized. + DECL_BOOLEAN_ACCESSORS(dont_optimize) // Indicates that the function cannot be inlined. 
DECL_BOOLEAN_ACCESSORS(dont_inline) @@ -5292,9 +5344,8 @@ class SharedFunctionInfo: public HeapObject { void EnableDeoptimizationSupport(Code* recompiled); // Disable (further) attempted optimization of all functions sharing this - // shared function info. The function is the one we actually tried to - // optimize. - void DisableOptimization(JSFunction* function); + // shared function info. + void DisableOptimization(); // Lookup the bailout ID and ASSERT that it exists in the non-optimized // code, returns whether it asserted (i.e., always true if assertions are @@ -5492,7 +5543,8 @@ class SharedFunctionInfo: public HeapObject { kBoundFunction, kIsAnonymous, kNameShouldPrintAsAnonymous, - kDontCrankshaft, + kIsFunction, + kDontOptimize, kDontInline, kCompilerHintsCount // Pseudo entry }; @@ -5633,7 +5685,8 @@ class JSFunction: public JSObject { // The initial map for an object created by this constructor. inline Map* initial_map(); inline void set_initial_map(Map* value); - inline MaybeObject* set_initial_map_and_cache_transitions(Map* value); + MUST_USE_RESULT inline MaybeObject* set_initial_map_and_cache_transitions( + Map* value); inline bool has_initial_map(); // Get and set the prototype property on a JSFunction. If the @@ -5644,7 +5697,7 @@ class JSFunction: public JSObject { inline bool has_instance_prototype(); inline Object* prototype(); inline Object* instance_prototype(); - MaybeObject* SetInstancePrototype(Object* value); + MUST_USE_RESULT MaybeObject* SetInstancePrototype(Object* value); MUST_USE_RESULT MaybeObject* SetPrototype(Object* value); // After prototype is removed, it will not be created when accessed, and @@ -6157,12 +6210,14 @@ class CompilationCacheTable: public HashTable<CompilationCacheShape, LanguageMode language_mode, int scope_position); Object* LookupRegExp(String* source, JSRegExp::Flags flags); - MaybeObject* Put(String* src, Object* value); - MaybeObject* PutEval(String* src, - Context* context, - SharedFunctionInfo* value, - int scope_position); - MaybeObject* PutRegExp(String* src, JSRegExp::Flags flags, FixedArray* value); + MUST_USE_RESULT MaybeObject* Put(String* src, Object* value); + MUST_USE_RESULT MaybeObject* PutEval(String* src, + Context* context, + SharedFunctionInfo* value, + int scope_position); + MUST_USE_RESULT MaybeObject* PutRegExp(String* src, + JSRegExp::Flags flags, + FixedArray* value); // Remove given value from cache. 
void Remove(Object* value); @@ -6323,6 +6378,40 @@ class PolymorphicCodeCacheHashTable }; +class TypeFeedbackInfo: public Struct { + public: + inline int ic_total_count(); + inline void set_ic_total_count(int count); + + inline int ic_with_typeinfo_count(); + inline void set_ic_with_typeinfo_count(int count); + + DECL_ACCESSORS(type_feedback_cells, TypeFeedbackCells) + + static inline TypeFeedbackInfo* cast(Object* obj); + +#ifdef OBJECT_PRINT + inline void TypeFeedbackInfoPrint() { + TypeFeedbackInfoPrint(stdout); + } + void TypeFeedbackInfoPrint(FILE* out); +#endif +#ifdef DEBUG + void TypeFeedbackInfoVerify(); +#endif + + static const int kIcTotalCountOffset = HeapObject::kHeaderSize; + static const int kIcWithTypeinfoCountOffset = + kIcTotalCountOffset + kPointerSize; + static const int kTypeFeedbackCellsOffset = + kIcWithTypeinfoCountOffset + kPointerSize; + static const int kSize = kTypeFeedbackCellsOffset + kPointerSize; + + private: + DISALLOW_IMPLICIT_CONSTRUCTORS(TypeFeedbackInfo); +}; + + enum AllowNullsFlag {ALLOW_NULLS, DISALLOW_NULLS}; enum RobustnessFlag {ROBUST_STRING_TRAVERSAL, FAST_STRING_TRAVERSAL}; @@ -7627,7 +7716,7 @@ class JSArray: public JSObject { MUST_USE_RESULT MaybeObject* SetElementsLength(Object* length); // Set the content of the array to the content of storage. - inline MaybeObject* SetContent(FixedArrayBase* storage); + MUST_USE_RESULT inline MaybeObject* SetContent(FixedArrayBase* storage); // Casting. static inline JSArray* cast(Object* obj); @@ -7755,6 +7844,17 @@ class AccessorPair: public Struct { static inline AccessorPair* cast(Object* obj); + MUST_USE_RESULT MaybeObject* CopyWithoutTransitions(); + + // TODO(svenpanne) Evil temporary helper, will vanish soon... + void set(bool modify_getter, Object* value) { + if (modify_getter) { + set_getter(value); + } else { + set_setter(value); + } + } + #ifdef OBJECT_PRINT void AccessorPairPrint(FILE* out = stdout); #endif diff --git a/deps/v8/src/parser.cc b/deps/v8/src/parser.cc index c02cad93e1..3dfab294c8 100644 --- a/deps/v8/src/parser.cc +++ b/deps/v8/src/parser.cc @@ -503,7 +503,9 @@ Parser::FunctionState::FunctionState(Parser* parser, Parser::FunctionState::~FunctionState() { parser_->top_scope_ = outer_scope_; parser_->current_function_state_ = outer_function_state_; - parser_->isolate()->set_ast_node_id(saved_ast_node_id_); + if (outer_function_state_ != NULL) { + parser_->isolate()->set_ast_node_id(saved_ast_node_id_); + } } @@ -550,7 +552,7 @@ Parser::Parser(Handle<Script> script, allow_modules_((parser_flags & kAllowModules) != 0), stack_overflow_(false), parenthesized_function_(false) { - AstNode::ResetIds(); + isolate_->set_ast_node_id(0); if ((parser_flags & kLanguageModeMask) == EXTENDED_MODE) { scanner().SetHarmonyScoping(true); } @@ -602,7 +604,8 @@ FunctionLiteral* Parser::DoParseProgram(CompilationInfo* info, FunctionLiteral* result = NULL; { Scope* scope = NewScope(top_scope_, GLOBAL_SCOPE); info->SetGlobalScope(scope); - if (!info->is_global()) { + if (!info->is_global() && + (info->shared_info().is_null() || info->shared_info()->is_function())) { scope = Scope::DeserializeScopeChain(*info->calling_context(), scope); scope = NewScope(scope, EVAL_SCOPE); } @@ -633,9 +636,9 @@ FunctionLiteral* Parser::DoParseProgram(CompilationInfo* info, function_state.only_simple_this_property_assignments(), function_state.this_property_assignments(), 0, - false, // Does not have duplicate parameters. 
+ FunctionLiteral::kNoDuplicateParameters, FunctionLiteral::ANONYMOUS_EXPRESSION, - false); // Top-level literal doesn't count for the AST's properties. + FunctionLiteral::kGlobalOrEval); result->set_ast_properties(factory()->visitor()->ast_properties()); } else if (stack_overflow_) { isolate()->StackOverflow(); @@ -651,6 +654,7 @@ FunctionLiteral* Parser::DoParseProgram(CompilationInfo* info, return result; } + FunctionLiteral* Parser::ParseLazy(CompilationInfo* info) { ZoneScope zone_scope(isolate(), DONT_DELETE_ON_EXIT); HistogramTimerScope timer(isolate()->counters()->parse_lazy()); @@ -1084,32 +1088,11 @@ class ThisNamedPropertyAssignmentFinder : public ParserFinder { }; -Statement* Parser::ParseSourceElement(ZoneStringList* labels, - bool* ok) { - // (Ecma 262 5th Edition, clause 14): - // SourceElement: - // Statement - // FunctionDeclaration - // - // In harmony mode we allow additionally the following productions - // SourceElement: - // LetDeclaration - // ConstDeclaration - - if (peek() == Token::FUNCTION) { - return ParseFunctionDeclaration(ok); - } else if (peek() == Token::LET || peek() == Token::CONST) { - return ParseVariableStatement(kSourceElement, ok); - } - return ParseStatement(labels, ok); -} - - void* Parser::ParseSourceElements(ZoneList<Statement*>* processor, int end_token, bool* ok) { // SourceElements :: - // (SourceElement)* <end_token> + // (ModuleElement)* <end_token> // Allocate a target stack to use for this set of source // elements. This way, all scripts and functions get their own @@ -1128,7 +1111,7 @@ void* Parser::ParseSourceElements(ZoneList<Statement*>* processor, } Scanner::Location token_loc = scanner().peek_location(); - Statement* stat = ParseSourceElement(NULL, CHECK_OK); + Statement* stat = ParseModuleElement(NULL, CHECK_OK); if (stat == NULL || stat->IsEmpty()) { directive_prologue = false; // End of directive prologue. continue; @@ -1184,6 +1167,194 @@ void* Parser::ParseSourceElements(ZoneList<Statement*>* processor, } +Statement* Parser::ParseModuleElement(ZoneStringList* labels, + bool* ok) { + // (Ecma 262 5th Edition, clause 14): + // SourceElement: + // Statement + // FunctionDeclaration + // + // In harmony mode we allow additionally the following productions + // ModuleElement: + // LetDeclaration + // ConstDeclaration + // ModuleDeclaration + // ImportDeclaration + // ExportDeclaration + + switch (peek()) { + case Token::FUNCTION: + return ParseFunctionDeclaration(ok); + case Token::LET: + case Token::CONST: + return ParseVariableStatement(kModuleElement, ok); + case Token::MODULE: + return ParseModuleDeclaration(ok); + case Token::IMPORT: + return ParseImportDeclaration(ok); + case Token::EXPORT: + return ParseExportDeclaration(ok); + default: + return ParseStatement(labels, ok); + } +} + + +Block* Parser::ParseModuleDeclaration(bool* ok) { + // ModuleDeclaration: + // 'module' Identifier Module + + // Create new block with one expected declaration. + Block* block = factory()->NewBlock(NULL, 1, true); + Expect(Token::MODULE, CHECK_OK); + Handle<String> name = ParseIdentifier(CHECK_OK); + // top_scope_->AddDeclaration( + // factory()->NewModuleDeclaration(proxy, module, top_scope_)); + VariableProxy* proxy = Declare(name, LET, NULL, true, CHECK_OK); + Module* module = ParseModule(ok); + // TODO(rossberg): Add initialization statement to block. 
+ USE(proxy); + USE(module); + return block; +} + + +Module* Parser::ParseModule(bool* ok) { + // Module: + // '{' ModuleElement '}' + // '=' ModulePath + // 'at' String + + switch (peek()) { + case Token::LBRACE: + return ParseModuleLiteral(ok); + + case Token::ASSIGN: + Expect(Token::ASSIGN, CHECK_OK); + return ParseModulePath(ok); + + default: + return ParseModuleUrl(ok); + } +} + + +Module* Parser::ParseModuleLiteral(bool* ok) { + // Module: + // '{' ModuleElement '}' + + // Construct block expecting 16 statements. + Block* body = factory()->NewBlock(NULL, 16, false); + Scope* scope = NewScope(top_scope_, MODULE_SCOPE); + + Expect(Token::LBRACE, CHECK_OK); + scope->set_start_position(scanner().location().beg_pos); + scope->SetLanguageMode(EXTENDED_MODE); + + { + BlockState block_state(this, scope); + TargetCollector collector; + Target target(&this->target_stack_, &collector); + Target target_body(&this->target_stack_, body); + InitializationBlockFinder block_finder(top_scope_, target_stack_); + + while (peek() != Token::RBRACE) { + Statement* stat = ParseModuleElement(NULL, CHECK_OK); + if (stat && !stat->IsEmpty()) { + body->AddStatement(stat); + block_finder.Update(stat); + } + } + } + + Expect(Token::RBRACE, CHECK_OK); + scope->set_end_position(scanner().location().end_pos); + body->set_block_scope(scope); + return factory()->NewModuleLiteral(body); +} + + +Module* Parser::ParseModulePath(bool* ok) { + // ModulePath: + // Identifier + // ModulePath '.' Identifier + + Module* result = ParseModuleVariable(CHECK_OK); + + while (Check(Token::PERIOD)) { + Handle<String> name = ParseIdentifierName(CHECK_OK); + result = factory()->NewModulePath(result, name); + } + + return result; +} + + +Module* Parser::ParseModuleVariable(bool* ok) { + // ModulePath: + // Identifier + + Handle<String> name = ParseIdentifier(CHECK_OK); + VariableProxy* proxy = top_scope_->NewUnresolved( + factory(), name, scanner().location().beg_pos); + return factory()->NewModuleVariable(proxy); +} + + +Module* Parser::ParseModuleUrl(bool* ok) { + // Module: + // 'at' String + + Expect(Token::IDENTIFIER, CHECK_OK); + Handle<String> symbol = GetSymbol(CHECK_OK); + if (!symbol->IsEqualTo(CStrVector("at"))) { + *ok = false; + ReportUnexpectedToken(scanner().current_token()); + return NULL; + } + Expect(Token::STRING, CHECK_OK); + symbol = GetSymbol(CHECK_OK); + + return factory()->NewModuleUrl(symbol); +} + + +Block* Parser::ParseImportDeclaration(bool* ok) { + // TODO(rossberg) + return NULL; +} + + +Block* Parser::ParseExportDeclaration(bool* ok) { + // TODO(rossberg) + return NULL; +} + + +Statement* Parser::ParseBlockElement(ZoneStringList* labels, + bool* ok) { + // (Ecma 262 5th Edition, clause 14): + // SourceElement: + // Statement + // FunctionDeclaration + // + // In harmony mode we allow additionally the following productions + // BlockElement (aka SourceElement): + // LetDeclaration + // ConstDeclaration + + switch (peek()) { + case Token::FUNCTION: + return ParseFunctionDeclaration(ok); + case Token::LET: + case Token::CONST: + return ParseVariableStatement(kModuleElement, ok); + default: + return ParseStatement(labels, ok); + } +} + + Statement* Parser::ParseStatement(ZoneStringList* labels, bool* ok) { // Statement :: // Block @@ -1341,7 +1512,8 @@ VariableProxy* Parser::Declare(Handle<String> name, // statically declared. 
if (declaration_scope->is_function_scope() || declaration_scope->is_strict_or_extended_eval_scope() || - declaration_scope->is_block_scope()) { + declaration_scope->is_block_scope() || + declaration_scope->is_module_scope()) { // Declare the variable in the function scope. var = declaration_scope->LocalLookup(name); if (var == NULL) { @@ -1571,10 +1743,10 @@ Block* Parser::ParseBlock(ZoneStringList* labels, bool* ok) { Block* Parser::ParseScopedBlock(ZoneStringList* labels, bool* ok) { - // The harmony mode uses source elements instead of statements. + // The harmony mode uses block elements instead of statements. // // Block :: - // '{' SourceElement* '}' + // '{' BlockElement* '}' // Construct block expecting 16 statements. Block* body = factory()->NewBlock(labels, 16, false); @@ -1590,7 +1762,7 @@ Block* Parser::ParseScopedBlock(ZoneStringList* labels, bool* ok) { InitializationBlockFinder block_finder(top_scope_, target_stack_); while (peek() != Token::RBRACE) { - Statement* stat = ParseSourceElement(NULL, CHECK_OK); + Statement* stat = ParseBlockElement(NULL, CHECK_OK); if (stat && !stat->IsEmpty()) { body->AddStatement(stat); block_finder.Update(stat); @@ -1611,10 +1783,8 @@ Block* Parser::ParseVariableStatement(VariableDeclarationContext var_context, // VariableDeclarations ';' Handle<String> ignore; - Block* result = ParseVariableDeclarations(var_context, - NULL, - &ignore, - CHECK_OK); + Block* result = + ParseVariableDeclarations(var_context, NULL, &ignore, CHECK_OK); ExpectSemicolon(CHECK_OK); return result; } @@ -1681,8 +1851,7 @@ Block* Parser::ParseVariableDeclarations( *ok = false; return NULL; case EXTENDED_MODE: - if (var_context != kSourceElement && - var_context != kForStatement) { + if (var_context == kStatement) { // In extended mode 'const' declarations are only allowed in source // element positions. ReportMessage("unprotected_const", Vector<const char*>::empty()); @@ -1707,10 +1876,8 @@ Block* Parser::ParseVariableDeclarations( return NULL; } Consume(Token::LET); - if (var_context != kSourceElement && - var_context != kForStatement) { + if (var_context == kStatement) { // Let declarations are only allowed in source element positions. - ASSERT(var_context == kStatement); ReportMessage("unprotected_let", Vector<const char*>::empty()); *ok = false; return NULL; @@ -2450,10 +2617,7 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) { Handle<String> name; VariableDeclarationProperties decl_props = kHasNoInitializers; Block* variable_statement = - ParseVariableDeclarations(kForStatement, - &decl_props, - &name, - CHECK_OK); + ParseVariableDeclarations(kForStatement, &decl_props, &name, CHECK_OK); bool accept_IN = !name.is_null() && decl_props != kHasInitializers; if (peek() == Token::IN && accept_IN) { // Rewrite a for-in statement of the form @@ -4015,7 +4179,8 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> function_name, int handler_count = 0; bool only_simple_this_property_assignments; Handle<FixedArray> this_property_assignments; - bool has_duplicate_parameters = false; + FunctionLiteral::ParameterFlag duplicate_parameters = + FunctionLiteral::kNoDuplicateParameters; AstProperties ast_properties; // Parse function body. 
{ FunctionState function_state(this, scope, isolate()); @@ -4041,14 +4206,14 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> function_name, name_loc = scanner().location(); } if (!dupe_loc.IsValid() && top_scope_->IsDeclared(param_name)) { - has_duplicate_parameters = true; + duplicate_parameters = FunctionLiteral::kHasDuplicateParameters; dupe_loc = scanner().location(); } if (!reserved_loc.IsValid() && is_strict_reserved) { reserved_loc = scanner().location(); } - top_scope_->DeclareParameter(param_name, is_extended_mode() ? LET : VAR); + top_scope_->DeclareParameter(param_name, VAR); num_parameters++; if (num_parameters > kMaxNumFunctionParameters) { ReportMessageAt(scanner().location(), "too_many_parameters", @@ -4252,9 +4417,9 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> function_name, only_simple_this_property_assignments, this_property_assignments, num_parameters, - has_duplicate_parameters, + duplicate_parameters, type, - true); + FunctionLiteral::kIsFunction); function_literal->set_function_token_position(function_token_position); function_literal->set_ast_properties(&ast_properties); @@ -5594,7 +5759,11 @@ bool ParserApi::Parse(CompilationInfo* info, int parsing_flags) { if (info->is_lazy()) { ASSERT(!info->is_eval()); Parser parser(script, parsing_flags, NULL, NULL); - result = parser.ParseLazy(info); + if (info->shared_info()->is_function()) { + result = parser.ParseLazy(info); + } else { + result = parser.ParseProgram(info); + } } else { ScriptDataImpl* pre_data = info->pre_parse_data(); Parser parser(script, parsing_flags, info->extension(), pre_data); diff --git a/deps/v8/src/parser.h b/deps/v8/src/parser.h index fbc4a1529d..66c801d981 100644 --- a/deps/v8/src/parser.h +++ b/deps/v8/src/parser.h @@ -464,7 +464,8 @@ class Parser { }; enum VariableDeclarationContext { - kSourceElement, + kModuleElement, + kBlockElement, kStatement, kForStatement }; @@ -575,7 +576,16 @@ class Parser { // for failure at the call sites. 
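  // Each of the following ParseX methods reports failure through its
  // bool* ok argument; the CHECK_OK macro used at the call sites returns
  // early once *ok has been set to false.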
  void* ParseSourceElements(ZoneList<Statement*>* processor,
                            int end_token, bool* ok);
-  Statement* ParseSourceElement(ZoneStringList* labels, bool* ok);
+  Statement* ParseModuleElement(ZoneStringList* labels, bool* ok);
+  Block* ParseModuleDeclaration(bool* ok);
+  Module* ParseModule(bool* ok);
+  Module* ParseModuleLiteral(bool* ok);
+  Module* ParseModulePath(bool* ok);
+  Module* ParseModuleVariable(bool* ok);
+  Module* ParseModuleUrl(bool* ok);
+  Block* ParseImportDeclaration(bool* ok);
+  Block* ParseExportDeclaration(bool* ok);
+  Statement* ParseBlockElement(ZoneStringList* labels, bool* ok);
  Statement* ParseStatement(ZoneStringList* labels, bool* ok);
  Statement* ParseFunctionDeclaration(bool* ok);
  Statement* ParseNativeDeclaration(bool* ok);
diff --git a/deps/v8/src/platform-cygwin.cc b/deps/v8/src/platform-cygwin.cc
index c27e3c982f..79134da352 100644
--- a/deps/v8/src/platform-cygwin.cc
+++ b/deps/v8/src/platform-cygwin.cc
@@ -365,16 +365,9 @@ class Thread::PlatformData : public Malloced {
 Thread::Thread(const Options& options)
-    : data_(new PlatformData),
-      stack_size_(options.stack_size) {
-  set_name(options.name);
-}
-
-
-Thread::Thread(const char* name)
-    : data_(new PlatformData),
-      stack_size_(0) {
-  set_name(name);
+    : data_(new PlatformData()),
+      stack_size_(options.stack_size()) {
+  set_name(options.name());
 }
@@ -617,8 +610,10 @@ class Sampler::PlatformData : public Malloced {
 class SamplerThread : public Thread {
  public:
+  static const int kSamplerThreadStackSize = 64 * KB;
+
   explicit SamplerThread(int interval)
-      : Thread("SamplerThread"),
+      : Thread(Thread::Options("SamplerThread", kSamplerThreadStackSize)),
         interval_(interval) {}
   static void AddActiveSampler(Sampler* sampler) {
diff --git a/deps/v8/src/platform-win32.cc b/deps/v8/src/platform-win32.cc
index f16b5e74fd..6f77b3b797 100644
--- a/deps/v8/src/platform-win32.cc
+++ b/deps/v8/src/platform-win32.cc
@@ -58,21 +58,26 @@ int localtime_s(tm* out_tm, const time_t* time) {
 }
-// Not sure this the correct interpretation of _mkgmtime
-time_t _mkgmtime(tm* timeptr) {
-  return mktime(timeptr);
-}
-
-
 int fopen_s(FILE** pFile, const char* filename, const char* mode) {
   *pFile = fopen(filename, mode);
   return *pFile != NULL ? 0 : 1;
 }
+#ifndef __MINGW64_VERSION_MAJOR
+
+// Not sure this is the correct interpretation of _mkgmtime
+time_t _mkgmtime(tm* timeptr) {
+  return mktime(timeptr);
+}
+
+
 #define _TRUNCATE 0
 #define STRUNCATE 80
+#endif // __MINGW64_VERSION_MAJOR
+
+
 int _vsnprintf_s(char* buffer, size_t sizeOfBuffer, size_t count,
                  const char* format, va_list argptr) {
   ASSERT(count == _TRUNCATE);
@@ -831,43 +836,63 @@ size_t OS::AllocateAlignment() {
 }
-void* OS::Allocate(const size_t requested,
-                   size_t* allocated,
-                   bool is_executable) {
-  // The address range used to randomize RWX allocations in OS::Allocate
-  // Try not to map pages into the default range that windows loads DLLs
-  // Use a multiple of 64k to prevent committing unused memory.
-  // Note: This does not guarantee RWX regions will be within the
-  // range kAllocationRandomAddressMin to kAllocationRandomAddressMax
+static void* GetRandomAddr() {
+  Isolate* isolate = Isolate::UncheckedCurrent();
+  // Note that the current isolate isn't set up in a call path via
+  // CpuFeatures::Probe. We don't care about randomization in this case because
+  // the code page is immediately freed.
+  if (isolate != NULL) {
+    // The address range used to randomize RWX allocations in OS::Allocate
+    // Try not to map pages into the default range that windows loads DLLs
+    // Use a multiple of 64k to prevent committing unused memory.
+    // Note: This does not guarantee RWX regions will be within the
+    // range kAllocationRandomAddressMin to kAllocationRandomAddressMax
 #ifdef V8_HOST_ARCH_64_BIT
-  static const intptr_t kAllocationRandomAddressMin = 0x0000000080000000;
-  static const intptr_t kAllocationRandomAddressMax = 0x000003FFFFFF0000;
+    static const intptr_t kAllocationRandomAddressMin = 0x0000000080000000;
+    static const intptr_t kAllocationRandomAddressMax = 0x000003FFFFFF0000;
 #else
-  static const intptr_t kAllocationRandomAddressMin = 0x04000000;
-  static const intptr_t kAllocationRandomAddressMax = 0x3FFF0000;
+    static const intptr_t kAllocationRandomAddressMin = 0x04000000;
+    static const intptr_t kAllocationRandomAddressMax = 0x3FFF0000;
 #endif
+    uintptr_t address = (V8::RandomPrivate(isolate) << kPageSizeBits)
+        | kAllocationRandomAddressMin;
+    address &= kAllocationRandomAddressMax;
+    return reinterpret_cast<void *>(address);
+  }
+  return NULL;
+}
+
+
+static void* RandomizedVirtualAlloc(size_t size, int action, int protection) {
+  LPVOID base = NULL;
+
+  if (protection == PAGE_EXECUTE_READWRITE || protection == PAGE_NOACCESS) {
+    // For executable pages try and randomize the allocation address
+    for (size_t attempts = 0; base == NULL && attempts < 3; ++attempts) {
+      base = VirtualAlloc(GetRandomAddr(), size, action, protection);
+    }
+  }
+
+  // After three attempts give up and let the OS find an address to use.
+  if (base == NULL) base = VirtualAlloc(NULL, size, action, protection);
+
+  return base;
+}
+
+void* OS::Allocate(const size_t requested,
                    size_t* allocated,
                    bool is_executable) {
   // VirtualAlloc rounds allocated size to page size automatically.
   size_t msize = RoundUp(requested, static_cast<int>(GetPageSize()));
-  intptr_t address = 0;
+  void* address = 0;
   // Windows XP SP2 allows Data Execution Prevention (DEP).
   int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
-  // For exectutable pages try and randomize the allocation address
-  if (prot == PAGE_EXECUTE_READWRITE &&
-      msize >= static_cast<size_t>(Page::kPageSize)) {
-    address = (V8::RandomPrivate(Isolate::Current()) << kPageSizeBits)
-        | kAllocationRandomAddressMin;
-    address &= kAllocationRandomAddressMax;
-  }
-
-  LPVOID mbase = VirtualAlloc(reinterpret_cast<void *>(address),
-                              msize,
-                              MEM_COMMIT | MEM_RESERVE,
-                              prot);
-  if (mbase == NULL && address != 0)
-    mbase = VirtualAlloc(NULL, msize, MEM_COMMIT | MEM_RESERVE, prot);
+  LPVOID mbase = RandomizedVirtualAlloc(msize,
+                                        MEM_COMMIT | MEM_RESERVE,
+                                        prot);
   if (mbase == NULL) {
     LOG(ISOLATE, StringEvent("OS::Allocate", "VirtualAlloc failed"));
@@ -1471,7 +1496,7 @@ bool VirtualMemory::Uncommit(void* address, size_t size) {
 void* VirtualMemory::ReserveRegion(size_t size) {
-  return VirtualAlloc(NULL, size, MEM_RESERVE, PAGE_NOACCESS);
+  return RandomizedVirtualAlloc(size, MEM_RESERVE, PAGE_NOACCESS);
 }
@@ -1496,7 +1521,6 @@ bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
 }
-
 // ----------------------------------------------------------------------------
 // Win32 thread support.
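The retry pattern above keeps the address randomization strictly best-effort: a few attempts at a random 64KB-aligned hint, then a fallback that lets the OS pick. A minimal standalone sketch of the same idea, with illustrative constants and the plain CRT rand() standing in for V8::RandomPrivate:

#include <windows.h>
#include <cstdint>
#include <cstdlib>

// Best-effort randomized reservation: try a few random 64KB-aligned
// address hints, then fall back to letting the OS choose.
static void* AllocateRandomized(size_t size, DWORD action, DWORD protection) {
  static const uintptr_t kMin = 0x04000000;  // illustrative 32-bit range
  static const uintptr_t kMax = 0x3FFF0000;
  for (int attempts = 0; attempts < 3; ++attempts) {
    // rand() << 16 keeps the low 16 bits zero, i.e. 64KB alignment.
    uintptr_t hint = ((static_cast<uintptr_t>(std::rand()) << 16) | kMin) & kMax;
    void* base = VirtualAlloc(reinterpret_cast<void*>(hint), size,
                              action, protection);
    if (base != NULL) return base;
  }
  return VirtualAlloc(NULL, size, action, protection);  // OS picks the address
}

A collision with an existing mapping only costs one extra VirtualAlloc call, and the final fallback preserves the old unhinted behavior.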
diff --git a/deps/v8/src/prettyprinter.cc b/deps/v8/src/prettyprinter.cc index 685e443e24..f3ec75adcf 100644 --- a/deps/v8/src/prettyprinter.cc +++ b/deps/v8/src/prettyprinter.cc @@ -84,7 +84,7 @@ void PrettyPrinter::VisitModuleLiteral(ModuleLiteral* node) { void PrettyPrinter::VisitModuleVariable(ModuleVariable* node) { - PrintLiteral(node->var()->name(), false); + Visit(node->proxy()); } @@ -773,7 +773,7 @@ void AstPrinter::VisitModuleLiteral(ModuleLiteral* node) { void AstPrinter::VisitModuleVariable(ModuleVariable* node) { - PrintLiteralIndented("VARIABLE", node->var()->name(), false); + Visit(node->proxy()); } diff --git a/deps/v8/src/profile-generator.cc b/deps/v8/src/profile-generator.cc index c6e6131a71..1ba68a1667 100644 --- a/deps/v8/src/profile-generator.cc +++ b/deps/v8/src/profile-generator.cc @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -1083,7 +1083,7 @@ void HeapEntry::Print( for (int i = 0; i < ch.length(); ++i) { HeapGraphEdge& edge = ch[i]; const char* edge_prefix = ""; - ScopedVector<char> index(64); + EmbeddedVector<char, 64> index; const char* edge_name = index.start(); switch (edge.type()) { case HeapGraphEdge::kContextVariable: @@ -1164,6 +1164,7 @@ class RetainedSizeCalculator { int retained_size_; }; + void HeapEntry::CalculateExactRetainedSize() { // To calculate retained size, first we paint all reachable nodes in // one color, then we paint (or re-paint) all nodes reachable from @@ -1208,11 +1209,13 @@ template <size_t ptr_size> struct SnapshotSizeConstants; template <> struct SnapshotSizeConstants<4> { static const int kExpectedHeapGraphEdgeSize = 12; static const int kExpectedHeapEntrySize = 36; + static const int kMaxSerializableSnapshotRawSize = 256 * MB; }; template <> struct SnapshotSizeConstants<8> { static const int kExpectedHeapGraphEdgeSize = 24; static const int kExpectedHeapEntrySize = 48; + static const int kMaxSerializableSnapshotRawSize = 768 * MB; }; } // namespace @@ -1241,6 +1244,7 @@ HeapSnapshot::HeapSnapshot(HeapSnapshotsCollection* collection, } } + HeapSnapshot::~HeapSnapshot() { DeleteArray(raw_entries_); } @@ -1266,6 +1270,7 @@ static void HeapEntryClearPaint(HeapEntry** entry_ptr) { (*entry_ptr)->clear_paint(); } + void HeapSnapshot::ClearPaint() { entries_.Iterate(HeapEntryClearPaint); } @@ -1371,6 +1376,7 @@ static int SortByIds(const T* entry1_ptr, return (*entry1_ptr)->id() < (*entry2_ptr)->id() ? -1 : 1; } + List<HeapEntry*>* HeapSnapshot::GetSortedEntriesList() { if (!entries_sorted_) { entries_.Sort(SortByIds); @@ -1775,9 +1781,11 @@ HeapEntry* V8HeapExplorer::AddEntry(HeapObject* object, } else if (object->IsJSFunction()) { JSFunction* func = JSFunction::cast(object); SharedFunctionInfo* shared = func->shared(); + const char* name = shared->bound() ? "native_bind" : + collection_->names()->GetName(String::cast(shared->name())); return AddEntry(object, HeapEntry::kClosure, - collection_->names()->GetName(String::cast(shared->name())), + name, children_count, retainers_count); } else if (object->IsJSRegExp()) { @@ -2011,19 +2019,22 @@ void V8HeapExplorer::ExtractReferences(HeapObject* obj) { heap_->prototype_symbol(), js_fun->prototype()); } } + SharedFunctionInfo* shared_info = js_fun->shared(); + // JSFunction has either bindings or literals and never both. 
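+  // For a function created by Function.prototype.bind the slot holds the
+  // bound receiver, the target function and any bound arguments; for an
+  // ordinary function it holds the literals array.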
+ bool bound = shared_info->bound(); + TagObject(js_fun->literals_or_bindings(), + bound ? "(function bindings)" : "(function literals)"); SetInternalReference(js_fun, entry, - "shared", js_fun->shared(), + bound ? "bindings" : "literals", + js_fun->literals_or_bindings(), + JSFunction::kLiteralsOffset); + SetInternalReference(js_fun, entry, + "shared", shared_info, JSFunction::kSharedFunctionInfoOffset); TagObject(js_fun->unchecked_context(), "(context)"); SetInternalReference(js_fun, entry, "context", js_fun->unchecked_context(), JSFunction::kContextOffset); - TagObject(js_fun->literals_or_bindings(), - "(function literals_or_bindings)"); - SetInternalReference(js_fun, entry, - "literals_or_bindings", - js_fun->literals_or_bindings(), - JSFunction::kLiteralsOffset); for (int i = JSFunction::kNonWeakFieldsEndOffset; i < JSFunction::kSize; i += kPointerSize) { @@ -2126,17 +2137,6 @@ void V8HeapExplorer::ExtractReferences(HeapObject* obj) { SetInternalReference(obj, entry, "line_ends", script->line_ends(), Script::kLineEndsOffset); - } else if (obj->IsDescriptorArray()) { - DescriptorArray* desc_array = DescriptorArray::cast(obj); - if (desc_array->length() > DescriptorArray::kContentArrayIndex) { - Object* content_array = - desc_array->get(DescriptorArray::kContentArrayIndex); - TagObject(content_array, "(map descriptor content)"); - SetInternalReference(obj, entry, - "content", content_array, - FixedArray::OffsetOfElementAt( - DescriptorArray::kContentArrayIndex)); - } } else if (obj->IsCodeCache()) { CodeCache* code_cache = CodeCache::cast(obj); TagObject(code_cache->default_cache(), "(default code cache)"); @@ -2162,11 +2162,27 @@ void V8HeapExplorer::ExtractReferences(HeapObject* obj) { void V8HeapExplorer::ExtractClosureReferences(JSObject* js_obj, HeapEntry* entry) { - if (js_obj->IsJSFunction()) { - JSFunction* func = JSFunction::cast(js_obj); - Context* context = func->context(); - ScopeInfo* scope_info = context->closure()->shared()->scope_info(); - + if (!js_obj->IsJSFunction()) return; + + JSFunction* func = JSFunction::cast(js_obj); + Context* context = func->context(); + ScopeInfo* scope_info = context->closure()->shared()->scope_info(); + + if (func->shared()->bound()) { + FixedArray* bindings = func->function_bindings(); + SetNativeBindReference(js_obj, entry, "bound_this", + bindings->get(JSFunction::kBoundThisIndex)); + SetNativeBindReference(js_obj, entry, "bound_function", + bindings->get(JSFunction::kBoundFunctionIndex)); + for (int i = JSFunction::kBoundArgumentsStartIndex; + i < bindings->length(); i++) { + const char* reference_name = collection_->names()->GetFormatted( + "bound_argument_%d", + i - JSFunction::kBoundArgumentsStartIndex); + SetNativeBindReference(js_obj, entry, reference_name, + bindings->get(i)); + } + } else { // Add context allocated locals. 
int context_locals = scope_info->ContextLocalCount(); for (int i = 0; i < context_locals; ++i) { @@ -2444,6 +2460,22 @@ void V8HeapExplorer::SetClosureReference(HeapObject* parent_obj, } +void V8HeapExplorer::SetNativeBindReference(HeapObject* parent_obj, + HeapEntry* parent_entry, + const char* reference_name, + Object* child_obj) { + HeapEntry* child_entry = GetEntry(child_obj); + if (child_entry != NULL) { + filler_->SetNamedReference(HeapGraphEdge::kShortcut, + parent_obj, + parent_entry, + reference_name, + child_obj, + child_entry); + } +} + + void V8HeapExplorer::SetElementReference(HeapObject* parent_obj, HeapEntry* parent_entry, int index, @@ -2617,7 +2649,6 @@ void V8HeapExplorer::TagObject(Object* obj, const char* tag) { !obj->IsOddball() && obj != heap_->raw_unchecked_empty_byte_array() && obj != heap_->raw_unchecked_empty_fixed_array() && - obj != heap_->raw_unchecked_empty_fixed_double_array() && obj != heap_->raw_unchecked_empty_descriptor_array()) { objects_tags_.SetTag(obj, tag); } @@ -3240,57 +3271,77 @@ void HeapSnapshotGenerator::FillReversePostorderIndexes( } -static int Intersect(int i1, int i2, const Vector<HeapEntry*>& dominators) { +static int Intersect(int i1, int i2, const Vector<int>& dominators) { int finger1 = i1, finger2 = i2; while (finger1 != finger2) { - while (finger1 < finger2) finger1 = dominators[finger1]->ordered_index(); - while (finger2 < finger1) finger2 = dominators[finger2]->ordered_index(); + while (finger1 < finger2) finger1 = dominators[finger1]; + while (finger2 < finger1) finger2 = dominators[finger2]; } return finger1; } + // The algorithm is based on the article: // K. Cooper, T. Harvey and K. Kennedy "A Simple, Fast Dominance Algorithm" // Softw. Pract. Exper. 4 (2001), pp. 1-10. bool HeapSnapshotGenerator::BuildDominatorTree( const Vector<HeapEntry*>& entries, - Vector<HeapEntry*>* dominators) { + Vector<int>* dominators) { if (entries.length() == 0) return true; const int entries_length = entries.length(), root_index = entries_length - 1; - for (int i = 0; i < root_index; ++i) (*dominators)[i] = NULL; - (*dominators)[root_index] = entries[root_index]; + static const int kNoDominator = -1; + for (int i = 0; i < root_index; ++i) (*dominators)[i] = kNoDominator; + (*dominators)[root_index] = root_index; + + // We use time_stamps array to stamp entries with the iteration number + // when the dominance for the entry has been updated. + ScopedVector<int> time_stamps(entries_length); + for (int i = 0; i < entries_length; ++i) time_stamps[i] = -1; + Vector<HeapGraphEdge> children = entries[root_index]->children(); + for (int i = 0; i < children.length(); ++i) { + // Mark the root direct children as affected on iteration zero. + time_stamps[children[i].to()->ordered_index()] = 0; + } + int changed = 1; + int iteration = 0; const int base_progress_counter = progress_counter_; while (changed != 0) { + ++iteration; changed = 0; for (int i = root_index - 1; i >= 0; --i) { - HeapEntry* new_idom = NULL; + // If dominator of the entry has already been set to root, + // then it can't propagate any further. + if ((*dominators)[i] == root_index) continue; + // If no retainers of the entry had been updated on current + // or previous iteration, then this entry is not affected. 
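+      // The time stamps turn the fixed-point iteration into a worklist-style
+      // sweep: only entries with a recently updated retainer are re-examined.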
+ if (time_stamps[i] < iteration - 1) continue; + int new_idom_index = kNoDominator; Vector<HeapGraphEdge*> rets = entries[i]->retainers(); - int j = 0; - for (; j < rets.length(); ++j) { + for (int j = 0; j < rets.length(); ++j) { if (rets[j]->type() == HeapGraphEdge::kShortcut) continue; - HeapEntry* ret = rets[j]->From(); - if (dominators->at(ret->ordered_index()) != NULL) { - new_idom = ret; - break; + int ret_index = rets[j]->From()->ordered_index(); + if (dominators->at(ret_index) != kNoDominator) { + new_idom_index = new_idom_index == kNoDominator + ? ret_index + : Intersect(ret_index, new_idom_index, *dominators); + // If idom has already reached the root, it doesn't make sense + // to check other retainers. + if (new_idom_index == root_index) break; } } - for (++j; j < rets.length(); ++j) { - if (rets[j]->type() == HeapGraphEdge::kShortcut) continue; - HeapEntry* ret = rets[j]->From(); - if (dominators->at(ret->ordered_index()) != NULL) { - new_idom = entries[Intersect(ret->ordered_index(), - new_idom->ordered_index(), - *dominators)]; - } - } - if (new_idom != NULL && dominators->at(i) != new_idom) { - (*dominators)[i] = new_idom; + if (new_idom_index != kNoDominator + && dominators->at(i) != new_idom_index) { + (*dominators)[i] = new_idom_index; ++changed; + Vector<HeapGraphEdge> children = entries[i]->children(); + for (int j = 0; j < children.length(); ++j) { + time_stamps[children[j].to()->ordered_index()] = iteration; + } } } int remaining = entries_length - changed; - if (remaining < 0) remaining = 0; + ASSERT(remaining >= 0); progress_counter_ = base_progress_counter + remaining; if (!ProgressReport(true)) return false; } @@ -3302,11 +3353,11 @@ bool HeapSnapshotGenerator::SetEntriesDominators() { // This array is used for maintaining reverse postorder of nodes. 
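  // Reverse postorder generally places retainers before the entries they
  // retain, which is what lets the downward sweep in BuildDominatorTree
  // converge in few iterations.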
ScopedVector<HeapEntry*> ordered_entries(snapshot_->entries()->length()); FillReversePostorderIndexes(&ordered_entries); - ScopedVector<HeapEntry*> dominators(ordered_entries.length()); + ScopedVector<int> dominators(ordered_entries.length()); if (!BuildDominatorTree(ordered_entries, &dominators)) return false; for (int i = 0; i < ordered_entries.length(); ++i) { - ASSERT(dominators[i] != NULL); - ordered_entries[i]->set_dominator(dominators[i]); + ASSERT(dominators[i] >= 0); + ordered_entries[i]->set_dominator(ordered_entries[dominators[i]]); } return true; } @@ -3336,6 +3387,17 @@ bool HeapSnapshotGenerator::ApproximateRetainedSizes() { } +template<int bytes> struct MaxDecimalDigitsIn; +template<> struct MaxDecimalDigitsIn<4> { + static const int kSigned = 11; + static const int kUnsigned = 10; +}; +template<> struct MaxDecimalDigitsIn<8> { + static const int kSigned = 20; + static const int kUnsigned = 20; +}; + + class OutputStreamWriter { public: explicit OutputStreamWriter(v8::OutputStream* stream) @@ -3385,23 +3447,34 @@ class OutputStreamWriter { private: template<typename T> void AddNumberImpl(T n, const char* format) { - ScopedVector<char> buffer(32); - int result = OS::SNPrintF(buffer, format, n); - USE(result); - ASSERT(result != -1); - AddString(buffer.start()); + // Buffer for the longest value plus trailing \0 + static const int kMaxNumberSize = + MaxDecimalDigitsIn<sizeof(T)>::kUnsigned + 1; + if (chunk_size_ - chunk_pos_ >= kMaxNumberSize) { + int result = OS::SNPrintF( + chunk_.SubVector(chunk_pos_, chunk_size_), format, n); + ASSERT(result != -1); + chunk_pos_ += result; + MaybeWriteChunk(); + } else { + EmbeddedVector<char, kMaxNumberSize> buffer; + int result = OS::SNPrintF(buffer, format, n); + USE(result); + ASSERT(result != -1); + AddString(buffer.start()); + } } void MaybeWriteChunk() { ASSERT(chunk_pos_ <= chunk_size_); if (chunk_pos_ == chunk_size_) { WriteChunk(); - chunk_pos_ = 0; } } void WriteChunk() { if (aborted_) return; if (stream_->WriteAsciiChunk(chunk_.start(), chunk_pos_) == v8::OutputStream::kAbort) aborted_ = true; + chunk_pos_ = 0; } v8::OutputStream* stream_; @@ -3411,15 +3484,14 @@ class OutputStreamWriter { bool aborted_; }; -const int HeapSnapshotJSONSerializer::kMaxSerializableSnapshotRawSize = - 256 * MB; void HeapSnapshotJSONSerializer::Serialize(v8::OutputStream* stream) { ASSERT(writer_ == NULL); writer_ = new OutputStreamWriter(stream); HeapSnapshot* original_snapshot = NULL; - if (snapshot_->raw_entries_size() >= kMaxSerializableSnapshotRawSize) { + if (snapshot_->raw_entries_size() >= + SnapshotSizeConstants<kPointerSize>::kMaxSerializableSnapshotRawSize) { // The snapshot is too big. Serialize a fake snapshot. original_snapshot = snapshot_; snapshot_ = CreateFakeSnapshot(); @@ -3446,8 +3518,14 @@ HeapSnapshot* HeapSnapshotJSONSerializer::CreateFakeSnapshot() { snapshot_->uid()); result->AllocateEntries(2, 1, 0); HeapEntry* root = result->AddRootEntry(1); + const char* text = snapshot_->collection()->names()->GetFormatted( + "The snapshot is too big. " + "Maximum snapshot size is %d MB. 
" + "Actual snapshot size is %d MB.", + SnapshotSizeConstants<kPointerSize>::kMaxSerializableSnapshotRawSize / MB, + (snapshot_->raw_entries_size() + MB - 1) / MB); HeapEntry* message = result->AddEntry( - HeapEntry::kString, "The snapshot is too big", 0, 4, 0, 0); + HeapEntry::kString, text, 0, 4, 0, 0); root->SetUnidirElementReference(0, 1, message); result->SetDominatorsToSelf(); return result; @@ -3512,38 +3590,39 @@ int HeapSnapshotJSONSerializer::GetStringId(const char* s) { void HeapSnapshotJSONSerializer::SerializeEdge(HeapGraphEdge* edge) { - writer_->AddCharacter(','); - writer_->AddNumber(edge->type()); - writer_->AddCharacter(','); - if (edge->type() == HeapGraphEdge::kElement + // The buffer needs space for 3 ints, 3 commas and \0 + static const int kBufferSize = + MaxDecimalDigitsIn<sizeof(int)>::kSigned * 3 + 3 + 1; // NOLINT + EmbeddedVector<char, kBufferSize> buffer; + int edge_name_or_index = edge->type() == HeapGraphEdge::kElement || edge->type() == HeapGraphEdge::kHidden - || edge->type() == HeapGraphEdge::kWeak) { - writer_->AddNumber(edge->index()); - } else { - writer_->AddNumber(GetStringId(edge->name())); - } - writer_->AddCharacter(','); - writer_->AddNumber(GetNodeId(edge->to())); + || edge->type() == HeapGraphEdge::kWeak + ? edge->index() : GetStringId(edge->name()); + int result = OS::SNPrintF(buffer, ",%d,%d,%d", + edge->type(), edge_name_or_index, GetNodeId(edge->to())); + USE(result); + ASSERT(result != -1); + writer_->AddString(buffer.start()); } void HeapSnapshotJSONSerializer::SerializeNode(HeapEntry* entry) { - writer_->AddCharacter('\n'); - writer_->AddCharacter(','); - writer_->AddNumber(entry->type()); - writer_->AddCharacter(','); - writer_->AddNumber(GetStringId(entry->name())); - writer_->AddCharacter(','); - writer_->AddNumber(entry->id()); - writer_->AddCharacter(','); - writer_->AddNumber(entry->self_size()); - writer_->AddCharacter(','); - writer_->AddNumber(entry->RetainedSize(false)); - writer_->AddCharacter(','); - writer_->AddNumber(GetNodeId(entry->dominator())); + // The buffer needs space for 7 ints, 7 commas, \n and \0 + static const int kBufferSize = + MaxDecimalDigitsIn<sizeof(int)>::kSigned * 7 + 7 + 1 + 1; // NOLINT + EmbeddedVector<char, kBufferSize> buffer; Vector<HeapGraphEdge> children = entry->children(); - writer_->AddCharacter(','); - writer_->AddNumber(children.length()); + int result = OS::SNPrintF(buffer, "\n,%d,%d,%d,%d,%d,%d,%d", + entry->type(), + GetStringId(entry->name()), + entry->id(), + entry->self_size(), + entry->RetainedSize(false), + GetNodeId(entry->dominator()), + children.length()); + USE(result); + ASSERT(result != -1); + writer_->AddString(buffer.start()); for (int i = 0; i < children.length(); ++i) { SerializeEdge(&children[i]); if (writer_->aborted()) return; diff --git a/deps/v8/src/profile-generator.h b/deps/v8/src/profile-generator.h index a0dea588fe..13c6b2db0b 100644 --- a/deps/v8/src/profile-generator.h +++ b/deps/v8/src/profile-generator.h @@ -964,6 +964,10 @@ class V8HeapExplorer : public HeapEntriesAllocator { HeapEntry* parent, String* reference_name, Object* child); + void SetNativeBindReference(HeapObject* parent_obj, + HeapEntry* parent, + const char* reference_name, + Object* child); void SetElementReference(HeapObject* parent_obj, HeapEntry* parent, int index, @@ -1097,7 +1101,7 @@ class HeapSnapshotGenerator : public SnapshottingProgressReportingInterface { private: bool ApproximateRetainedSizes(); bool BuildDominatorTree(const Vector<HeapEntry*>& entries, - Vector<HeapEntry*>* 
dominators); + Vector<int>* dominators); bool CountEntriesAndReferences(); bool FillReferences(); void FillReversePostorderIndexes(Vector<HeapEntry*>* entries); diff --git a/deps/v8/src/proxy.js b/deps/v8/src/proxy.js index 3cd467faf2..4e86c8892a 100644 --- a/deps/v8/src/proxy.js +++ b/deps/v8/src/proxy.js @@ -25,6 +25,8 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +"use strict"; + global.Proxy = new $Object(); var $Proxy = global.Proxy diff --git a/deps/v8/src/regexp.js b/deps/v8/src/regexp.js index 00dd7f15b7..b724f68183 100644 --- a/deps/v8/src/regexp.js +++ b/deps/v8/src/regexp.js @@ -28,7 +28,7 @@ // Expect $Object = global.Object; // Expect $Array = global.Array; -const $RegExp = global.RegExp; +var $RegExp = global.RegExp; // A recursive descent parser for Patterns according to the grammar of // ECMA-262 15.10.1, with deviations noted below. @@ -413,24 +413,26 @@ function SetUpRegExp() { // The properties input, $input, and $_ are aliases for each other. When this // value is set the value it is set to is coerced to a string. // Getter and setter for the input. - function RegExpGetInput() { + var RegExpGetInput = function() { var regExpInput = LAST_INPUT(lastMatchInfo); return IS_UNDEFINED(regExpInput) ? "" : regExpInput; - } - function RegExpSetInput(string) { + }; + var RegExpSetInput = function(string) { LAST_INPUT(lastMatchInfo) = ToString(string); - } + }; - %DefineAccessor($RegExp, 'input', GETTER, RegExpGetInput, DONT_DELETE); - %DefineAccessor($RegExp, 'input', SETTER, RegExpSetInput, DONT_DELETE); - %DefineAccessor($RegExp, '$_', GETTER, RegExpGetInput, - DONT_ENUM | DONT_DELETE); - %DefineAccessor($RegExp, '$_', SETTER, RegExpSetInput, - DONT_ENUM | DONT_DELETE); - %DefineAccessor($RegExp, '$input', GETTER, RegExpGetInput, - DONT_ENUM | DONT_DELETE); - %DefineAccessor($RegExp, '$input', SETTER, RegExpSetInput, - DONT_ENUM | DONT_DELETE); + %DefineOrRedefineAccessorProperty($RegExp, 'input', GETTER, RegExpGetInput, + DONT_DELETE); + %DefineOrRedefineAccessorProperty($RegExp, 'input', SETTER, RegExpSetInput, + DONT_DELETE); + %DefineOrRedefineAccessorProperty($RegExp, '$_', GETTER, RegExpGetInput, + DONT_ENUM | DONT_DELETE); + %DefineOrRedefineAccessorProperty($RegExp, '$_', SETTER, RegExpSetInput, + DONT_ENUM | DONT_DELETE); + %DefineOrRedefineAccessorProperty($RegExp, '$input', GETTER, RegExpGetInput, + DONT_ENUM | DONT_DELETE); + %DefineOrRedefineAccessorProperty($RegExp, '$input', SETTER, RegExpSetInput, + DONT_ENUM | DONT_DELETE); // The properties multiline and $* are aliases for each other. When this // value is set in SpiderMonkey, the value it is set to is coerced to a @@ -441,52 +443,62 @@ function SetUpRegExp() { // Getter and setter for multiline. var multiline = false; - function RegExpGetMultiline() { return multiline; } - function RegExpSetMultiline(flag) { multiline = flag ? true : false; } + var RegExpGetMultiline = function() { return multiline; }; + var RegExpSetMultiline = function(flag) { multiline = flag ? 
true : false; }; - %DefineAccessor($RegExp, 'multiline', GETTER, RegExpGetMultiline, - DONT_DELETE); - %DefineAccessor($RegExp, 'multiline', SETTER, RegExpSetMultiline, - DONT_DELETE); - %DefineAccessor($RegExp, '$*', GETTER, RegExpGetMultiline, - DONT_ENUM | DONT_DELETE); - %DefineAccessor($RegExp, '$*', SETTER, RegExpSetMultiline, - DONT_ENUM | DONT_DELETE); + %DefineOrRedefineAccessorProperty($RegExp, 'multiline', GETTER, + RegExpGetMultiline, DONT_DELETE); + %DefineOrRedefineAccessorProperty($RegExp, 'multiline', SETTER, + RegExpSetMultiline, DONT_DELETE); + %DefineOrRedefineAccessorProperty($RegExp, '$*', GETTER, RegExpGetMultiline, + DONT_ENUM | DONT_DELETE); + %DefineOrRedefineAccessorProperty($RegExp, '$*', SETTER, RegExpSetMultiline, + DONT_ENUM | DONT_DELETE); - function NoOpSetter(ignored) {} + var NoOpSetter = function(ignored) {}; // Static properties set by a successful match. - %DefineAccessor($RegExp, 'lastMatch', GETTER, RegExpGetLastMatch, - DONT_DELETE); - %DefineAccessor($RegExp, 'lastMatch', SETTER, NoOpSetter, DONT_DELETE); - %DefineAccessor($RegExp, '$&', GETTER, RegExpGetLastMatch, - DONT_ENUM | DONT_DELETE); - %DefineAccessor($RegExp, '$&', SETTER, NoOpSetter, DONT_ENUM | DONT_DELETE); - %DefineAccessor($RegExp, 'lastParen', GETTER, RegExpGetLastParen, - DONT_DELETE); - %DefineAccessor($RegExp, 'lastParen', SETTER, NoOpSetter, DONT_DELETE); - %DefineAccessor($RegExp, '$+', GETTER, RegExpGetLastParen, - DONT_ENUM | DONT_DELETE); - %DefineAccessor($RegExp, '$+', SETTER, NoOpSetter, DONT_ENUM | DONT_DELETE); - %DefineAccessor($RegExp, 'leftContext', GETTER, RegExpGetLeftContext, - DONT_DELETE); - %DefineAccessor($RegExp, 'leftContext', SETTER, NoOpSetter, DONT_DELETE); - %DefineAccessor($RegExp, '$`', GETTER, RegExpGetLeftContext, - DONT_ENUM | DONT_DELETE); - %DefineAccessor($RegExp, '$`', SETTER, NoOpSetter, DONT_ENUM | DONT_DELETE); - %DefineAccessor($RegExp, 'rightContext', GETTER, RegExpGetRightContext, - DONT_DELETE); - %DefineAccessor($RegExp, 'rightContext', SETTER, NoOpSetter, DONT_DELETE); - %DefineAccessor($RegExp, "$'", GETTER, RegExpGetRightContext, - DONT_ENUM | DONT_DELETE); - %DefineAccessor($RegExp, "$'", SETTER, NoOpSetter, DONT_ENUM | DONT_DELETE); + %DefineOrRedefineAccessorProperty($RegExp, 'lastMatch', GETTER, + RegExpGetLastMatch, DONT_DELETE); + %DefineOrRedefineAccessorProperty($RegExp, 'lastMatch', SETTER, NoOpSetter, + DONT_DELETE); + %DefineOrRedefineAccessorProperty($RegExp, '$&', GETTER, RegExpGetLastMatch, + DONT_ENUM | DONT_DELETE); + %DefineOrRedefineAccessorProperty($RegExp, '$&', SETTER, NoOpSetter, + DONT_ENUM | DONT_DELETE); + %DefineOrRedefineAccessorProperty($RegExp, 'lastParen', GETTER, + RegExpGetLastParen, DONT_DELETE); + %DefineOrRedefineAccessorProperty($RegExp, 'lastParen', SETTER, NoOpSetter, + DONT_DELETE); + %DefineOrRedefineAccessorProperty($RegExp, '$+', GETTER, RegExpGetLastParen, + DONT_ENUM | DONT_DELETE); + %DefineOrRedefineAccessorProperty($RegExp, '$+', SETTER, NoOpSetter, + DONT_ENUM | DONT_DELETE); + %DefineOrRedefineAccessorProperty($RegExp, 'leftContext', GETTER, + RegExpGetLeftContext, DONT_DELETE); + %DefineOrRedefineAccessorProperty($RegExp, 'leftContext', SETTER, NoOpSetter, + DONT_DELETE); + %DefineOrRedefineAccessorProperty($RegExp, '$`', GETTER, RegExpGetLeftContext, + DONT_ENUM | DONT_DELETE); + %DefineOrRedefineAccessorProperty($RegExp, '$`', SETTER, NoOpSetter, + DONT_ENUM | DONT_DELETE); + %DefineOrRedefineAccessorProperty($RegExp, 'rightContext', GETTER, + RegExpGetRightContext, DONT_DELETE); + 
%DefineOrRedefineAccessorProperty($RegExp, 'rightContext', SETTER, NoOpSetter, + DONT_DELETE); + %DefineOrRedefineAccessorProperty($RegExp, "$'", GETTER, + RegExpGetRightContext, + DONT_ENUM | DONT_DELETE); + %DefineOrRedefineAccessorProperty($RegExp, "$'", SETTER, NoOpSetter, + DONT_ENUM | DONT_DELETE); for (var i = 1; i < 10; ++i) { - %DefineAccessor($RegExp, '$' + i, GETTER, RegExpMakeCaptureGetter(i), - DONT_DELETE); - %DefineAccessor($RegExp, '$' + i, SETTER, NoOpSetter, DONT_DELETE); + %DefineOrRedefineAccessorProperty($RegExp, '$' + i, GETTER, + RegExpMakeCaptureGetter(i), DONT_DELETE); + %DefineOrRedefineAccessorProperty($RegExp, '$' + i, SETTER, NoOpSetter, + DONT_DELETE); } } diff --git a/deps/v8/src/runtime-profiler.cc b/deps/v8/src/runtime-profiler.cc index 3e719cd3e1..8bd59d1de2 100644 --- a/deps/v8/src/runtime-profiler.cc +++ b/deps/v8/src/runtime-profiler.cc @@ -102,6 +102,25 @@ void RuntimeProfiler::GlobalSetup() { } +static void GetICCounts(JSFunction* function, + int* ic_with_typeinfo_count, + int* ic_total_count, + int* percentage) { + *ic_total_count = 0; + *ic_with_typeinfo_count = 0; + Object* raw_info = + function->shared()->code()->type_feedback_info(); + if (raw_info->IsTypeFeedbackInfo()) { + TypeFeedbackInfo* info = TypeFeedbackInfo::cast(raw_info); + *ic_with_typeinfo_count = info->ic_with_typeinfo_count(); + *ic_total_count = info->ic_total_count(); + } + *percentage = *ic_total_count > 0 + ? 100 * *ic_with_typeinfo_count / *ic_total_count + : 100; +} + + void RuntimeProfiler::Optimize(JSFunction* function, const char* reason) { ASSERT(function->IsOptimizable()); if (FLAG_trace_opt) { @@ -109,6 +128,11 @@ void RuntimeProfiler::Optimize(JSFunction* function, const char* reason) { function->PrintName(); PrintF(" 0x%" V8PRIxPTR, reinterpret_cast<intptr_t>(function->address())); PrintF(" for recompilation, reason: %s", reason); + if (FLAG_type_info_threshold > 0) { + int typeinfo, total, percentage; + GetICCounts(function, &typeinfo, &total, &percentage); + PrintF(", ICs with typeinfo: %d/%d (%d%%)", typeinfo, total, percentage); + } PrintF("]\n"); } @@ -147,9 +171,19 @@ void RuntimeProfiler::AttemptOnStackReplacement(JSFunction* function) { // Get the stack check stub code object to match against. We aren't // prepared to generate it, but we don't expect to have to. - StackCheckStub check_stub; + bool found_code = false; Code* stack_check_code = NULL; - if (check_stub.FindCodeInCache(&stack_check_code)) { +#ifdef V8_TARGET_ARCH_IA32 + if (FLAG_count_based_interrupts) { + InterruptStub interrupt_stub; + found_code = interrupt_stub.FindCodeInCache(&stack_check_code); + } else // NOLINT +#endif + { // NOLINT + StackCheckStub check_stub; + found_code = check_stub.FindCodeInCache(&stack_check_code); + } + if (found_code) { Code* replacement_code = isolate_->builtins()->builtin(Builtins::kOnStackReplacement); Code* unoptimized_code = shared->code(); @@ -198,8 +232,10 @@ void RuntimeProfiler::OptimizeNow() { JSFunction* samples[kSamplerFrameCount]; int sample_count = 0; int frame_count = 0; + int frame_count_limit = FLAG_watch_ic_patching ? FLAG_frame_count + : kSamplerFrameCount; for (JavaScriptFrameIterator it(isolate_); - frame_count++ < kSamplerFrameCount && !it.done(); + frame_count++ < frame_count_limit && !it.done(); it.Advance()) { JavaScriptFrame* frame = it.frame(); JSFunction* function = JSFunction::cast(frame->function()); @@ -232,13 +268,34 @@ void RuntimeProfiler::OptimizeNow() { // Do not record non-optimizable functions. 
if (!function->IsOptimizable()) continue; + // Only record top-level code on top of the execution stack and + // avoid optimizing excessively large scripts since top-level code + // will be executed only once. + const int kMaxToplevelSourceSize = 10 * 1024; + if (function->shared()->is_toplevel() + && (frame_count > 1 + || function->shared()->SourceSize() > kMaxToplevelSourceSize)) { + continue; + } + if (FLAG_watch_ic_patching) { int ticks = function->shared()->profiler_ticks(); if (ticks >= kProfilerTicksBeforeOptimization) { - // If this particular function hasn't had any ICs patched for enough - // ticks, optimize it now. - Optimize(function, "hot and stable"); + int typeinfo, total, percentage; + GetICCounts(function, &typeinfo, &total, &percentage); + if (percentage >= FLAG_type_info_threshold) { + // If this particular function hasn't had any ICs patched for enough + // ticks, optimize it now. + Optimize(function, "hot and stable"); + } else { + if (FLAG_trace_opt_verbose) { + PrintF("[not yet optimizing "); + function->PrintName(); + PrintF(", not enough type info: %d/%d (%d%%)]\n", + typeinfo, total, percentage); + } + } } else if (!any_ic_changed_ && function->shared()->code()->instruction_size() < kMaxSizeEarlyOpt) { // If no IC was patched since the last tick and this function is very @@ -255,7 +312,7 @@ void RuntimeProfiler::OptimizeNow() { } else { function->shared()->set_profiler_ticks(ticks + 1); } - } else { // !FLAG_counting_profiler + } else { // !FLAG_watch_ic_patching samples[sample_count++] = function; int function_size = function->shared()->SourceSize(); @@ -273,7 +330,7 @@ void RuntimeProfiler::OptimizeNow() { if (FLAG_watch_ic_patching) { any_ic_changed_ = false; code_generated_ = false; - } else { // !FLAG_counting_profiler + } else { // !FLAG_watch_ic_patching // Add the collected functions as samples. It's important not to do // this as part of collecting them because this will interfere with // the sample lookup in case of recursive functions. 
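The decision the new code makes can be restated as one predicate. This is a simplified paraphrase of the diff's logic, not code from the tree; the names are illustrative:

// A function is optimized only when it is hot (enough profiler ticks) and
// stable (a large enough share of its inline caches carry type info).
static bool ShouldOptimize(int ticks, int ticks_threshold,
                           int typeinfo_ics, int total_ics,
                           int typeinfo_percent_threshold) {
  int percentage = total_ics > 0 ? 100 * typeinfo_ics / total_ics : 100;
  return ticks >= ticks_threshold && percentage >= typeinfo_percent_threshold;
}

Treating zero ICs as 100% mirrors GetICCounts above: a function with no type-feedback sites has nothing left to learn, so it is not held back.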
@@ -285,6 +342,9 @@ void RuntimeProfiler::OptimizeNow() { void RuntimeProfiler::NotifyTick() { +#ifdef V8_TARGET_ARCH_IA32 + if (FLAG_count_based_interrupts) return; +#endif isolate_->stack_guard()->RequestRuntimeProfilerTick(); } @@ -303,7 +363,7 @@ void RuntimeProfiler::SetUp() { void RuntimeProfiler::Reset() { if (FLAG_watch_ic_patching) { total_code_generated_ = 0; - } else { // !FLAG_counting_profiler + } else { // !FLAG_watch_ic_patching sampler_threshold_ = kSamplerThresholdInit; sampler_threshold_size_factor_ = kSamplerThresholdSizeFactorInit; sampler_ticks_until_threshold_adjustment_ = diff --git a/deps/v8/src/runtime-profiler.h b/deps/v8/src/runtime-profiler.h index f37456653b..f7ca3f020d 100644 --- a/deps/v8/src/runtime-profiler.h +++ b/deps/v8/src/runtime-profiler.h @@ -101,6 +101,8 @@ class RuntimeProfiler { void RemoveDeadSamples(); void UpdateSamplesAfterCompact(ObjectVisitor* visitor); + void AttemptOnStackReplacement(JSFunction* function); + private: static const int kSamplerWindowSize = 16; @@ -108,8 +110,6 @@ class RuntimeProfiler { void Optimize(JSFunction* function, const char* reason); - void AttemptOnStackReplacement(JSFunction* function); - void ClearSampleBuffer(); void ClearSampleBufferNewSpaceEntries(); diff --git a/deps/v8/src/runtime.cc b/deps/v8/src/runtime.cc index 80ea7f4b6f..9597681959 100644 --- a/deps/v8/src/runtime.cc +++ b/deps/v8/src/runtime.cc @@ -69,20 +69,20 @@ namespace internal { // Cast the given object to a value of the specified type and store // it in a variable with the given name. If the object is not of the // expected type call IllegalOperation and return. -#define CONVERT_CHECKED(Type, name, obj) \ - RUNTIME_ASSERT(obj->Is##Type()); \ - Type* name = Type::cast(obj); - #define CONVERT_ARG_CHECKED(Type, name, index) \ RUNTIME_ASSERT(args[index]->Is##Type()); \ + Type* name = Type::cast(args[index]); + +#define CONVERT_ARG_HANDLE_CHECKED(Type, name, index) \ + RUNTIME_ASSERT(args[index]->Is##Type()); \ Handle<Type> name = args.at<Type>(index); // Cast the given object to a boolean and store it in a variable with // the given name. If the object is not a boolean call IllegalOperation // and return. -#define CONVERT_BOOLEAN_CHECKED(name, obj) \ - RUNTIME_ASSERT(obj->IsBoolean()); \ - bool name = (obj)->IsTrue(); +#define CONVERT_BOOLEAN_ARG_CHECKED(name, index) \ + RUNTIME_ASSERT(args[index]->IsBoolean()); \ + bool name = args[index]->IsTrue(); // Cast the given argument to a Smi and store its value in an int variable // with the given name. If the argument is not a Smi call IllegalOperation @@ -106,12 +106,20 @@ namespace internal { type name = NumberTo##Type(obj); +// Cast the given argument to PropertyDetails and store its value in a +// variable with the given name. If the argument is not a Smi call +// IllegalOperation and return. +#define CONVERT_PROPERTY_DETAILS_CHECKED(name, index) \ + RUNTIME_ASSERT(args[index]->IsSmi()); \ + PropertyDetails name = PropertyDetails(Smi::cast(args[index])); + + // Assert that the given argument has a valid value for a StrictModeFlag // and store it in a StrictModeFlag variable with the given name. 
-#define CONVERT_STRICT_MODE_ARG(name, index) \ - ASSERT(args[index]->IsSmi()); \ - ASSERT(args.smi_at(index) == kStrictMode || \ - args.smi_at(index) == kNonStrictMode); \ +#define CONVERT_STRICT_MODE_ARG_CHECKED(name, index) \ + RUNTIME_ASSERT(args[index]->IsSmi()); \ + RUNTIME_ASSERT(args.smi_at(index) == kStrictMode || \ + args.smi_at(index) == kNonStrictMode); \ StrictModeFlag name = \ static_cast<StrictModeFlag>(args.smi_at(index)); @@ -558,9 +566,9 @@ static Handle<Object> CreateLiteralBoilerplate( RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateObjectLiteral) { HandleScope scope(isolate); ASSERT(args.length() == 4); - CONVERT_ARG_CHECKED(FixedArray, literals, 0); + CONVERT_ARG_HANDLE_CHECKED(FixedArray, literals, 0); CONVERT_SMI_ARG_CHECKED(literals_index, 1); - CONVERT_ARG_CHECKED(FixedArray, constant_properties, 2); + CONVERT_ARG_HANDLE_CHECKED(FixedArray, constant_properties, 2); CONVERT_SMI_ARG_CHECKED(flags, 3); bool should_have_fast_elements = (flags & ObjectLiteral::kFastElements) != 0; bool has_function_literal = (flags & ObjectLiteral::kHasFunction) != 0; @@ -584,9 +592,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateObjectLiteral) { RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateObjectLiteralShallow) { HandleScope scope(isolate); ASSERT(args.length() == 4); - CONVERT_ARG_CHECKED(FixedArray, literals, 0); + CONVERT_ARG_HANDLE_CHECKED(FixedArray, literals, 0); CONVERT_SMI_ARG_CHECKED(literals_index, 1); - CONVERT_ARG_CHECKED(FixedArray, constant_properties, 2); + CONVERT_ARG_HANDLE_CHECKED(FixedArray, constant_properties, 2); CONVERT_SMI_ARG_CHECKED(flags, 3); bool should_have_fast_elements = (flags & ObjectLiteral::kFastElements) != 0; bool has_function_literal = (flags & ObjectLiteral::kHasFunction) != 0; @@ -610,9 +618,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateObjectLiteralShallow) { RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateArrayLiteral) { HandleScope scope(isolate); ASSERT(args.length() == 3); - CONVERT_ARG_CHECKED(FixedArray, literals, 0); + CONVERT_ARG_HANDLE_CHECKED(FixedArray, literals, 0); CONVERT_SMI_ARG_CHECKED(literals_index, 1); - CONVERT_ARG_CHECKED(FixedArray, elements, 2); + CONVERT_ARG_HANDLE_CHECKED(FixedArray, elements, 2); // Check if boilerplate exists. If not, create it first. Handle<Object> boilerplate(literals->get(literals_index), isolate); @@ -630,9 +638,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateArrayLiteral) { RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateArrayLiteralShallow) { HandleScope scope(isolate); ASSERT(args.length() == 3); - CONVERT_ARG_CHECKED(FixedArray, literals, 0); + CONVERT_ARG_HANDLE_CHECKED(FixedArray, literals, 0); CONVERT_SMI_ARG_CHECKED(literals_index, 1); - CONVERT_ARG_CHECKED(FixedArray, elements, 2); + CONVERT_ARG_HANDLE_CHECKED(FixedArray, elements, 2); // Check if boilerplate exists. If not, create it first. 
Handle<Object> boilerplate(literals->get(literals_index), isolate); @@ -691,28 +699,28 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_IsJSFunctionProxy) { RUNTIME_FUNCTION(MaybeObject*, Runtime_GetHandler) { ASSERT(args.length() == 1); - CONVERT_CHECKED(JSProxy, proxy, args[0]); + CONVERT_ARG_CHECKED(JSProxy, proxy, 0); return proxy->handler(); } RUNTIME_FUNCTION(MaybeObject*, Runtime_GetCallTrap) { ASSERT(args.length() == 1); - CONVERT_CHECKED(JSFunctionProxy, proxy, args[0]); + CONVERT_ARG_CHECKED(JSFunctionProxy, proxy, 0); return proxy->call_trap(); } RUNTIME_FUNCTION(MaybeObject*, Runtime_GetConstructTrap) { ASSERT(args.length() == 1); - CONVERT_CHECKED(JSFunctionProxy, proxy, args[0]); + CONVERT_ARG_CHECKED(JSFunctionProxy, proxy, 0); return proxy->construct_trap(); } RUNTIME_FUNCTION(MaybeObject*, Runtime_Fix) { ASSERT(args.length() == 1); - CONVERT_CHECKED(JSProxy, proxy, args[0]); + CONVERT_ARG_CHECKED(JSProxy, proxy, 0); proxy->Fix(); return isolate->heap()->undefined_value(); } @@ -721,7 +729,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Fix) { RUNTIME_FUNCTION(MaybeObject*, Runtime_SetInitialize) { HandleScope scope(isolate); ASSERT(args.length() == 1); - CONVERT_ARG_CHECKED(JSSet, holder, 0); + CONVERT_ARG_HANDLE_CHECKED(JSSet, holder, 0); Handle<ObjectHashSet> table = isolate->factory()->NewObjectHashSet(0); holder->set_table(*table); return *holder; @@ -731,7 +739,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetInitialize) { RUNTIME_FUNCTION(MaybeObject*, Runtime_SetAdd) { HandleScope scope(isolate); ASSERT(args.length() == 2); - CONVERT_ARG_CHECKED(JSSet, holder, 0); + CONVERT_ARG_HANDLE_CHECKED(JSSet, holder, 0); Handle<Object> key(args[1]); Handle<ObjectHashSet> table(ObjectHashSet::cast(holder->table())); table = ObjectHashSetAdd(table, key); @@ -743,7 +751,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetAdd) { RUNTIME_FUNCTION(MaybeObject*, Runtime_SetHas) { HandleScope scope(isolate); ASSERT(args.length() == 2); - CONVERT_ARG_CHECKED(JSSet, holder, 0); + CONVERT_ARG_HANDLE_CHECKED(JSSet, holder, 0); Handle<Object> key(args[1]); Handle<ObjectHashSet> table(ObjectHashSet::cast(holder->table())); return isolate->heap()->ToBoolean(table->Contains(*key)); @@ -753,7 +761,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetHas) { RUNTIME_FUNCTION(MaybeObject*, Runtime_SetDelete) { HandleScope scope(isolate); ASSERT(args.length() == 2); - CONVERT_ARG_CHECKED(JSSet, holder, 0); + CONVERT_ARG_HANDLE_CHECKED(JSSet, holder, 0); Handle<Object> key(args[1]); Handle<ObjectHashSet> table(ObjectHashSet::cast(holder->table())); table = ObjectHashSetRemove(table, key); @@ -765,7 +773,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetDelete) { RUNTIME_FUNCTION(MaybeObject*, Runtime_MapInitialize) { HandleScope scope(isolate); ASSERT(args.length() == 1); - CONVERT_ARG_CHECKED(JSMap, holder, 0); + CONVERT_ARG_HANDLE_CHECKED(JSMap, holder, 0); Handle<ObjectHashTable> table = isolate->factory()->NewObjectHashTable(0); holder->set_table(*table); return *holder; @@ -775,7 +783,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_MapInitialize) { RUNTIME_FUNCTION(MaybeObject*, Runtime_MapGet) { HandleScope scope(isolate); ASSERT(args.length() == 2); - CONVERT_ARG_CHECKED(JSMap, holder, 0); + CONVERT_ARG_HANDLE_CHECKED(JSMap, holder, 0); Handle<Object> key(args[1]); return ObjectHashTable::cast(holder->table())->Lookup(*key); } @@ -784,7 +792,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_MapGet) { RUNTIME_FUNCTION(MaybeObject*, Runtime_MapSet) { HandleScope scope(isolate); ASSERT(args.length() == 3); - 
CONVERT_ARG_CHECKED(JSMap, holder, 0); + CONVERT_ARG_HANDLE_CHECKED(JSMap, holder, 0); Handle<Object> key(args[1]); Handle<Object> value(args[2]); Handle<ObjectHashTable> table(ObjectHashTable::cast(holder->table())); @@ -797,7 +805,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_MapSet) { RUNTIME_FUNCTION(MaybeObject*, Runtime_WeakMapInitialize) { HandleScope scope(isolate); ASSERT(args.length() == 1); - CONVERT_ARG_CHECKED(JSWeakMap, weakmap, 0); + CONVERT_ARG_HANDLE_CHECKED(JSWeakMap, weakmap, 0); ASSERT(weakmap->map()->inobject_properties() == 0); Handle<ObjectHashTable> table = isolate->factory()->NewObjectHashTable(0); weakmap->set_table(*table); @@ -809,8 +817,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_WeakMapInitialize) { RUNTIME_FUNCTION(MaybeObject*, Runtime_WeakMapGet) { NoHandleAllocation ha; ASSERT(args.length() == 2); - CONVERT_ARG_CHECKED(JSWeakMap, weakmap, 0); - CONVERT_ARG_CHECKED(JSReceiver, key, 1); + CONVERT_ARG_HANDLE_CHECKED(JSWeakMap, weakmap, 0); + CONVERT_ARG_HANDLE_CHECKED(JSReceiver, key, 1); return ObjectHashTable::cast(weakmap->table())->Lookup(*key); } @@ -818,8 +826,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_WeakMapGet) { RUNTIME_FUNCTION(MaybeObject*, Runtime_WeakMapSet) { HandleScope scope(isolate); ASSERT(args.length() == 3); - CONVERT_ARG_CHECKED(JSWeakMap, weakmap, 0); - CONVERT_ARG_CHECKED(JSReceiver, key, 1); + CONVERT_ARG_HANDLE_CHECKED(JSWeakMap, weakmap, 0); + CONVERT_ARG_HANDLE_CHECKED(JSReceiver, key, 1); Handle<Object> value(args[2]); Handle<ObjectHashTable> table(ObjectHashTable::cast(weakmap->table())); Handle<ObjectHashTable> new_table = PutIntoObjectHashTable(table, key, value); @@ -840,7 +848,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ClassOf) { RUNTIME_FUNCTION(MaybeObject*, Runtime_GetPrototype) { NoHandleAllocation ha; ASSERT(args.length() == 1); - CONVERT_CHECKED(JSReceiver, input_obj, args[0]); + CONVERT_ARG_CHECKED(JSReceiver, input_obj, 0); Object* obj = input_obj; // We don't expect access checks to be needed on JSProxy objects. ASSERT(!obj->IsAccessCheckNeeded() || obj->IsJSObject()); @@ -1009,8 +1017,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOwnProperty) { Handle<FixedArray> elms = isolate->factory()->NewFixedArray(DESCRIPTOR_SIZE); Handle<JSArray> desc = isolate->factory()->NewJSArrayWithElements(elms); LookupResult result(isolate); - CONVERT_ARG_CHECKED(JSObject, obj, 0); - CONVERT_ARG_CHECKED(String, name, 1); + CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0); + CONVERT_ARG_HANDLE_CHECKED(String, name, 1); // This could be an element. 
uint32_t index; @@ -1147,14 +1155,14 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOwnProperty) { RUNTIME_FUNCTION(MaybeObject*, Runtime_PreventExtensions) { ASSERT(args.length() == 1); - CONVERT_CHECKED(JSObject, obj, args[0]); + CONVERT_ARG_CHECKED(JSObject, obj, 0); return obj->PreventExtensions(); } RUNTIME_FUNCTION(MaybeObject*, Runtime_IsExtensible) { ASSERT(args.length() == 1); - CONVERT_CHECKED(JSObject, obj, args[0]); + CONVERT_ARG_CHECKED(JSObject, obj, 0); if (obj->IsJSGlobalProxy()) { Object* proto = obj->GetPrototype(); if (proto->IsNull()) return isolate->heap()->false_value(); @@ -1168,9 +1176,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_IsExtensible) { RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpCompile) { HandleScope scope(isolate); ASSERT(args.length() == 3); - CONVERT_ARG_CHECKED(JSRegExp, re, 0); - CONVERT_ARG_CHECKED(String, pattern, 1); - CONVERT_ARG_CHECKED(String, flags, 2); + CONVERT_ARG_HANDLE_CHECKED(JSRegExp, re, 0); + CONVERT_ARG_HANDLE_CHECKED(String, pattern, 1); + CONVERT_ARG_HANDLE_CHECKED(String, flags, 2); Handle<Object> result = RegExpImpl::Compile(re, pattern, flags); if (result.is_null()) return Failure::Exception(); return *result; @@ -1180,7 +1188,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpCompile) { RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateApiFunction) { HandleScope scope(isolate); ASSERT(args.length() == 1); - CONVERT_ARG_CHECKED(FunctionTemplateInfo, data, 0); + CONVERT_ARG_HANDLE_CHECKED(FunctionTemplateInfo, data, 0); return *isolate->factory()->CreateApiFunction(data); } @@ -1195,9 +1203,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_IsTemplate) { RUNTIME_FUNCTION(MaybeObject*, Runtime_GetTemplateField) { ASSERT(args.length() == 2); - CONVERT_CHECKED(HeapObject, templ, args[0]); - CONVERT_CHECKED(Smi, field, args[1]); - int index = field->value(); + CONVERT_ARG_CHECKED(HeapObject, templ, 0); + CONVERT_SMI_ARG_CHECKED(index, 1) int offset = index * kPointerSize + HeapObject::kHeaderSize; InstanceType type = templ->map()->instance_type(); RUNTIME_ASSERT(type == FUNCTION_TEMPLATE_INFO_TYPE || @@ -1214,7 +1221,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetTemplateField) { RUNTIME_FUNCTION(MaybeObject*, Runtime_DisableAccessChecks) { ASSERT(args.length() == 1); - CONVERT_CHECKED(HeapObject, object, args[0]); + CONVERT_ARG_CHECKED(HeapObject, object, 0); Map* old_map = object->map(); bool needs_access_checks = old_map->is_access_check_needed(); if (needs_access_checks) { @@ -1233,7 +1240,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DisableAccessChecks) { RUNTIME_FUNCTION(MaybeObject*, Runtime_EnableAccessChecks) { ASSERT(args.length() == 1); - CONVERT_CHECKED(HeapObject, object, args[0]); + CONVERT_ARG_CHECKED(HeapObject, object, 0); Map* old_map = object->map(); if (!old_map->is_access_check_needed()) { // Copy map so it won't interfere constructor's initial map. @@ -1269,7 +1276,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareGlobals) { isolate->context()->global()); Handle<Context> context = args.at<Context>(0); - CONVERT_ARG_CHECKED(FixedArray, pairs, 1); + CONVERT_ARG_HANDLE_CHECKED(FixedArray, pairs, 1); CONVERT_SMI_ARG_CHECKED(flags, 2); // Traverse the name/value pairs and set the properties. 
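At a call site the renamed macros read as follows. Runtime_ExampleConcat is invented for illustration and is not part of this change; it assumes the Factory::NewConsString helper:

RUNTIME_FUNCTION(MaybeObject*, Runtime_ExampleConcat) {
  HandleScope scope(isolate);  // the body allocates, so handles are required
  ASSERT(args.length() == 2);
  CONVERT_ARG_HANDLE_CHECKED(String, left, 0);   // Handle<String> left
  CONVERT_ARG_HANDLE_CHECKED(String, right, 1);  // Handle<String> right
  // NewConsString may trigger GC; raw String* pointers from
  // CONVERT_ARG_CHECKED would be unsafe across this call.
  return *isolate->factory()->NewConsString(left, right);
}

The raw flavor, CONVERT_ARG_CHECKED, is reserved for bodies that do not allocate, typically those beginning with NoHandleAllocation, as in Runtime_GetPrototype above.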
@@ -1471,7 +1478,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeVarGlobal) {
   RUNTIME_ASSERT(args.length() == 2 || args.length() == 3);
   bool assign = args.length() == 3;
-  CONVERT_ARG_CHECKED(String, name, 0);
+  CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
   GlobalObject* global = isolate->context()->global();
   RUNTIME_ASSERT(args[1]->IsSmi());
   CONVERT_LANGUAGE_MODE_ARG(language_mode, 1);
@@ -1528,7 +1535,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeConstGlobal) {
   // of the constant is the first argument and the initial value
   // is the second.
   RUNTIME_ASSERT(args.length() == 2);
-  CONVERT_ARG_CHECKED(String, name, 0);
+  CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
   Handle<Object> value = args.at<Object>(1);
   // Get the current global object from top.
@@ -1700,7 +1707,7 @@ RUNTIME_FUNCTION(MaybeObject*,
     Runtime_OptimizeObjectForAddingMultipleProperties) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 2);
-  CONVERT_ARG_CHECKED(JSObject, object, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
   CONVERT_SMI_ARG_CHECKED(properties, 1);
   if (object->HasFastProperties()) {
     JSObject::NormalizeProperties(object, KEEP_INOBJECT_PROPERTIES, properties);
@@ -1712,12 +1719,12 @@ RUNTIME_FUNCTION(MaybeObject*,
 RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpExec) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 4);
-  CONVERT_ARG_CHECKED(JSRegExp, regexp, 0);
-  CONVERT_ARG_CHECKED(String, subject, 1);
+  CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 0);
+  CONVERT_ARG_HANDLE_CHECKED(String, subject, 1);
   // Due to the way the JS calls are constructed this must be less than the
   // length of a string, i.e. it is always a Smi.  We check anyway for security.
   CONVERT_SMI_ARG_CHECKED(index, 2);
-  CONVERT_ARG_CHECKED(JSArray, last_match_info, 3);
+  CONVERT_ARG_HANDLE_CHECKED(JSArray, last_match_info, 3);
   RUNTIME_ASSERT(last_match_info->HasFastElements());
   RUNTIME_ASSERT(index >= 0);
   RUNTIME_ASSERT(index <= subject->length());
@@ -1769,8 +1776,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpConstructResult) {
 RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpInitializeObject) {
   AssertNoAllocation no_alloc;
   ASSERT(args.length() == 5);
-  CONVERT_CHECKED(JSRegExp, regexp, args[0]);
-  CONVERT_CHECKED(String, source, args[1]);
+  CONVERT_ARG_CHECKED(JSRegExp, regexp, 0);
+  CONVERT_ARG_CHECKED(String, source, 1);
   Object* global = args[2];
   if (!global->IsTrue()) global = isolate->heap()->false_value();
@@ -1838,7 +1845,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpInitializeObject) {
 RUNTIME_FUNCTION(MaybeObject*, Runtime_FinishArrayPrototypeSetup) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
-  CONVERT_ARG_CHECKED(JSArray, prototype, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSArray, prototype, 0);
   // This is necessary to enable fast checks for absence of elements
   // on Array.prototype and below.
   prototype->set_elements(isolate->heap()->empty_fixed_array());
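Note: the handle flavor matters in functions like Runtime_RegExpExec because anything that can allocate can trigger a GC, and V8's collector moves objects: raw pointers go stale, handles do not. A schematic of the hazard the conversion avoids (not standalone-compilable; V8 internal types assumed):

    String* raw = String::cast(args[1]);       // direct pointer into the heap
    isolate->factory()->NewFixedArray(10);     // may GC and relocate *raw
    raw->length();                             // potential stale-pointer use

    Handle<String> safe = args.at<String>(1);  // slot the GC updates
    isolate->factory()->NewFixedArray(10);
    safe->length();                            // still valid after a move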
@@ -1867,7 +1874,7 @@ static Handle<JSFunction> InstallBuiltin(Isolate* isolate,
 RUNTIME_FUNCTION(MaybeObject*, Runtime_SpecialArrayFunctions) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
-  CONVERT_ARG_CHECKED(JSObject, holder, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSObject, holder, 0);
   InstallBuiltin(isolate, holder, "pop", Builtins::kArrayPop);
   InstallBuiltin(isolate, holder, "push", Builtins::kArrayPush);
@@ -1883,7 +1890,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SpecialArrayFunctions) {
 RUNTIME_FUNCTION(MaybeObject*, Runtime_GetDefaultReceiver) {
   ASSERT(args.length() == 1);
-  CONVERT_CHECKED(JSReceiver, callable, args[0]);
+  CONVERT_ARG_CHECKED(JSReceiver, callable, 0);
   if (!callable->IsJSFunction()) {
     HandleScope scope(isolate);
@@ -1911,7 +1918,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetDefaultReceiver) {
 RUNTIME_FUNCTION(MaybeObject*, Runtime_MaterializeRegExpLiteral) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 4);
-  CONVERT_ARG_CHECKED(FixedArray, literals, 0);
+  CONVERT_ARG_HANDLE_CHECKED(FixedArray, literals, 0);
   int index = args.smi_at(1);
   Handle<String> pattern = args.at<String>(2);
   Handle<String> flags = args.at<String>(3);
@@ -1942,7 +1949,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionGetName) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
-  CONVERT_CHECKED(JSFunction, f, args[0]);
+  CONVERT_ARG_CHECKED(JSFunction, f, 0);
   return f->shared()->name();
 }
@@ -1951,8 +1958,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetName) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
-  CONVERT_CHECKED(JSFunction, f, args[0]);
-  CONVERT_CHECKED(String, name, args[1]);
+  CONVERT_ARG_CHECKED(JSFunction, f, 0);
+  CONVERT_ARG_CHECKED(String, name, 1);
   f->shared()->set_name(name);
   return isolate->heap()->undefined_value();
 }
@@ -1961,7 +1968,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetName) {
 RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionNameShouldPrintAsAnonymous) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
-  CONVERT_CHECKED(JSFunction, f, args[0]);
+  CONVERT_ARG_CHECKED(JSFunction, f, 0);
   return isolate->heap()->ToBoolean(
       f->shared()->name_should_print_as_anonymous());
 }
@@ -1970,7 +1977,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionNameShouldPrintAsAnonymous) {
 RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionMarkNameShouldPrintAsAnonymous) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
-  CONVERT_CHECKED(JSFunction, f, args[0]);
+  CONVERT_ARG_CHECKED(JSFunction, f, 0);
   f->shared()->set_name_should_print_as_anonymous(true);
   return isolate->heap()->undefined_value();
 }
@@ -1980,7 +1987,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionRemovePrototype) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
-  CONVERT_CHECKED(JSFunction, f, args[0]);
+  CONVERT_ARG_CHECKED(JSFunction, f, 0);
   Object* obj = f->RemovePrototype();
   if (obj->IsFailure()) return obj;
@@ -1992,7 +1999,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionGetScript) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
-  CONVERT_CHECKED(JSFunction, fun, args[0]);
+  CONVERT_ARG_CHECKED(JSFunction, fun, 0);
   Handle<Object> script = Handle<Object>(fun->shared()->script(), isolate);
   if (!script->IsScript()) return isolate->heap()->undefined_value();
@@ -2004,7 +2011,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionGetSourceCode) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
-  CONVERT_ARG_CHECKED(JSFunction, f, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSFunction, f, 0);
   Handle<SharedFunctionInfo> shared(f->shared());
   return *shared->GetSourceCode();
 }
@@ -2014,7 +2021,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionGetScriptSourcePosition) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
-  CONVERT_CHECKED(JSFunction, fun, args[0]);
+  CONVERT_ARG_CHECKED(JSFunction, fun, 0);
   int pos = fun->shared()->start_position();
   return Smi::FromInt(pos);
 }
@@ -2023,7 +2030,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionGetScriptSourcePosition) {
 RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionGetPositionForOffset) {
   ASSERT(args.length() == 2);
-  CONVERT_CHECKED(Code, code, args[0]);
+  CONVERT_ARG_CHECKED(Code, code, 0);
   CONVERT_NUMBER_CHECKED(int, offset, Int32, args[1]);
   RUNTIME_ASSERT(0 <= offset && offset < code->Size());
@@ -2037,8 +2044,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetInstanceClassName) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
-  CONVERT_CHECKED(JSFunction, fun, args[0]);
-  CONVERT_CHECKED(String, name, args[1]);
+  CONVERT_ARG_CHECKED(JSFunction, fun, 0);
+  CONVERT_ARG_CHECKED(String, name, 1);
   fun->SetInstanceClassName(name);
   return isolate->heap()->undefined_value();
 }
@@ -2048,10 +2055,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetLength) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
-  CONVERT_CHECKED(JSFunction, fun, args[0]);
-  CONVERT_CHECKED(Smi, length, args[1]);
-  fun->shared()->set_length(length->value());
-  return length;
+  CONVERT_ARG_CHECKED(JSFunction, fun, 0);
+  CONVERT_SMI_ARG_CHECKED(length, 1);
+  fun->shared()->set_length(length);
+  return isolate->heap()->undefined_value();
 }
@@ -2059,7 +2066,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetPrototype) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
-  CONVERT_CHECKED(JSFunction, fun, args[0]);
+  CONVERT_ARG_CHECKED(JSFunction, fun, 0);
   ASSERT(fun->should_have_prototype());
   Object* obj;
   { MaybeObject* maybe_obj =
@@ -2073,7 +2080,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetPrototype) {
 RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetReadOnlyPrototype) {
   NoHandleAllocation ha;
   RUNTIME_ASSERT(args.length() == 1);
-  CONVERT_CHECKED(JSFunction, function, args[0]);
+  CONVERT_ARG_CHECKED(JSFunction, function, 0);
   MaybeObject* maybe_name =
       isolate->heap()->AllocateStringFromAscii(CStrVector("prototype"));
@@ -2129,7 +2136,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionIsAPIFunction) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
-  CONVERT_CHECKED(JSFunction, f, args[0]);
+  CONVERT_ARG_CHECKED(JSFunction, f, 0);
   return isolate->heap()->ToBoolean(f->shared()->IsApiFunction());
 }
@@ -2138,7 +2145,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionIsBuiltin) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
-  CONVERT_CHECKED(JSFunction, f, args[0]);
+  CONVERT_ARG_CHECKED(JSFunction, f, 0);
   return isolate->heap()->ToBoolean(f->IsBuiltin());
 }
@@ -2147,7 +2154,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetCode) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 2);
-  CONVERT_ARG_CHECKED(JSFunction, target, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSFunction, target, 0);
   Handle<Object> code = args.at<Object>(1);
   Handle<Context> context(target->context());
@@ -2211,7 +2218,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetCode) {
 RUNTIME_FUNCTION(MaybeObject*, Runtime_SetExpectedNumberOfProperties) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 2);
-  CONVERT_ARG_CHECKED(JSFunction, function, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
   CONVERT_SMI_ARG_CHECKED(num, 1);
   RUNTIME_ASSERT(num >= 0);
   SetExpectedNofProperties(function, num);
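Note: one behavioral tweak hides among the mechanical swaps above. Runtime_FunctionSetLength used to return the length Smi; it now returns undefined. In effect:

    // before
    fun->shared()->set_length(length->value());
    return length;                              // Smi argument echoed back

    // after
    fun->shared()->set_length(length);          // 'length' is now a plain int
    return isolate->heap()->undefined_value();  // result no longer observable

Presumably safe because callers ignore the result (an assumption based on this diff alone).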
@@ -2235,7 +2242,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringCharCodeAt) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
-  CONVERT_CHECKED(String, subject, args[0]);
+  CONVERT_ARG_CHECKED(String, subject, 0);
   Object* index = args[1];
   RUNTIME_ASSERT(index->IsNumber());
@@ -3212,7 +3219,7 @@ MUST_USE_RESULT static MaybeObject* StringReplaceRegExpWithEmptyString(
 RUNTIME_FUNCTION(MaybeObject*, Runtime_StringReplaceRegExpWithString) {
   ASSERT(args.length() == 4);
-  CONVERT_CHECKED(String, subject, args[0]);
+  CONVERT_ARG_CHECKED(String, subject, 0);
   if (!subject->IsFlat()) {
     Object* flat_subject;
     { MaybeObject* maybe_flat_subject = subject->TryFlatten();
@@ -3223,7 +3230,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringReplaceRegExpWithString) {
     subject = String::cast(flat_subject);
   }
-  CONVERT_CHECKED(String, replacement, args[2]);
+  CONVERT_ARG_CHECKED(String, replacement, 2);
   if (!replacement->IsFlat()) {
     Object* flat_replacement;
     { MaybeObject* maybe_flat_replacement = replacement->TryFlatten();
@@ -3234,8 +3241,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringReplaceRegExpWithString) {
     replacement = String::cast(flat_replacement);
   }
-  CONVERT_CHECKED(JSRegExp, regexp, args[1]);
-  CONVERT_CHECKED(JSArray, last_match_info, args[3]);
+  CONVERT_ARG_CHECKED(JSRegExp, regexp, 1);
+  CONVERT_ARG_CHECKED(JSArray, last_match_info, 3);
   ASSERT(last_match_info->HasFastElements());
@@ -3305,9 +3312,9 @@ Handle<String> Runtime::StringReplaceOneCharWithString(Isolate* isolate,
 RUNTIME_FUNCTION(MaybeObject*, Runtime_StringReplaceOneCharWithString) {
   ASSERT(args.length() == 3);
   HandleScope scope(isolate);
-  CONVERT_ARG_CHECKED(String, subject, 0);
-  CONVERT_ARG_CHECKED(String, search, 1);
-  CONVERT_ARG_CHECKED(String, replace, 2);
+  CONVERT_ARG_HANDLE_CHECKED(String, subject, 0);
+  CONVERT_ARG_HANDLE_CHECKED(String, search, 1);
+  CONVERT_ARG_HANDLE_CHECKED(String, replace, 2);
   // If the cons string tree is too deep, we simply abort the recursion and
   // retry with a flattened subject string.
@@ -3386,8 +3393,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringIndexOf) {
   HandleScope scope(isolate);  // create a new handle scope
   ASSERT(args.length() == 3);
-  CONVERT_ARG_CHECKED(String, sub, 0);
-  CONVERT_ARG_CHECKED(String, pat, 1);
+  CONVERT_ARG_HANDLE_CHECKED(String, sub, 0);
+  CONVERT_ARG_HANDLE_CHECKED(String, pat, 1);
   Object* index = args[2];
   uint32_t start_index;
@@ -3438,8 +3445,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringLastIndexOf) {
   HandleScope scope(isolate);  // create a new handle scope
   ASSERT(args.length() == 3);
-  CONVERT_ARG_CHECKED(String, sub, 0);
-  CONVERT_ARG_CHECKED(String, pat, 1);
+  CONVERT_ARG_HANDLE_CHECKED(String, sub, 0);
+  CONVERT_ARG_HANDLE_CHECKED(String, pat, 1);
   Object* index = args[2];
   uint32_t start_index;
@@ -3497,8 +3504,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringLocaleCompare) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
-  CONVERT_CHECKED(String, str1, args[0]);
-  CONVERT_CHECKED(String, str2, args[1]);
+  CONVERT_ARG_CHECKED(String, str1, 0);
+  CONVERT_ARG_CHECKED(String, str2, 1);
   if (str1 == str2) return Smi::FromInt(0);  // Equal.
   int str1_length = str1->length();
@@ -3545,7 +3552,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SubString) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 3);
-  CONVERT_CHECKED(String, value, args[0]);
+  CONVERT_ARG_CHECKED(String, value, 0);
   int start, end;
   // We have a fast integer-only case here to avoid a conversion to double in
   // the common case where from and to are Smis.
@@ -3571,9 +3578,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SubString) {
 RUNTIME_FUNCTION(MaybeObject*, Runtime_StringMatch) {
   ASSERT_EQ(3, args.length());
-  CONVERT_ARG_CHECKED(String, subject, 0);
-  CONVERT_ARG_CHECKED(JSRegExp, regexp, 1);
-  CONVERT_ARG_CHECKED(JSArray, regexp_info, 2);
+  CONVERT_ARG_HANDLE_CHECKED(String, subject, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 1);
+  CONVERT_ARG_HANDLE_CHECKED(JSArray, regexp_info, 2);
   HandleScope handles;
   Handle<Object> match = RegExpImpl::Exec(regexp, subject, 0, regexp_info);
@@ -3964,11 +3971,11 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpExecMultiple) {
   ASSERT(args.length() == 4);
   HandleScope handles(isolate);
-  CONVERT_ARG_CHECKED(String, subject, 1);
+  CONVERT_ARG_HANDLE_CHECKED(String, subject, 1);
   if (!subject->IsFlat()) FlattenString(subject);
-  CONVERT_ARG_CHECKED(JSRegExp, regexp, 0);
-  CONVERT_ARG_CHECKED(JSArray, last_match_info, 2);
-  CONVERT_ARG_CHECKED(JSArray, result_array, 3);
+  CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSArray, last_match_info, 2);
+  CONVERT_ARG_HANDLE_CHECKED(JSArray, result_array, 3);
   ASSERT(last_match_info->HasFastElements());
   ASSERT(regexp->GetFlags().is_global());
@@ -4317,19 +4324,18 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_KeyedGetProperty) {
 RUNTIME_FUNCTION(MaybeObject*, Runtime_DefineOrRedefineAccessorProperty) {
   ASSERT(args.length() == 5);
   HandleScope scope(isolate);
-  CONVERT_ARG_CHECKED(JSObject, obj, 0);
-  CONVERT_CHECKED(String, name, args[1]);
-  CONVERT_CHECKED(Smi, flag_setter, args[2]);
+  CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
+  CONVERT_ARG_CHECKED(String, name, 1);
+  CONVERT_SMI_ARG_CHECKED(flag_setter, 2);
   Object* fun = args[3];
-  CONVERT_CHECKED(Smi, flag_attr, args[4]);
+  CONVERT_SMI_ARG_CHECKED(unchecked, 4);
-  int unchecked = flag_attr->value();
   RUNTIME_ASSERT((unchecked & ~(READ_ONLY | DONT_ENUM | DONT_DELETE)) == 0);
   PropertyAttributes attr = static_cast<PropertyAttributes>(unchecked);
   RUNTIME_ASSERT(!obj->IsNull());
   RUNTIME_ASSERT(fun->IsSpecFunction() || fun->IsUndefined());
-  return obj->DefineAccessor(name, flag_setter->value() == 0, fun, attr);
+  return obj->DefineAccessor(name, flag_setter == 0, fun, attr);
 }
 // Implements part of 8.12.9 DefineOwnProperty.
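Note: the attribute guards in these hunks are plain bitmask tests. With the conventional flag values (READ_ONLY = 1, DONT_ENUM = 2, DONT_DELETE = 4, matching the public PropertyAttribute enum), a self-contained model of the check:

    #include <cassert>

    enum PropertyAttributes { NONE = 0, READ_ONLY = 1, DONT_ENUM = 2, DONT_DELETE = 4 };

    bool IsValidAttributes(int unchecked) {
      // Only the three attribute bits may be set.
      return (unchecked & ~(READ_ONLY | DONT_ENUM | DONT_DELETE)) == 0;
    }

    int main() {
      assert(IsValidAttributes(READ_ONLY | DONT_ENUM));  // 3: accepted
      assert(!IsValidAttributes(8));                     // stray bit: rejected
      return 0;
    }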
@@ -4341,12 +4347,11 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DefineOrRedefineAccessorProperty) {
 RUNTIME_FUNCTION(MaybeObject*, Runtime_DefineOrRedefineDataProperty) {
   ASSERT(args.length() == 4);
   HandleScope scope(isolate);
-  CONVERT_ARG_CHECKED(JSObject, js_object, 0);
-  CONVERT_ARG_CHECKED(String, name, 1);
+  CONVERT_ARG_HANDLE_CHECKED(JSObject, js_object, 0);
+  CONVERT_ARG_HANDLE_CHECKED(String, name, 1);
   Handle<Object> obj_value = args.at<Object>(2);
-  CONVERT_CHECKED(Smi, flag, args[3]);
+  CONVERT_SMI_ARG_CHECKED(unchecked, 3);
-  int unchecked = flag->value();
   RUNTIME_ASSERT((unchecked & ~(READ_ONLY | DONT_ENUM | DONT_DELETE)) == 0);
   PropertyAttributes attr = static_cast<PropertyAttributes>(unchecked);
@@ -4667,7 +4672,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetProperty) {
   StrictModeFlag strict_mode = kNonStrictMode;
   if (args.length() == 5) {
-    CONVERT_STRICT_MODE_ARG(strict_mode_flag, 4);
+    CONVERT_STRICT_MODE_ARG_CHECKED(strict_mode_flag, 4);
     strict_mode = strict_mode_flag;
   }
@@ -4715,10 +4720,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetNativeFlag) {
 RUNTIME_FUNCTION(MaybeObject*, Runtime_StoreArrayLiteralElement) {
   RUNTIME_ASSERT(args.length() == 5);
-  CONVERT_ARG_CHECKED(JSObject, object, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
   CONVERT_SMI_ARG_CHECKED(store_index, 1);
   Handle<Object> value = args.at<Object>(2);
-  CONVERT_ARG_CHECKED(FixedArray, literals, 3);
+  CONVERT_ARG_HANDLE_CHECKED(FixedArray, literals, 3);
   CONVERT_SMI_ARG_CHECKED(literal_index, 4);
   HandleScope scope;
@@ -4758,13 +4763,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StoreArrayLiteralElement) {
 RUNTIME_FUNCTION(MaybeObject*, Runtime_IgnoreAttributesAndSetProperty) {
   NoHandleAllocation ha;
   RUNTIME_ASSERT(args.length() == 3 || args.length() == 4);
-  CONVERT_CHECKED(JSObject, object, args[0]);
-  CONVERT_CHECKED(String, name, args[1]);
+  CONVERT_ARG_CHECKED(JSObject, object, 0);
+  CONVERT_ARG_CHECKED(String, name, 1);
   // Compute attributes.
   PropertyAttributes attributes = NONE;
   if (args.length() == 4) {
-    CONVERT_CHECKED(Smi, value_obj, args[3]);
-    int unchecked_value = value_obj->value();
+    CONVERT_SMI_ARG_CHECKED(unchecked_value, 3);
     // Only attribute bits should be set.
     RUNTIME_ASSERT(
         (unchecked_value & ~(READ_ONLY | DONT_ENUM | DONT_DELETE)) == 0);
@@ -4780,9 +4784,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeleteProperty) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 3);
-  CONVERT_CHECKED(JSReceiver, object, args[0]);
-  CONVERT_CHECKED(String, key, args[1]);
-  CONVERT_STRICT_MODE_ARG(strict_mode, 2);
+  CONVERT_ARG_CHECKED(JSReceiver, object, 0);
+  CONVERT_ARG_CHECKED(String, key, 1);
+  CONVERT_STRICT_MODE_ARG_CHECKED(strict_mode, 2);
   return object->DeleteProperty(key, (strict_mode == kStrictMode) ?
      JSReceiver::STRICT_DELETION : JSReceiver::NORMAL_DELETION);
@@ -4810,7 +4814,7 @@ static Object* HasLocalPropertyImplementation(Isolate* isolate,
 RUNTIME_FUNCTION(MaybeObject*, Runtime_HasLocalProperty) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
-  CONVERT_CHECKED(String, key, args[1]);
+  CONVERT_ARG_CHECKED(String, key, 1);
   uint32_t index;
   const bool key_is_array_index = key->AsArrayIndex(&index);
@@ -4848,8 +4852,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_HasLocalProperty) {
 RUNTIME_FUNCTION(MaybeObject*, Runtime_HasProperty) {
   NoHandleAllocation na;
   ASSERT(args.length() == 2);
-  CONVERT_CHECKED(JSReceiver, receiver, args[0]);
-  CONVERT_CHECKED(String, key, args[1]);
+  CONVERT_ARG_CHECKED(JSReceiver, receiver, 0);
+  CONVERT_ARG_CHECKED(String, key, 1);
   bool result = receiver->HasProperty(key);
   if (isolate->has_pending_exception()) return Failure::Exception();
@@ -4860,10 +4864,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_HasProperty) {
 RUNTIME_FUNCTION(MaybeObject*, Runtime_HasElement) {
   NoHandleAllocation na;
   ASSERT(args.length() == 2);
-  CONVERT_CHECKED(JSReceiver, receiver, args[0]);
-  CONVERT_CHECKED(Smi, index, args[1]);
+  CONVERT_ARG_CHECKED(JSReceiver, receiver, 0);
+  CONVERT_SMI_ARG_CHECKED(index, 1);
-  bool result = receiver->HasElement(index->value());
+  bool result = receiver->HasElement(index);
   if (isolate->has_pending_exception()) return Failure::Exception();
   return isolate->heap()->ToBoolean(result);
 }
@@ -4873,8 +4877,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_IsPropertyEnumerable) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
-  CONVERT_CHECKED(JSObject, object, args[0]);
-  CONVERT_CHECKED(String, key, args[1]);
+  CONVERT_ARG_CHECKED(JSObject, object, 0);
+  CONVERT_ARG_CHECKED(String, key, 1);
   uint32_t index;
   if (key->AsArrayIndex(&index)) {
@@ -4919,7 +4923,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_IsPropertyEnumerable) {
 RUNTIME_FUNCTION(MaybeObject*, Runtime_GetPropertyNames) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
-  CONVERT_ARG_CHECKED(JSReceiver, object, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSReceiver, object, 0);
   bool threw = false;
   Handle<JSArray> result = GetKeysFor(object, &threw);
   if (threw) return Failure::Exception();
@@ -4935,7 +4939,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetPropertyNames) {
 RUNTIME_FUNCTION(MaybeObject*, Runtime_GetPropertyNamesFast) {
   ASSERT(args.length() == 1);
-  CONVERT_CHECKED(JSReceiver, raw_object, args[0]);
+  CONVERT_ARG_CHECKED(JSReceiver, raw_object, 0);
   if (raw_object->IsSimpleEnum()) return raw_object->map();
@@ -4976,7 +4980,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetLocalPropertyNames) {
   if (!args[0]->IsJSObject()) {
     return isolate->heap()->undefined_value();
   }
-  CONVERT_ARG_CHECKED(JSObject, obj, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
   // Skip the global proxy as it has no properties and always delegates to the
   // real global object.
@@ -5063,7 +5067,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetLocalElementNames) {
   if (!args[0]->IsJSObject()) {
     return isolate->heap()->undefined_value();
   }
-  CONVERT_ARG_CHECKED(JSObject, obj, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
   int n = obj->NumberOfLocalElements(static_cast<PropertyAttributes>(NONE));
   Handle<FixedArray> names = isolate->factory()->NewFixedArray(n);
@@ -5080,7 +5084,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetInterceptorInfo) {
   if (!args[0]->IsJSObject()) {
     return Smi::FromInt(0);
   }
-  CONVERT_ARG_CHECKED(JSObject, obj, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
   int result = 0;
   if (obj->HasNamedInterceptor()) result |= 2;
@@ -5095,7 +5099,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetNamedInterceptorPropertyNames) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
-  CONVERT_ARG_CHECKED(JSObject, obj, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
   if (obj->HasNamedInterceptor()) {
     v8::Handle<v8::Array> result = GetKeysForNamedInterceptor(obj, obj);
@@ -5110,7 +5114,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetNamedInterceptorPropertyNames) {
 RUNTIME_FUNCTION(MaybeObject*, Runtime_GetIndexedInterceptorElementNames) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
-  CONVERT_ARG_CHECKED(JSObject, obj, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
   if (obj->HasIndexedInterceptor()) {
     v8::Handle<v8::Array> result = GetKeysForIndexedInterceptor(obj, obj);
@@ -5122,7 +5126,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetIndexedInterceptorElementNames) {
 RUNTIME_FUNCTION(MaybeObject*, Runtime_LocalKeys) {
   ASSERT_EQ(args.length(), 1);
-  CONVERT_CHECKED(JSObject, raw_object, args[0]);
+  CONVERT_ARG_CHECKED(JSObject, raw_object, 0);
   HandleScope scope(isolate);
   Handle<JSObject> object(raw_object);
@@ -5314,7 +5318,7 @@ static int ParseDecimalInteger(const char*s, int from, int to) {
 RUNTIME_FUNCTION(MaybeObject*, Runtime_StringToNumber) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
-  CONVERT_CHECKED(String, subject, args[0]);
+  CONVERT_ARG_CHECKED(String, subject, 0);
   subject->TryFlatten();
   // Fast case: short integer or some sorts of junk values.
@@ -5370,7 +5374,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringFromCharCodeArray) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
-  CONVERT_CHECKED(JSArray, codes, args[0]);
+  CONVERT_ARG_CHECKED(JSArray, codes, 0);
   int length = Smi::cast(codes->length())->value();
   // Check if the string can be ASCII.
@@ -5450,7 +5454,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_URIEscape) {
   const char hex_chars[] = "0123456789ABCDEF";
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
-  CONVERT_CHECKED(String, source, args[0]);
+  CONVERT_ARG_CHECKED(String, source, 0);
   source->TryFlatten();
@@ -5568,7 +5572,7 @@ static inline int Unescape(String* source,
 RUNTIME_FUNCTION(MaybeObject*, Runtime_URIUnescape) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
-  CONVERT_CHECKED(String, source, args[0]);
+  CONVERT_ARG_CHECKED(String, source, 0);
   source->TryFlatten();
@@ -5825,7 +5829,7 @@ static MaybeObject* QuoteJsonString(Isolate* isolate,
 RUNTIME_FUNCTION(MaybeObject*, Runtime_QuoteJSONString) {
   NoHandleAllocation ha;
-  CONVERT_CHECKED(String, str, args[0]);
+  CONVERT_ARG_CHECKED(String, str, 0);
   if (!str->IsFlat()) {
     MaybeObject* try_flatten = str->TryFlatten();
     Object* flat;
@@ -5849,7 +5853,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_QuoteJSONString) {
 RUNTIME_FUNCTION(MaybeObject*, Runtime_QuoteJSONStringComma) {
   NoHandleAllocation ha;
-  CONVERT_CHECKED(String, str, args[0]);
+  CONVERT_ARG_CHECKED(String, str, 0);
   if (!str->IsFlat()) {
     MaybeObject* try_flatten = str->TryFlatten();
     Object* flat;
@@ -5926,7 +5930,7 @@ static MaybeObject* QuoteJsonStringArray(Isolate* isolate,
 RUNTIME_FUNCTION(MaybeObject*, Runtime_QuoteJSONStringArray) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
-  CONVERT_CHECKED(JSArray, array, args[0]);
+  CONVERT_ARG_CHECKED(JSArray, array, 0);
   if (!array->HasFastElements()) return isolate->heap()->undefined_value();
   FixedArray* elements = FixedArray::cast(array->elements());
@@ -5968,7 +5972,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_QuoteJSONStringArray) {
 RUNTIME_FUNCTION(MaybeObject*, Runtime_StringParseInt) {
   NoHandleAllocation ha;
-  CONVERT_CHECKED(String, s, args[0]);
+  CONVERT_ARG_CHECKED(String, s, 0);
   CONVERT_SMI_ARG_CHECKED(radix, 1);
   s->TryFlatten();
@@ -5981,7 +5985,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringParseInt) {
 RUNTIME_FUNCTION(MaybeObject*, Runtime_StringParseFloat) {
   NoHandleAllocation ha;
-  CONVERT_CHECKED(String, str, args[0]);
+  CONVERT_ARG_CHECKED(String, str, 0);
   // ECMA-262 section 15.1.2.3, empty string is NaN
   double value = StringToDouble(isolate->unicode_cache(),
@@ -6230,7 +6234,7 @@ MUST_USE_RESULT static MaybeObject* ConvertCase(
     Isolate* isolate,
     unibrow::Mapping<typename ConvertTraits::UnibrowConverter, 128>* mapping) {
   NoHandleAllocation ha;
-  CONVERT_CHECKED(String, s, args[0]);
+  CONVERT_ARG_CHECKED(String, s, 0);
   s = s->TryFlattenGetString();
   const int length = s->length();
@@ -6292,9 +6296,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringTrim) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 3);
-  CONVERT_CHECKED(String, s, args[0]);
-  CONVERT_BOOLEAN_CHECKED(trimLeft, args[1]);
-  CONVERT_BOOLEAN_CHECKED(trimRight, args[2]);
+  CONVERT_ARG_CHECKED(String, s, 0);
+  CONVERT_BOOLEAN_ARG_CHECKED(trimLeft, 1);
+  CONVERT_BOOLEAN_ARG_CHECKED(trimRight, 2);
   s->TryFlatten();
   int length = s->length();
@@ -6319,8 +6323,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringSplit) {
   ASSERT(args.length() == 3);
   HandleScope handle_scope(isolate);
-  CONVERT_ARG_CHECKED(String, subject, 0);
-  CONVERT_ARG_CHECKED(String, pattern, 1);
+  CONVERT_ARG_HANDLE_CHECKED(String, subject, 0);
+  CONVERT_ARG_HANDLE_CHECKED(String, pattern, 1);
   CONVERT_NUMBER_CHECKED(uint32_t, limit, Uint32, args[2]);
   int subject_length = subject->length();
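Note: the boolean conversions follow the same renaming scheme, CONVERT_BOOLEAN_CHECKED(name, args[i]) becoming the index-based CONVERT_BOOLEAN_ARG_CHECKED(name, i). Assumed expansion, for illustration only:

    #define CONVERT_BOOLEAN_ARG_CHECKED(name, index)  \
      RUNTIME_ASSERT(args[index]->IsBoolean());       \
      bool name = args[index]->IsTrue();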
@@ -6441,7 +6445,7 @@ static int CopyCachedAsciiCharsToArray(Heap* heap,
 RUNTIME_FUNCTION(MaybeObject*, Runtime_StringToArray) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 2);
-  CONVERT_ARG_CHECKED(String, s, 0);
+  CONVERT_ARG_HANDLE_CHECKED(String, s, 0);
   CONVERT_NUMBER_CHECKED(uint32_t, limit, Uint32, args[1]);
   s = FlattenGetString(s);
@@ -6492,7 +6496,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringToArray) {
 RUNTIME_FUNCTION(MaybeObject*, Runtime_NewStringWrapper) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
-  CONVERT_CHECKED(String, value, args[0]);
+  CONVERT_ARG_CHECKED(String, value, 0);
   return value->ToObject();
 }
@@ -6683,8 +6687,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberMod) {
 RUNTIME_FUNCTION(MaybeObject*, Runtime_StringAdd) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
-  CONVERT_CHECKED(String, str1, args[0]);
-  CONVERT_CHECKED(String, str2, args[1]);
+  CONVERT_ARG_CHECKED(String, str1, 0);
+  CONVERT_ARG_CHECKED(String, str2, 1);
   isolate->counters()->string_add_runtime()->Increment();
   return isolate->heap()->AllocateConsString(str1, str2);
 }
@@ -6732,13 +6736,13 @@ static inline void StringBuilderConcatHelper(String* special,
 RUNTIME_FUNCTION(MaybeObject*, Runtime_StringBuilderConcat) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 3);
-  CONVERT_CHECKED(JSArray, array, args[0]);
+  CONVERT_ARG_CHECKED(JSArray, array, 0);
   if (!args[1]->IsSmi()) {
     isolate->context()->mark_out_of_memory();
     return Failure::OutOfMemoryException();
   }
   int array_length = args.smi_at(1);
-  CONVERT_CHECKED(String, special, args[2]);
+  CONVERT_ARG_CHECKED(String, special, 2);
   // This assumption is used by the slice encoding in one or two smis.
   ASSERT(Smi::kMaxValue >= String::kMaxLength);
@@ -6848,13 +6852,13 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringBuilderConcat) {
 RUNTIME_FUNCTION(MaybeObject*, Runtime_StringBuilderJoin) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 3);
-  CONVERT_CHECKED(JSArray, array, args[0]);
+  CONVERT_ARG_CHECKED(JSArray, array, 0);
   if (!args[1]->IsSmi()) {
     isolate->context()->mark_out_of_memory();
     return Failure::OutOfMemoryException();
   }
   int array_length = args.smi_at(1);
-  CONVERT_CHECKED(String, separator, args[2]);
+  CONVERT_ARG_CHECKED(String, separator, 2);
   if (!array->HasFastElements()) {
     return isolate->Throw(isolate->heap()->illegal_argument_symbol());
@@ -6972,11 +6976,11 @@ static void JoinSparseArrayWithSeparator(FixedArray* elements,
 RUNTIME_FUNCTION(MaybeObject*, Runtime_SparseJoinWithSeparator) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 3);
-  CONVERT_CHECKED(JSArray, elements_array, args[0]);
+  CONVERT_ARG_CHECKED(JSArray, elements_array, 0);
   RUNTIME_ASSERT(elements_array->HasFastElements() ||
                  elements_array->HasFastSmiOnlyElements());
   CONVERT_NUMBER_CHECKED(uint32_t, array_length, Uint32, args[1]);
-  CONVERT_CHECKED(String, separator, args[2]);
+  CONVERT_ARG_CHECKED(String, separator, 2);
   // elements_array is fast-mode JSarray of alternating positions
   // (increasing order) and strings.
   // array_length is length of original array (used to add separators);
@@ -6998,7 +7002,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SparseJoinWithSeparator) {
   FixedArray* elements = FixedArray::cast(elements_array->elements());
   for (int i = 0; i < elements_length; i += 2) {
     RUNTIME_ASSERT(elements->get(i)->IsNumber());
-    CONVERT_CHECKED(String, string, elements->get(i + 1));
+    RUNTIME_ASSERT(elements->get(i + 1)->IsString());
+    String* string = String::cast(elements->get(i + 1));
     int length = string->length();
     if (is_ascii && !string->IsAsciiRepresentation()) {
       is_ascii = false;
@@ -7156,8 +7161,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringEquals) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
-  CONVERT_CHECKED(String, x, args[0]);
-  CONVERT_CHECKED(String, y, args[1]);
+  CONVERT_ARG_CHECKED(String, x, 0);
+  CONVERT_ARG_CHECKED(String, y, 1);
   bool not_equal = !x->Equals(y);
   // This is slightly convoluted because the value that signifies
@@ -7188,12 +7193,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberCompare) {
 RUNTIME_FUNCTION(MaybeObject*, Runtime_SmiLexicographicCompare) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
-
-  // Extract the integer values from the Smis.
-  CONVERT_CHECKED(Smi, x, args[0]);
-  CONVERT_CHECKED(Smi, y, args[1]);
-  int x_value = x->value();
-  int y_value = y->value();
+  CONVERT_SMI_ARG_CHECKED(x_value, 0);
+  CONVERT_SMI_ARG_CHECKED(y_value, 1);
   // If the integers are equal so are the string representations.
   if (x_value == y_value) return Smi::FromInt(EQUAL);
@@ -7333,8 +7334,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringCompare) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
-  CONVERT_CHECKED(String, x, args[0]);
-  CONVERT_CHECKED(String, y, args[1]);
+  CONVERT_ARG_CHECKED(String, x, 0);
+  CONVERT_ARG_CHECKED(String, y, 1);
   isolate->counters()->string_compare_runtime()->Increment();
@@ -7941,7 +7942,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DateYMDFromTime) {
   ASSERT(args.length() == 2);
   CONVERT_DOUBLE_ARG_CHECKED(t, 0);
-  CONVERT_CHECKED(JSArray, res_array, args[1]);
+  CONVERT_ARG_CHECKED(JSArray, res_array, 1);
   int year, month, day;
   DateYMDFromTime(static_cast<int>(floor(t / 86400000)), year, month, day);
@@ -8096,9 +8097,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NewStrictArgumentsFast) {
 RUNTIME_FUNCTION(MaybeObject*, Runtime_NewClosure) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 3);
-  CONVERT_ARG_CHECKED(Context, context, 0);
-  CONVERT_ARG_CHECKED(SharedFunctionInfo, shared, 1);
-  CONVERT_BOOLEAN_CHECKED(pretenure, args[2]);
+  CONVERT_ARG_HANDLE_CHECKED(Context, context, 0);
+  CONVERT_ARG_HANDLE_CHECKED(SharedFunctionInfo, shared, 1);
+  CONVERT_BOOLEAN_ARG_CHECKED(pretenure, 2);
   // The caller ensures that we pretenure closures that are assigned
   // directly to properties.
@@ -8164,7 +8165,7 @@ static SmartArrayPointer<Handle<Object> > GetCallerArguments(
 RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionBindArguments) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 4);
-  CONVERT_ARG_CHECKED(JSFunction, bound_function, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSFunction, bound_function, 0);
   RUNTIME_ASSERT(args[3]->IsNumber());
   Handle<Object> bindee = args.at<Object>(1);
@@ -8222,7 +8223,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionBindArguments) {
 RUNTIME_FUNCTION(MaybeObject*, Runtime_BoundFunctionGetBindings) {
   HandleScope handles(isolate);
   ASSERT(args.length() == 1);
-  CONVERT_ARG_CHECKED(JSReceiver, callable, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSReceiver, callable, 0);
   if (callable->IsJSFunction()) {
     Handle<JSFunction> function = Handle<JSFunction>::cast(callable);
     if (function->shared()->bound()) {
@@ -8239,7 +8240,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NewObjectFromBound) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
   // First argument is a function to use as a constructor.
-  CONVERT_ARG_CHECKED(JSFunction, function, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
   RUNTIME_ASSERT(function->shared()->bound());
   // The argument is a bound function. Extract its bound arguments
@@ -8380,7 +8381,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FinalizeInstanceSize) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
-  CONVERT_ARG_CHECKED(JSFunction, function, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
   function->shared()->CompleteInobjectSlackTracking();
   TrySettingInlineConstructStub(isolate, function);
@@ -8569,7 +8570,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyOSR) {
 RUNTIME_FUNCTION(MaybeObject*, Runtime_DeoptimizeFunction) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
-  CONVERT_ARG_CHECKED(JSFunction, function, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
   if (!function->IsOptimized()) return isolate->heap()->undefined_value();
   Deoptimizer::DeoptimizeFunction(*function);
@@ -8589,10 +8590,22 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_RunningInSimulator) {
 RUNTIME_FUNCTION(MaybeObject*, Runtime_OptimizeFunctionOnNextCall) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
-  CONVERT_ARG_CHECKED(JSFunction, function, 0);
+  RUNTIME_ASSERT(args.length() == 1 || args.length() == 2);
+  CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+  if (!function->IsOptimizable()) return isolate->heap()->undefined_value();
   function->MarkForLazyRecompilation();
+
+  Code* unoptimized = function->shared()->code();
+  if (args.length() == 2 &&
+      unoptimized->kind() == Code::FUNCTION) {
+    CONVERT_ARG_HANDLE_CHECKED(String, type, 1);
+    CHECK(type->IsEqualTo(CStrVector("osr")));
+    isolate->runtime_profiler()->AttemptOnStackReplacement(*function);
+    unoptimized->set_allow_osr_at_loop_nesting_level(
+        Code::kMaxLoopNestingMarker);
+  }
+
   return isolate->heap()->undefined_value();
 }
@@ -8608,7 +8621,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOptimizationStatus) {
   if (FLAG_always_opt) {
    return Smi::FromInt(3);  // 3 == "always".
  }
-  CONVERT_ARG_CHECKED(JSFunction, function, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
   return function->IsOptimized() ? Smi::FromInt(1)   // 1 == "yes".
                                  : Smi::FromInt(2);  // 2 == "no".
 }
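Note: Runtime_OptimizeFunctionOnNextCall above gains an optional second argument; the only recognized value is the string "osr". The new arm, with commentary added (same code as in the hunk):

    CONVERT_ARG_HANDLE_CHECKED(String, type, 1);  // optional 2nd argument
    CHECK(type->IsEqualTo(CStrVector("osr")));    // only "osr" is accepted
    // Attempt on-stack replacement right away instead of waiting for the
    // profiler to find the loop hot.
    isolate->runtime_profiler()->AttemptOnStackReplacement(*function);
    // Permit OSR at any loop depth by raising the marker to its maximum.
    unoptimized->set_allow_osr_at_loop_nesting_level(
        Code::kMaxLoopNestingMarker);

This looks aimed at tests that want to force OSR deterministically (an inference; the diff itself does not say).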
@@ -8617,7 +8630,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOptimizationStatus) {
 RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOptimizationCount) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
-  CONVERT_ARG_CHECKED(JSFunction, function, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
   return Smi::FromInt(function->shared()->opt_count());
 }
@@ -8625,7 +8638,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOptimizationCount) {
 RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileForOnStackReplacement) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
-  CONVERT_ARG_CHECKED(JSFunction, function, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
   // We're not prepared to handle a function with arguments object.
   ASSERT(!function->shared()->uses_arguments());
@@ -8711,8 +8724,17 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileForOnStackReplacement) {
     function->PrintName();
     PrintF("]\n");
   }
-  StackCheckStub check_stub;
-  Handle<Code> check_code = check_stub.GetCode();
+  Handle<Code> check_code;
+#ifdef V8_TARGET_ARCH_IA32
+  if (FLAG_count_based_interrupts) {
+    InterruptStub interrupt_stub;
+    check_code = interrupt_stub.GetCode();
+  } else  // NOLINT
+#endif
+  {  // NOLINT
+    StackCheckStub check_stub;
+    check_code = check_stub.GetCode();
+  }
   Handle<Code> replacement_code = isolate->builtins()->OnStackReplacement();
   Deoptimizer::RevertStackCheckCode(*unoptimized,
                                     *check_code,
@@ -8745,9 +8767,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CheckIsBootstrapping) {
 RUNTIME_FUNCTION(MaybeObject*, Runtime_Call) {
   HandleScope scope(isolate);
   ASSERT(args.length() >= 2);
-  CONVERT_CHECKED(JSReceiver, fun, args[args.length() - 1]);
-  Object* receiver = args[0];
   int argc = args.length() - 2;
+  CONVERT_ARG_CHECKED(JSReceiver, fun, argc + 1);
+  Object* receiver = args[0];
   // If there are too many arguments, allocate argv via malloc.
   const int argv_small_size = 10;
@@ -8781,9 +8803,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Apply) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 5);
-  CONVERT_ARG_CHECKED(JSReceiver, fun, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSReceiver, fun, 0);
   Handle<Object> receiver = args.at<Object>(1);
-  CONVERT_ARG_CHECKED(JSObject, arguments, 2);
+  CONVERT_ARG_HANDLE_CHECKED(JSObject, arguments, 2);
   CONVERT_SMI_ARG_CHECKED(offset, 3);
   CONVERT_SMI_ARG_CHECKED(argc, 4);
   ASSERT(offset >= 0);
@@ -8833,7 +8855,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NewFunctionContext) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
-  CONVERT_CHECKED(JSFunction, function, args[0]);
+  CONVERT_ARG_CHECKED(JSFunction, function, 0);
   int length = function->shared()->scope_info()->ContextLength();
   Object* result;
   { MaybeObject* maybe_result =
@@ -8945,8 +8967,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeleteContextSlot) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 2);
-  CONVERT_ARG_CHECKED(Context, context, 0);
-  CONVERT_ARG_CHECKED(String, name, 1);
+  CONVERT_ARG_HANDLE_CHECKED(Context, context, 0);
+  CONVERT_ARG_HANDLE_CHECKED(String, name, 1);
   int index;
   PropertyAttributes attributes;
@@ -9138,8 +9160,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StoreContextSlot) {
   ASSERT(args.length() == 4);
   Handle<Object> value(args[0], isolate);
-  CONVERT_ARG_CHECKED(Context, context, 1);
-  CONVERT_ARG_CHECKED(String, name, 2);
+  CONVERT_ARG_HANDLE_CHECKED(Context, context, 1);
+  CONVERT_ARG_HANDLE_CHECKED(String, name, 2);
   CONVERT_LANGUAGE_MODE_ARG(language_mode, 3);
   StrictModeFlag strict_mode = (language_mode == CLASSIC_MODE) ?
       kNonStrictMode : kStrictMode;
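Note: the stub selection added to Runtime_CompileForOnStackReplacement pairs with the count-based interrupt experiment on ia32: under FLAG_count_based_interrupts, loops are patched with an InterruptStub rather than a StackCheckStub, so reverting the OSR patch must hand Deoptimizer::RevertStackCheckCode the stub that was actually planted. The shape of the dispatch, annotated:

    Handle<Code> check_code;
    #ifdef V8_TARGET_ARCH_IA32
      if (FLAG_count_based_interrupts) {
        InterruptStub interrupt_stub;        // counter-driven loop check
        check_code = interrupt_stub.GetCode();
      } else  // NOLINT
    #endif
      {  // NOLINT
        StackCheckStub check_stub;           // classic stack-limit check
        check_code = check_stub.GetCode();
      }

The new Runtime_Interrupt entry point below is the runtime half of the same mechanism.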
@@ -9266,6 +9288,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StackGuard) {
 }
 
+RUNTIME_FUNCTION(MaybeObject*, Runtime_Interrupt) {
+  ASSERT(args.length() == 0);
+  return Execution::HandleStackGuardInterrupt();
+}
+
+
 static int StackSize() {
   int n = 0;
   for (JavaScriptFrameIterator it; !it.done(); it.Advance()) n++;
@@ -9366,10 +9394,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DateParseString) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 2);
-  CONVERT_ARG_CHECKED(String, str, 0);
+  CONVERT_ARG_HANDLE_CHECKED(String, str, 0);
   FlattenString(str);
-  CONVERT_ARG_CHECKED(JSArray, output, 1);
+  CONVERT_ARG_HANDLE_CHECKED(JSArray, output, 1);
   MaybeObject* maybe_result_array =
       output->EnsureCanContainHeapObjectElements();
@@ -9439,7 +9467,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GlobalReceiver) {
 RUNTIME_FUNCTION(MaybeObject*, Runtime_ParseJson) {
   HandleScope scope(isolate);
   ASSERT_EQ(1, args.length());
-  CONVERT_ARG_CHECKED(String, source, 0);
+  CONVERT_ARG_HANDLE_CHECKED(String, source, 0);
   source = Handle<String>(source->TryFlattenGetString());
   // Optimized fast case where we only have ASCII characters.
@@ -9478,7 +9506,7 @@ bool CodeGenerationFromStringsAllowed(Isolate* isolate,
 RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileString) {
   HandleScope scope(isolate);
   ASSERT_EQ(1, args.length());
-  CONVERT_ARG_CHECKED(String, source, 0);
+  CONVERT_ARG_HANDLE_CHECKED(String, source, 0);
   // Extract global context.
   Handle<Context> context(isolate->context()->global_context());
@@ -9569,7 +9597,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetNewFunctionAttributes) {
   // as specified in ECMA262, 15.3.5.2.
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
-  CONVERT_ARG_CHECKED(JSFunction, func, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSFunction, func, 0);
   Handle<Map> map = func->shared()->is_classic_mode()
       ? isolate->function_instance_map()
@@ -9587,7 +9615,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_AllocateInNewSpace) {
   // Use as fallback for allocation in generated code when NewSpace
   // is full.
   ASSERT(args.length() == 1);
-  CONVERT_ARG_CHECKED(Smi, size_smi, 0);
+  CONVERT_ARG_HANDLE_CHECKED(Smi, size_smi, 0);
   int size = size_smi->value();
   RUNTIME_ASSERT(IsAligned(size, kPointerSize));
   RUNTIME_ASSERT(size > 0);
@@ -9609,8 +9637,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_AllocateInNewSpace) {
 // false otherwise.
 RUNTIME_FUNCTION(MaybeObject*, Runtime_PushIfAbsent) {
   ASSERT(args.length() == 2);
-  CONVERT_CHECKED(JSArray, array, args[0]);
-  CONVERT_CHECKED(JSObject, element, args[1]);
+  CONVERT_ARG_CHECKED(JSArray, array, 0);
+  CONVERT_ARG_CHECKED(JSObject, element, 1);
   RUNTIME_ASSERT(array->HasFastElements() || array->HasFastSmiOnlyElements());
   int length = Smi::cast(array->length())->value();
   FixedArray* elements = FixedArray::cast(array->elements());
@@ -10101,7 +10129,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ArrayConcat) {
   ASSERT(args.length() == 1);
   HandleScope handle_scope(isolate);
-  CONVERT_ARG_CHECKED(JSArray, arguments, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSArray, arguments, 0);
   int argument_count = static_cast<int>(arguments->length()->Number());
   RUNTIME_ASSERT(arguments->HasFastElements());
   Handle<FixedArray> elements(FixedArray::cast(arguments->elements()));
@@ -10196,7 +10224,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GlobalPrint) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
-  CONVERT_CHECKED(String, string, args[0]);
+  CONVERT_ARG_CHECKED(String, string, 0);
   StringInputBuffer buffer(string);
   while (buffer.has_more()) {
     uint16_t character = buffer.GetNext();
@@ -10212,7 +10240,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GlobalPrint) {
 // Returns the number of non-undefined elements collected.
 RUNTIME_FUNCTION(MaybeObject*, Runtime_RemoveArrayHoles) {
   ASSERT(args.length() == 2);
-  CONVERT_CHECKED(JSObject, object, args[0]);
+  CONVERT_ARG_CHECKED(JSObject, object, 0);
   CONVERT_NUMBER_CHECKED(uint32_t, limit, Uint32, args[1]);
   return object->PrepareElementsForSort(limit);
 }
@@ -10221,8 +10249,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_RemoveArrayHoles) {
 // Move contents of argument 0 (an array) to argument 1 (an array)
 RUNTIME_FUNCTION(MaybeObject*, Runtime_MoveArrayContents) {
   ASSERT(args.length() == 2);
-  CONVERT_CHECKED(JSArray, from, args[0]);
-  CONVERT_CHECKED(JSArray, to, args[1]);
+  CONVERT_ARG_CHECKED(JSArray, from, 0);
+  CONVERT_ARG_CHECKED(JSArray, to, 1);
   FixedArrayBase* new_elements = from->elements();
   MaybeObject* maybe_new_map;
   ElementsKind elements_kind;
@@ -10253,7 +10281,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_MoveArrayContents) {
 // How many elements does this object/array have?
 RUNTIME_FUNCTION(MaybeObject*, Runtime_EstimateNumberOfElements) {
   ASSERT(args.length() == 1);
-  CONVERT_CHECKED(JSObject, object, args[0]);
+  CONVERT_ARG_CHECKED(JSObject, object, 0);
   HeapObject* elements = object->elements();
   if (elements->IsDictionary()) {
     int result = SeededNumberDictionary::cast(elements)->NumberOfElements();
@@ -10271,7 +10299,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SwapElements) {
   ASSERT_EQ(3, args.length());
-  CONVERT_ARG_CHECKED(JSObject, object, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
   Handle<Object> key1 = args.at<Object>(1);
   Handle<Object> key2 = args.at<Object>(2);
@@ -10304,7 +10332,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SwapElements) {
 RUNTIME_FUNCTION(MaybeObject*, Runtime_GetArrayKeys) {
   ASSERT(args.length() == 2);
   HandleScope scope(isolate);
-  CONVERT_ARG_CHECKED(JSObject, array, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSObject, array, 0);
   CONVERT_NUMBER_CHECKED(uint32_t, length, Uint32, args[1]);
   if (array->elements()->IsDictionary()) {
     // Create an array and get all the keys into it, then remove all the
@@ -10343,37 +10371,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetArrayKeys) {
 }
 
-// DefineAccessor takes an optional final argument which is the
-// property attributes (e.g. DONT_ENUM, DONT_DELETE).  IMPORTANT: due
-// to the way accessors are implemented, it is set for both the getter
-// and setter on the first call to DefineAccessor and ignored on
-// subsequent calls.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DefineAccessor) {
-  RUNTIME_ASSERT(args.length() == 4 || args.length() == 5);
-  // Compute attributes.
-  PropertyAttributes attributes = NONE;
-  if (args.length() == 5) {
-    CONVERT_CHECKED(Smi, attrs, args[4]);
-    int value = attrs->value();
-    // Only attribute bits should be set.
-    ASSERT((value & ~(READ_ONLY | DONT_ENUM | DONT_DELETE)) == 0);
-    attributes = static_cast<PropertyAttributes>(value);
-  }
-
-  CONVERT_CHECKED(JSObject, obj, args[0]);
-  CONVERT_CHECKED(String, name, args[1]);
-  CONVERT_CHECKED(Smi, flag, args[2]);
-  CONVERT_CHECKED(JSFunction, fun, args[3]);
-  return obj->DefineAccessor(name, flag->value() == 0, fun, attributes);
-}
-
-
 RUNTIME_FUNCTION(MaybeObject*, Runtime_LookupAccessor) {
   ASSERT(args.length() == 3);
-  CONVERT_CHECKED(JSObject, obj, args[0]);
-  CONVERT_CHECKED(String, name, args[1]);
-  CONVERT_CHECKED(Smi, flag, args[2]);
-  return obj->LookupAccessor(name, flag->value() == 0);
+  CONVERT_ARG_CHECKED(JSObject, obj, 0);
+  CONVERT_ARG_CHECKED(String, name, 1);
+  CONVERT_SMI_ARG_CHECKED(flag, 2);
+  return obj->LookupAccessor(name, flag == 0);
 }
@@ -10391,8 +10394,8 @@ static Smi* WrapFrameId(StackFrame::Id id) {
 }
 
-static StackFrame::Id UnwrapFrameId(Smi* wrapped) {
-  return static_cast<StackFrame::Id>(wrapped->value() << 2);
+static StackFrame::Id UnwrapFrameId(int wrapped) {
+  return static_cast<StackFrame::Id>(wrapped << 2);
 }
@@ -10495,8 +10498,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugGetPropertyDetails) {
   ASSERT(args.length() == 2);
-  CONVERT_ARG_CHECKED(JSObject, obj, 0);
-  CONVERT_ARG_CHECKED(String, name, 1);
+  CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
+  CONVERT_ARG_HANDLE_CHECKED(String, name, 1);
   // Make sure to set the current context to the context before the debugger was
   // entered (if the debugger is entered). The reason for switching context here
@@ -10593,8 +10596,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugGetProperty) {
   ASSERT(args.length() == 2);
-  CONVERT_ARG_CHECKED(JSObject, obj, 0);
-  CONVERT_ARG_CHECKED(String, name, 1);
+  CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
+  CONVERT_ARG_HANDLE_CHECKED(String, name, 1);
   LookupResult result(isolate);
   obj->Lookup(*name, &result);
@@ -10609,9 +10612,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugGetProperty) {
 // args[0]: smi with property details.
 RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPropertyTypeFromDetails) {
   ASSERT(args.length() == 1);
-  CONVERT_CHECKED(Smi, details, args[0]);
-  PropertyType type = PropertyDetails(details).type();
-  return Smi::FromInt(static_cast<int>(type));
+  CONVERT_PROPERTY_DETAILS_CHECKED(details, 0);
+  return Smi::FromInt(static_cast<int>(details.type()));
 }
@@ -10619,9 +10621,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPropertyTypeFromDetails) {
 // args[0]: smi with property details.
 RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPropertyAttributesFromDetails) {
   ASSERT(args.length() == 1);
-  CONVERT_CHECKED(Smi, details, args[0]);
-  PropertyAttributes attributes = PropertyDetails(details).attributes();
-  return Smi::FromInt(static_cast<int>(attributes));
+  CONVERT_PROPERTY_DETAILS_CHECKED(details, 0);
+  return Smi::FromInt(static_cast<int>(details.attributes()));
 }
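Note: two conversions in this stretch go beyond renaming. CONVERT_PROPERTY_DETAILS_CHECKED presumably expands along these lines (an assumption, mirroring the other macros):

    #define CONVERT_PROPERTY_DETAILS_CHECKED(name, index)  \
      RUNTIME_ASSERT(args[index]->IsSmi());                \
      PropertyDetails name = PropertyDetails(Smi::cast(args[index]));

And UnwrapFrameId now takes the int that CONVERT_SMI_ARG_CHECKED produces at its call sites. The wrap/unwrap pair is a simple shift that relies on frame ids being 4-byte aligned; a self-contained model:

    #include <cassert>

    // Frame ids are address-like, so the low two bits are zero; shifting
    // right by 2 lets the id fit comfortably in a Smi.
    int WrapFrameId(int id) { assert((id & 3) == 0); return id >> 2; }
    int UnwrapFrameId(int wrapped) { return wrapped << 2; }

    int main() {
      assert(UnwrapFrameId(WrapFrameId(0x5f0)) == 0x5f0);
      return 0;
    }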
@@ -10629,9 +10630,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPropertyAttributesFromDetails) {
 // args[0]: smi with property details.
 RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPropertyIndexFromDetails) {
   ASSERT(args.length() == 1);
-  CONVERT_CHECKED(Smi, details, args[0]);
-  int index = PropertyDetails(details).index();
-  return Smi::FromInt(index);
+  CONVERT_PROPERTY_DETAILS_CHECKED(details, 0);
+  return Smi::FromInt(details.index());
 }
@@ -10641,9 +10641,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPropertyIndexFromDetails) {
 RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugNamedInterceptorPropertyValue) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 2);
-  CONVERT_ARG_CHECKED(JSObject, obj, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
   RUNTIME_ASSERT(obj->HasNamedInterceptor());
-  CONVERT_ARG_CHECKED(String, name, 1);
+  CONVERT_ARG_HANDLE_CHECKED(String, name, 1);
   PropertyAttributes attributes;
   return obj->GetPropertyWithInterceptor(*obj, *name, &attributes);
@@ -10656,7 +10656,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugNamedInterceptorPropertyValue) {
 RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugIndexedInterceptorElementValue) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 2);
-  CONVERT_ARG_CHECKED(JSObject, obj, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
   RUNTIME_ASSERT(obj->HasIndexedInterceptor());
   CONVERT_NUMBER_CHECKED(uint32_t, index, Uint32, args[1]);
@@ -11268,6 +11268,29 @@ static Handle<JSObject> MaterializeBlockScope(
 }
 
+// Create a plain JSObject which materializes the module scope for the
+// specified module context.
+static Handle<JSObject> MaterializeModuleScope(
+    Isolate* isolate,
+    Handle<Context> context) {
+  ASSERT(context->IsModuleContext());
+  Handle<ScopeInfo> scope_info(ScopeInfo::cast(context->extension()));
+
+  // Allocate and initialize a JSObject with all the members of the debugged
+  // module.
+  Handle<JSObject> module_scope =
+      isolate->factory()->NewJSObject(isolate->object_function());
+
+  // Fill all context locals.
+  if (!CopyContextLocalsToScopeObject(
+          isolate, scope_info, context, module_scope)) {
+    return Handle<JSObject>();
+  }
+
+  return module_scope;
+}
+
+
 // Iterate over the actual scopes visible from a stack frame. The iteration
 // proceeds from the innermost visible nested scope outwards.  All scopes are
 // backed by an actual context except the local scope, which is inserted
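Note: MaterializeModuleScope is the debugger's half of the new (Harmony) module contexts: like MaterializeBlockScope, it snapshots the context-allocated locals into a plain JSObject the debugger can enumerate. The same code as added above, with the comments expanded:

    static Handle<JSObject> MaterializeModuleScope(
        Isolate* isolate,
        Handle<Context> context) {
      ASSERT(context->IsModuleContext());
      // The module's variable layout lives in the ScopeInfo stored in the
      // context's extension slot.
      Handle<ScopeInfo> scope_info(ScopeInfo::cast(context->extension()));

      // Fresh plain object to receive a snapshot of the module's locals.
      Handle<JSObject> module_scope =
          isolate->factory()->NewJSObject(isolate->object_function());

      // Copy every context local into the snapshot; an empty handle
      // signals failure to the caller.
      if (!CopyContextLocalsToScopeObject(
              isolate, scope_info, context, module_scope)) {
        return Handle<JSObject>();
      }
      return module_scope;
    }

ScopeTypeModule is then threaded through the three switch points in ScopeIterator below.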
@@ -11280,7 +11303,8 @@ class ScopeIterator {
     ScopeTypeWith,
     ScopeTypeClosure,
     ScopeTypeCatch,
-    ScopeTypeBlock
+    ScopeTypeBlock,
+    ScopeTypeModule
   };
 
   ScopeIterator(Isolate* isolate,
@@ -11403,6 +11427,9 @@ class ScopeIterator {
         ASSERT(context_->IsFunctionContext() ||
                !scope_info->HasContext());
         return ScopeTypeLocal;
+      case MODULE_SCOPE:
+        ASSERT(context_->IsModuleContext());
+        return ScopeTypeModule;
       case GLOBAL_SCOPE:
         ASSERT(context_->IsGlobalContext());
         return ScopeTypeGlobal;
@@ -11433,6 +11460,9 @@ class ScopeIterator {
     if (context_->IsBlockContext()) {
       return ScopeTypeBlock;
    }
+    if (context_->IsModuleContext()) {
+      return ScopeTypeModule;
+    }
     ASSERT(context_->IsWithContext());
     return ScopeTypeWith;
   }
@@ -11456,6 +11486,8 @@ class ScopeIterator {
         return MaterializeClosure(isolate_, CurrentContext());
       case ScopeIterator::ScopeTypeBlock:
         return MaterializeBlockScope(isolate_, CurrentContext());
+      case ScopeIterator::ScopeTypeModule:
+        return MaterializeModuleScope(isolate_, CurrentContext());
     }
     UNREACHABLE();
     return Handle<JSObject>();
@@ -11560,7 +11592,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetScopeCount) {
                                                RUNTIME_ARGUMENTS(isolate, args));
     if (!maybe_check->ToObject(&check)) return maybe_check;
   }
-  CONVERT_CHECKED(Smi, wrapped_id, args[1]);
+  CONVERT_SMI_ARG_CHECKED(wrapped_id, 1);
   // Get the frame where the debugging is performed.
   StackFrame::Id id = UnwrapFrameId(wrapped_id);
@@ -11602,7 +11634,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetScopeDetails) {
                                                RUNTIME_ARGUMENTS(isolate, args));
     if (!maybe_check->ToObject(&check)) return maybe_check;
   }
-  CONVERT_CHECKED(Smi, wrapped_id, args[1]);
+  CONVERT_SMI_ARG_CHECKED(wrapped_id, 1);
   CONVERT_NUMBER_CHECKED(int, inlined_jsframe_index, Int32, args[2]);
   CONVERT_NUMBER_CHECKED(int, index, Int32, args[3]);
@@ -11742,7 +11774,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetThreadDetails) {
 RUNTIME_FUNCTION(MaybeObject*, Runtime_SetDisableBreak) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
-  CONVERT_BOOLEAN_CHECKED(disable_break, args[0]);
+  CONVERT_BOOLEAN_ARG_CHECKED(disable_break, 0);
   isolate->debug()->set_disable_break(disable_break);
   return isolate->heap()->undefined_value();
 }
@@ -11752,7 +11784,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetBreakLocations) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
-  CONVERT_ARG_CHECKED(JSFunction, fun, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSFunction, fun, 0);
   Handle<SharedFunctionInfo> shared(fun->shared());
   // Find the number of break points
   Handle<Object> break_locations = Debug::GetSourceBreakLocations(shared);
@@ -11770,7 +11802,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetBreakLocations) {
 RUNTIME_FUNCTION(MaybeObject*, Runtime_SetFunctionBreakPoint) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 3);
-  CONVERT_ARG_CHECKED(JSFunction, fun, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSFunction, fun, 0);
   Handle<SharedFunctionInfo> shared(fun->shared());
   CONVERT_NUMBER_CHECKED(int32_t, source_position, Int32, args[1]);
   RUNTIME_ASSERT(source_position >= 0);
@@ -11876,7 +11908,7 @@ Object* Runtime::FindSharedFunctionInfoInScript(Isolate* isolate,
 RUNTIME_FUNCTION(MaybeObject*, Runtime_SetScriptBreakPoint) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 3);
-  CONVERT_ARG_CHECKED(JSValue, wrapper, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSValue, wrapper, 0);
   CONVERT_NUMBER_CHECKED(int32_t, source_position, Int32, args[1]);
   RUNTIME_ASSERT(source_position >= 0);
   Handle<Object> break_point_object_arg = args.at<Object>(2);
@@ -11926,7 +11958,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ChangeBreakOnException) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 2);
   RUNTIME_ASSERT(args[0]->IsNumber());
-  CONVERT_BOOLEAN_CHECKED(enable, args[1]);
+  CONVERT_BOOLEAN_ARG_CHECKED(enable, 1);
   // If the number doesn't match an enum value, the ChangeBreakOnException
   // function will default to affecting caught exceptions.
@@ -12141,10 +12173,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugEvaluate) {
       return maybe_check_result;
    }
  }
-  CONVERT_CHECKED(Smi, wrapped_id, args[1]);
+  CONVERT_SMI_ARG_CHECKED(wrapped_id, 1);
   CONVERT_NUMBER_CHECKED(int, inlined_jsframe_index, Int32, args[2]);
-  CONVERT_ARG_CHECKED(String, source, 3);
-  CONVERT_BOOLEAN_CHECKED(disable_break, args[4]);
+  CONVERT_ARG_HANDLE_CHECKED(String, source, 3);
+  CONVERT_BOOLEAN_ARG_CHECKED(disable_break, 4);
   Handle<Object> additional_context(args[5]);
   // Handle the processing of break.
@@ -12280,8 +12312,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugEvaluateGlobal) {
       return maybe_check_result;
    }
  }
-  CONVERT_ARG_CHECKED(String, source, 1);
-  CONVERT_BOOLEAN_CHECKED(disable_break, args[2]);
+  CONVERT_ARG_HANDLE_CHECKED(String, source, 1);
+  CONVERT_BOOLEAN_ARG_CHECKED(disable_break, 2);
   Handle<Object> additional_context(args[3]);
   // Handle the processing of break.
@@ -12454,7 +12486,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugReferencedBy) {
   // Object* locals that are not protected by handles.
   // Check parameters.
-  CONVERT_CHECKED(JSObject, target, args[0]);
+  CONVERT_ARG_CHECKED(JSObject, target, 0);
   Object* instance_filter = args[1];
   RUNTIME_ASSERT(instance_filter->IsUndefined() ||
                  instance_filter->IsJSObject());
@@ -12542,7 +12574,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugConstructedBy) {
                                       "%DebugConstructedBy");
   // Check parameters.
-  CONVERT_CHECKED(JSFunction, constructor, args[0]);
+  CONVERT_ARG_CHECKED(JSFunction, constructor, 0);
   CONVERT_NUMBER_CHECKED(int32_t, max_references, Int32, args[1]);
   RUNTIME_ASSERT(max_references >= 0);
@@ -12586,7 +12618,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugConstructedBy) {
 RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugGetPrototype) {
   ASSERT(args.length() == 1);
-  CONVERT_CHECKED(JSObject, obj, args[0]);
+  CONVERT_ARG_CHECKED(JSObject, obj, 0);
   // Use the __proto__ accessor.
   return Accessors::ObjectPrototype.getter(obj, NULL);
@@ -12605,7 +12637,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugDisassembleFunction) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
   // Get the function and make sure it is compiled.
-  CONVERT_ARG_CHECKED(JSFunction, func, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSFunction, func, 0);
   Handle<SharedFunctionInfo> shared(func->shared());
   if (!SharedFunctionInfo::EnsureCompiled(shared, KEEP_EXCEPTION)) {
     return Failure::Exception();
@@ -12621,7 +12653,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugDisassembleConstructor) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
   // Get the function and make sure it is compiled.
- CONVERT_ARG_CHECKED(JSFunction, func, 0); + CONVERT_ARG_HANDLE_CHECKED(JSFunction, func, 0); Handle<SharedFunctionInfo> shared(func->shared()); if (!SharedFunctionInfo::EnsureCompiled(shared, KEEP_EXCEPTION)) { return Failure::Exception(); @@ -12636,7 +12668,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionGetInferredName) { NoHandleAllocation ha; ASSERT(args.length() == 1); - CONVERT_CHECKED(JSFunction, f, args[0]); + CONVERT_ARG_CHECKED(JSFunction, f, 0); return f->shared()->inferred_name(); } @@ -12673,7 +12705,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditFindSharedFunctionInfosForScript) { ASSERT(args.length() == 1); HandleScope scope(isolate); - CONVERT_CHECKED(JSValue, script_value, args[0]); + CONVERT_ARG_CHECKED(JSValue, script_value, 0); Handle<Script> script = Handle<Script>(Script::cast(script_value->value())); @@ -12719,8 +12751,8 @@ RUNTIME_FUNCTION(MaybeObject*, RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditGatherCompileInfo) { ASSERT(args.length() == 2); HandleScope scope(isolate); - CONVERT_CHECKED(JSValue, script, args[0]); - CONVERT_ARG_CHECKED(String, source, 1); + CONVERT_ARG_CHECKED(JSValue, script, 0); + CONVERT_ARG_HANDLE_CHECKED(String, source, 1); Handle<Script> script_handle = Handle<Script>(Script::cast(script->value())); JSArray* result = LiveEdit::GatherCompileInfo(script_handle, source); @@ -12738,13 +12770,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditGatherCompileInfo) { RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditReplaceScript) { ASSERT(args.length() == 3); HandleScope scope(isolate); - CONVERT_CHECKED(JSValue, original_script_value, args[0]); - CONVERT_ARG_CHECKED(String, new_source, 1); + CONVERT_ARG_CHECKED(JSValue, original_script_value, 0); + CONVERT_ARG_HANDLE_CHECKED(String, new_source, 1); Handle<Object> old_script_name(args[2], isolate); - CONVERT_CHECKED(Script, original_script_pointer, - original_script_value->value()); - Handle<Script> original_script(original_script_pointer); + RUNTIME_ASSERT(original_script_value->value()->IsScript()); + Handle<Script> original_script(Script::cast(original_script_value->value())); Object* old_script = LiveEdit::ChangeScriptSource(original_script, new_source, @@ -12762,7 +12793,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditReplaceScript) { RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditFunctionSourceUpdated) { ASSERT(args.length() == 1); HandleScope scope(isolate); - CONVERT_ARG_CHECKED(JSArray, shared_info, 0); + CONVERT_ARG_HANDLE_CHECKED(JSArray, shared_info, 0); return LiveEdit::FunctionSourceUpdated(shared_info); } @@ -12771,8 +12802,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditFunctionSourceUpdated) { RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditReplaceFunctionCode) { ASSERT(args.length() == 2); HandleScope scope(isolate); - CONVERT_ARG_CHECKED(JSArray, new_compile_info, 0); - CONVERT_ARG_CHECKED(JSArray, shared_info, 1); + CONVERT_ARG_HANDLE_CHECKED(JSArray, new_compile_info, 0); + CONVERT_ARG_HANDLE_CHECKED(JSArray, shared_info, 1); return LiveEdit::ReplaceFunctionCode(new_compile_info, shared_info); } @@ -12787,7 +12818,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditFunctionSetScript) { if (function_object->IsJSValue()) { Handle<JSValue> function_wrapper = Handle<JSValue>::cast(function_object); if (script_object->IsJSValue()) { - CONVERT_CHECKED(Script, script, JSValue::cast(*script_object)->value()); + RUNTIME_ASSERT(JSValue::cast(*script_object)->value()->IsScript()); + Script* script = Script::cast(JSValue::cast(*script_object)->value()); script_object = 
Handle<Object>(script, isolate); } @@ -12807,9 +12839,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditReplaceRefToNestedFunction) { ASSERT(args.length() == 3); HandleScope scope(isolate); - CONVERT_ARG_CHECKED(JSValue, parent_wrapper, 0); - CONVERT_ARG_CHECKED(JSValue, orig_wrapper, 1); - CONVERT_ARG_CHECKED(JSValue, subst_wrapper, 2); + CONVERT_ARG_HANDLE_CHECKED(JSValue, parent_wrapper, 0); + CONVERT_ARG_HANDLE_CHECKED(JSValue, orig_wrapper, 1); + CONVERT_ARG_HANDLE_CHECKED(JSValue, subst_wrapper, 2); LiveEdit::ReplaceRefToNestedFunction(parent_wrapper, orig_wrapper, subst_wrapper); @@ -12826,8 +12858,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditReplaceRefToNestedFunction) { RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditPatchFunctionPositions) { ASSERT(args.length() == 2); HandleScope scope(isolate); - CONVERT_ARG_CHECKED(JSArray, shared_array, 0); - CONVERT_ARG_CHECKED(JSArray, position_change_array, 1); + CONVERT_ARG_HANDLE_CHECKED(JSArray, shared_array, 0); + CONVERT_ARG_HANDLE_CHECKED(JSArray, position_change_array, 1); return LiveEdit::PatchFunctionPositions(shared_array, position_change_array); } @@ -12840,8 +12872,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditPatchFunctionPositions) { RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditCheckAndDropActivations) { ASSERT(args.length() == 2); HandleScope scope(isolate); - CONVERT_ARG_CHECKED(JSArray, shared_array, 0); - CONVERT_BOOLEAN_CHECKED(do_drop, args[1]); + CONVERT_ARG_HANDLE_CHECKED(JSArray, shared_array, 0); + CONVERT_BOOLEAN_ARG_CHECKED(do_drop, 1); return *LiveEdit::CheckAndDropActivations(shared_array, do_drop); } @@ -12852,8 +12884,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditCheckAndDropActivations) { RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditCompareStrings) { ASSERT(args.length() == 2); HandleScope scope(isolate); - CONVERT_ARG_CHECKED(String, s1, 0); - CONVERT_ARG_CHECKED(String, s2, 1); + CONVERT_ARG_HANDLE_CHECKED(String, s1, 0); + CONVERT_ARG_HANDLE_CHECKED(String, s2, 1); return *LiveEdit::CompareStrings(s1, s2); } @@ -12864,7 +12896,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditCompareStrings) { RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFunctionCodePositionFromSource) { ASSERT(args.length() == 2); HandleScope scope(isolate); - CONVERT_ARG_CHECKED(JSFunction, function, 0); + CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0); CONVERT_NUMBER_CHECKED(int32_t, source_position, Int32, args[1]); Handle<Code> code(function->code(), isolate); @@ -12901,8 +12933,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFunctionCodePositionFromSource) { RUNTIME_FUNCTION(MaybeObject*, Runtime_ExecuteInDebugContext) { ASSERT(args.length() == 2); HandleScope scope(isolate); - CONVERT_ARG_CHECKED(JSFunction, function, 0); - CONVERT_BOOLEAN_CHECKED(without_debugger, args[1]); + CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0); + CONVERT_BOOLEAN_ARG_CHECKED(without_debugger, 1); Handle<Object> result; bool pending_exception; @@ -12926,7 +12958,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ExecuteInDebugContext) { // Sets a v8 flag. 
RUNTIME_FUNCTION(MaybeObject*, Runtime_SetFlags) { - CONVERT_CHECKED(String, arg, args[0]); + CONVERT_ARG_CHECKED(String, arg, 0); SmartArrayPointer<char> flags = arg->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL); FlagList::SetFlagsFromString(*flags, StrLength(*flags)); @@ -12996,7 +13028,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DumpLOL) { CONVERT_SMI_ARG_CHECKED(id2, 1); CONVERT_SMI_ARG_CHECKED(start, 2); CONVERT_SMI_ARG_CHECKED(count, 3); - CONVERT_ARG_CHECKED(JSObject, filter_obj, 4); + CONVERT_ARG_HANDLE_CHECKED(JSObject, filter_obj, 4); EnterDebugger enter_debugger; return LiveObjectList::Dump(id1, id2, start, count, filter_obj); #else @@ -13023,7 +13055,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetLOLObj) { RUNTIME_FUNCTION(MaybeObject*, Runtime_GetLOLObjId) { #ifdef LIVE_OBJECT_LIST HandleScope scope; - CONVERT_ARG_CHECKED(String, address, 0); + CONVERT_ARG_HANDLE_CHECKED(String, address, 0); Object* result = LiveObjectList::GetObjId(address); return result; #else @@ -13041,7 +13073,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetLOLObjRetainers) { RUNTIME_ASSERT(args[2]->IsUndefined() || args[2]->IsBoolean()); RUNTIME_ASSERT(args[3]->IsUndefined() || args[3]->IsSmi()); RUNTIME_ASSERT(args[4]->IsUndefined() || args[4]->IsSmi()); - CONVERT_ARG_CHECKED(JSObject, filter_obj, 5); + CONVERT_ARG_HANDLE_CHECKED(JSObject, filter_obj, 5); Handle<JSObject> instance_filter; if (args[1]->IsJSObject()) { @@ -13142,7 +13174,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SummarizeLOL) { HandleScope scope; CONVERT_SMI_ARG_CHECKED(id1, 0); CONVERT_SMI_ARG_CHECKED(id2, 1); - CONVERT_ARG_CHECKED(JSObject, filter_obj, 2); + CONVERT_ARG_HANDLE_CHECKED(JSObject, filter_obj, 2); EnterDebugger enter_debugger; return LiveObjectList::Summarize(id1, id2, filter_obj); @@ -13210,7 +13242,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetScript) { ASSERT(args.length() == 1); - CONVERT_CHECKED(String, script_name, args[0]); + CONVERT_ARG_CHECKED(String, script_name, 0); // Find the requested script. Handle<Object> result = @@ -13265,7 +13297,7 @@ static bool ShowFrameInStackTrace(StackFrame* raw_frame, // native code offset. RUNTIME_FUNCTION(MaybeObject*, Runtime_CollectStackTrace) { ASSERT_EQ(args.length(), 3); - CONVERT_ARG_CHECKED(JSObject, error_object, 0); + CONVERT_ARG_HANDLE_CHECKED(JSObject, error_object, 0); Handle<Object> caller = args.at<Object>(1); CONVERT_NUMBER_CHECKED(int32_t, limit, Int32, args[2]); @@ -13350,7 +13382,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Abort) { RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFromCache) { // This is only called from codegen, so checks might be more lax. 
- CONVERT_CHECKED(JSFunctionResultCache, cache, args[0]); + CONVERT_ARG_CHECKED(JSFunctionResultCache, cache, 0); Object* key = args[1]; int finger_index = cache->finger_index(); @@ -13446,8 +13478,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFromCache) { RUNTIME_FUNCTION(MaybeObject*, Runtime_NewMessageObject) { HandleScope scope(isolate); - CONVERT_ARG_CHECKED(String, type, 0); - CONVERT_ARG_CHECKED(JSArray, arguments, 1); + CONVERT_ARG_HANDLE_CHECKED(String, type, 0); + CONVERT_ARG_HANDLE_CHECKED(JSArray, arguments, 1); return *isolate->factory()->NewJSMessageObject( type, arguments, @@ -13460,25 +13492,25 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NewMessageObject) { RUNTIME_FUNCTION(MaybeObject*, Runtime_MessageGetType) { - CONVERT_CHECKED(JSMessageObject, message, args[0]); + CONVERT_ARG_CHECKED(JSMessageObject, message, 0); return message->type(); } RUNTIME_FUNCTION(MaybeObject*, Runtime_MessageGetArguments) { - CONVERT_CHECKED(JSMessageObject, message, args[0]); + CONVERT_ARG_CHECKED(JSMessageObject, message, 0); return message->arguments(); } RUNTIME_FUNCTION(MaybeObject*, Runtime_MessageGetStartPosition) { - CONVERT_CHECKED(JSMessageObject, message, args[0]); + CONVERT_ARG_CHECKED(JSMessageObject, message, 0); return Smi::FromInt(message->start_position()); } RUNTIME_FUNCTION(MaybeObject*, Runtime_MessageGetScript) { - CONVERT_CHECKED(JSMessageObject, message, args[0]); + CONVERT_ARG_CHECKED(JSMessageObject, message, 0); return message->script(); } @@ -13532,8 +13564,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ListNatives) { RUNTIME_FUNCTION(MaybeObject*, Runtime_Log) { ASSERT(args.length() == 2); - CONVERT_CHECKED(String, format, args[0]); - CONVERT_CHECKED(JSArray, elms, args[1]); + CONVERT_ARG_CHECKED(String, format, 0); + CONVERT_ARG_CHECKED(JSArray, elms, 1); String::FlatContent format_content = format->GetFlatContent(); RUNTIME_ASSERT(format_content.IsAscii()); Vector<const char> chars = format_content.ToAsciiVector(); @@ -13550,7 +13582,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_IS_VAR) { #define ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(Name) \ RUNTIME_FUNCTION(MaybeObject*, Runtime_Has##Name) { \ - CONVERT_CHECKED(JSObject, obj, args[0]); \ + CONVERT_ARG_CHECKED(JSObject, obj, 0); \ return isolate->heap()->ToBoolean(obj->Has##Name()); \ } @@ -13574,8 +13606,8 @@ ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalDoubleElements) RUNTIME_FUNCTION(MaybeObject*, Runtime_HaveSameMap) { ASSERT(args.length() == 2); - CONVERT_CHECKED(JSObject, obj1, args[0]); - CONVERT_CHECKED(JSObject, obj2, args[1]); + CONVERT_ARG_CHECKED(JSObject, obj1, 0); + CONVERT_ARG_CHECKED(JSObject, obj2, 1); return isolate->heap()->ToBoolean(obj1->map() == obj2->map()); } diff --git a/deps/v8/src/runtime.h b/deps/v8/src/runtime.h index fd818de6df..bd6568f45e 100644 --- a/deps/v8/src/runtime.h +++ b/deps/v8/src/runtime.h @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. 
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -91,7 +91,7 @@ namespace internal { F(NotifyOSR, 0, 1) \ F(DeoptimizeFunction, 1, 1) \ F(RunningInSimulator, 0, 1) \ - F(OptimizeFunctionOnNextCall, 1, 1) \ + F(OptimizeFunctionOnNextCall, -1, 1) \ F(GetOptimizationStatus, 1, 1) \ F(GetOptimizationCount, 1, 1) \ F(CompileForOnStackReplacement, 1, 1) \ @@ -274,7 +274,6 @@ namespace internal { F(SwapElements, 3, 1) \ \ /* Getters and Setters */ \ - F(DefineAccessor, -1 /* 4 or 5 */, 1) \ F(LookupAccessor, 3, 1) \ \ /* Literals */ \ @@ -319,6 +318,7 @@ namespace internal { F(ReThrow, 1, 1) \ F(ThrowReferenceError, 1, 1) \ F(StackGuard, 0, 1) \ + F(Interrupt, 0, 1) \ F(PromoteScheduledException, 0, 1) \ \ /* Contexts */ \ diff --git a/deps/v8/src/runtime.js b/deps/v8/src/runtime.js index d0cdb3ef60..53d9a39731 100644 --- a/deps/v8/src/runtime.js +++ b/deps/v8/src/runtime.js @@ -39,16 +39,16 @@ ----------------------------------- */ -// The following const declarations are shared with other native JS files. -// They are all declared at this one spot to avoid const redeclaration errors. -const $Object = global.Object; -const $Array = global.Array; -const $String = global.String; -const $Number = global.Number; -const $Function = global.Function; -const $Boolean = global.Boolean; -const $NaN = 0/0; -const builtins = this; +// The following declarations are shared with other native JS files. +// They are all declared at this one spot to avoid redeclaration errors. +var $Object = global.Object; +var $Array = global.Array; +var $String = global.String; +var $Number = global.Number; +var $Function = global.Function; +var $Boolean = global.Boolean; +var $NaN = 0/0; +var builtins = this; // ECMA-262 Section 11.9.3. function EQUALS(y) { diff --git a/deps/v8/src/scopes.cc b/deps/v8/src/scopes.cc index 35d804dff6..9835108751 100644 --- a/deps/v8/src/scopes.cc +++ b/deps/v8/src/scopes.cc @@ -704,6 +704,7 @@ static const char* Header(ScopeType type) { switch (type) { case EVAL_SCOPE: return "eval"; case FUNCTION_SCOPE: return "function"; + case MODULE_SCOPE: return "module"; case GLOBAL_SCOPE: return "global"; case CATCH_SCOPE: return "catch"; case BLOCK_SCOPE: return "block"; diff --git a/deps/v8/src/scopes.h b/deps/v8/src/scopes.h index 06202c493b..5b645f29c2 100644 --- a/deps/v8/src/scopes.h +++ b/deps/v8/src/scopes.h @@ -263,6 +263,7 @@ class Scope: public ZoneObject { // Specific scope types. bool is_eval_scope() const { return type_ == EVAL_SCOPE; } bool is_function_scope() const { return type_ == FUNCTION_SCOPE; } + bool is_module_scope() const { return type_ == MODULE_SCOPE; } bool is_global_scope() const { return type_ == GLOBAL_SCOPE; } bool is_catch_scope() const { return type_ == CATCH_SCOPE; } bool is_block_scope() const { return type_ == BLOCK_SCOPE; } diff --git a/deps/v8/src/string.js b/deps/v8/src/string.js index 2d6896120e..02f5c98cd3 100644 --- a/deps/v8/src/string.js +++ b/deps/v8/src/string.js @@ -28,8 +28,8 @@ // This file relies on the fact that the following declaration has been made // in runtime.js: -// const $String = global.String; -// const $NaN = 0/0; +// var $String = global.String; +// var $NaN = 0/0; // Set the String function and constructor. @@ -588,11 +588,8 @@ function StringSplit(separator, limit) { limit = (IS_UNDEFINED(limit)) ? 
0xffffffff : TO_UINT32(limit); // ECMA-262 says that if separator is undefined, the result should - // be an array of size 1 containing the entire string. SpiderMonkey - // and KJS have this behavior only when no separator is given. If - // undefined is explicitly given, they convert it to a string and - // use that. We do as SpiderMonkey and KJS. - if (%_ArgumentsLength() === 0) { + // be an array of size 1 containing the entire string. + if (IS_UNDEFINED(separator)) { return [subject]; } diff --git a/deps/v8/src/stub-cache.cc b/deps/v8/src/stub-cache.cc index c7f4f94386..4bbfe17687 100644 --- a/deps/v8/src/stub-cache.cc +++ b/deps/v8/src/stub-cache.cc @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -397,12 +397,16 @@ Handle<Code> StubCache::ComputeKeyedLoadOrStoreElement( Handle<JSObject> receiver, KeyedIC::StubKind stub_kind, StrictModeFlag strict_mode) { + KeyedAccessGrowMode grow_mode = + KeyedIC::GetGrowModeFromStubKind(stub_kind); + Code::ExtraICState extra_state = + Code::ComputeExtraICState(grow_mode, strict_mode); Code::Flags flags = Code::ComputeMonomorphicFlags( stub_kind == KeyedIC::LOAD ? Code::KEYED_LOAD_IC : Code::KEYED_STORE_IC, NORMAL, - strict_mode); + extra_state); Handle<String> name; switch (stub_kind) { case KeyedIC::LOAD: @@ -411,6 +415,9 @@ Handle<Code> StubCache::ComputeKeyedLoadOrStoreElement( case KeyedIC::STORE_NO_TRANSITION: name = isolate()->factory()->KeyedStoreElementMonomorphic_symbol(); break; + case KeyedIC::STORE_AND_GROW_NO_TRANSITION: + name = isolate()->factory()->KeyedStoreAndGrowElementMonomorphic_symbol(); + break; default: UNREACHABLE(); break; @@ -426,8 +433,15 @@ Handle<Code> StubCache::ComputeKeyedLoadOrStoreElement( code = compiler.CompileLoadElement(receiver_map); break; } + case KeyedIC::STORE_AND_GROW_NO_TRANSITION: { + KeyedStoreStubCompiler compiler(isolate_, strict_mode, + ALLOW_JSARRAY_GROWTH); + code = compiler.CompileStoreElement(receiver_map); + break; + } case KeyedIC::STORE_NO_TRANSITION: { - KeyedStoreStubCompiler compiler(isolate_, strict_mode); + KeyedStoreStubCompiler compiler(isolate_, strict_mode, + DO_NOT_ALLOW_JSARRAY_GROWTH); code = compiler.CompileStoreElement(receiver_map); break; } @@ -519,7 +533,8 @@ Handle<Code> StubCache::ComputeKeyedStoreField(Handle<String> name, Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags)); if (probe->IsCode()) return Handle<Code>::cast(probe); - KeyedStoreStubCompiler compiler(isolate(), strict_mode); + KeyedStoreStubCompiler compiler(isolate(), strict_mode, + DO_NOT_ALLOW_JSARRAY_GROWTH); Handle<Code> code = compiler.CompileStoreField(receiver, field_index, transition, name); PROFILE(isolate_, CodeCreateEvent(Logger::KEYED_STORE_IC_TAG, *code, *name)); @@ -1349,8 +1364,10 @@ Handle<Code> StoreStubCompiler::GetCode(PropertyType type, Handle<Code> KeyedStoreStubCompiler::GetCode(PropertyType type, Handle<String> name, InlineCacheState state) { + Code::ExtraICState extra_state = + Code::ComputeExtraICState(grow_mode_, strict_mode_); Code::Flags flags = - Code::ComputeFlags(Code::KEYED_STORE_IC, state, strict_mode_, type); + Code::ComputeFlags(Code::KEYED_STORE_IC, state, extra_state, type); Handle<Code> code = GetCodeWithFlags(flags, name); PROFILE(isolate(), CodeCreateEvent(Logger::KEYED_STORE_IC_TAG, *code, *name)); 
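
The StringSplit hunk above is a conformance fix, not a refactor: ES5.1 section 15.5.4.14 specifies that an undefined separator, whether omitted or passed explicitly, yields a one-element array holding the whole string. The old code only special-cased the zero-argument call and otherwise stringified undefined. A minimal sketch of the observable difference:

  "xundefinedy".split(undefined);  // now ["xundefinedy"]; before: ["x", "y"],
                                   // because undefined was converted to the
                                   // string "undefined" and used as separator
  "xundefinedy".split();           // ["xundefinedy"] both before and after
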
GDBJIT(AddCode(GDBJITInterface::KEYED_STORE_IC, *name, *code)); diff --git a/deps/v8/src/stub-cache.h b/deps/v8/src/stub-cache.h index 398d9f415c..fdd8a12da6 100644 --- a/deps/v8/src/stub-cache.h +++ b/deps/v8/src/stub-cache.h @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -670,8 +670,12 @@ class StoreStubCompiler: public StubCompiler { class KeyedStoreStubCompiler: public StubCompiler { public: - KeyedStoreStubCompiler(Isolate* isolate, StrictModeFlag strict_mode) - : StubCompiler(isolate), strict_mode_(strict_mode) { } + KeyedStoreStubCompiler(Isolate* isolate, + StrictModeFlag strict_mode, + KeyedAccessGrowMode grow_mode) + : StubCompiler(isolate), + strict_mode_(strict_mode), + grow_mode_(grow_mode) { } Handle<Code> CompileStoreField(Handle<JSObject> object, int index, @@ -686,10 +690,12 @@ class KeyedStoreStubCompiler: public StubCompiler { static void GenerateStoreFastElement(MacroAssembler* masm, bool is_js_array, - ElementsKind element_kind); + ElementsKind element_kind, + KeyedAccessGrowMode grow_mode); static void GenerateStoreFastDoubleElement(MacroAssembler* masm, - bool is_js_array); + bool is_js_array, + KeyedAccessGrowMode grow_mode); static void GenerateStoreExternalArray(MacroAssembler* masm, ElementsKind elements_kind); @@ -702,6 +708,7 @@ class KeyedStoreStubCompiler: public StubCompiler { InlineCacheState state = MONOMORPHIC); StrictModeFlag strict_mode_; + KeyedAccessGrowMode grow_mode_; }; diff --git a/deps/v8/src/type-info.cc b/deps/v8/src/type-info.cc index e663998ccb..2fb4e2aad3 100644 --- a/deps/v8/src/type-info.cc +++ b/deps/v8/src/type-info.cc @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -110,7 +110,11 @@ bool TypeFeedbackOracle::StoreIsMonomorphicNormal(Expression* expr) { if (map_or_code->IsMap()) return true; if (map_or_code->IsCode()) { Handle<Code> code = Handle<Code>::cast(map_or_code); + bool allow_growth = + Code::GetKeyedAccessGrowMode(code->extra_ic_state()) == + ALLOW_JSARRAY_GROWTH; return code->is_keyed_store_stub() && + !allow_growth && code->ic_state() == MONOMORPHIC && Code::ExtractTypeFromFlags(code->flags()) == NORMAL && code->FindFirstMap() != NULL && @@ -125,7 +129,11 @@ bool TypeFeedbackOracle::StoreIsMegamorphicWithTypeInfo(Expression* expr) { if (map_or_code->IsCode()) { Handle<Code> code = Handle<Code>::cast(map_or_code); Builtins* builtins = isolate_->builtins(); + bool allow_growth = + Code::GetKeyedAccessGrowMode(code->extra_ic_state()) == + ALLOW_JSARRAY_GROWTH; return code->is_keyed_store_stub() && + !allow_growth && *code != builtins->builtin(Builtins::kKeyedStoreIC_Generic) && *code != builtins->builtin(Builtins::kKeyedStoreIC_Generic_Strict) && code->ic_state() == MEGAMORPHIC; @@ -565,7 +573,11 @@ void TypeFeedbackOracle::GetRelocInfos(Handle<Code> code, void TypeFeedbackOracle::CreateDictionary(Handle<Code> code, ZoneList<RelocInfo>* infos) { DisableAssertNoAllocation allocation_allowed; - int length = infos->length() + code->type_feedback_cells()->CellCount(); + int cell_count = code->type_feedback_info()->IsTypeFeedbackInfo() + ? 
TypeFeedbackInfo::cast(code->type_feedback_info())-> + type_feedback_cells()->CellCount() + : 0; + int length = infos->length() + cell_count; byte* old_start = code->instruction_start(); dictionary_ = FACTORY->NewUnseededNumberDictionary(length); byte* new_start = code->instruction_start(); @@ -635,7 +647,10 @@ void TypeFeedbackOracle::ProcessRelocInfos(ZoneList<RelocInfo>* infos) { void TypeFeedbackOracle::ProcessTypeFeedbackCells(Handle<Code> code) { - Handle<TypeFeedbackCells> cache(code->type_feedback_cells()); + Object* raw_info = code->type_feedback_info(); + if (!raw_info->IsTypeFeedbackInfo()) return; + Handle<TypeFeedbackCells> cache( + TypeFeedbackInfo::cast(raw_info)->type_feedback_cells()); for (int i = 0; i < cache->CellCount(); i++) { unsigned ast_id = cache->AstId(i)->value(); Object* value = cache->Cell(i)->value(); diff --git a/deps/v8/src/uri.js b/deps/v8/src/uri.js index e76104a707..b195f3da79 100644 --- a/deps/v8/src/uri.js +++ b/deps/v8/src/uri.js @@ -250,7 +250,7 @@ function Decode(uri, reserved) { // ECMA-262 - 15.1.3.1. function URIDecode(uri) { - function reservedPredicate(cc) { + var reservedPredicate = function(cc) { // #$ if (35 <= cc && cc <= 36) return true; // & @@ -267,7 +267,7 @@ function URIDecode(uri) { if (63 <= cc && cc <= 64) return true; return false; - } + }; var string = ToString(uri); return Decode(string, reservedPredicate); } @@ -275,7 +275,7 @@ function URIDecode(uri) { // ECMA-262 - 15.1.3.2. function URIDecodeComponent(component) { - function reservedPredicate(cc) { return false; } + var reservedPredicate = function(cc) { return false; }; var string = ToString(component); return Decode(string, reservedPredicate); } @@ -296,7 +296,7 @@ function isAlphaNumeric(cc) { // ECMA-262 - 15.1.3.3. function URIEncode(uri) { - function unescapePredicate(cc) { + var unescapePredicate = function(cc) { if (isAlphaNumeric(cc)) return true; // ! if (cc == 33) return true; @@ -316,7 +316,7 @@ function URIEncode(uri) { if (cc == 126) return true; return false; - } + }; var string = ToString(uri); return Encode(string, unescapePredicate); @@ -325,7 +325,7 @@ function URIEncode(uri) { // ECMA-262 - 15.1.3.4 function URIEncodeComponent(component) { - function unescapePredicate(cc) { + var unescapePredicate = function(cc) { if (isAlphaNumeric(cc)) return true; // ! if (cc == 33) return true; @@ -339,7 +339,7 @@ function URIEncodeComponent(component) { if (cc == 126) return true; return false; - } + }; var string = ToString(component); return Encode(string, unescapePredicate); diff --git a/deps/v8/src/v8globals.h b/deps/v8/src/v8globals.h index ff3ad8d748..bfc5e23390 100644 --- a/deps/v8/src/v8globals.h +++ b/deps/v8/src/v8globals.h @@ -461,6 +461,7 @@ enum CallKind { enum ScopeType { EVAL_SCOPE, // The top-level scope for an eval source. FUNCTION_SCOPE, // The top-level scope for a function. + MODULE_SCOPE, // The scope introduced by a module literal GLOBAL_SCOPE, // The top-level scope for a program or a top-level eval. CATCH_SCOPE, // The scope introduced by catch. BLOCK_SCOPE, // The scope introduced by a new block. 
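
The KeyedStoreStubCompiler changes above thread a KeyedAccessGrowMode through stub compilation, so a dedicated monomorphic stub can handle stores that land one element past the end of a fast JSArray, and the new allow_growth checks in type-info.cc keep Crankshaft from mistaking such a growing store for an ordinary monomorphic store when reading type feedback. A sketch of the store pattern the grow stub targets (a plausible reading of the diff, not an exhaustive description of the stub's conditions):

  function fill(n) {
    var a = [];
    for (var i = 0; i < n; i++) {
      a[a.length] = i;  // each store is exactly one past the current length,
    }                   // the case STORE_AND_GROW_NO_TRANSITION handles
    return a;
  }
  fill(1000);
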
diff --git a/deps/v8/src/v8natives.js b/deps/v8/src/v8natives.js index 1d54e28e97..381d34139e 100644 --- a/deps/v8/src/v8natives.js +++ b/deps/v8/src/v8natives.js @@ -28,18 +28,18 @@ // This file relies on the fact that the following declarations have been made // // in runtime.js: -// const $Object = global.Object; -// const $Boolean = global.Boolean; -// const $Number = global.Number; -// const $Function = global.Function; -// const $Array = global.Array; -// const $NaN = 0/0; +// var $Object = global.Object; +// var $Boolean = global.Boolean; +// var $Number = global.Number; +// var $Function = global.Function; +// var $Array = global.Array; +// var $NaN = 0/0; // // in math.js: -// const $floor = MathFloor +// var $floor = MathFloor -const $isNaN = GlobalIsNaN; -const $isFinite = GlobalIsFinite; +var $isNaN = GlobalIsNaN; +var $isFinite = GlobalIsFinite; // ---------------------------------------------------------------------------- diff --git a/deps/v8/src/version.cc b/deps/v8/src/version.cc index 03b16ce1ff..0f50f1c643 100644 --- a/deps/v8/src/version.cc +++ b/deps/v8/src/version.cc @@ -34,7 +34,7 @@ // cannot be changed without changing the SCons build script. #define MAJOR_VERSION 3 #define MINOR_VERSION 9 -#define BUILD_NUMBER 5 +#define BUILD_NUMBER 9 #define PATCH_LEVEL 0 // Use 1 for candidates and 0 otherwise. // (Boolean macro values are not supported by all preprocessors.) diff --git a/deps/v8/src/win32-headers.h b/deps/v8/src/win32-headers.h index ca1b1d8b8e..87f078dc0c 100644 --- a/deps/v8/src/win32-headers.h +++ b/deps/v8/src/win32-headers.h @@ -1,4 +1,4 @@ -// Copyright 2010 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -64,10 +64,10 @@ #undef _WIN32_WINNT #define _WIN32_WINNT 0x501 #endif // __MINGW32__ -#ifndef __MINGW32__ +#if !defined(__MINGW32__) || defined(__MINGW64_VERSION_MAJOR) #include <dbghelp.h> // For SymLoadModule64 and al. #include <errno.h> // For STRUNCATE -#endif // __MINGW32__ +#endif // !defined(__MINGW32__) || defined(__MINGW64_VERSION_MAJOR) #include <limits.h> // For INT_MAX and al. #include <tlhelp32.h> // For Module32First and al. diff --git a/deps/v8/src/x64/assembler-x64.cc b/deps/v8/src/x64/assembler-x64.cc index eb8d7d4d99..5397cd5eaa 100644 --- a/deps/v8/src/x64/assembler-x64.cc +++ b/deps/v8/src/x64/assembler-x64.cc @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -775,7 +775,7 @@ void Assembler::immediate_arithmetic_op_8(byte subcode, Register dst, Immediate src) { EnsureSpace ensure_space(this); - if (dst.code() > 3) { + if (!dst.is_byte_register()) { // Use 64-bit mode byte registers. emit_rex_64(dst); } @@ -1059,7 +1059,7 @@ void Assembler::decl(const Operand& dst) { void Assembler::decb(Register dst) { EnsureSpace ensure_space(this); - if (dst.code() > 3) { + if (!dst.is_byte_register()) { // Register is not one of al, bl, cl, dl. Its encoding needs REX. 
emit_rex_32(dst); } @@ -1387,7 +1387,7 @@ void Assembler::leave() { void Assembler::movb(Register dst, const Operand& src) { EnsureSpace ensure_space(this); - if (dst.code() > 3) { + if (!dst.is_byte_register()) { // Register is not one of al, bl, cl, dl. Its encoding needs REX. emit_rex_32(dst, src); } else { @@ -1400,7 +1400,7 @@ void Assembler::movb(Register dst, const Operand& src) { void Assembler::movb(Register dst, Immediate imm) { EnsureSpace ensure_space(this); - if (dst.code() > 3) { + if (!dst.is_byte_register()) { emit_rex_32(dst); } emit(0xB0 + dst.low_bits()); @@ -1410,7 +1410,7 @@ void Assembler::movb(Register dst, Immediate imm) { void Assembler::movb(const Operand& dst, Register src) { EnsureSpace ensure_space(this); - if (src.code() > 3) { + if (!src.is_byte_register()) { emit_rex_32(src, dst); } else { emit_optional_rex_32(src, dst); @@ -1640,6 +1640,8 @@ void Assembler::movsxlq(Register dst, const Operand& src) { void Assembler::movzxbq(Register dst, const Operand& src) { EnsureSpace ensure_space(this); + // 32 bit operations zero the top 32 bits of 64 bit registers. Therefore + // there is no need to make this a 64 bit operation. emit_optional_rex_32(dst, src); emit(0x0F); emit(0xB6); @@ -1931,7 +1933,7 @@ void Assembler::setcc(Condition cc, Register reg) { } EnsureSpace ensure_space(this); ASSERT(is_uint4(cc)); - if (reg.code() > 3) { // Use x64 byte registers, where different. + if (!reg.is_byte_register()) { // Use x64 byte registers, where different. emit_rex_32(reg); } emit(0x0F); @@ -1996,7 +1998,7 @@ void Assembler::testb(Register dst, Register src) { emit(0x84); emit_modrm(src, dst); } else { - if (dst.code() > 3 || src.code() > 3) { + if (!dst.is_byte_register() || !src.is_byte_register()) { // Register is not one of al, bl, cl, dl. Its encoding needs REX. emit_rex_32(dst, src); } @@ -2013,7 +2015,7 @@ void Assembler::testb(Register reg, Immediate mask) { emit(0xA8); emit(mask.value_); // Low byte emitted. } else { - if (reg.code() > 3) { + if (!reg.is_byte_register()) { // Register is not one of al, bl, cl, dl. Its encoding needs REX. emit_rex_32(reg); } @@ -2036,7 +2038,7 @@ void Assembler::testb(const Operand& op, Immediate mask) { void Assembler::testb(const Operand& op, Register reg) { EnsureSpace ensure_space(this); - if (reg.code() > 3) { + if (!reg.is_byte_register()) { // Register is not one of al, bl, cl, dl. Its encoding needs REX. emit_rex_32(reg, op); } else { diff --git a/deps/v8/src/x64/assembler-x64.h b/deps/v8/src/x64/assembler-x64.h index 745850d822..7af33e126f 100644 --- a/deps/v8/src/x64/assembler-x64.h +++ b/deps/v8/src/x64/assembler-x64.h @@ -30,7 +30,7 @@ // The original source code covered by the above license above has been // modified significantly by Google Inc. -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. // A lightweight X64 Assembler. @@ -131,6 +131,8 @@ struct Register { } bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; } bool is(Register reg) const { return code_ == reg.code_; } + // rax, rbx, rcx and rdx are byte registers, the rest are not. 
+ bool is_byte_register() const { return code_ <= 3; } int code() const { ASSERT(is_valid()); return code_; diff --git a/deps/v8/src/x64/builtins-x64.cc b/deps/v8/src/x64/builtins-x64.cc index d9361fdd02..2ea68b33b0 100644 --- a/deps/v8/src/x64/builtins-x64.cc +++ b/deps/v8/src/x64/builtins-x64.cc @@ -1160,7 +1160,7 @@ static void AllocateJSArray(MacroAssembler* masm, static void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code) { Label argc_one_or_more, argc_two_or_more, empty_array, not_empty_array, - has_non_smi_element; + has_non_smi_element, finish, cant_transition_map, not_double; // Check for array construction with zero arguments. __ testq(rax, rax); @@ -1265,11 +1265,11 @@ static void ArrayNativeCode(MacroAssembler* masm, __ movq(rcx, rax); __ jmp(&entry); __ bind(&loop); - __ movq(kScratchRegister, Operand(r9, rcx, times_pointer_size, 0)); + __ movq(r8, Operand(r9, rcx, times_pointer_size, 0)); if (FLAG_smi_only_arrays) { - __ JumpIfNotSmi(kScratchRegister, &has_non_smi_element); + __ JumpIfNotSmi(r8, &has_non_smi_element); } - __ movq(Operand(rdx, 0), kScratchRegister); + __ movq(Operand(rdx, 0), r8); __ addq(rdx, Immediate(kPointerSize)); __ bind(&entry); __ decq(rcx); @@ -1280,6 +1280,7 @@ static void ArrayNativeCode(MacroAssembler* masm, // rbx: JSArray // esp[0]: return address // esp[8]: last argument + __ bind(&finish); __ pop(rcx); __ lea(rsp, Operand(rsp, rax, times_pointer_size, 1 * kPointerSize)); __ push(rcx); @@ -1287,8 +1288,38 @@ static void ArrayNativeCode(MacroAssembler* masm, __ ret(0); __ bind(&has_non_smi_element); + // Double values are handled by the runtime. + __ CheckMap(r8, + masm->isolate()->factory()->heap_number_map(), + ¬_double, + DONT_DO_SMI_CHECK); + __ bind(&cant_transition_map); __ UndoAllocationInNewSpace(rbx); __ jmp(call_generic_code); + + __ bind(¬_double); + // Transition FAST_SMI_ONLY_ELEMENTS to FAST_ELEMENTS. + // rbx: JSArray + __ movq(r11, FieldOperand(rbx, HeapObject::kMapOffset)); + __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS, + FAST_ELEMENTS, + r11, + kScratchRegister, + &cant_transition_map); + + __ movq(FieldOperand(rbx, HeapObject::kMapOffset), r11); + __ RecordWriteField(rbx, HeapObject::kMapOffset, r11, r8, + kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK); + + // Finish the array initialization loop. + Label loop2; + __ bind(&loop2); + __ movq(r8, Operand(r9, rcx, times_pointer_size, 0)); + __ movq(Operand(rdx, 0), r8); + __ addq(rdx, Immediate(kPointerSize)); + __ decq(rcx); + __ j(greater_equal, &loop2); + __ jmp(&finish); } diff --git a/deps/v8/src/x64/code-stubs-x64.cc b/deps/v8/src/x64/code-stubs-x64.cc index 9feef086e8..61404fa2d1 100644 --- a/deps/v8/src/x64/code-stubs-x64.cc +++ b/deps/v8/src/x64/code-stubs-x64.cc @@ -3054,7 +3054,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { __ Throw(rax); __ bind(&termination_exception); - __ ThrowUncatchable(TERMINATION, rax); + __ ThrowUncatchable(rax); // External string. Short external strings have already been ruled out. // rdi: subject string (expected to be external) @@ -3579,6 +3579,11 @@ void StackCheckStub::Generate(MacroAssembler* masm) { } +void InterruptStub::Generate(MacroAssembler* masm) { + __ TailCallRuntime(Runtime::kInterrupt, 0, 1); +} + + static void GenerateRecordCallTarget(MacroAssembler* masm) { // Cache the called function in a global property cell. 
Cache states // are uninitialized, monomorphic (indicated by a JSFunction), and @@ -3775,12 +3780,6 @@ void CEntryStub::GenerateAheadOfTime() { } -void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) { - // Throw exception in eax. - __ Throw(rax); -} - - void CEntryStub::GenerateCore(MacroAssembler* masm, Label* throw_normal_exception, Label* throw_termination_exception, @@ -3921,12 +3920,6 @@ void CEntryStub::GenerateCore(MacroAssembler* masm, } -void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm, - UncatchableExceptionType type) { - __ ThrowUncatchable(type, rax); -} - - void CEntryStub::Generate(MacroAssembler* masm) { // rax: number of arguments including receiver // rbx: pointer to C function (C callee-saved) @@ -3990,13 +3983,25 @@ void CEntryStub::Generate(MacroAssembler* masm) { true); __ bind(&throw_out_of_memory_exception); - GenerateThrowUncatchable(masm, OUT_OF_MEMORY); + // Set external caught exception to false. + Isolate* isolate = masm->isolate(); + ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress, + isolate); + __ Set(rax, static_cast<int64_t>(false)); + __ Store(external_caught, rax); + + // Set pending exception and rax to out of memory exception. + ExternalReference pending_exception(Isolate::kPendingExceptionAddress, + isolate); + __ movq(rax, Failure::OutOfMemoryException(), RelocInfo::NONE); + __ Store(pending_exception, rax); + // Fall through to the next label. __ bind(&throw_termination_exception); - GenerateThrowUncatchable(masm, TERMINATION); + __ ThrowUncatchable(rax); __ bind(&throw_normal_exception); - GenerateThrowTOS(masm); + __ Throw(rax); } @@ -5966,11 +5971,13 @@ struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = { // KeyedStoreIC::GenerateGeneric. { rbx, rdx, rcx, EMIT_REMEMBERED_SET}, // KeyedStoreStubCompiler::GenerateStoreFastElement. - { rdi, rdx, rcx, EMIT_REMEMBERED_SET}, + { rdi, rbx, rcx, EMIT_REMEMBERED_SET}, + { rdx, rdi, rbx, EMIT_REMEMBERED_SET}, // ElementsTransitionGenerator::GenerateSmiOnlyToObject // and ElementsTransitionGenerator::GenerateSmiOnlyToObject // and ElementsTransitionGenerator::GenerateDoubleToObject { rdx, rbx, rdi, EMIT_REMEMBERED_SET}, + { rdx, rbx, rdi, OMIT_REMEMBERED_SET}, // ElementsTransitionGenerator::GenerateSmiOnlyToDouble // and ElementsTransitionGenerator::GenerateDoubleToObject { rdx, r11, r15, EMIT_REMEMBERED_SET}, diff --git a/deps/v8/src/x64/codegen-x64.cc b/deps/v8/src/x64/codegen-x64.cc index f7e8fc114f..8947f70055 100644 --- a/deps/v8/src/x64/codegen-x64.cc +++ b/deps/v8/src/x64/codegen-x64.cc @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -182,12 +182,17 @@ void ElementsTransitionGenerator::GenerateSmiOnlyToDouble( // -- rsp[0] : return address // ----------------------------------- // The fail label is not actually used since we do not allocate. - Label allocated, cow_array; + Label allocated, cow_array, only_change_map, done; + + // Check for empty arrays, which only require a map transition and no changes + // to the backing store. + __ movq(r8, FieldOperand(rdx, JSObject::kElementsOffset)); + __ CompareRoot(r8, Heap::kEmptyFixedArrayRootIndex); + __ j(equal, &only_change_map); // Check backing store for COW-ness. 
If the negative case, we do not have to // allocate a new array, since FixedArray and FixedDoubleArray do not differ // in size. - __ movq(r8, FieldOperand(rdx, JSObject::kElementsOffset)); __ SmiToInteger32(r9, FieldOperand(r8, FixedDoubleArray::kLengthOffset)); __ CompareRoot(FieldOperand(r8, HeapObject::kMapOffset), Heap::kFixedCOWArrayMapRootIndex); @@ -241,6 +246,18 @@ void ElementsTransitionGenerator::GenerateSmiOnlyToDouble( __ movq(FieldOperand(r14, FixedDoubleArray::kLengthOffset), r11); __ jmp(&allocated); + __ bind(&only_change_map); + // Set transitioned map. + __ movq(FieldOperand(rdx, HeapObject::kMapOffset), rbx); + __ RecordWriteField(rdx, + HeapObject::kMapOffset, + rbx, + rdi, + kDontSaveFPRegs, + OMIT_REMEMBERED_SET, + OMIT_SMI_CHECK); + __ jmp(&done); + // Conversion loop. __ bind(&loop); __ movq(rbx, @@ -264,6 +281,8 @@ void ElementsTransitionGenerator::GenerateSmiOnlyToDouble( __ bind(&entry); __ decq(r9); __ j(not_sign, &loop); + + __ bind(&done); } @@ -276,7 +295,14 @@ void ElementsTransitionGenerator::GenerateDoubleToObject( // -- rdx : receiver // -- rsp[0] : return address // ----------------------------------- - Label loop, entry, convert_hole, gc_required; + Label loop, entry, convert_hole, gc_required, only_change_map; + + // Check for empty arrays, which only require a map transition and no changes + // to the backing store. + __ movq(r8, FieldOperand(rdx, JSObject::kElementsOffset)); + __ CompareRoot(r8, Heap::kEmptyFixedArrayRootIndex); + __ j(equal, &only_change_map); + __ push(rax); __ movq(r8, FieldOperand(rdx, JSObject::kElementsOffset)); @@ -345,15 +371,6 @@ void ElementsTransitionGenerator::GenerateDoubleToObject( __ decq(r9); __ j(not_sign, &loop); - // Set transitioned map. - __ movq(FieldOperand(rdx, HeapObject::kMapOffset), rbx); - __ RecordWriteField(rdx, - HeapObject::kMapOffset, - rbx, - rdi, - kDontSaveFPRegs, - EMIT_REMEMBERED_SET, - OMIT_SMI_CHECK); // Replace receiver's backing store with newly created and filled FixedArray. __ movq(FieldOperand(rdx, JSObject::kElementsOffset), r11); __ RecordWriteField(rdx, @@ -365,6 +382,17 @@ void ElementsTransitionGenerator::GenerateDoubleToObject( OMIT_SMI_CHECK); __ pop(rax); __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset)); + + __ bind(&only_change_map); + // Set transitioned map. + __ movq(FieldOperand(rdx, HeapObject::kMapOffset), rbx); + __ RecordWriteField(rdx, + HeapObject::kMapOffset, + rbx, + rdi, + kDontSaveFPRegs, + OMIT_REMEMBERED_SET, + OMIT_SMI_CHECK); } diff --git a/deps/v8/src/x64/cpu-x64.cc b/deps/v8/src/x64/cpu-x64.cc index 69e77eee9d..80e22c6297 100644 --- a/deps/v8/src/x64/cpu-x64.cc +++ b/deps/v8/src/x64/cpu-x64.cc @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -27,7 +27,7 @@ // CPU specific code for x64 independent of OS goes here. -#ifdef __GNUC__ +#if defined(__GNUC__) && !defined(__MINGW64__) #include "third_party/valgrind/valgrind.h" #endif diff --git a/deps/v8/src/x64/full-codegen-x64.cc b/deps/v8/src/x64/full-codegen-x64.cc index 292c7b4698..50572201cc 100644 --- a/deps/v8/src/x64/full-codegen-x64.cc +++ b/deps/v8/src/x64/full-codegen-x64.cc @@ -113,10 +113,8 @@ class JumpPatchSite BASE_EMBEDDED { // // The function builds a JS frame. Please see JavaScriptFrameConstants in // frames-x64.h for its layout. 
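
The two ElementsTransitionGenerator hunks above add an only_change_map fast path: when the receiver's backing store is the canonical empty FixedArray, an elements-kind transition just rewrites the map (with a write barrier) and skips allocating or copying a backing store. Together with the ArrayNativeCode change further up, which transitions FAST_SMI_ONLY_ELEMENTS to FAST_ELEMENTS inline when a constructor argument is a non-smi, non-double value, the cheap cases look roughly like this:

  var a = [];               // FAST_SMI_ONLY_ELEMENTS, empty backing store
  a[0] = 1.5;               // empty elements: the smi-only to double
                            // transition is now only a map change
  var b = Array(1, "x", 3); // non-smi argument: the array builtin finishes
                            // its fill loop after an inline map transition
                            // instead of undoing the allocation; heap-number
                            // arguments still fall back to the generic code
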
-void FullCodeGenerator::Generate(CompilationInfo* info) { - ASSERT(info_ == NULL); - info_ = info; - scope_ = info->scope(); +void FullCodeGenerator::Generate() { + CompilationInfo* info = info_; handler_table_ = isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED); SetFunctionPosition(function()); @@ -132,7 +130,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info) { // We can optionally optimize based on counters rather than statistical // sampling. if (info->ShouldSelfOptimize()) { - if (FLAG_trace_opt) { + if (FLAG_trace_opt_verbose) { PrintF("[adding self-optimization header to %s]\n", *info->function()->debug_name()->ToCString()); } @@ -317,7 +315,8 @@ void FullCodeGenerator::ClearAccumulator() { } -void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt) { +void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt, + Label* back_edge_target) { Comment cmnt(masm_, "[ Stack check"); Label ok; __ CompareRoot(rsp, Heap::kStackLimitRootIndex); @@ -902,6 +901,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) { __ cmpq(rax, null_value); __ j(equal, &exit); + PrepareForBailoutForId(stmt->PrepareId(), TOS_REG); + // Convert the object to a JS object. Label convert, done_convert; __ JumpIfSmi(rax, &convert); @@ -923,47 +924,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) { // the JSObject::IsSimpleEnum cache validity checks. If we cannot // guarantee cache validity, call the runtime system to check cache // validity or get the property names in a fixed array. - Label next; - Register empty_fixed_array_value = r8; - __ LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex); - Register empty_descriptor_array_value = r9; - __ LoadRoot(empty_descriptor_array_value, - Heap::kEmptyDescriptorArrayRootIndex); - __ movq(rcx, rax); - __ bind(&next); - - // Check that there are no elements. Register rcx contains the - // current JS object we've reached through the prototype chain. - __ cmpq(empty_fixed_array_value, - FieldOperand(rcx, JSObject::kElementsOffset)); - __ j(not_equal, &call_runtime); - - // Check that instance descriptors are not empty so that we can - // check for an enum cache. Leave the map in rbx for the subsequent - // prototype load. - __ movq(rbx, FieldOperand(rcx, HeapObject::kMapOffset)); - __ movq(rdx, FieldOperand(rbx, Map::kInstanceDescriptorsOrBitField3Offset)); - __ JumpIfSmi(rdx, &call_runtime); - - // Check that there is an enum cache in the non-empty instance - // descriptors (rdx). This is the case if the next enumeration - // index field does not contain a smi. - __ movq(rdx, FieldOperand(rdx, DescriptorArray::kEnumerationIndexOffset)); - __ JumpIfSmi(rdx, &call_runtime); - - // For all objects but the receiver, check that the cache is empty. - Label check_prototype; - __ cmpq(rcx, rax); - __ j(equal, &check_prototype, Label::kNear); - __ movq(rdx, FieldOperand(rdx, DescriptorArray::kEnumCacheBridgeCacheOffset)); - __ cmpq(rdx, empty_fixed_array_value); - __ j(not_equal, &call_runtime); - - // Load the prototype from the map and loop if non-null. - __ bind(&check_prototype); - __ movq(rcx, FieldOperand(rbx, Map::kPrototypeOffset)); - __ cmpq(rcx, null_value); - __ j(not_equal, &next); + __ CheckEnumCache(null_value, &call_runtime); // The enum cache is valid. Load the map of the object being // iterated over and use the cache for the iteration. @@ -1015,6 +976,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) { __ Push(Smi::FromInt(0)); // Initial index. 
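
The VisitForInStatement rewrite above replaces the hand-rolled prototype walk with MacroAssembler::CheckEnumCache and adds bailout points (PrepareId, BodyId, ExitId), which is what lets Crankshaft compile for-in loops: the new LForInPrepareMap, LForInCacheArray, LCheckMapValue and LLoadFieldByIndex instructions later in this diff validate the enum cache and deoptimize if a map changes mid-iteration. A sketch of a loop that should stay on the fast path, assuming a receiver with only fast named properties:

  function sum(o) {
    var total = 0;
    for (var key in o) {  // enum-cache-backed iteration; keys come from the
      total += o[key];    // cache, each value via a load-field-by-index
    }
    return total;
  }
  var point = { x: 1, y: 2, z: 3 };
  for (var i = 0; i < 100000; i++) sum(point);  // warm up so the optimizer
                                                // picks the function up
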
// Generate code for doing the condition check. + PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS); __ bind(&loop); __ movq(rax, Operand(rsp, 0 * kPointerSize)); // Get the current index. __ cmpq(rax, Operand(rsp, 1 * kPointerSize)); // Compare to the array length. @@ -1060,7 +1022,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) { __ movq(result_register(), rbx); // Perform the assignment as if via '='. { EffectContext context(this); - EmitAssignment(stmt->each(), stmt->AssignmentId()); + EmitAssignment(stmt->each()); } // Generate code for the body of the loop. @@ -1071,7 +1033,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) { __ bind(loop_statement.continue_label()); __ SmiAddConstant(Operand(rsp, 0 * kPointerSize), Smi::FromInt(1)); - EmitStackCheck(stmt); + EmitStackCheck(stmt, &loop); __ jmp(&loop); // Remove the pointers stored on the stack. @@ -1079,6 +1041,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) { __ addq(rsp, Immediate(5 * kPointerSize)); // Exit and decrement the loop depth. + PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS); __ bind(&exit); decrement_loop_depth(); } @@ -1483,7 +1446,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) { Smi::FromInt(1) : Smi::FromInt(0)); VisitForStackValue(value); - __ CallRuntime(Runtime::kDefineAccessor, 4); + __ Push(Smi::FromInt(NONE)); + __ CallRuntime(Runtime::kDefineOrRedefineAccessorProperty, 5); break; } } @@ -1801,7 +1765,7 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, } -void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) { +void FullCodeGenerator::EmitAssignment(Expression* expr) { // Invalid left-hand sides are rewritten to have a 'throw // ReferenceError' on the left-hand side. if (!expr->IsValidLeftHandSide()) { @@ -1853,7 +1817,6 @@ void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) { break; } } - PrepareForBailoutForId(bailout_ast_id, TOS_REG); context()->Plug(rax); } diff --git a/deps/v8/src/x64/lithium-codegen-x64.cc b/deps/v8/src/x64/lithium-codegen-x64.cc index 7c445cb994..2151cf4979 100644 --- a/deps/v8/src/x64/lithium-codegen-x64.cc +++ b/deps/v8/src/x64/lithium-codegen-x64.cc @@ -1793,11 +1793,10 @@ void LCodeGen::EmitClassOfTest(Label* is_true, // Faster code path to avoid two compares: subtract lower bound from the // actual type and do a signed compare with the width of the type range. __ movq(temp, FieldOperand(input, HeapObject::kMapOffset)); - __ movq(temp2, FieldOperand(temp, Map::kInstanceTypeOffset)); - __ subb(temp2, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); - __ cmpb(temp2, - Immediate(static_cast<int8_t>(LAST_NONCALLABLE_SPEC_OBJECT_TYPE - - FIRST_NONCALLABLE_SPEC_OBJECT_TYPE))); + __ movzxbl(temp2, FieldOperand(temp, Map::kInstanceTypeOffset)); + __ subq(temp2, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); + __ cmpq(temp2, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE - + FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); __ j(above, is_false); } @@ -2605,6 +2604,14 @@ void LCodeGen::DoOuterContext(LOuterContext* instr) { } +void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) { + __ push(rsi); // The context is the first argument. 
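
In VisitObjectLiteral above, literal getters and setters are now defined through Runtime::kDefineOrRedefineAccessorProperty with an explicit attributes argument (Smi::FromInt(NONE)) instead of the removed Runtime::kDefineAccessor, so object literals share the define-accessor path that Object.defineProperty uses. Semantics of literals like this one are unchanged; only the runtime entry differs:

  var counter = (function() {
    var n = 0;
    return {
      get count()  { return n; },  // each accessor is defined via one
      set count(v) { n = v; }      // DefineOrRedefineAccessorProperty call
    };                             // with attributes NONE
  })();
  counter.count = 5;
  counter.count;  // 5
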
+ __ PushHeapObject(instr->hydrogen()->pairs()); + __ Push(Smi::FromInt(instr->hydrogen()->flags())); + CallRuntime(Runtime::kDeclareGlobals, 3, instr); +} + + void LCodeGen::DoGlobalObject(LGlobalObject* instr) { Register result = ToRegister(instr->result()); __ movq(result, GlobalObjectOperand()); @@ -3232,17 +3239,25 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement( void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) { - if (instr->index()->IsConstantOperand()) { - if (instr->length()->IsRegister()) { - __ cmpq(ToRegister(instr->length()), + if (instr->length()->IsRegister()) { + Register reg = ToRegister(instr->length()); + if (FLAG_debug_code) { + __ AbortIfNotZeroExtended(reg); + } + if (instr->index()->IsConstantOperand()) { + __ cmpq(reg, Immediate(ToInteger32(LConstantOperand::cast(instr->index())))); } else { - __ cmpq(ToOperand(instr->length()), - Immediate(ToInteger32(LConstantOperand::cast(instr->index())))); + Register reg2 = ToRegister(instr->index()); + if (FLAG_debug_code) { + __ AbortIfNotZeroExtended(reg2); + } + __ cmpq(reg, reg2); } } else { - if (instr->length()->IsRegister()) { - __ cmpq(ToRegister(instr->length()), ToRegister(instr->index())); + if (instr->index()->IsConstantOperand()) { + __ cmpq(ToOperand(instr->length()), + Immediate(ToInteger32(LConstantOperand::cast(instr->index())))); } else { __ cmpq(ToOperand(instr->length()), ToRegister(instr->index())); } @@ -3968,26 +3983,35 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object, ASSERT(!source.is(rcx)); ASSERT(!result.is(rcx)); + // Only elements backing stores for non-COW arrays need to be copied. + Handle<FixedArrayBase> elements(object->elements()); + bool has_elements = elements->length() > 0 && + elements->map() != isolate()->heap()->fixed_cow_array_map(); + // Increase the offset so that subsequent objects end up right after - // this one. - int current_offset = *offset; - int size = object->map()->instance_size(); - *offset += size; + // this object and its backing store. + int object_offset = *offset; + int object_size = object->map()->instance_size(); + int elements_offset = *offset + object_size; + int elements_size = has_elements ? elements->Size() : 0; + *offset += object_size + elements_size; // Copy object header. ASSERT(object->properties()->length() == 0); - ASSERT(object->elements()->length() == 0 || - object->elements()->map() == isolate()->heap()->fixed_cow_array_map()); int inobject_properties = object->map()->inobject_properties(); - int header_size = size - inobject_properties * kPointerSize; + int header_size = object_size - inobject_properties * kPointerSize; for (int i = 0; i < header_size; i += kPointerSize) { - __ movq(rcx, FieldOperand(source, i)); - __ movq(FieldOperand(result, current_offset + i), rcx); + if (has_elements && i == JSObject::kElementsOffset) { + __ lea(rcx, Operand(result, elements_offset)); + } else { + __ movq(rcx, FieldOperand(source, i)); + } + __ movq(FieldOperand(result, object_offset + i), rcx); } // Copy in-object properties. for (int i = 0; i < inobject_properties; i++) { - int total_offset = current_offset + object->GetInObjectPropertyOffset(i); + int total_offset = object_offset + object->GetInObjectPropertyOffset(i); Handle<Object> value = Handle<Object>(object->InObjectPropertyAt(i)); if (value->IsJSObject()) { Handle<JSObject> value_object = Handle<JSObject>::cast(value); @@ -4003,10 +4027,41 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object, __ movq(FieldOperand(result, total_offset), rcx); } } + + // Copy elements backing store header. 
+ ASSERT(!has_elements || elements->IsFixedArray()); + if (has_elements) { + __ LoadHeapObject(source, elements); + for (int i = 0; i < FixedArray::kHeaderSize; i += kPointerSize) { + __ movq(rcx, FieldOperand(source, i)); + __ movq(FieldOperand(result, elements_offset + i), rcx); + } + } + + // Copy elements backing store content. + ASSERT(!has_elements || elements->IsFixedArray()); + int elements_length = has_elements ? elements->length() : 0; + for (int i = 0; i < elements_length; i++) { + int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i); + Handle<Object> value = JSObject::GetElement(object, i); + if (value->IsJSObject()) { + Handle<JSObject> value_object = Handle<JSObject>::cast(value); + __ lea(rcx, Operand(result, *offset)); + __ movq(FieldOperand(result, total_offset), rcx); + __ LoadHeapObject(source, value_object); + EmitDeepCopy(value_object, result, source, offset); + } else if (value->IsHeapObject()) { + __ LoadHeapObject(rcx, Handle<HeapObject>::cast(value)); + __ movq(FieldOperand(result, total_offset), rcx); + } else { + __ movq(rcx, value, RelocInfo::NONE); + __ movq(FieldOperand(result, total_offset), rcx); + } + } } -void LCodeGen::DoObjectLiteralFast(LObjectLiteralFast* instr) { +void LCodeGen::DoFastLiteral(LFastLiteral* instr) { int size = instr->hydrogen()->total_size(); // Allocate all objects that are part of the literal in one big @@ -4027,13 +4082,13 @@ void LCodeGen::DoObjectLiteralFast(LObjectLiteralFast* instr) { } -void LCodeGen::DoObjectLiteralGeneric(LObjectLiteralGeneric* instr) { +void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) { + Handle<FixedArray> literals(instr->environment()->closure()->literals()); Handle<FixedArray> constant_properties = instr->hydrogen()->constant_properties(); // Set up the parameters to the stub/runtime call. - __ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset)); - __ push(FieldOperand(rax, JSFunction::kLiteralsOffset)); + __ PushHeapObject(literals); __ Push(Smi::FromInt(instr->hydrogen()->literal_index())); __ Push(constant_properties); int flags = instr->hydrogen()->fast_elements() @@ -4044,7 +4099,7 @@ void LCodeGen::DoObjectLiteralGeneric(LObjectLiteralGeneric* instr) { : ObjectLiteral::kNoFlags; __ Push(Smi::FromInt(flags)); - // Pick the right runtime function to call. + // Pick the right runtime function or stub to call. int properties_count = constant_properties->length() / 2; if (instr->hydrogen()->depth() > 1) { CallRuntime(Runtime::kCreateObjectLiteral, 4, instr); @@ -4406,6 +4461,88 @@ void LCodeGen::DoOsrEntry(LOsrEntry* instr) { osr_pc_offset_ = masm()->pc_offset(); } + +void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) { + __ CompareRoot(rax, Heap::kUndefinedValueRootIndex); + DeoptimizeIf(equal, instr->environment()); + + Register null_value = rdi; + __ LoadRoot(null_value, Heap::kNullValueRootIndex); + __ cmpq(rax, null_value); + DeoptimizeIf(equal, instr->environment()); + + Condition cc = masm()->CheckSmi(rax); + DeoptimizeIf(cc, instr->environment()); + + STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE); + __ CmpObjectType(rax, LAST_JS_PROXY_TYPE, rcx); + DeoptimizeIf(below_equal, instr->environment()); + + Label use_cache, call_runtime; + __ CheckEnumCache(null_value, &call_runtime); + + __ movq(rax, FieldOperand(rax, HeapObject::kMapOffset)); + __ jmp(&use_cache, Label::kNear); + + // Get the set of properties to enumerate. 
+ __ bind(&call_runtime); + __ push(rax); + CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr); + + __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset), + Heap::kMetaMapRootIndex); + DeoptimizeIf(not_equal, instr->environment()); + __ bind(&use_cache); +} + + +void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) { + Register map = ToRegister(instr->map()); + Register result = ToRegister(instr->result()); + __ LoadInstanceDescriptors(map, result); + __ movq(result, + FieldOperand(result, DescriptorArray::kEnumerationIndexOffset)); + __ movq(result, + FieldOperand(result, FixedArray::SizeFor(instr->idx()))); + Condition cc = masm()->CheckSmi(result); + DeoptimizeIf(NegateCondition(cc), instr->environment()); +} + + +void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) { + Register object = ToRegister(instr->value()); + __ cmpq(ToRegister(instr->map()), + FieldOperand(object, HeapObject::kMapOffset)); + DeoptimizeIf(not_equal, instr->environment()); +} + + +void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) { + Register object = ToRegister(instr->object()); + Register index = ToRegister(instr->index()); + + Label out_of_object, done; + __ SmiToInteger32(index, index); + __ cmpl(index, Immediate(0)); + __ j(less, &out_of_object); + __ movq(object, FieldOperand(object, + index, + times_pointer_size, + JSObject::kHeaderSize)); + __ jmp(&done, Label::kNear); + + __ bind(&out_of_object); + __ movq(object, FieldOperand(object, JSObject::kPropertiesOffset)); + __ negl(index); + // Index is now equal to out of object property index plus 1. + __ movq(object, FieldOperand(object, + index, + times_pointer_size, + FixedArray::kHeaderSize - kPointerSize)); + __ bind(&done); +} + + #undef __ } } // namespace v8::internal diff --git a/deps/v8/src/x64/lithium-gap-resolver-x64.cc b/deps/v8/src/x64/lithium-gap-resolver-x64.cc index bf5d31d72e..877ea8cd3b 100644 --- a/deps/v8/src/x64/lithium-gap-resolver-x64.cc +++ b/deps/v8/src/x64/lithium-gap-resolver-x64.cc @@ -204,8 +204,9 @@ void LGapResolver::EmitMove(int index) { ASSERT(destination->IsStackSlot()); Operand dst = cgen_->ToOperand(destination); if (cgen_->IsInteger32Constant(constant_source)) { - // Allow top 32 bits of an untagged Integer32 to be arbitrary. - __ movl(dst, Immediate(cgen_->ToInteger32(constant_source))); + // Zero top 32 bits of a 64 bit spill slot that holds a 32 bit untagged + // value. 
+ __ movq(dst, Immediate(cgen_->ToInteger32(constant_source))); } else { __ LoadObject(kScratchRegister, cgen_->ToHandle(constant_source)); __ movq(dst, kScratchRegister); diff --git a/deps/v8/src/x64/lithium-x64.cc b/deps/v8/src/x64/lithium-x64.cc index 901e4b7dac..d373e191fe 100644 --- a/deps/v8/src/x64/lithium-x64.cc +++ b/deps/v8/src/x64/lithium-x64.cc @@ -1121,6 +1121,11 @@ LInstruction* LChunkBuilder::DoOuterContext(HOuterContext* instr) { } +LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) { + return MarkAsCall(new LDeclareGlobals, instr); +} + + LInstruction* LChunkBuilder::DoGlobalObject(HGlobalObject* instr) { return DefineAsRegister(new LGlobalObject); } @@ -2089,19 +2094,18 @@ LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) { } -LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) { - return MarkAsCall(DefineFixed(new LArrayLiteral, rax), instr); +LInstruction* LChunkBuilder::DoFastLiteral(HFastLiteral* instr) { + return MarkAsCall(DefineFixed(new LFastLiteral, rax), instr); } -LInstruction* LChunkBuilder::DoObjectLiteralFast(HObjectLiteralFast* instr) { - return MarkAsCall(DefineFixed(new LObjectLiteralFast, rax), instr); +LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) { + return MarkAsCall(DefineFixed(new LArrayLiteral, rax), instr); } -LInstruction* LChunkBuilder::DoObjectLiteralGeneric( - HObjectLiteralGeneric* instr) { - return MarkAsCall(DefineFixed(new LObjectLiteralGeneric, rax), instr); +LInstruction* LChunkBuilder::DoObjectLiteral(HObjectLiteral* instr) { + return MarkAsCall(DefineFixed(new LObjectLiteral, rax), instr); } @@ -2265,6 +2269,34 @@ LInstruction* LChunkBuilder::DoIn(HIn* instr) { } +LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) { + LOperand* object = UseFixed(instr->enumerable(), rax); + LForInPrepareMap* result = new LForInPrepareMap(object); + return MarkAsCall(DefineFixed(result, rax), instr, CAN_DEOPTIMIZE_EAGERLY); +} + + +LInstruction* LChunkBuilder::DoForInCacheArray(HForInCacheArray* instr) { + LOperand* map = UseRegister(instr->map()); + return AssignEnvironment(DefineAsRegister( + new LForInCacheArray(map))); +} + + +LInstruction* LChunkBuilder::DoCheckMapValue(HCheckMapValue* instr) { + LOperand* value = UseRegisterAtStart(instr->value()); + LOperand* map = UseRegisterAtStart(instr->map()); + return AssignEnvironment(new LCheckMapValue(value, map)); +} + + +LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) { + LOperand* object = UseRegister(instr->object()); + LOperand* index = UseTempRegister(instr->index()); + return DefineSameAsFirst(new LLoadFieldByIndex(object, index)); +} + + } } // namespace v8::internal #endif // V8_TARGET_ARCH_X64 diff --git a/deps/v8/src/x64/lithium-x64.h b/deps/v8/src/x64/lithium-x64.h index 8fb259d969..b91cbc457a 100644 --- a/deps/v8/src/x64/lithium-x64.h +++ b/deps/v8/src/x64/lithium-x64.h @@ -87,11 +87,13 @@ class LCodeGen; V(ConstantI) \ V(ConstantT) \ V(Context) \ + V(DeclareGlobals) \ V(DeleteProperty) \ V(Deoptimize) \ V(DivI) \ V(DoubleToI) \ V(ElementsKind) \ + V(FastLiteral) \ V(FixedArrayBaseLength) \ V(FunctionLiteral) \ V(GetCachedArrayIndex) \ @@ -134,8 +136,7 @@ class LCodeGen; V(NumberTagD) \ V(NumberTagI) \ V(NumberUntagD) \ - V(ObjectLiteralFast) \ - V(ObjectLiteralGeneric) \ + V(ObjectLiteral) \ V(OsrEntry) \ V(OuterContext) \ V(Parameter) \ @@ -171,7 +172,11 @@ class LCodeGen; V(TypeofIsAndBranch) \ V(UnaryMathOperation) \ V(UnknownOSRValue) \ - V(ValueOf) + V(ValueOf) \ + 
V(ForInPrepareMap) \ + V(ForInCacheArray) \ + V(CheckMapValue) \ + V(LoadFieldByIndex) #define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \ @@ -1345,6 +1350,13 @@ class LOuterContext: public LTemplateInstruction<1, 1, 0> { }; +class LDeclareGlobals: public LTemplateInstruction<0, 0, 0> { + public: + DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals, "declare-globals") + DECLARE_HYDROGEN_ACCESSOR(DeclareGlobals) +}; + + class LGlobalObject: public LTemplateInstruction<1, 0, 0> { public: DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global-object") @@ -1898,24 +1910,24 @@ class LCheckNonSmi: public LTemplateInstruction<0, 1, 0> { }; -class LArrayLiteral: public LTemplateInstruction<1, 0, 0> { +class LFastLiteral: public LTemplateInstruction<1, 0, 0> { public: - DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral, "array-literal") - DECLARE_HYDROGEN_ACCESSOR(ArrayLiteral) + DECLARE_CONCRETE_INSTRUCTION(FastLiteral, "fast-literal") + DECLARE_HYDROGEN_ACCESSOR(FastLiteral) }; -class LObjectLiteralFast: public LTemplateInstruction<1, 0, 0> { +class LArrayLiteral: public LTemplateInstruction<1, 0, 0> { public: - DECLARE_CONCRETE_INSTRUCTION(ObjectLiteralFast, "object-literal-fast") - DECLARE_HYDROGEN_ACCESSOR(ObjectLiteralFast) + DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral, "array-literal") + DECLARE_HYDROGEN_ACCESSOR(ArrayLiteral) }; -class LObjectLiteralGeneric: public LTemplateInstruction<1, 0, 0> { +class LObjectLiteral: public LTemplateInstruction<1, 0, 0> { public: - DECLARE_CONCRETE_INSTRUCTION(ObjectLiteralGeneric, "object-literal-generic") - DECLARE_HYDROGEN_ACCESSOR(ObjectLiteralGeneric) + DECLARE_CONCRETE_INSTRUCTION(ObjectLiteral, "object-literal") + DECLARE_HYDROGEN_ACCESSOR(ObjectLiteral) }; @@ -2032,6 +2044,62 @@ class LStackCheck: public LTemplateInstruction<0, 0, 0> { }; +class LForInPrepareMap: public LTemplateInstruction<1, 1, 0> { + public: + explicit LForInPrepareMap(LOperand* object) { + inputs_[0] = object; + } + + LOperand* object() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(ForInPrepareMap, "for-in-prepare-map") +}; + + +class LForInCacheArray: public LTemplateInstruction<1, 1, 0> { + public: + explicit LForInCacheArray(LOperand* map) { + inputs_[0] = map; + } + + LOperand* map() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(ForInCacheArray, "for-in-cache-array") + + int idx() { + return HForInCacheArray::cast(this->hydrogen_value())->idx(); + } +}; + + +class LCheckMapValue: public LTemplateInstruction<0, 2, 0> { + public: + LCheckMapValue(LOperand* value, LOperand* map) { + inputs_[0] = value; + inputs_[1] = map; + } + + LOperand* value() { return inputs_[0]; } + LOperand* map() { return inputs_[1]; } + + DECLARE_CONCRETE_INSTRUCTION(CheckMapValue, "check-map-value") +}; + + +class LLoadFieldByIndex: public LTemplateInstruction<1, 2, 0> { + public: + LLoadFieldByIndex(LOperand* object, LOperand* index) { + inputs_[0] = object; + inputs_[1] = index; + } + + LOperand* object() { return inputs_[0]; } + LOperand* index() { return inputs_[1]; } + + DECLARE_CONCRETE_INSTRUCTION(LoadFieldByIndex, "load-field-by-index") +}; + + class LChunkBuilder; class LChunk: public ZoneObject { public: diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc index 9b5b35511c..2118886e95 100644 --- a/deps/v8/src/x64/macro-assembler-x64.cc +++ b/deps/v8/src/x64/macro-assembler-x64.cc @@ -2552,8 +2552,7 @@ void MacroAssembler::Throw(Register value) { } -void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type, - Register value) { +void 
MacroAssembler::ThrowUncatchable(Register value) { // Adjust this code if not the case. STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize); STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); @@ -2563,22 +2562,9 @@ void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type, STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize); // The exception is expected in rax. - if (type == OUT_OF_MEMORY) { - // Set external caught exception to false. - ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress, - isolate()); - Set(rax, static_cast<int64_t>(false)); - Store(external_caught, rax); - - // Set pending exception and rax to out of memory exception. - ExternalReference pending_exception(Isolate::kPendingExceptionAddress, - isolate()); - movq(rax, Failure::OutOfMemoryException(), RelocInfo::NONE); - Store(pending_exception, rax); - } else if (!value.is(rax)) { + if (!value.is(rax)) { movq(rax, value); } - // Drop the stack pointer to the top of the top stack handler. ExternalReference handler_address(Isolate::kHandlerAddress, isolate()); Load(rsp, handler_address); @@ -2866,6 +2852,14 @@ void MacroAssembler::AbortIfNotSmi(const Operand& object) { } +void MacroAssembler::AbortIfNotZeroExtended(Register int32_register) { + ASSERT(!int32_register.is(kScratchRegister)); + movq(kScratchRegister, 0x100000000l, RelocInfo::NONE); + cmpq(kScratchRegister, int32_register); + Assert(above_equal, "32 bit value in register is not zero-extended"); +} + + void MacroAssembler::AbortIfNotString(Register object) { testb(object, Immediate(kSmiTagMask)); Assert(not_equal, "Operand is not a string"); @@ -4378,6 +4372,52 @@ void MacroAssembler::EnsureNotWhite( bind(&done); } + +void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) { + Label next; + Register empty_fixed_array_value = r8; + LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex); + Register empty_descriptor_array_value = r9; + LoadRoot(empty_descriptor_array_value, + Heap::kEmptyDescriptorArrayRootIndex); + movq(rcx, rax); + bind(&next); + + // Check that there are no elements. Register rcx contains the + // current JS object we've reached through the prototype chain. + cmpq(empty_fixed_array_value, + FieldOperand(rcx, JSObject::kElementsOffset)); + j(not_equal, call_runtime); + + // Check that instance descriptors are not empty so that we can + // check for an enum cache. Leave the map in rbx for the subsequent + // prototype load. + movq(rbx, FieldOperand(rcx, HeapObject::kMapOffset)); + movq(rdx, FieldOperand(rbx, Map::kInstanceDescriptorsOrBitField3Offset)); + JumpIfSmi(rdx, call_runtime); + + // Check that there is an enum cache in the non-empty instance + // descriptors (rdx). This is the case if the next enumeration + // index field does not contain a smi. + movq(rdx, FieldOperand(rdx, DescriptorArray::kEnumerationIndexOffset)); + JumpIfSmi(rdx, call_runtime); + + // For all objects but the receiver, check that the cache is empty. + Label check_prototype; + cmpq(rcx, rax); + j(equal, &check_prototype, Label::kNear); + movq(rdx, FieldOperand(rdx, DescriptorArray::kEnumCacheBridgeCacheOffset)); + cmpq(rdx, empty_fixed_array_value); + j(not_equal, call_runtime); + + // Load the prototype from the map and loop if non-null. 
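+  // Reaching a null prototype without bailing out means the receiver has a
+  // usable enum cache and every prototype has no elements and an empty enum
+  // cache, so the receiver's cached keys are complete.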
+ bind(&check_prototype); + movq(rcx, FieldOperand(rbx, Map::kPrototypeOffset)); + cmpq(rcx, null_value); + j(not_equal, &next); +} + + } } // namespace v8::internal #endif // V8_TARGET_ARCH_X64 diff --git a/deps/v8/src/x64/macro-assembler-x64.h b/deps/v8/src/x64/macro-assembler-x64.h index 52b58153ae..86eb312831 100644 --- a/deps/v8/src/x64/macro-assembler-x64.h +++ b/deps/v8/src/x64/macro-assembler-x64.h @@ -949,6 +949,10 @@ class MacroAssembler: public Assembler { void AbortIfNotSmi(Register object); void AbortIfNotSmi(const Operand& object); + // Abort execution if a 64 bit register containing a 32 bit payload does not + // have zeros in the top 32 bits. + void AbortIfNotZeroExtended(Register reg); + // Abort execution if argument is a string. Used in debug code. void AbortIfNotString(Register object); @@ -971,7 +975,7 @@ class MacroAssembler: public Assembler { void Throw(Register value); // Propagate an uncatchable exception out of the current JS stack. - void ThrowUncatchable(UncatchableExceptionType type, Register value); + void ThrowUncatchable(Register value); // --------------------------------------------------------------------------- // Inline caching support @@ -1295,6 +1299,11 @@ class MacroAssembler: public Assembler { void EnterFrame(StackFrame::Type type); void LeaveFrame(StackFrame::Type type); + // Expects object in rax and returns map with validated enum cache + // in rax. Assumes that any other register can be used as a scratch. + void CheckEnumCache(Register null_value, + Label* call_runtime); + private: // Order general registers are pushed by Pushad. // rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r14, r15. diff --git a/deps/v8/src/x64/stub-cache-x64.cc b/deps/v8/src/x64/stub-cache-x64.cc index 6e609934c0..0e7f20676f 100644 --- a/deps/v8/src/x64/stub-cache-x64.cc +++ b/deps/v8/src/x64/stub-cache-x64.cc @@ -1384,19 +1384,19 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall( __ CheckFastSmiOnlyElements(rbx, &call_builtin); // rdx: receiver // rbx: map + __ movq(r9, rdi); // Backup rdi as it is going to be trashed. __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS, FAST_ELEMENTS, rbx, - r10, + rdi, &call_builtin); ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm()); + __ movq(rdi, r9); __ bind(&fast_object); } else { __ CheckFastObjectElements(rbx, &call_builtin); } - __ CheckFastObjectElements(rbx, &call_builtin); - // Save new length. 
__ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rax); @@ -2441,7 +2441,7 @@ Handle<Code> KeyedStoreStubCompiler::CompileStoreElement( ElementsKind elements_kind = receiver_map->elements_kind(); bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE; Handle<Code> stub = - KeyedStoreElementStub(is_js_array, elements_kind).GetCode(); + KeyedStoreElementStub(is_js_array, elements_kind, grow_mode_).GetCode(); __ DispatchMap(rdx, receiver_map, stub, DO_SMI_CHECK); @@ -3499,14 +3499,16 @@ void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement( void KeyedStoreStubCompiler::GenerateStoreFastElement( MacroAssembler* masm, bool is_js_array, - ElementsKind elements_kind) { + ElementsKind elements_kind, + KeyedAccessGrowMode grow_mode) { // ----------- S t a t e ------------- // -- rax : value // -- rcx : key // -- rdx : receiver // -- rsp[0] : return address // ----------------------------------- - Label miss_force_generic, transition_elements_kind; + Label miss_force_generic, transition_elements_kind, finish_store, grow; + Label check_capacity, slow; // This stub is meant to be tail-jumped to, the receiver must already // have been verified by the caller to not be a smi. @@ -3514,23 +3516,31 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement( // Check that the key is a smi. __ JumpIfNotSmi(rcx, &miss_force_generic); + if (elements_kind == FAST_SMI_ONLY_ELEMENTS) { + __ JumpIfNotSmi(rax, &transition_elements_kind); + } + // Get the elements array and make sure it is a fast element array, not 'cow'. __ movq(rdi, FieldOperand(rdx, JSObject::kElementsOffset)); - __ CompareRoot(FieldOperand(rdi, HeapObject::kMapOffset), - Heap::kFixedArrayMapRootIndex); - __ j(not_equal, &miss_force_generic); - // Check that the key is within bounds. if (is_js_array) { __ SmiCompare(rcx, FieldOperand(rdx, JSArray::kLengthOffset)); - __ j(above_equal, &miss_force_generic); + if (grow_mode == ALLOW_JSARRAY_GROWTH) { + __ j(above_equal, &grow); + } else { + __ j(above_equal, &miss_force_generic); + } } else { __ SmiCompare(rcx, FieldOperand(rdi, FixedArray::kLengthOffset)); __ j(above_equal, &miss_force_generic); } + __ CompareRoot(FieldOperand(rdi, HeapObject::kMapOffset), + Heap::kFixedArrayMapRootIndex); + __ j(not_equal, &miss_force_generic); + + __ bind(&finish_store); if (elements_kind == FAST_SMI_ONLY_ELEMENTS) { - __ JumpIfNotSmi(rax, &transition_elements_kind); __ SmiToInteger32(rcx, rcx); __ movq(FieldOperand(rdi, rcx, times_pointer_size, FixedArray::kHeaderSize), rax); @@ -3542,8 +3552,8 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement( FieldOperand(rdi, rcx, times_pointer_size, FixedArray::kHeaderSize)); __ movq(Operand(rcx, 0), rax); // Make sure to preserve the value in register rax. - __ movq(rdx, rax); - __ RecordWrite(rdi, rcx, rdx, kDontSaveFPRegs); + __ movq(rbx, rax); + __ RecordWrite(rdi, rcx, rbx, kDontSaveFPRegs); } // Done. @@ -3558,19 +3568,89 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement( __ bind(&transition_elements_kind); Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss(); __ jmp(ic_miss, RelocInfo::CODE_TARGET); + + if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) { + // Grow the array by a single element if possible. + __ bind(&grow); + + // Make sure the array is only growing by a single element, anything else + // must be handled by the runtime. Flags are already set by previous + // compare. 
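+    // (The bounds check above compared the key against the JSArray length,
+    // so "equal" here means key == length: an append of exactly one element.
+    // In JS terms, for a = [1, 2, 3] the store "a[3] = x" may grow here,
+    // while "a[5] = x" is left to the generic stub.)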
+ __ j(not_equal, &miss_force_generic); + + // Check for the empty array, and preallocate a small backing store if + // possible. + __ movq(rdi, FieldOperand(rdx, JSObject::kElementsOffset)); + __ CompareRoot(rdi, Heap::kEmptyFixedArrayRootIndex); + __ j(not_equal, &check_capacity); + + int size = FixedArray::SizeFor(JSArray::kPreallocatedArrayElements); + __ AllocateInNewSpace(size, rdi, rbx, r8, &slow, TAG_OBJECT); + + // rax: value + // rcx: key + // rdx: receiver + // rdi: elements + // Make sure that the backing store can hold additional elements. + __ Move(FieldOperand(rdi, JSObject::kMapOffset), + masm->isolate()->factory()->fixed_array_map()); + __ Move(FieldOperand(rdi, FixedArray::kLengthOffset), + Smi::FromInt(JSArray::kPreallocatedArrayElements)); + __ LoadRoot(rbx, Heap::kTheHoleValueRootIndex); + for (int i = 1; i < JSArray::kPreallocatedArrayElements; ++i) { + __ movq(FieldOperand(rdi, FixedArray::SizeFor(i)), rbx); + } + + // Store the element at index zero. + __ movq(FieldOperand(rdi, FixedArray::SizeFor(0)), rax); + + // Install the new backing store in the JSArray. + __ movq(FieldOperand(rdx, JSObject::kElementsOffset), rdi); + __ RecordWriteField(rdx, JSObject::kElementsOffset, rdi, rbx, + kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); + + // Increment the length of the array. + __ Move(FieldOperand(rdx, JSArray::kLengthOffset), Smi::FromInt(1)); + __ ret(0); + + __ bind(&check_capacity); + // Check for cow elements, in general they are not handled by this stub. + __ CompareRoot(FieldOperand(rdi, HeapObject::kMapOffset), + Heap::kFixedCOWArrayMapRootIndex); + __ j(equal, &miss_force_generic); + + // rax: value + // rcx: key + // rdx: receiver + // rdi: elements + // Make sure that the backing store can hold additional elements. + __ cmpq(rcx, FieldOperand(rdi, FixedArray::kLengthOffset)); + __ j(above_equal, &slow); + + // Grow the array and finish the store. + __ SmiAddConstant(FieldOperand(rdx, JSArray::kLengthOffset), + Smi::FromInt(1)); + __ jmp(&finish_store); + + __ bind(&slow); + Handle<Code> ic_slow = masm->isolate()->builtins()->KeyedStoreIC_Slow(); + __ jmp(ic_slow, RelocInfo::CODE_TARGET); + } } void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( MacroAssembler* masm, - bool is_js_array) { + bool is_js_array, + KeyedAccessGrowMode grow_mode) { // ----------- S t a t e ------------- // -- rax : value // -- rcx : key // -- rdx : receiver // -- rsp[0] : return address // ----------------------------------- - Label miss_force_generic, transition_elements_kind; + Label miss_force_generic, transition_elements_kind, finish_store; + Label grow, slow, check_capacity; // This stub is meant to be tail-jumped to, the receiver must already // have been verified by the caller to not be a smi. @@ -3584,13 +3664,19 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( // Check that the key is within bounds. 
  if (is_js_array) {
-    __ SmiCompare(rcx, FieldOperand(rdx, JSArray::kLengthOffset));
+    __ SmiCompare(rcx, FieldOperand(rdx, JSArray::kLengthOffset));
+    if (grow_mode == ALLOW_JSARRAY_GROWTH) {
+      __ j(above_equal, &grow);
+    } else {
+      __ j(above_equal, &miss_force_generic);
+    }
  } else {
    __ SmiCompare(rcx, FieldOperand(rdi, FixedDoubleArray::kLengthOffset));
+    __ j(above_equal, &miss_force_generic);
  }
-  __ j(above_equal, &miss_force_generic);

  // Handle smi values specially
+  __ bind(&finish_store);
  __ SmiToInteger32(rcx, rcx);
  __ StoreNumberToDoubleElements(rax, rdi, rcx, xmm0,
                                 &transition_elements_kind);
@@ -3607,6 +3693,71 @@
  __ Integer32ToSmi(rcx, rcx);
  Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
  __ jmp(ic_miss, RelocInfo::CODE_TARGET);
+
+  if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
+    // Grow the array by a single element if possible.
+    __ bind(&grow);
+
+    // Make sure the array is only growing by a single element, anything else
+    // must be handled by the runtime. Flags are already set by previous
+    // compare.
+    __ j(not_equal, &miss_force_generic);
+
+    // Transition on values that can't be stored in a FixedDoubleArray.
+    Label value_is_smi;
+    __ JumpIfSmi(rax, &value_is_smi);
+    __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
+                   Heap::kHeapNumberMapRootIndex);
+    __ j(not_equal, &transition_elements_kind);
+    __ bind(&value_is_smi);
+
+    // Check for the empty array, and preallocate a small backing store if
+    // possible.
+    __ movq(rdi, FieldOperand(rdx, JSObject::kElementsOffset));
+    __ CompareRoot(rdi, Heap::kEmptyFixedArrayRootIndex);
+    __ j(not_equal, &check_capacity);
+
+    int size = FixedDoubleArray::SizeFor(JSArray::kPreallocatedArrayElements);
+    __ AllocateInNewSpace(size, rdi, rbx, r8, &slow, TAG_OBJECT);
+
+    // rax: value
+    // rcx: key
+    // rdx: receiver
+    // rdi: elements
+    // Initialize the new FixedDoubleArray. Leave elements uninitialized for
+    // efficiency; they are guaranteed to be initialized before use.
+    __ Move(FieldOperand(rdi, JSObject::kMapOffset),
+            masm->isolate()->factory()->fixed_double_array_map());
+    __ Move(FieldOperand(rdi, FixedDoubleArray::kLengthOffset),
+            Smi::FromInt(JSArray::kPreallocatedArrayElements));
+
+    // Install the new backing store in the JSArray.
+    __ movq(FieldOperand(rdx, JSObject::kElementsOffset), rdi);
+    __ RecordWriteField(rdx, JSObject::kElementsOffset, rdi, rbx,
+                        kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+
+    // Increment the length of the array.
+    __ Move(FieldOperand(rdx, JSArray::kLengthOffset), Smi::FromInt(1));
+    __ jmp(&finish_store);
+
+    __ bind(&check_capacity);
+    // rax: value
+    // rcx: key
+    // rdx: receiver
+    // rdi: elements
+    // Make sure that the backing store can hold additional elements.
+    __ cmpq(rcx, FieldOperand(rdi, FixedDoubleArray::kLengthOffset));
+    __ j(above_equal, &slow);
+
+    // Grow the array and finish the store.
+ __ SmiAddConstant(FieldOperand(rdx, JSArray::kLengthOffset), + Smi::FromInt(1)); + __ jmp(&finish_store); + + __ bind(&slow); + Handle<Code> ic_slow = masm->isolate()->builtins()->KeyedStoreIC_Slow(); + __ jmp(ic_slow, RelocInfo::CODE_TARGET); + } } diff --git a/deps/v8/src/zone-inl.h b/deps/v8/src/zone-inl.h index 9a2618c5a8..ee96ec052e 100644 --- a/deps/v8/src/zone-inl.h +++ b/deps/v8/src/zone-inl.h @@ -39,19 +39,7 @@ namespace v8 { namespace internal { -AssertNoZoneAllocation::AssertNoZoneAllocation() - : prev_(Isolate::Current()->zone_allow_allocation()) { - Isolate::Current()->set_zone_allow_allocation(false); -} - - -AssertNoZoneAllocation::~AssertNoZoneAllocation() { - Isolate::Current()->set_zone_allow_allocation(prev_); -} - - inline void* Zone::New(int size) { - ASSERT(Isolate::Current()->zone_allow_allocation()); ASSERT(ZoneScope::nesting() > 0); // Round up the requested size to fit the alignment. size = RoundUp(size, kAlignment); diff --git a/deps/v8/src/zone.h b/deps/v8/src/zone.h index 420afc23f4..25c4f9a7a4 100644 --- a/deps/v8/src/zone.h +++ b/deps/v8/src/zone.h @@ -163,15 +163,6 @@ class ZoneObject { }; -class AssertNoZoneAllocation { - public: - inline AssertNoZoneAllocation(); - inline ~AssertNoZoneAllocation(); - private: - bool prev_; -}; - - // The ZoneListAllocationPolicy is used to specialize the GenericList // implementation to allocate ZoneLists and their elements in the // Zone. diff --git a/deps/v8/test/cctest/test-api.cc b/deps/v8/test/cctest/test-api.cc index 87d5453756..8cd73f2a84 100644 --- a/deps/v8/test/cctest/test-api.cc +++ b/deps/v8/test/cctest/test-api.cc @@ -11050,14 +11050,18 @@ THREADED_TEST(TurnOnAccessCheck) { } -v8::Handle<v8::String> a; -v8::Handle<v8::String> h; +static const char* kPropertyA = "a"; +static const char* kPropertyH = "h"; static bool NamedGetAccessBlockAandH(Local<v8::Object> obj, Local<Value> name, v8::AccessType type, Local<Value> data) { - return !(name->Equals(a) || name->Equals(h)); + if (!name->IsString()) return false; + i::Handle<i::String> name_handle = + v8::Utils::OpenHandle(String::Cast(*name)); + return !name_handle->IsEqualTo(i::CStrVector(kPropertyA)) + && !name_handle->IsEqualTo(i::CStrVector(kPropertyH)); } @@ -11066,9 +11070,7 @@ THREADED_TEST(TurnOnAccessCheckAndRecompile) { // Create an environment with access check to the global object disabled by // default. When the registered access checker will block access to properties - // a and h - a = v8_str("a"); - h = v8_str("h"); + // a and h. 
v8::Handle<v8::ObjectTemplate> global_template = v8::ObjectTemplate::New(); global_template->SetAccessCheckCallbacks(NamedGetAccessBlockAandH, IndexedGetAccessBlocker, @@ -12902,8 +12904,15 @@ static void ExternalArrayTestHelper(v8::ExternalArrayType array_type, "}" "ext_array[7];"); CHECK_EQ(0, result->Int32Value()); - CHECK_EQ( - 0, static_cast<int>(jsobj->GetElement(7)->ToObjectChecked()->Number())); + if (array_type == v8::kExternalDoubleArray || + array_type == v8::kExternalFloatArray) { + CHECK_EQ( + static_cast<int>(0x80000000), + static_cast<int>(jsobj->GetElement(7)->ToObjectChecked()->Number())); + } else { + CHECK_EQ(0, static_cast<int>( + jsobj->GetElement(7)->ToObjectChecked()->Number())); + } result = CompileRun("for (var i = 0; i < 8; i++) {" " ext_array[6] = '2.3';" @@ -13711,58 +13720,65 @@ TEST(SourceURLInStackTrace) { THREADED_TEST(IdleNotification) { v8::HandleScope scope; LocalContext env; - CompileRun("function binom(n, m) {" - " var C = [[1]];" - " for (var i = 1; i <= n; ++i) {" - " C[i] = [1];" - " for (var j = 1; j < i; ++j) {" - " C[i][j] = C[i-1][j-1] + C[i-1][j];" - " }" - " C[i][i] = 1;" - " }" - " return C[n][m];" - "};" - "binom(1000, 500)"); - bool rv = false; - for (int i = 0; i < 100; i++) { - rv = v8::V8::IdleNotification(); - if (rv) - break; + { + // Create garbage in old-space to generate work for idle notification. + i::AlwaysAllocateScope always_allocate; + for (int i = 0; i < 100; i++) { + FACTORY->NewFixedArray(1000, i::TENURED); + } } - CHECK(rv == true); + bool finshed_idle_work = false; + for (int i = 0; i < 100 && !finshed_idle_work; i++) { + finshed_idle_work = v8::V8::IdleNotification(); + } + CHECK(finshed_idle_work); } // Test that idle notification can be handled and eventually returns true. // This just checks the contract of the IdleNotification() function, // and does not verify that it does reasonable work. -TEST(IdleNotificationWithHint) { +TEST(IdleNotificationWithSmallHint) { v8::HandleScope scope; LocalContext env; { + // Create garbage in old-space to generate work for idle notification. i::AlwaysAllocateScope always_allocate; - CompileRun("function binom(n, m) {" - " var C = [[1]];" - " for (var i = 1; i <= n; ++i) {" - " C[i] = [1];" - " for (var j = 1; j < i; ++j) {" - " C[i][j] = C[i-1][j-1] + C[i-1][j];" - " }" - " C[i][i] = 1;" - " }" - " return C[n][m];" - "};" - "binom(1000, 500)"); + for (int i = 0; i < 100; i++) { + FACTORY->NewFixedArray(1000, i::TENURED); + } } - bool rv = false; intptr_t old_size = HEAP->SizeOfObjects(); + bool finshed_idle_work = false; bool no_idle_work = v8::V8::IdleNotification(10); - for (int i = 0; i < 200; i++) { - rv = v8::V8::IdleNotification(10); - if (rv) - break; + for (int i = 0; i < 200 && !finshed_idle_work; i++) { + finshed_idle_work = v8::V8::IdleNotification(10); + } + intptr_t new_size = HEAP->SizeOfObjects(); + CHECK(finshed_idle_work); + CHECK(no_idle_work || new_size < old_size); +} + + +// This just checks the contract of the IdleNotification() function, +// and does not verify that it does reasonable work. +TEST(IdleNotificationWithLargeHint) { + v8::HandleScope scope; + LocalContext env; + { + // Create garbage in old-space to generate work for idle notification. 
+ i::AlwaysAllocateScope always_allocate; + for (int i = 0; i < 100; i++) { + FACTORY->NewFixedArray(1000, i::TENURED); + } + } + intptr_t old_size = HEAP->SizeOfObjects(); + bool finshed_idle_work = false; + bool no_idle_work = v8::V8::IdleNotification(900); + for (int i = 0; i < 200 && !finshed_idle_work; i++) { + finshed_idle_work = v8::V8::IdleNotification(900); } - CHECK(rv == true); intptr_t new_size = HEAP->SizeOfObjects(); + CHECK(finshed_idle_work); CHECK(no_idle_work || new_size < old_size); } diff --git a/deps/v8/test/cctest/test-dataflow.cc b/deps/v8/test/cctest/test-dataflow.cc index ad48f55033..a63008d210 100644 --- a/deps/v8/test/cctest/test-dataflow.cc +++ b/deps/v8/test/cctest/test-dataflow.cc @@ -1,4 +1,4 @@ -// Copyright 2010 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -36,16 +36,17 @@ using namespace v8::internal; TEST(BitVector) { v8::internal::V8::Initialize(NULL); - ZoneScope zone(Isolate::Current(), DELETE_ON_EXIT); + ZoneScope zone_scope(Isolate::Current(), DELETE_ON_EXIT); + Zone* zone = ZONE; { - BitVector v(15); + BitVector v(15, zone); v.Add(1); CHECK(v.Contains(1)); v.Remove(0); CHECK(!v.Contains(0)); v.Add(0); v.Add(1); - BitVector w(15); + BitVector w(15, zone); w.Add(1); v.Intersect(w); CHECK(!v.Contains(0)); @@ -53,7 +54,7 @@ TEST(BitVector) { } { - BitVector v(64); + BitVector v(64, zone); v.Add(27); v.Add(30); v.Add(31); @@ -71,9 +72,9 @@ TEST(BitVector) { } { - BitVector v(15); + BitVector v(15, zone); v.Add(0); - BitVector w(15); + BitVector w(15, zone); w.Add(1); v.Union(w); CHECK(v.Contains(0)); @@ -81,13 +82,13 @@ TEST(BitVector) { } { - BitVector v(15); + BitVector v(15, zone); v.Add(0); - BitVector w(15); + BitVector w(15, zone); w = v; CHECK(w.Contains(0)); w.Add(1); - BitVector u(w); + BitVector u(w, zone); CHECK(u.Contains(0)); CHECK(u.Contains(1)); v.Union(w); @@ -96,9 +97,9 @@ TEST(BitVector) { } { - BitVector v(35); + BitVector v(35, zone); v.Add(0); - BitVector w(35); + BitVector w(35, zone); w.Add(33); v.Union(w); CHECK(v.Contains(0)); @@ -106,15 +107,15 @@ TEST(BitVector) { } { - BitVector v(35); + BitVector v(35, zone); v.Add(32); v.Add(33); - BitVector w(35); + BitVector w(35, zone); w.Add(33); v.Intersect(w); CHECK(!v.Contains(32)); CHECK(v.Contains(33)); - BitVector r(35); + BitVector r(35, zone); r.CopyFrom(v); CHECK(!r.Contains(32)); CHECK(r.Contains(33)); diff --git a/deps/v8/test/cctest/test-debug.cc b/deps/v8/test/cctest/test-debug.cc index d66f094df7..783c36d1de 100644 --- a/deps/v8/test/cctest/test-debug.cc +++ b/deps/v8/test/cctest/test-debug.cc @@ -4222,9 +4222,9 @@ TEST(InterceptorPropertyMirror) { // Get mirrors for the three objects with interceptor. CompileRun( - "named_mirror = debug.MakeMirror(intercepted_named);" - "indexed_mirror = debug.MakeMirror(intercepted_indexed);" - "both_mirror = debug.MakeMirror(intercepted_both)"); + "var named_mirror = debug.MakeMirror(intercepted_named);" + "var indexed_mirror = debug.MakeMirror(intercepted_indexed);" + "var both_mirror = debug.MakeMirror(intercepted_both)"); CHECK(CompileRun( "named_mirror instanceof debug.ObjectMirror")->BooleanValue()); CHECK(CompileRun( @@ -4265,7 +4265,7 @@ TEST(InterceptorPropertyMirror) { CHECK_EQ(5, CompileRun(source)->Int32Value()); // Get the interceptor properties for the object with only named interceptor. 
- CompileRun("named_values = named_mirror.properties()"); + CompileRun("var named_values = named_mirror.properties()"); // Check that the properties are interceptor properties. for (int i = 0; i < 3; i++) { @@ -4284,7 +4284,7 @@ TEST(InterceptorPropertyMirror) { // Get the interceptor properties for the object with only indexed // interceptor. - CompileRun("indexed_values = indexed_mirror.properties()"); + CompileRun("var indexed_values = indexed_mirror.properties()"); // Check that the properties are interceptor properties. for (int i = 0; i < 2; i++) { @@ -4296,7 +4296,7 @@ TEST(InterceptorPropertyMirror) { // Get the interceptor properties for the object with both types of // interceptors. - CompileRun("both_values = both_mirror.properties()"); + CompileRun("var both_values = both_mirror.properties()"); // Check that the properties are interceptor properties. for (int i = 0; i < 5; i++) { @@ -4352,10 +4352,10 @@ TEST(HiddenPrototypePropertyMirror) { // Get mirrors for the four objects. CompileRun( - "o0_mirror = debug.MakeMirror(o0);" - "o1_mirror = debug.MakeMirror(o1);" - "o2_mirror = debug.MakeMirror(o2);" - "o3_mirror = debug.MakeMirror(o3)"); + "var o0_mirror = debug.MakeMirror(o0);" + "var o1_mirror = debug.MakeMirror(o1);" + "var o2_mirror = debug.MakeMirror(o2);" + "var o3_mirror = debug.MakeMirror(o3)"); CHECK(CompileRun("o0_mirror instanceof debug.ObjectMirror")->BooleanValue()); CHECK(CompileRun("o1_mirror instanceof debug.ObjectMirror")->BooleanValue()); CHECK(CompileRun("o2_mirror instanceof debug.ObjectMirror")->BooleanValue()); @@ -4441,11 +4441,11 @@ TEST(NativeGetterPropertyMirror) { CHECK_EQ(10, CompileRun("instance.x")->Int32Value()); // Get mirror for the object with property getter. - CompileRun("instance_mirror = debug.MakeMirror(instance);"); + CompileRun("var instance_mirror = debug.MakeMirror(instance);"); CHECK(CompileRun( "instance_mirror instanceof debug.ObjectMirror")->BooleanValue()); - CompileRun("named_names = instance_mirror.propertyNames();"); + CompileRun("var named_names = instance_mirror.propertyNames();"); CHECK_EQ(1, CompileRun("named_names.length")->Int32Value()); CHECK(CompileRun("named_names[0] == 'x'")->BooleanValue()); CHECK(CompileRun( @@ -4477,7 +4477,7 @@ TEST(NativeGetterThrowingErrorPropertyMirror) { env->Global()->Set(v8::String::New("instance"), named->NewInstance()); // Get mirror for the object with property getter. - CompileRun("instance_mirror = debug.MakeMirror(instance);"); + CompileRun("var instance_mirror = debug.MakeMirror(instance);"); CHECK(CompileRun( "instance_mirror instanceof debug.ObjectMirror")->BooleanValue()); CompileRun("named_names = instance_mirror.propertyNames();"); diff --git a/deps/v8/test/cctest/test-deoptimization.cc b/deps/v8/test/cctest/test-deoptimization.cc index ea34a75371..ee57d65b24 100644 --- a/deps/v8/test/cctest/test-deoptimization.cc +++ b/deps/v8/test/cctest/test-deoptimization.cc @@ -97,6 +97,14 @@ class AllowNativesSyntaxNoInlining { }; +// Abort any ongoing incremental marking to make sure that all weak global +// handle callbacks are processed. +static void NonIncrementalGC() { + // TODO(1608): This should use kAbortIncrementalMarking. 
+ HEAP->CollectAllGarbage(i::Heap::kMakeHeapIterableMask); +} + + static Handle<JSFunction> GetJSFunction(v8::Handle<v8::Object> obj, const char* property_name) { v8::Local<v8::Function> fun = @@ -107,9 +115,7 @@ static Handle<JSFunction> GetJSFunction(v8::Handle<v8::Object> obj, TEST(DeoptimizeSimple) { v8::HandleScope scope; - const char* extension_list[] = { "v8/gc" }; - v8::ExtensionConfiguration extensions(1, extension_list); - LocalContext env(&extensions); + LocalContext env; // Test lazy deoptimization of a simple function. { @@ -119,9 +125,9 @@ TEST(DeoptimizeSimple) { "function h() { %DeoptimizeFunction(f); }" "function g() { count++; h(); }" "function f() { g(); };" - "f();" - "gc(); gc()"); + "f();"); } + NonIncrementalGC(); CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value()); CHECK(!GetJSFunction(env->Global(), "f")->IsOptimized()); @@ -135,9 +141,9 @@ TEST(DeoptimizeSimple) { "var count = 0;" "function g() { count++; %DeoptimizeFunction(f); f(false); }" "function f(x) { if (x) { g(); } else { return } };" - "f(true);" - "gc(); gc()"); + "f(true);"); } + NonIncrementalGC(); CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value()); CHECK(!GetJSFunction(env->Global(), "f")->IsOptimized()); @@ -147,9 +153,7 @@ TEST(DeoptimizeSimple) { TEST(DeoptimizeSimpleWithArguments) { v8::HandleScope scope; - const char* extension_list[] = { "v8/gc" }; - v8::ExtensionConfiguration extensions(1, extension_list); - LocalContext env(&extensions); + LocalContext env; // Test lazy deoptimization of a simple function with some arguments. { @@ -159,9 +163,9 @@ TEST(DeoptimizeSimpleWithArguments) { "function h(x) { %DeoptimizeFunction(f); }" "function g(x, y) { count++; h(x); }" "function f(x, y, z) { g(1,x); y+z; };" - "f(1, \"2\", false);" - "gc(); gc()"); + "f(1, \"2\", false);"); } + NonIncrementalGC(); CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value()); CHECK(!GetJSFunction(env->Global(), "f")->IsOptimized()); @@ -176,9 +180,9 @@ TEST(DeoptimizeSimpleWithArguments) { "var count = 0;" "function g(x, y) { count++; %DeoptimizeFunction(f); f(false, 1, y); }" "function f(x, y, z) { if (x) { g(x, y); } else { return y + z; } };" - "f(true, 1, \"2\");" - "gc(); gc()"); + "f(true, 1, \"2\");"); } + NonIncrementalGC(); CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value()); CHECK(!GetJSFunction(env->Global(), "f")->IsOptimized()); @@ -188,9 +192,7 @@ TEST(DeoptimizeSimpleWithArguments) { TEST(DeoptimizeSimpleNested) { v8::HandleScope scope; - const char* extension_list[] = { "v8/gc" }; - v8::ExtensionConfiguration extensions(1, extension_list); - LocalContext env(&extensions); + LocalContext env; // Test lazy deoptimization of a simple function. Have a nested function call // do the deoptimization. @@ -202,8 +204,8 @@ TEST(DeoptimizeSimpleNested) { "function h(x, y, z) { return x + y + z; }" "function g(z) { count++; %DeoptimizeFunction(f); return z;}" "function f(x,y,z) { return h(x, y, g(z)); };" - "result = f(1, 2, 3);" - "gc(); gc()"); + "result = f(1, 2, 3);"); + NonIncrementalGC(); CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value()); CHECK_EQ(6, env->Global()->Get(v8_str("result"))->Int32Value()); @@ -215,9 +217,7 @@ TEST(DeoptimizeSimpleNested) { TEST(DeoptimizeRecursive) { v8::HandleScope scope; - const char* extension_list[] = { "v8/gc" }; - v8::ExtensionConfiguration extensions(1, extension_list); - LocalContext env(&extensions); + LocalContext env; { // Test lazy deoptimization of a simple function called recursively. 
Call @@ -228,8 +228,9 @@ TEST(DeoptimizeRecursive) { "var calls = 0;" "function g() { count++; %DeoptimizeFunction(f); }" "function f(x) { calls++; if (x > 0) { f(x - 1); } else { g(); } };" - "f(10); gc(); gc()"); + "f(10);"); } + NonIncrementalGC(); CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value()); CHECK_EQ(11, env->Global()->Get(v8_str("calls"))->Int32Value()); @@ -243,9 +244,7 @@ TEST(DeoptimizeRecursive) { TEST(DeoptimizeMultiple) { v8::HandleScope scope; - const char* extension_list[] = { "v8/gc" }; - v8::ExtensionConfiguration extensions(1, extension_list); - LocalContext env(&extensions); + LocalContext env; { AlwaysOptimizeAllowNativesSyntaxNoInlining options; @@ -261,9 +260,9 @@ TEST(DeoptimizeMultiple) { "function f3(x, y, z) { f4(); return x + y + z; };" "function f2(x, y) { return x + f3(y + 1, y + 1, y + 1) + y; };" "function f1(x) { return f2(x + 1, x + 1) + x; };" - "result = f1(1);" - "gc(); gc()"); + "result = f1(1);"); } + NonIncrementalGC(); CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value()); CHECK_EQ(14, env->Global()->Get(v8_str("result"))->Int32Value()); @@ -273,9 +272,7 @@ TEST(DeoptimizeMultiple) { TEST(DeoptimizeConstructor) { v8::HandleScope scope; - const char* extension_list[] = { "v8/gc" }; - v8::ExtensionConfiguration extensions(1, extension_list); - LocalContext env(&extensions); + LocalContext env; { AlwaysOptimizeAllowNativesSyntaxNoInlining options; @@ -284,9 +281,9 @@ TEST(DeoptimizeConstructor) { "function g() { count++;" " %DeoptimizeFunction(f); }" "function f() { g(); };" - "result = new f() instanceof f;" - "gc(); gc()"); + "result = new f() instanceof f;"); } + NonIncrementalGC(); CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value()); CHECK(env->Global()->Get(v8_str("result"))->IsTrue()); @@ -301,9 +298,9 @@ TEST(DeoptimizeConstructor) { " %DeoptimizeFunction(f); }" "function f(x, y) { this.x = x; g(); this.y = y; };" "result = new f(1, 2);" - "result = result.x + result.y;" - "gc(); gc()"); + "result = result.x + result.y;"); } + NonIncrementalGC(); CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value()); CHECK_EQ(3, env->Global()->Get(v8_str("result"))->Int32Value()); @@ -313,9 +310,7 @@ TEST(DeoptimizeConstructor) { TEST(DeoptimizeConstructorMultiple) { v8::HandleScope scope; - const char* extension_list[] = { "v8/gc" }; - v8::ExtensionConfiguration extensions(1, extension_list); - LocalContext env(&extensions); + LocalContext env; { AlwaysOptimizeAllowNativesSyntaxNoInlining options; @@ -332,9 +327,9 @@ TEST(DeoptimizeConstructorMultiple) { "function f2(x, y) {" " this.result = x + new f3(y + 1, y + 1, y + 1).result + y; };" "function f1(x) { this.result = new f2(x + 1, x + 1).result + x; };" - "result = new f1(1).result;" - "gc(); gc()"); + "result = new f1(1).result;"); } + NonIncrementalGC(); CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value()); CHECK_EQ(14, env->Global()->Get(v8_str("result"))->Int32Value()); @@ -344,9 +339,7 @@ TEST(DeoptimizeConstructorMultiple) { TEST(DeoptimizeBinaryOperationADDString) { v8::HandleScope scope; - const char* extension_list[] = { "v8/gc" }; - v8::ExtensionConfiguration extensions(1, extension_list); - LocalContext env(&extensions); + LocalContext env; const char* f_source = "function f(x, y) { return x + y; };"; @@ -376,9 +369,9 @@ TEST(DeoptimizeBinaryOperationADDString) { // Call f and force deoptimization while processing the binary operation. 
CompileRun("deopt = true;" - "var result = f('a+', new X());" - "gc(); gc();"); + "var result = f('a+', new X());"); } + NonIncrementalGC(); CHECK(!GetJSFunction(env->Global(), "f")->IsOptimized()); CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value()); @@ -428,18 +421,15 @@ static void TestDeoptimizeBinaryOpHelper(LocalContext* env, // Call f and force deoptimization while processing the binary operation. CompileRun("deopt = true;" - "var result = f(7, new X());" - "gc(); gc();"); - + "var result = f(7, new X());"); + NonIncrementalGC(); CHECK(!GetJSFunction((*env)->Global(), "f")->IsOptimized()); } TEST(DeoptimizeBinaryOperationADD) { v8::HandleScope scope; - const char* extension_list[] = { "v8/gc" }; - v8::ExtensionConfiguration extensions(1, extension_list); - LocalContext env(&extensions); + LocalContext env; TestDeoptimizeBinaryOpHelper(&env, "+"); @@ -451,9 +441,7 @@ TEST(DeoptimizeBinaryOperationADD) { TEST(DeoptimizeBinaryOperationSUB) { v8::HandleScope scope; - const char* extension_list[] = { "v8/gc" }; - v8::ExtensionConfiguration extensions(1, extension_list); - LocalContext env(&extensions); + LocalContext env; TestDeoptimizeBinaryOpHelper(&env, "-"); @@ -465,9 +453,7 @@ TEST(DeoptimizeBinaryOperationSUB) { TEST(DeoptimizeBinaryOperationMUL) { v8::HandleScope scope; - const char* extension_list[] = { "v8/gc" }; - v8::ExtensionConfiguration extensions(1, extension_list); - LocalContext env(&extensions); + LocalContext env; TestDeoptimizeBinaryOpHelper(&env, "*"); @@ -479,9 +465,7 @@ TEST(DeoptimizeBinaryOperationMUL) { TEST(DeoptimizeBinaryOperationDIV) { v8::HandleScope scope; - const char* extension_list[] = { "v8/gc" }; - v8::ExtensionConfiguration extensions(1, extension_list); - LocalContext env(&extensions); + LocalContext env; TestDeoptimizeBinaryOpHelper(&env, "/"); @@ -493,9 +477,7 @@ TEST(DeoptimizeBinaryOperationDIV) { TEST(DeoptimizeBinaryOperationMOD) { v8::HandleScope scope; - const char* extension_list[] = { "v8/gc" }; - v8::ExtensionConfiguration extensions(1, extension_list); - LocalContext env(&extensions); + LocalContext env; TestDeoptimizeBinaryOpHelper(&env, "%"); @@ -507,9 +489,7 @@ TEST(DeoptimizeBinaryOperationMOD) { TEST(DeoptimizeCompare) { v8::HandleScope scope; - const char* extension_list[] = { "v8/gc" }; - v8::ExtensionConfiguration extensions(1, extension_list); - LocalContext env(&extensions); + LocalContext env; const char* f_source = "function f(x, y) { return x < y; };"; @@ -539,9 +519,9 @@ TEST(DeoptimizeCompare) { // Call f and force deoptimization while processing the comparison. CompileRun("deopt = true;" - "var result = f('a', new X());" - "gc(); gc();"); + "var result = f('a', new X());"); } + NonIncrementalGC(); CHECK(!GetJSFunction(env->Global(), "f")->IsOptimized()); CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value()); @@ -552,9 +532,7 @@ TEST(DeoptimizeCompare) { TEST(DeoptimizeLoadICStoreIC) { v8::HandleScope scope; - const char* extension_list[] = { "v8/gc" }; - v8::ExtensionConfiguration extensions(1, extension_list); - LocalContext env(&extensions); + LocalContext env; // Functions to generate load/store/keyed load/keyed store IC calls. 
const char* f1_source = "function f1(x) { return x.y; };"; @@ -618,9 +596,9 @@ TEST(DeoptimizeLoadICStoreIC) { "var result = f1(new X());" "g1(new X());" "f2(new X(), 'z');" - "g2(new X(), 'z');" - "gc(); gc();"); + "g2(new X(), 'z');"); } + NonIncrementalGC(); CHECK(!GetJSFunction(env->Global(), "f1")->IsOptimized()); CHECK(!GetJSFunction(env->Global(), "g1")->IsOptimized()); @@ -634,9 +612,7 @@ TEST(DeoptimizeLoadICStoreIC) { TEST(DeoptimizeLoadICStoreICNested) { v8::HandleScope scope; - const char* extension_list[] = { "v8/gc" }; - v8::ExtensionConfiguration extensions(1, extension_list); - LocalContext env(&extensions); + LocalContext env; // Functions to generate load/store/keyed load/keyed store IC calls. const char* f1_source = "function f1(x) { return x.y; };"; @@ -701,9 +677,9 @@ TEST(DeoptimizeLoadICStoreICNested) { // Call functions and force deoptimization while processing the ics. CompileRun("deopt = true;" - "var result = f1(new X());" - "gc(); gc();"); + "var result = f1(new X());"); } + NonIncrementalGC(); CHECK(!GetJSFunction(env->Global(), "f1")->IsOptimized()); CHECK(!GetJSFunction(env->Global(), "g1")->IsOptimized()); diff --git a/deps/v8/test/cctest/test-heap-profiler.cc b/deps/v8/test/cctest/test-heap-profiler.cc index f57477e7f4..bf7e91b268 100644 --- a/deps/v8/test/cctest/test-heap-profiler.cc +++ b/deps/v8/test/cctest/test-heap-profiler.cc @@ -147,6 +147,43 @@ TEST(HeapSnapshotObjectSizes) { } +TEST(BoundFunctionInSnapshot) { + v8::HandleScope scope; + LocalContext env; + CompileRun( + "function myFunction(a, b) { this.a = a; this.b = b; }\n" + "function AAAAA() {}\n" + "boundFunction = myFunction.bind(new AAAAA(), 20, new Number(12)); \n"); + const v8::HeapSnapshot* snapshot = + v8::HeapProfiler::TakeSnapshot(v8_str("sizes")); + const v8::HeapGraphNode* global = GetGlobalObject(snapshot); + const v8::HeapGraphNode* f = + GetProperty(global, v8::HeapGraphEdge::kShortcut, "boundFunction"); + CHECK(f); + CHECK_EQ(v8::String::New("native_bind"), f->GetName()); + const v8::HeapGraphNode* bindings = + GetProperty(f, v8::HeapGraphEdge::kInternal, "bindings"); + CHECK_NE(NULL, bindings); + CHECK_EQ(v8::HeapGraphNode::kArray, bindings->GetType()); + CHECK_EQ(4, bindings->GetChildrenCount()); + + const v8::HeapGraphNode* bound_this = GetProperty( + f, v8::HeapGraphEdge::kShortcut, "bound_this"); + CHECK(bound_this); + CHECK_EQ(v8::HeapGraphNode::kObject, bound_this->GetType()); + + const v8::HeapGraphNode* bound_function = GetProperty( + f, v8::HeapGraphEdge::kShortcut, "bound_function"); + CHECK(bound_function); + CHECK_EQ(v8::HeapGraphNode::kClosure, bound_function->GetType()); + + const v8::HeapGraphNode* bound_argument = GetProperty( + f, v8::HeapGraphEdge::kShortcut, "bound_argument_1"); + CHECK(bound_argument); + CHECK_EQ(v8::HeapGraphNode::kObject, bound_argument->GetType()); +} + + TEST(HeapSnapshotEntryChildren) { v8::HandleScope scope; LocalContext env; diff --git a/deps/v8/test/cctest/test-heap.cc b/deps/v8/test/cctest/test-heap.cc index a4d4be4816..a6dd83054b 100644 --- a/deps/v8/test/cctest/test-heap.cc +++ b/deps/v8/test/cctest/test-heap.cc @@ -1328,35 +1328,6 @@ TEST(CollectingAllAvailableGarbageShrinksNewSpace) { CHECK(old_capacity == new_capacity); } -// This just checks the contract of the IdleNotification() function, -// and does not verify that it does reasonable work. 
-TEST(IdleNotificationAdvancesIncrementalMarking) { - if (!FLAG_incremental_marking || !FLAG_incremental_marking_steps) return; - InitializeVM(); - v8::HandleScope scope; - const char* source = "function binom(n, m) {" - " var C = [[1]];" - " for (var i = 1; i <= n; ++i) {" - " C[i] = [1];" - " for (var j = 1; j < i; ++j) {" - " C[i][j] = C[i-1][j-1] + C[i-1][j];" - " }" - " C[i][i] = 1;" - " }" - " return C[n][m];" - "};" - "binom(1000, 500)"; - { - AlwaysAllocateScope aa_scope; - CompileRun(source); - } - intptr_t old_size = HEAP->SizeOfObjects(); - bool no_idle_work = v8::V8::IdleNotification(900); - while (!v8::V8::IdleNotification(900)) ; - intptr_t new_size = HEAP->SizeOfObjects(); - CHECK(no_idle_work || new_size < old_size); -} - static int NumberOfGlobalObjects() { int count = 0; diff --git a/deps/v8/test/cctest/test-regexp.cc b/deps/v8/test/cctest/test-regexp.cc index 3070e16446..d941d0f7b0 100644 --- a/deps/v8/test/cctest/test-regexp.cc +++ b/deps/v8/test/cctest/test-regexp.cc @@ -449,6 +449,7 @@ static bool IsWhiteSpace(uc16 c) { case 0xA0: case 0x2028: case 0x2029: + case 0xFEFF: return true; default: return unibrow::Space::Is(c); diff --git a/deps/v8/test/mjsunit/array-store-and-grow.js b/deps/v8/test/mjsunit/array-store-and-grow.js new file mode 100644 index 0000000000..131d4ebc51 --- /dev/null +++ b/deps/v8/test/mjsunit/array-store-and-grow.js @@ -0,0 +1,183 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Verifies that the KeyedStoreIC correctly handles out-of-bounds stores +// to an array that grow it by a single element. Test functions are +// called twice to make sure that the IC is used, first call is handled +// by the runtime in the miss stub. + +function array_store_1(a,b,c) { + return (a[b] = c); +} + +// Check handling of the empty array. 
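+// A store at index 0 is already out of bounds for the empty backing store,
+// so it must take the grow path and install a freshly allocated store with
+// length 1 instead of falling back to the generic stub on every store.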
+var a = []; +array_store_1(a, 0, 1); +a = []; +array_store_1(a, 0, 1); +assertEquals(1, a[0]); +assertEquals(1, array_store_1([], 0, 1)); + +a = []; +for (x=0;x<100000;++x) { + assertEquals(x, array_store_1(a, x, x)); +} + +for (x=0;x<100000;++x) { + assertEquals(x, array_store_1([], 0, x)); +} + +function array_store_2(a,b,c) { + return (a[b] = c); +} + +a = []; +array_store_2(a, 0, 0.5); +a = []; +array_store_2(a, 0, 0.5); +assertEquals(0.5, a[0]); +assertEquals(0.5, array_store_2([], 0, 0.5)); + +function array_store_3(a,b,c) { + return (a[b] = c); +} + +x = new Object(); +a = []; +array_store_3(a, 0, x); +a = []; +array_store_3(a, 0, x); +assertEquals(x, a[0]); +assertEquals(x, array_store_3([], 0, x)); + +// Check the handling of COW arrays +function makeCOW() { + return [1]; +} + +function array_store_4(a,b,c) { + return (a[b] = c); +} + +a = makeCOW(); +array_store_4(a, 1, 1); +a = makeCOW(); +array_store_4(a, 1, 1); +assertEquals(1, a[1]); +assertEquals(1, array_store_4([], 1, 1)); + +function array_store_5(a,b,c) { + return (a[b] = c); +} + +a = makeCOW(); +array_store_5(a, 1, 0.5); +a = makeCOW(); +array_store_5(a, 1, 0.5); +assertEquals(0.5, a[1]); +assertEquals(0.5, array_store_5([], 1, 0.5)); + +function array_store_6(a,b,c) { + return (a[b] = c); +} + +a = makeCOW(); +array_store_6(a, 1, x); +a = makeCOW(); +array_store_6(a, 1, x); +assertEquals(x, a[1]); +assertEquals(x, array_store_6([], 1, x)); + +// Check the handling of mutable arrays. +a = new Array(1,2,3); +array_store_4(a, 3, 1); +a = new Array(1,2,3); +array_store_4(a, 3, 1); +assertEquals(1, a[3]); +assertEquals(1, array_store_4([], 3, 1)); + +function array_store_5(a,b,c) { + return (a[b] = c); +} + +a = new Array(1,2,3); +array_store_5(a, 3, 0.5); +a = new Array(1,2,3); +array_store_5(a, 3, 0.5); +assertEquals(0.5, a[3]); +assertEquals(0.5, array_store_5([], 3, 0.5)); + +function array_store_6(a,b,c) { + return (a[b] = c); +} + +a = new Array(1,2,3); +array_store_6(a, 3, x); +a = new Array(1,2,3); +array_store_6(a, 3, x); +assertEquals(x, a[3]); +assertEquals(x, array_store_6([], 3, x)); + +function array_store_7(a,b,c) { + return (a[b] = c); +} + +// Check the handling of mutable arrays of doubles +var a = new Array(0.5, 1.5); +array_store_7(a, 2, .5); +a = new Array(0.5, 1.5); +array_store_7(a, 2, .5); +assertEquals(0.5, a[2]); +a = new Array(0.5, 1.5); +assertEquals(0.5, array_store_7(a, 2, 0.5)); + +for (x=0;x<100000;++x) { + a = new Array(0.5, 1.5); + assertEquals(x, array_store_7(a, 2, x)); +} + +function array_store_8(a,b,c) { + return (a[b] = c); +} + +var a = new Array(0.5, 1.5); +array_store_8(a, 2, .5); +a = new Array(0.5, 1.5); +array_store_8(a, 10, .5); +assertEquals(0.5, a[10]); + +// Grow the empty array with a double store. +function array_store_9(a,b,c) { + return (a[b] = c); +} + +var a = []; +array_store_9(a, 0, 0.5); +a = []; +array_store_1(a, 0, 0.5); +assertEquals(0.5, a[0]); +assertEquals(0.5, array_store_1([], 0, 0.5)); diff --git a/deps/v8/test/mjsunit/builtins.js b/deps/v8/test/mjsunit/builtins.js index f2ad5446a0..e43b5891a5 100644 --- a/deps/v8/test/mjsunit/builtins.js +++ b/deps/v8/test/mjsunit/builtins.js @@ -27,8 +27,7 @@ // Flags: --expose-natives-as=builtins -// Checks that all function properties of the builtin object are neither -// writable nor configurable. 
Also, theose functions that are actually +// Checks that all function properties of the builtin object that are actually // constructors (recognized by having properties on their .prototype object), // have only unconfigurable properties on the prototype, and the methods // are also non-writable. @@ -75,8 +74,6 @@ for (var i = 0; i < names.length; i++) { assertTrue(desc.hasOwnProperty("value")); var value = desc.value; if (isFunction(value)) { - assertFalse(desc.writable, name); - assertFalse(desc.configurable, name); checkConstructor(value, name); } } diff --git a/deps/v8/test/mjsunit/compiler/inline-literals.js b/deps/v8/test/mjsunit/compiler/inline-literals.js new file mode 100644 index 0000000000..f78abe82d1 --- /dev/null +++ b/deps/v8/test/mjsunit/compiler/inline-literals.js @@ -0,0 +1,50 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Flags: --allow-natives-syntax + +// Test that we can inline functions containing materialized literals. + +function o2(b, c) { + return { 'b':b, 'c':c, 'y':b + c }; +} + +function o1(a, b, c) { + return { 'a':a, 'x':o2(b, c) }; +} + +function TestObjectLiteral(a, b, c) { + var expected = { 'a':a, 'x':{ 'b':b, 'c':c, 'y':b + c } }; + var result = o1(a, b, c); + assertEquals(expected, result, "TestObjectLiteral"); +} + +TestObjectLiteral(1, 2, 3); +TestObjectLiteral(1, 2, 3); +%OptimizeFunctionOnNextCall(TestObjectLiteral); +TestObjectLiteral(1, 2, 3); +TestObjectLiteral('a', 'b', 'c'); diff --git a/deps/v8/test/mjsunit/compiler/literals-optimized.js b/deps/v8/test/mjsunit/compiler/literals-optimized.js new file mode 100644 index 0000000000..049e21a3a5 --- /dev/null +++ b/deps/v8/test/mjsunit/compiler/literals-optimized.js @@ -0,0 +1,121 @@ +// Copyright 2012 the V8 project authors. All rights reserved. 
+// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Flags: --allow-natives-syntax + +// Test optimized versions of array and object literals. + +function TestOptimizedLiteral(create, verify) { + verify(create(1, 2, 3), 1, 2, 3); + verify(create(3, 5, 7), 3, 5, 7); + %OptimizeFunctionOnNextCall(create); + verify(create(11, 23, 42), 11, 23, 42); +} + + +// Test shallow array literal. +function create_arr_shallow(a, b, c) { + return [0, a, 0, b, 0, c]; +} +function verify_arr_shallow(array, a, b, c) { + assertSame(6, array.length); + assertSame(0, array[0]); + assertSame(a, array[1]); + assertSame(0, array[2]); + assertSame(b, array[3]); + assertSame(0, array[4]); + assertSame(c, array[5]); +} +TestOptimizedLiteral(create_arr_shallow, verify_arr_shallow); + + +// Test nested array literal. +function create_arr_nested(a, b, c) { + return [[0, a], [b, c], [1, 2], 3]; +} +function verify_arr_nested(array, a, b, c) { + assertSame(4, array.length); + assertSame(2, array[0].length); + assertSame(0, array[0][0]); + assertSame(a, array[0][1]); + assertSame(2, array[1].length); + assertSame(b, array[1][0]); + assertSame(c, array[1][1]); + assertSame(2, array[2].length); + assertSame(1, array[2][0]); + assertSame(2, array[2][1]); + assertSame(3, array[3]); +} +TestOptimizedLiteral(create_arr_nested, verify_arr_nested); + + +// Test shallow object literal. +function create_obj_shallow(a, b, c) { + return { x:a, y:b, z:c, v:'foo', 9:'bar' }; +} +function verify_obj_shallow(object, a, b, c) { + assertSame(a, object.x); + assertSame(b, object.y); + assertSame(c, object.z); + assertSame('foo', object.v); + assertSame('bar', object[9]); +} +TestOptimizedLiteral(create_obj_shallow, verify_obj_shallow); + + +// Test nested object literal. 
+function create_obj_nested(a, b, c) { + return { x:{ v:a, w:b }, y:{ v:1, w:2 }, z:c, v:'foo', 9:'bar' }; +} +function verify_obj_nested(object, a, b, c) { + assertSame(a, object.x.v); + assertSame(b, object.x.w); + assertSame(1, object.y.v); + assertSame(2, object.y.w); + assertSame(c, object.z); + assertSame('foo', object.v); + assertSame('bar', object[9]); +} +TestOptimizedLiteral(create_obj_nested, verify_obj_nested); + + +// Test mixed array and object literal. +function create_mixed_nested(a, b, c) { + return { x:[1, 2], y:[a, b], z:c, v:{ v:'foo' }, 9:'bar' }; +} +function verify_mixed_nested(object, a, b, c) { + assertSame(2, object.x.length); + assertSame(1, object.x[0]); + assertSame(2, object.x[1]); + assertSame(2, object.y.length); + assertSame(a, object.y[0]); + assertSame(b, object.y[1]); + assertSame(c, object.z); + assertSame('foo', object.v.v); + assertSame('bar', object[9]); +} +TestOptimizedLiteral(create_mixed_nested, verify_mixed_nested); diff --git a/deps/v8/test/mjsunit/compiler/optimized-for-in.js b/deps/v8/test/mjsunit/compiler/optimized-for-in.js new file mode 100644 index 0000000000..8b16101ee2 --- /dev/null +++ b/deps/v8/test/mjsunit/compiler/optimized-for-in.js @@ -0,0 +1,298 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Flags: --allow-natives-syntax + +// Test for-in support in Crankshaft. For simplicity this test assumes a +// certain fixed iteration order for properties and will have to be adjusted +// if V8 stops following insertion order. + + +function a(t) { + var result = []; + for (var i in t) { + result.push(i + t[i]); + } + return result.join(''); +} + +// Check that we correctly deoptimize on map check. +function b(t) { + var result = []; + for (var i in t) { + result.push(i + t[i]); + delete t[i]; + } + return result.join(''); +} + +// Check that we correctly deoptimize during the preparation step.
+function c(t) { + var result = []; + for (var i in t) { + result.push(i + t[i]); + } + return result.join(''); +} + +// Check that we deoptimize to the place after the side effect in the right state. +function d(t) { + var result = []; + var o; + for (var i in (o = t())) { + result.push(i + o[i]); + } + return result.join(''); +} + +// Check that we correctly deoptimize on map check inserted for fused load. +function e(t) { + var result = []; + for (var i in t) { + delete t[i]; + t[i] = i; + result.push(i + t[i]); + } + return result.join(''); +} + +// Nested for-in loops. +function f(t) { + var result = []; + for (var i in t) { + for (var j in t) { + result.push(i + j + t[i] + t[j]); + } + } + return result.join(''); +} + +// Deoptimization from the inner for-in loop. +function g(t) { + var result = []; + for (var i in t) { + for (var j in t) { + result.push(i + j + t[i] + t[j]); + var v = t[i]; + delete t[i]; + t[i] = v; + } + } + return result.join(''); +} + + +// Break from the inner for-in loop. +function h(t, deopt) { + var result = []; + for (var i in t) { + for (var j in t) { + result.push(i + j + t[i] + t[j]); + break; + } + } + deopt.deopt; + return result.join(''); +} + +// Continue in the inner loop. +function j(t, deopt) { + var result = []; + for (var i in t) { + for (var j in t) { + result.push(i + j + t[i] + t[j]); + continue; + } + } + deopt.deopt; + return result.join(''); +} + +// Continue out of the outer loop. +function k(t, deopt) { + var result = []; + outer: for (var i in t) { + for (var j in t) { + result.push(i + j + t[i] + t[j]); + continue outer; + } + } + deopt.deopt; + return result.join(''); +} + +// Break out of the outer loop. +function l(t, deopt) { + var result = []; + outer: for (var i in t) { + for (var j in t) { + result.push(i + j + t[i] + t[j]); + break outer; + } + } + deopt.deopt; + return result.join(''); +} + +// Test deoptimization from inlined frame (currently it is not inlined). +function m0(t, deopt) { + for (var i in t) { + for (var j in t) { + deopt.deopt; + return i + j + t[i] + t[j]; + } + } +} + +function m(t, deopt) { + return m0(t, deopt); +} + + +function tryFunction(s, mkT, f) { + var d = {deopt: false}; + assertEquals(s, f(mkT(), d)); + assertEquals(s, f(mkT(), d)); + assertEquals(s, f(mkT(), d)); + %OptimizeFunctionOnNextCall(f); + assertEquals(s, f(mkT(), d)); + assertEquals(s, f(mkT(), {})); +} + +var s = "a1b2c3d4"; +function mkTable() { return { a: "1", b: "2", c: "3", d: "4" }; } + + +tryFunction(s, mkTable, a); +tryFunction(s, mkTable, b); +tryFunction("0a1b2c3d", function () { return "abcd"; }, c); +tryFunction("0a1b2c3d", function () { + var cnt = false; + return function () { + cnt = true; + return "abcd"; + } +}, d); +tryFunction("aabbccdd", mkTable, e); + +function mkSmallTable() { return { a: "1", b: "2" }; } + +tryFunction("aa11ab12ba21bb22", mkSmallTable, f); +tryFunction("aa11ab12bb22ba21", mkSmallTable, g); +tryFunction("aa11ba21", mkSmallTable, h); +tryFunction("aa11ab12ba21bb22", mkSmallTable, j); +tryFunction("aa11ba21", mkSmallTable, h); +tryFunction("aa11ba21", mkSmallTable, k); +tryFunction("aa11", mkSmallTable, l); +tryFunction("aa11", mkSmallTable, m); + +// Test handling of null. +tryFunction("", function () { + return function () { return null; } +}, function (t) { + for (var i in t()) { return i; } + return ""; +}); + +// Test smis.
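// (A smi is V8's small-integer value representation. for-in applies
// ToObject to its operand, and the resulting Number wrapper has no
// enumerable properties, so the loop in the callback below never runs
// and the expected result is "".)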
+tryFunction("", function () { + return function () { return 11; } +}, function (t) { + for (var i in t()) { return i; } + return ""; +}); + +// Test LoadFieldByIndex for out of object properties. +function O() { this.a = 1; } +for (var i = 0; i < 10; i++) new O(); +tryFunction("a1b2c3d4e5f6", function () { + var o = new O(); + o.b = 2; + o.c = 3; + o.d = 4; + o.e = 5; + o.f = 6; + return o; +}, function (t) { + var r = []; + for (var i in t) r.push(i + t[i]); + return r.join(''); +}); + +// Test OSR inside for-in. +function osr_inner(t, limit) { + var r = 1; + for (var x in t) { + for (var i = 0; i < t[x].length; i++) { + r += t[x][i]; + if (i === limit) { + %OptimizeFunctionOnNextCall(osr_inner, "osr"); + } + } + r += x; + } + return r; +} + +function osr_outer(t, osr_after) { + var r = 1; + for (var x in t) { + for (var i = 0; i < t[x].length; i++) { + r += t[x][i]; + } + if (x === osr_after) { + %OptimizeFunctionOnNextCall(osr_outer, "osr"); + } + r += x; + } + return r; +} + +function osr_outer_and_deopt(t, osr_after) { + var r = 1; + for (var x in t) { + r += x; + if (x == osr_after) { + %OptimizeFunctionOnNextCall(osr_outer_and_deopt, "osr"); + } + } + return r; +} + +function test_osr() { + with ({}) {} // Disable optimizations of this function. + var arr = new Array(20); + for (var i = 0; i < arr.length; i++) { + arr[i] = i + 1; + } + arr.push(":"); // Force deopt at the end of the loop. + assertEquals("211:x", osr_inner({x: arr}, (arr.length / 2) | 0)); + assertEquals("7x456y", osr_outer({x: [1,2,3], y: [4,5,6]}, "x")); + assertEquals("101234567", osr_outer_and_deopt([1,2,3,4,5,6,7,8], "5")); +} + +test_osr(); diff --git a/deps/v8/test/mjsunit/count-based-osr.js b/deps/v8/test/mjsunit/count-based-osr.js new file mode 100644 index 0000000000..125c4e26d5 --- /dev/null +++ b/deps/v8/test/mjsunit/count-based-osr.js @@ -0,0 +1,38 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +// Flags: --count-based-interrupts --interrupt-budget=10 --weighted-back-edges --allow-natives-syntax + +// Test that OSR works properly when using count-based interrupting/profiling. + +function osr_this() { + var a = 1; + // Trigger OSR. + while (%GetOptimizationStatus(osr_this) == 2) {} + return a; +} +assertEquals(1, osr_this()); diff --git a/deps/v8/test/mjsunit/elements-kind.js b/deps/v8/test/mjsunit/elements-kind.js index c0bc333a6b..4aa79de659 100644 --- a/deps/v8/test/mjsunit/elements-kind.js +++ b/deps/v8/test/mjsunit/elements-kind.js @@ -147,6 +147,7 @@ assertKind(elements_kind.external_pixel, new PixelArray(512)); // Crankshaft support for smi-only array elements. function monomorphic(array) { + assertKind(elements_kind.fast_smi_only, array); for (var i = 0; i < 3; i++) { array[i] = i + 10; } @@ -157,6 +158,7 @@ function monomorphic(array) { } } var smi_only = new Array(1, 2, 3); +assertKind(elements_kind.fast_smi_only, smi_only); for (var i = 0; i < 3; i++) monomorphic(smi_only); %OptimizeFunctionOnNextCall(monomorphic); monomorphic(smi_only); diff --git a/deps/v8/test/mjsunit/elements-transition-hoisting.js b/deps/v8/test/mjsunit/elements-transition-hoisting.js index 53dc940919..5e78f10a0b 100644 --- a/deps/v8/test/mjsunit/elements-transition-hoisting.js +++ b/deps/v8/test/mjsunit/elements-transition-hoisting.js @@ -25,7 +25,7 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// Flags: --allow-natives-syntax --smi-only-arrays +// Flags: --allow-natives-syntax --smi-only-arrays --expose-gc // Ensure that ElementsKind transitions in various situations are hoisted (or // not hoisted) correctly, don't change the semantics programs and don't trigger @@ -39,6 +39,11 @@ if (support_smi_only_arrays) { print("Tests do NOT include smi-only arrays."); } +// Force existing ICs from previous stress runs to be flushed, otherwise the +// assumptions in this test about when deoptimizations get triggered are not +// valid. +gc(); + if (support_smi_only_arrays) { // Make sure that a simple elements array transitions inside a loop before // stores to an array gets hoisted in a way that doesn't generate a deopt in @@ -165,4 +170,42 @@ if (support_smi_only_arrays) { testHoistingWithSideEffect(new Array(5)); testHoistingWithSideEffect(new Array(5)); assertTrue(2 != %GetOptimizationStatus(testHoistingWithSideEffect)); + + function testStraightLineDupeElinination(a,b,c,d,e,f) { + var count = 3; + do { + assertTrue(true); + a[0] = b; + a[1] = c; + a[2] = d; + assertTrue(true); + a[3] = e; // TransitionElementsKind should be eliminated despite call. 
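// (The double stores in this sequence may each require an elements-kind
// transition, e.g. FAST_SMI_ONLY to FAST_DOUBLE when a value like .5 is
// stored; the test expects GVN to keep only the first such
// TransitionElementsKind, with the intervening assertTrue() calls not
// blocking the elimination.)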
+ a[4] = f; + } while (--count > 3); + } + + testStraightLineDupeElinination(new Array(0, 0, 0, 0, 0),0,0,0,0,.5); + testStraightLineDupeElinination(new Array(0, 0, 0, 0, 0),0,0,0,.5,0); + testStraightLineDupeElinination(new Array(0, 0, 0, 0, 0),0,0,.5,0,0); + testStraightLineDupeElinination(new Array(0, 0, 0, 0, 0),0,.5,0,0,0); + testStraightLineDupeElinination(new Array(0, 0, 0, 0, 0),.5,0,0,0,0); + testStraightLineDupeElinination(new Array(.1,.1,.1,.1,.1),0,0,0,0,.5); + testStraightLineDupeElinination(new Array(.1,.1,.1,.1,.1),0,0,0,.5,0); + testStraightLineDupeElinination(new Array(.1,.1,.1,.1,.1),0,0,.5,0,0); + testStraightLineDupeElinination(new Array(.1,.1,.1,.1,.1),0,.5,0,0,0); + testStraightLineDupeElinination(new Array(.1,.1,.1,.1,.1),.5,0,0,0,0); + testStraightLineDupeElinination(new Array(5),.5,0,0,0,0); + testStraightLineDupeElinination(new Array(5),0,.5,0,0,0); + testStraightLineDupeElinination(new Array(5),0,0,.5,0,0); + testStraightLineDupeElinination(new Array(5),0,0,0,.5,0); + testStraightLineDupeElinination(new Array(5),0,0,0,0,.5); + testStraightLineDupeElinination(new Array(5),.5,0,0,0,0); + testStraightLineDupeElinination(new Array(5),0,.5,0,0,0); + testStraightLineDupeElinination(new Array(5),0,0,.5,0,0); + testStraightLineDupeElinination(new Array(5),0,0,0,.5,0); + testStraightLineDupeElinination(new Array(5),0,0,0,0,.5); + %OptimizeFunctionOnNextCall(testStraightLineDupeElinination); + testStraightLineDupeElinination(new Array(5)); + testStraightLineDupeElinination(new Array(5)); + assertTrue(2 != %GetOptimizationStatus(testStraightLineDupeElinination)); } diff --git a/deps/v8/test/mjsunit/external-array.js b/deps/v8/test/mjsunit/external-array.js index 72cfd85956..32f78a72d4 100644 --- a/deps/v8/test/mjsunit/external-array.js +++ b/deps/v8/test/mjsunit/external-array.js @@ -317,3 +317,37 @@ for (var t = 0; t < types.length; t++) { %DeoptimizeFunction(array_load_set_smi_check2); gc(); // Makes V8 forget about type information for array_load_set_smi_check. } + +// Check handling of undefined in 32- and 64-bit external float arrays. + +function store_float32_undefined(ext_array) { + ext_array[0] = undefined; +} + +var float32_array = new Float32Array(1); +// Make sure runtime does it right +store_float32_undefined(float32_array); +assertTrue(isNaN(float32_array[0])); +// Make sure the ICs do it right +store_float32_undefined(float32_array); +assertTrue(isNaN(float32_array[0])); +// Make sure that Crankshaft does it right. +%OptimizeFunctionOnNextCall(store_float32_undefined); +store_float32_undefined(float32_array); +assertTrue(isNaN(float32_array[0])); + +function store_float64_undefined(ext_array) { + ext_array[0] = undefined; +} + +var float64_array = new Float64Array(1); +// Make sure runtime does it right +store_float64_undefined(float64_array); +assertTrue(isNaN(float64_array[0])); +// Make sure the ICs do it right +store_float64_undefined(float64_array); +assertTrue(isNaN(float64_array[0])); +// Make sure that Crankshaft does it right. +%OptimizeFunctionOnNextCall(store_float64_undefined); +store_float64_undefined(float64_array); +assertTrue(isNaN(float64_array[0])); diff --git a/deps/v8/test/mjsunit/harmony/block-conflicts.js b/deps/v8/test/mjsunit/harmony/block-conflicts.js index ee2d9794ee..8388504bcd 100644 --- a/deps/v8/test/mjsunit/harmony/block-conflicts.js +++ b/deps/v8/test/mjsunit/harmony/block-conflicts.js @@ -130,5 +130,5 @@ for (var v = 0; v < varbinds.length; ++v) { // Test conflicting parameter/var bindings.
for (var v = 0; v < varbinds.length; ++v) { - TestConflict('(function (x) { ' + varbinds[v] + '; })()'); + TestNoConflict('(function (x) { ' + varbinds[v] + '; })()'); } diff --git a/deps/v8/test/mjsunit/harmony/module-parsing.js b/deps/v8/test/mjsunit/harmony/module-parsing.js new file mode 100644 index 0000000000..ac398636da --- /dev/null +++ b/deps/v8/test/mjsunit/harmony/module-parsing.js @@ -0,0 +1,80 @@ +// Copyright 2011 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Flags: --harmony-modules + +// Test basic module syntax, with and without ASI. + +module A {} + +module A1 = A +module A2 = A; +module A3 = A2 + +module B { + var x + var x, y; + var x = 0, y + let x, y + let z = 1 + const c = 9 + function f() {} + module C { + let x + module D {} + let y + } + let zz = "" +} + +module C1 = B.C; +module D1 = B.C.D +module D2 = C1.D +module D3 = D2 + +module E1 at "http://where" +module E2 at "http://where"; +module E3 = E1.F + + +// Check that ASI does not interfere. + +module +X +{ +let x +} + +module +Y += +X + +module +Z +at +"file://local" diff --git a/deps/v8/test/mjsunit/math-min-max.js b/deps/v8/test/mjsunit/math-min-max.js index 7717b3bff2..e4fd313538 100644 --- a/deps/v8/test/mjsunit/math-min-max.js +++ b/deps/v8/test/mjsunit/math-min-max.js @@ -146,6 +146,14 @@ function crankshaft_test_1(arg) { // Double representation. assertEquals(v0, Math.max(v0++, v9++)); assertEquals(v9, Math.min(v0++, v9++)); + // Mixed representation. + assertEquals(v1, Math.min(v1++, v9++)); // int32, double + assertEquals(v0, Math.max(v0++, v2++)); // double, int32 + assertEquals(v1, Math.min(v1++, v6)); // int32, tagged + assertEquals(v2, Math.max(v5, v2++)); // tagged, int32 + assertEquals(v6, Math.min(v6, v9++)); // tagged, double + assertEquals(v0, Math.max(v0++, v5)); // double, tagged + // Minus zero. 
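// (Dividing by the result exposes the sign of zero: 1/+0 === Infinity and
// 1/-0 === -Infinity, so the two checks below verify that Math.max picks the
// positive zero and Math.min the negative zero.)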
assertEquals(Infinity, 1/Math.max(v7, v8)); assertEquals(-Infinity, 1/Math.min(v7, v8)); diff --git a/deps/v8/test/mjsunit/object-prevent-extensions.js b/deps/v8/test/mjsunit/object-prevent-extensions.js index 322a2cb543..6b9184d88b 100644 --- a/deps/v8/test/mjsunit/object-prevent-extensions.js +++ b/deps/v8/test/mjsunit/object-prevent-extensions.js @@ -114,3 +114,15 @@ Object.preventExtensions(foo); foo.x = 29; assertEquals(undefined, foo.x); + +// When Object.isExtensible(o) === false, an assignment should still +// return its right-hand side value. +var o = Object.preventExtensions({}); +var v = o.v = 50; +assertEquals(undefined, o.v); +assertEquals(50, v); + +// Test the same behavior as above, but for integer properties. +var n = o[0] = 100; +assertEquals(undefined, o[0]); +assertEquals(100, n); diff --git a/deps/v8/test/mjsunit/regexp.js b/deps/v8/test/mjsunit/regexp.js index 3c4f883bdc..76fa44be9c 100644 --- a/deps/v8/test/mjsunit/regexp.js +++ b/deps/v8/test/mjsunit/regexp.js @@ -127,6 +127,17 @@ assertTrue(re.test("$")); assertTrue(/^[Z-\c-e]*$/.test("Z[\\cde")); +// Test that we handle \s and \S correctly on special Unicode characters. +re = /\s/; +assertTrue(re.test("\u2028")); +assertTrue(re.test("\u2029")); +assertTrue(re.test("\uFEFF")); + +re = /\S/; +assertFalse(re.test("\u2028")); +assertFalse(re.test("\u2029")); +assertFalse(re.test("\uFEFF")); + // Test that we handle \s and \S correctly inside some bizarre // character classes. re = /[\s-:]/; diff --git a/deps/v8/test/mjsunit/regress/regress-113924.js b/deps/v8/test/mjsunit/regress/regress-113924.js new file mode 100644 index 0000000000..3ecdec48f2 --- /dev/null +++ b/deps/v8/test/mjsunit/regress/regress-113924.js @@ -0,0 +1,31 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
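A side note on the object-prevent-extensions.js assertions above: they pin down that an assignment whose store is silently dropped (sloppy mode, non-extensible receiver) still evaluates to its right-hand side. A minimal standalone sketch of the same semantics, with illustrative names not taken from the test:

var obj = Object.preventExtensions({});
var rhs = (obj.newProp = 42);          // store is ignored: obj is not extensible
assertEquals(undefined, obj.newProp);  // the property was never created...
assertEquals(42, rhs);                 // ...but the assignment still yields its RHS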
+ +var count=12000; +while(count--) { + eval("var a = new Object(10); a[2] += 7;"); +} diff --git a/deps/v8/test/mjsunit/regress/regress-1790.js b/deps/v8/test/mjsunit/regress/regress-1790.js new file mode 100644 index 0000000000..8848eeaf6d --- /dev/null +++ b/deps/v8/test/mjsunit/regress/regress-1790.js @@ -0,0 +1,58 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Regression test checking that the sequence of element access in built-in +// array functions conforms to the specification (i.e. [[HasProperty]] might +// return a bogus result after [[Get]] has been called). + +function CheckSequence(builtin, callback) { + var array = [1,2,3]; + var callback_count = 0; + var callback_wrapper = function() { + callback_count++; + return callback() + } + + // Define a getter that will delete itself upon first invocation. + Object.defineProperty(array, '1', { + get: function () { delete array[1]; }, + configurable: true + }); + + assertTrue(array.hasOwnProperty('1')); + builtin.apply(array, [callback_wrapper, 'argument']); + assertFalse(array.hasOwnProperty('1')); + assertEquals(3, callback_count); +} + +CheckSequence(Array.prototype.every, function() { return true; }); +CheckSequence(Array.prototype.filter, function() { return true; }); +CheckSequence(Array.prototype.forEach, function() { return 0; }); +CheckSequence(Array.prototype.map, function() { return 0; }); +CheckSequence(Array.prototype.reduce, function() { return 0; }); +CheckSequence(Array.prototype.reduceRight, function() { return 0; }); +CheckSequence(Array.prototype.some, function() { return false; }); diff --git a/deps/v8/test/mjsunit/regress/regress-1878.js b/deps/v8/test/mjsunit/regress/regress-1878.js index 1b3c63aeb1..a1648b1217 100644 --- a/deps/v8/test/mjsunit/regress/regress-1878.js +++ b/deps/v8/test/mjsunit/regress/regress-1878.js @@ -1,4 +1,4 @@ -// Copyright 2009 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -30,5 +30,15 @@ // Flags: --allow-natives-syntax --expose_natives_as=natives var a = Array(); -var ai = natives.InternalArray(); -assertFalse(%HaveSameMap(ai, a)); + +for (var i = 0; i < 1000; i++) { + var ai = natives.InternalArray(10000); + assertFalse(%HaveSameMap(ai, a)); + assertTrue(%HasFastElements(ai)); +} + +for (var i = 0; i < 1000; i++) { + var ai = new natives.InternalArray(10000); + assertFalse(%HaveSameMap(ai, a)); + assertTrue(%HasFastElements(ai)); +} diff --git a/deps/v8/test/mjsunit/regress/regress-1945.js b/deps/v8/test/mjsunit/regress/regress-1945.js new file mode 100644 index 0000000000..bffc775fc4 --- /dev/null +++ b/deps/v8/test/mjsunit/regress/regress-1945.js @@ -0,0 +1,34 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Flags: --allow-natives-syntax + +var _d = new Date(); +_d.setHours(0,0,0,0); +_d.setHours(0,0,0,0); +%OptimizeFunctionOnNextCall(_d.setHours); +_d.setHours(0,0,0,0); diff --git a/deps/v8/test/mjsunit/regress/regress-inlining-function-literal-context.js b/deps/v8/test/mjsunit/regress/regress-inlining-function-literal-context.js new file mode 100644 index 0000000000..9b7f7ac768 --- /dev/null +++ b/deps/v8/test/mjsunit/regress/regress-inlining-function-literal-context.js @@ -0,0 +1,53 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. 
nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Flags: --allow-natives-syntax --expose-gc + +function mkbaz(x) { + function baz() { + return function () { + return [x]; + } + } + return baz; +} + +var baz = mkbaz(1); + +function foo() { + var f = baz(); + return f(); +} + +// Tenure. +gc(); +gc(); + +assertArrayEquals([1], foo()); +assertArrayEquals([1], foo()); +%OptimizeFunctionOnNextCall(foo); +assertArrayEquals([1], foo()); diff --git a/deps/v8/test/mjsunit/tools/tickprocessor.js b/deps/v8/test/mjsunit/tools/tickprocessor.js index 30b0ec23f9..c48d9f3a5e 100644 --- a/deps/v8/test/mjsunit/tools/tickprocessor.js +++ b/deps/v8/test/mjsunit/tools/tickprocessor.js @@ -376,8 +376,11 @@ function driveTickProcessorTest( } assertTrue(pathLen != -1); var testsPath = TEST_FILE_NAME.substr(0, pathLen + 1); - var tp = new TickProcessor( - new CppEntriesProviderMock(), separateIc, ignoreUnknown, stateFilter); + var tp = new TickProcessor(new CppEntriesProviderMock(), + separateIc, + TickProcessor.CALL_GRAPH_SIZE, + ignoreUnknown, + stateFilter); var pm = new PrintMonitor(testsPath + refOutput); tp.processLogFileInTest(testsPath + logInput); tp.printStatistics(); diff --git a/deps/v8/test/mozilla/mozilla.status b/deps/v8/test/mozilla/mozilla.status index bc096d5ca1..cc2925d660 100644 --- a/deps/v8/test/mozilla/mozilla.status +++ b/deps/v8/test/mozilla/mozilla.status @@ -213,6 +213,10 @@ js1_5/Array/regress-101964: PASS || FAIL if $mode == debug # builtin to have undefined as the receiver. ecma/String/15.5.4.6-2: FAIL_OK +# Fail because it expects String.prototype.split to distinguish whether +# separator was undefined or not passed at all. +ecma/String/15.5.4.8-2: FAIL_OK + # Fail because of toLowerCase and toUpperCase conversion. 
ecma/String/15.5.4.11-2: FAIL_OK ecma/String/15.5.4.11-5: FAIL_OK diff --git a/deps/v8/test/sputnik/sputnik.status b/deps/v8/test/sputnik/sputnik.status index a587a6d4a2..a4c7d57ff0 100644 --- a/deps/v8/test/sputnik/sputnik.status +++ b/deps/v8/test/sputnik/sputnik.status @@ -125,6 +125,13 @@ S15.5.2_D2: PASS || FAIL_OK S15.5.4.11_D1.1_T1: PASS || FAIL_OK S15.5.4.11_D1.1_T3: PASS || FAIL_OK S12.6.4_D1: PASS || FAIL_OK +S15.5.4.14_A1_T6: FAIL_OK +S15.5.4.14_A1_T7: FAIL_OK +S15.5.4.14_A1_T8: FAIL_OK +S15.5.4.14_A1_T9: FAIL_OK +S15.5.4.14_A2_T7: FAIL_OK +S15.10.2.12_A1_T1: FAIL_OK +S15.10.2.12_A2_T1: FAIL_OK # We allow function declarations within statements S12.6.2_A13_T1: FAIL_OK @@ -189,7 +196,6 @@ S15.3.4.3_A6_T4: FAIL_OK S15.4.4.2_A2_T1: FAIL_OK S15.4.4.3_A2_T1: FAIL_OK - ##################### SKIPPED TESTS ##################### # These tests take a looong time to run in debug mode. diff --git a/deps/v8/test/test262/README b/deps/v8/test/test262/README index 094356fcf0..dae18433a5 100644 --- a/deps/v8/test/test262/README +++ b/deps/v8/test/test262/README @@ -4,11 +4,11 @@ tests from http://hg.ecmascript.org/tests/test262 -at revision 271 as 'data' in this directory. Using later version +at revision 309 as 'data' in this directory. Using later version may be possible but the tests are only known to pass (and indeed run) with that revision. -hg clone -r 271 http://hg.ecmascript.org/tests/test262 data +hg clone -r 309 http://hg.ecmascript.org/tests/test262 data If you do update to a newer revision you may have to change the test harness adapter code since it uses internal functionality from the diff --git a/deps/v8/test/test262/test262.status b/deps/v8/test/test262/test262.status index 1da988efc1..db99c80757 100644 --- a/deps/v8/test/test262/test262.status +++ b/deps/v8/test/test262/test262.status @@ -33,9 +33,6 @@ def FAIL_OK = FAIL, OKAY # '__proto__' should be treated as a normal property in JSON. S15.12.2_A1: FAIL -# V8 Bug: http://code.google.com/p/v8/issues/detail?id=1196 -S8.7_A5_T2: FAIL - # V8 Bug: http://code.google.com/p/v8/issues/detail?id=1624 S10.4.2.1_A1: FAIL @@ -59,14 +56,6 @@ S10.4.2.1_A1: FAIL 15.2.3.7-6-a-284: FAIL 15.2.3.7-6-a-285: FAIL -# V8 Bug: http://code.google.com/p/v8/issues/detail?id=1790 -15.4.4.22-9-9: FAIL - -# Invalid test cases (recent change adding var changes semantics) -S8.3_A1_T1: FAIL -S15.3_A3_T1: FAIL -S15.3_A3_T3: FAIL - ##################### DELIBERATE INCOMPATIBILITIES ##################### # We deliberately treat arguments to parseInt() with a leading zero as @@ -80,9 +69,6 @@ S15.8.2.16_A7: PASS || FAIL_OK S15.8.2.18_A7: PASS || FAIL_OK S15.8.2.13_A23: PASS || FAIL_OK -# Sputnik tests (r97) assume RegExp.prototype is an Object, not a RegExp. -S15.10.6_A2: FAIL_OK - # We are silent in some regexp cases where the spec wants us to give # errors, for compatibility. S15.10.2.11_A1_T2: FAIL @@ -90,15 +76,9 @@ S15.10.2.11_A1_T3: FAIL # We are more lenient in which string character escapes we allow than # the spec (7.8.4 p. 19) wants us to be. This is for compatibility. 
-S7.8.4_A4.3_T3: FAIL_OK -S7.8.4_A4.3_T4: FAIL_OK -S7.8.4_A4.3_T5: FAIL_OK -S7.8.4_A4.3_T6: FAIL_OK S7.8.4_A6.1_T4: FAIL_OK S7.8.4_A6.2_T1: FAIL_OK S7.8.4_A6.2_T2: FAIL_OK -S7.8.4_A6.4_T1: FAIL_OK -S7.8.4_A6.4_T2: FAIL_OK S7.8.4_A7.1_T4: FAIL_OK S7.8.4_A7.2_T1: FAIL_OK S7.8.4_A7.2_T2: FAIL_OK @@ -106,102 +86,12 @@ S7.8.4_A7.2_T3: FAIL_OK S7.8.4_A7.2_T4: FAIL_OK S7.8.4_A7.2_T5: FAIL_OK S7.8.4_A7.2_T6: FAIL_OK -S7.8.4_A7.4_T1: FAIL_OK -S7.8.4_A7.4_T2: FAIL_OK - -# Sputnik expects unicode escape sequences in RegExp flags to be interpreted. -# The specification requires them to be passed uninterpreted to the RegExp -# constructor. We now implement that. -S7.8.5_A3.1_T7: FAIL_OK -S7.8.5_A3.1_T8: FAIL_OK -S7.8.5_A3.1_T9: FAIL_OK - -# We allow some keywords to be used as identifiers. -S7.5.3_A1.15: FAIL_OK -S7.5.3_A1.18: FAIL_OK -S7.5.3_A1.21: FAIL_OK -S7.5.3_A1.22: FAIL_OK -S7.5.3_A1.23: FAIL_OK -S7.5.3_A1.24: FAIL_OK -S7.5.3_A1.26: FAIL_OK - -# This checks for non-262 behavior -S7.6_D1: PASS || FAIL_OK -S7.6_D2: PASS || FAIL_OK -S8.4_D1.1: PASS || FAIL_OK -S8.4_D2.1: PASS || FAIL_OK -S8.4_D2.2: PASS || FAIL_OK -S8.4_D2.3: PASS || FAIL_OK -S8.4_D2.4: PASS || FAIL_OK -S8.4_D2.5: PASS || FAIL_OK -S8.4_D2.6: PASS || FAIL_OK -S8.4_D2.7: PASS || FAIL_OK -S11.4.3_D1.2: PASS || FAIL_OK -S12.6.4_A14_T1: PASS || FAIL_OK -S12.6.4_D1: PASS || FAIL_OK -S12.6.4_R1: PASS || FAIL_OK -S12.6.4_R2: PASS || FAIL_OK -S13.2_D1.2: PASS || FAIL_OK -S13_D1_T1: PASS || FAIL_OK -S14_D4_T3: PASS || FAIL_OK -S14_D7: PASS || FAIL_OK -S15.1.2.2_D1.2: PASS || FAIL_OK -S15.5.2_D2: PASS || FAIL_OK -S15.5.4.11_D1.1_T1: PASS || FAIL_OK -S15.5.4.11_D1.1_T2: PASS || FAIL_OK -S15.5.4.11_D1.1_T3: PASS || FAIL_OK -S15.5.4.11_D1.1_T4: PASS || FAIL_OK - -# We allow function declarations within statements -S12.6.2_A13_T1: FAIL_OK -S12.6.2_A13_T2: FAIL_OK -S12.6.4_A13_T1: FAIL_OK -S12.6.4_A13_T2: FAIL_OK -S15.3.4.2_A1_T1: FAIL_OK # Linux and Mac defaults to extended 80 bit floating point format in the FPU. # We follow the other major JS engines by keeping this default. S8.5_A2.2: PASS, FAIL if $system == linux, FAIL if $system == macos S8.5_A2.1: PASS, FAIL if $system == linux, FAIL if $system == macos -############################# ES3 TESTS ################################ -# These tests check for ES3 semantics, and differ from ES5. -# When we follow ES5 semantics, it's ok to fail the test. - -# Allow keywords as names of properties in object initialisers and -# in dot-notation property access. -S11.1.5_A4.1: FAIL_OK -S11.1.5_A4.2: FAIL_OK - -# Calls builtins without an explicit receiver which means that -# undefined is passed to the builtin. The tests expect the global -# object to be passed which was true in ES3 but not in ES5. -S11.1.1_A2: FAIL_OK -S15.5.4.4_A1_T3: FAIL_OK -S15.5.4.5_A1_T3: FAIL_OK -S15.5.4.6_A1_T3: FAIL_OK -S15.5.4.7_A1_T3: FAIL_OK -S15.5.4.8_A1_T3: FAIL_OK -S15.5.4.9_A1_T3: FAIL_OK -S15.5.4.10_A1_T3: FAIL_OK -S15.5.4.11_A1_T3: FAIL_OK -S15.5.4.12_A1_T3: FAIL_OK -S15.5.4.13_A1_T3: FAIL_OK -S15.5.4.14_A1_T3: FAIL_OK -S15.5.4.15_A1_T3: FAIL_OK - -# NaN, Infinity and undefined are read-only according to ES5. -S15.1.1.1_A2_T1: FAIL_OK # NaN -S15.1.1.1_A2_T2: FAIL_OK # NaN -S15.1.1.2_A2_T1: FAIL_OK # Infinity -# S15.1.1.2_A2_T2 would fail if it weren't bogus in r97. sputnik bug #45. -S15.1.1.3_A2_T1: FAIL_OK # undefined -S15.1.1.3_A2_T2: FAIL_OK # undefined - -# Array.prototype.to[Locale]String is generic in ES5. 
-S15.4.4.2_A2_T1: FAIL_OK -S15.4.4.3_A2_T1: FAIL_OK - ############################ SKIPPED TESTS ############################# # These tests take a looong time to run in debug mode. diff --git a/deps/v8/tools/disasm.py b/deps/v8/tools/disasm.py index c326382dfb..681b4256df 100644 --- a/deps/v8/tools/disasm.py +++ b/deps/v8/tools/disasm.py @@ -48,7 +48,8 @@ _DISASM_LINE_RE = re.compile(r"\s*([a-f0-9]+):\s*(\S.*)") _ARCH_MAP = { "ia32": "-m i386", "x64": "-m i386 -M x86-64", - "arm": "-m arm" # Not supported by our objdump build. + "arm": "-m arm", # Not supported by our objdump build. + "mips": "-m mips" # Not supported by our objdump build. } diff --git a/deps/v8/tools/gcmole/gcmole.cc b/deps/v8/tools/gcmole/gcmole.cc index 71ba24a33b..38ee6e07ef 100644 --- a/deps/v8/tools/gcmole/gcmole.cc +++ b/deps/v8/tools/gcmole/gcmole.cc @@ -69,6 +69,21 @@ static bool InV8Namespace(const clang::NamedDecl* decl) { } +static std::string EXTERNAL("EXTERNAL"); +static std::string STATE_TAG("enum v8::internal::StateTag"); + +static bool IsExternalVMState(const clang::ValueDecl* var) { + const clang::EnumConstantDecl* enum_constant = + dyn_cast<clang::EnumConstantDecl>(var); + if (enum_constant != NULL && enum_constant->getNameAsString() == EXTERNAL) { + clang::QualType type = enum_constant->getType(); + return (type.getAsString() == STATE_TAG); + } + + return false; +} + + struct Resolver { explicit Resolver(clang::ASTContext& ctx) : ctx_(ctx), decl_ctx_(ctx.getTranslationUnitDecl()) { @@ -121,6 +136,13 @@ class CalleesPrinter : public clang::RecursiveASTVisitor<CalleesPrinter> { return true; } + virtual bool VisitDeclRefExpr(clang::DeclRefExpr* expr) { + // If function mentions EXTERNAL VMState add artificial garbage collection + // mark. + if (IsExternalVMState(expr->getDecl())) AddCallee("CollectGarbage"); + return true; + } + void AnalyzeFunction(const clang::FunctionDecl* f) { MangledName name; if (InV8Namespace(f) && GetMangledName(ctx_, f, &name)) { @@ -278,6 +300,10 @@ class ExprEffect { return reinterpret_cast<Environment*>(effect_ & ~kAllEffects); } + static ExprEffect GC() { + return ExprEffect(kCausesGC, NULL); + } + private: ExprEffect(int effect, Environment* env) : effect_((effect & kAllEffects) | @@ -790,6 +816,9 @@ class FunctionAnalyzer { ExprEffect Use(const clang::Expr* parent, const clang::ValueDecl* var, const Environment& env) { + if (IsExternalVMState(var)) { + return ExprEffect::GC(); + } return Use(parent, var->getType(), var->getNameAsString(), env); } diff --git a/deps/v8/tools/gcmole/gcmole.lua b/deps/v8/tools/gcmole/gcmole.lua index f8d3b6204a..09db54754f 100644 --- a/deps/v8/tools/gcmole/gcmole.lua +++ b/deps/v8/tools/gcmole/gcmole.lua @@ -106,7 +106,6 @@ function InvokeClangPluginForEachFile(filenames, cfg, func) cfg.plugin_args, cfg.triple, cfg.arch_define) - for _, filename in ipairs(filenames) do log("-- %s", filename) local action = cmd_line .. " src/" .. filename .. " 2>&1" @@ -218,7 +217,13 @@ local WHITELIST = { -- Callsites of such functions are safe as long as they are properly -- check return value and propagate the Failure to the caller. -- It should be possible to extend GCMole to understand this. - "Heap.*AllocateFunctionPrototype" + "Heap.*AllocateFunctionPrototype", + + -- Ignore all StateTag methods. + "StateTag", + + -- Ignore printing of elements transition. 
+ "PrintElementsTransition" }; local function AddCause(name, cause) diff --git a/deps/v8/tools/tickprocessor-driver.js b/deps/v8/tools/tickprocessor-driver.js index 4201e43d3f..9af5ab6c79 100644 --- a/deps/v8/tools/tickprocessor-driver.js +++ b/deps/v8/tools/tickprocessor-driver.js @@ -52,6 +52,7 @@ if (params.snapshotLogFileName) { var tickProcessor = new TickProcessor( new (entriesProviders[params.platform])(params.nm), params.separateIc, + params.callGraphSize, params.ignoreUnknown, params.stateFilter, snapshotLogProcessor); diff --git a/deps/v8/tools/tickprocessor.js b/deps/v8/tools/tickprocessor.js index 5f57835524..05a3369255 100644 --- a/deps/v8/tools/tickprocessor.js +++ b/deps/v8/tools/tickprocessor.js @@ -146,7 +146,12 @@ SnapshotLogProcessor.prototype.getSerializedEntryName = function(pos) { function TickProcessor( - cppEntriesProvider, separateIc, ignoreUnknown, stateFilter, snapshotLogProcessor) { + cppEntriesProvider, + separateIc, + callGraphSize, + ignoreUnknown, + stateFilter, + snapshotLogProcessor) { LogReader.call(this, { 'shared-library': { parsers: [null, parseInt, parseInt], processor: this.processSharedLibrary }, @@ -181,6 +186,7 @@ function TickProcessor( 'end-code-region': null }); this.cppEntriesProvider_ = cppEntriesProvider; + this.callGraphSize_ = callGraphSize; this.ignoreUnknown_ = ignoreUnknown; this.stateFilter_ = stateFilter; this.snapshotLogProcessor_ = snapshotLogProcessor; @@ -240,6 +246,7 @@ TickProcessor.CodeTypes = { TickProcessor.CALL_PROFILE_CUTOFF_PCT = 2.0; +TickProcessor.CALL_GRAPH_SIZE = 5; /** * @override @@ -535,7 +542,7 @@ TickProcessor.prototype.printHeavyProfile = function(profile, opt_indent) { padLeft(rec.parentTotalPercent.toFixed(1), 5) + '% ' + indentStr + rec.internalFuncName); // Limit backtrace depth. - if (indent < 10) { + if (indent < 2 * self.callGraphSize_) { self.printHeavyProfile(rec.children, indent + 2); } // Delimit top-level functions. @@ -764,6 +771,8 @@ function ArgumentsProcessor(args) { 'Show only ticks from OTHER VM state'], '-e': ['stateFilter', TickProcessor.VmStates.EXTERNAL, 'Show only ticks from EXTERNAL VM state'], + '--call-graph-size': ['callGraphSize', TickProcessor.CALL_GRAPH_SIZE, + 'Set the call graph size'], '--ignore-unknown': ['ignoreUnknown', true, 'Exclude ticks of unknown code entries from processing'], '--separate-ic': ['separateIc', true, @@ -792,6 +801,7 @@ ArgumentsProcessor.DEFAULTS = { snapshotLogFileName: null, platform: 'unix', stateFilter: null, + callGraphSize: 5, ignoreUnknown: false, separateIc: false, nm: 'nm' |