author     Refael Ackermann <refack@gmail.com>   2019-05-28 08:46:21 -0400
committer  Refael Ackermann <refack@gmail.com>   2019-06-01 09:55:12 -0400
commit     ed74896b1fae1c163b3906163f3bf46326618ddb (patch)
tree       7fb05c5a19808e0c5cd95837528e9005999cf540 /deps/v8/tools/unittests
parent     2a850cd0664a4eee51f44d0bb8c2f7a3fe444154 (diff)
download   node-new-ed74896b1fae1c163b3906163f3bf46326618ddb.tar.gz
deps: update V8 to 7.5.288.22
PR-URL: https://github.com/nodejs/node/pull/27375
Reviewed-By: Michaël Zasso <targos@protonmail.com>
Reviewed-By: Ujjwal Sharma <usharma1998@gmail.com>
Reviewed-By: Refael Ackermann <refack@gmail.com>
Reviewed-By: Matteo Collina <matteo.collina@gmail.com>
Reviewed-By: Colin Ihrig <cjihrig@gmail.com>
Reviewed-By: Rich Trott <rtrott@gmail.com>
Diffstat (limited to 'deps/v8/tools/unittests')
-rwxr-xr-x  deps/v8/tools/unittests/run_perf_test.py                      574
-rwxr-xr-x  deps/v8/tools/unittests/run_tests_test.py                       2
-rw-r--r--  deps/v8/tools/unittests/testdata/expected_test_results1.json   12
-rw-r--r--  deps/v8/tools/unittests/testdata/expected_test_results2.json    8
4 files changed, 312 insertions, 284 deletions
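
Most of the run_perf_test.py churn below is mechanical: double-quoted strings become single-quoted, "from os import path, sys" gives way to os.path plus a direct "import sys", and "from mock import MagicMock, patch" becomes a module-level "import mock" (the PyPI mock package, as in the diff). The module-level import is what lets tearDown undo everything with one mock.patch.stopall() call. A minimal sketch of that pattern, using platform.system as a stand-in patch target (not one of the targets patched in the diff):

    import unittest
    import mock

    class PatchStopallExample(unittest.TestCase):
      def setUp(self):
        # Patches started with .start() stay active for the whole test;
        # no decorator or 'with' block is needed.
        mock.patch('platform.system', return_value='Linux').start()

      def tearDown(self):
        # A single call restores every patch started above.
        mock.patch.stopall()

      def test_system_is_mocked(self):
        import platform
        self.assertEqual('Linux', platform.system())
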
diff --git a/deps/v8/tools/unittests/run_perf_test.py b/deps/v8/tools/unittests/run_perf_test.py
index f1028dee6a..5e009ebd6b 100755
--- a/deps/v8/tools/unittests/run_perf_test.py
+++ b/deps/v8/tools/unittests/run_perf_test.py
@@ -9,12 +9,12 @@ from __future__ import print_function
from collections import namedtuple
import coverage
import json
-from mock import MagicMock, patch
+import mock
import os
-from os import path, sys
import platform
import shutil
import subprocess
+import sys
import tempfile
import unittest
@@ -25,78 +25,77 @@ BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
RUN_PERF = os.path.join(BASE_DIR, 'run_perf.py')
TEST_DATA = os.path.join(BASE_DIR, 'unittests', 'testdata')
-TEST_WORKSPACE = path.join(tempfile.gettempdir(), "test-v8-run-perf")
+TEST_WORKSPACE = os.path.join(tempfile.gettempdir(), 'test-v8-run-perf')
V8_JSON = {
- "path": ["."],
- "owners": ["username@chromium.org"],
- "binary": "d7",
- "flags": ["--flag"],
- "main": "run.js",
- "run_count": 1,
- "results_regexp": "^%s: (.+)$",
- "tests": [
- {"name": "Richards"},
- {"name": "DeltaBlue"},
+ 'path': ['.'],
+ 'owners': ['username@chromium.org'],
+ 'binary': 'd7',
+ 'flags': ['--flag'],
+ 'main': 'run.js',
+ 'run_count': 1,
+ 'results_regexp': '^%s: (.+)$',
+ 'tests': [
+ {'name': 'Richards'},
+ {'name': 'DeltaBlue'},
]
}
V8_NESTED_SUITES_JSON = {
- "path": ["."],
- "owners": ["username@chromium.org"],
- "flags": ["--flag"],
- "run_count": 1,
- "units": "score",
- "tests": [
- {"name": "Richards",
- "path": ["richards"],
- "binary": "d7",
- "main": "run.js",
- "resources": ["file1.js", "file2.js"],
- "run_count": 2,
- "results_regexp": "^Richards: (.+)$"},
- {"name": "Sub",
- "path": ["sub"],
- "tests": [
- {"name": "Leaf",
- "path": ["leaf"],
- "run_count_x64": 3,
- "units": "ms",
- "main": "run.js",
- "results_regexp": "^Simple: (.+) ms.$"},
+ 'path': ['.'],
+ 'owners': ['username@chromium.org'],
+ 'flags': ['--flag'],
+ 'run_count': 1,
+ 'units': 'score',
+ 'tests': [
+ {'name': 'Richards',
+ 'path': ['richards'],
+ 'binary': 'd7',
+ 'main': 'run.js',
+ 'resources': ['file1.js', 'file2.js'],
+ 'run_count': 2,
+ 'results_regexp': '^Richards: (.+)$'},
+ {'name': 'Sub',
+ 'path': ['sub'],
+ 'tests': [
+ {'name': 'Leaf',
+ 'path': ['leaf'],
+ 'run_count_x64': 3,
+ 'units': 'ms',
+ 'main': 'run.js',
+ 'results_regexp': '^Simple: (.+) ms.$'},
]
},
- {"name": "DeltaBlue",
- "path": ["delta_blue"],
- "main": "run.js",
- "flags": ["--flag2"],
- "results_regexp": "^DeltaBlue: (.+)$"},
- {"name": "ShouldntRun",
- "path": ["."],
- "archs": ["arm"],
- "main": "run.js"},
+ {'name': 'DeltaBlue',
+ 'path': ['delta_blue'],
+ 'main': 'run.js',
+ 'flags': ['--flag2'],
+ 'results_regexp': '^DeltaBlue: (.+)$'},
+ {'name': 'ShouldntRun',
+ 'path': ['.'],
+ 'archs': ['arm'],
+ 'main': 'run.js'},
]
}
V8_GENERIC_JSON = {
- "path": ["."],
- "owners": ["username@chromium.org"],
- "binary": "cc",
- "flags": ["--flag"],
- "generic": True,
- "run_count": 1,
- "units": "ms",
+ 'path': ['.'],
+ 'owners': ['username@chromium.org'],
+ 'binary': 'cc',
+ 'flags': ['--flag'],
+ 'generic': True,
+ 'run_count': 1,
+ 'units': 'ms',
}
-Output = namedtuple("Output", "stdout, stderr, timed_out, exit_code")
+Output = namedtuple('Output', 'stdout, stderr, timed_out, exit_code')
class PerfTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
- cls.base = path.dirname(path.dirname(path.abspath(__file__)))
- sys.path.append(cls.base)
+ sys.path.insert(0, BASE_DIR)
cls._cov = coverage.coverage(
- include=([os.path.join(cls.base, "run_perf.py")]))
+ include=([os.path.join(BASE_DIR, 'run_perf.py')]))
cls._cov.start()
import run_perf
from testrunner.local import command
@@ -106,56 +105,56 @@ class PerfTest(unittest.TestCase):
@classmethod
def tearDownClass(cls):
cls._cov.stop()
- print("")
+ print('')
print(cls._cov.report())
def setUp(self):
self.maxDiff = None
- if path.exists(TEST_WORKSPACE):
+ if os.path.exists(TEST_WORKSPACE):
shutil.rmtree(TEST_WORKSPACE)
os.makedirs(TEST_WORKSPACE)
def tearDown(self):
- patch.stopall()
- if path.exists(TEST_WORKSPACE):
+ mock.patch.stopall()
+ if os.path.exists(TEST_WORKSPACE):
shutil.rmtree(TEST_WORKSPACE)
def _WriteTestInput(self, json_content):
- self._test_input = path.join(TEST_WORKSPACE, "test.json")
- with open(self._test_input, "w") as f:
+ self._test_input = os.path.join(TEST_WORKSPACE, 'test.json')
+ with open(self._test_input, 'w') as f:
f.write(json.dumps(json_content))
def _MockCommand(self, *args, **kwargs):
# Fake output for each test run.
test_outputs = [Output(stdout=arg,
stderr=None,
- timed_out=kwargs.get("timed_out", False),
- exit_code=kwargs.get("exit_code", 0))
+ timed_out=kwargs.get('timed_out', False),
+ exit_code=kwargs.get('exit_code', 0))
for arg in args[1]]
def create_cmd(*args, **kwargs):
- cmd = MagicMock()
+ cmd = mock.MagicMock()
def execute(*args, **kwargs):
return test_outputs.pop()
- cmd.execute = MagicMock(side_effect=execute)
+ cmd.execute = mock.MagicMock(side_effect=execute)
return cmd
- patch.object(
+ mock.patch.object(
run_perf.command, 'PosixCommand',
- MagicMock(side_effect=create_cmd)).start()
+ mock.MagicMock(side_effect=create_cmd)).start()
# Check that d8 is called from the correct cwd for each test run.
- dirs = [path.join(TEST_WORKSPACE, arg) for arg in args[0]]
+ dirs = [os.path.join(TEST_WORKSPACE, arg) for arg in args[0]]
def chdir(*args, **kwargs):
self.assertEquals(dirs.pop(), args[0])
- os.chdir = MagicMock(side_effect=chdir)
+ os.chdir = mock.MagicMock(side_effect=chdir)
- subprocess.check_call = MagicMock()
- platform.system = MagicMock(return_value='Linux')
+ subprocess.check_call = mock.MagicMock()
+ platform.system = mock.MagicMock(return_value='Linux')
def _CallMain(self, *args):
- self._test_output = path.join(TEST_WORKSPACE, "results.json")
+ self._test_output = os.path.join(TEST_WORKSPACE, 'results.json')
all_args=[
- "--json-test-results",
+ '--json-test-results',
self._test_output,
self._test_input,
]
@@ -168,17 +167,17 @@ class PerfTest(unittest.TestCase):
def _VerifyResults(self, suite, units, traces, file_name=None):
self.assertEquals([
- {"units": units,
- "graphs": [suite, trace["name"]],
- "results": trace["results"],
- "stddev": trace["stddev"]} for trace in traces],
- self._LoadResults(file_name)["traces"])
+ {'units': units,
+ 'graphs': [suite, trace['name']],
+ 'results': trace['results'],
+ 'stddev': trace['stddev']} for trace in traces],
+ self._LoadResults(file_name)['traces'])
def _VerifyErrors(self, errors):
- self.assertEquals(errors, self._LoadResults()["errors"])
+ self.assertEquals(errors, self._LoadResults()['errors'])
def _VerifyMock(self, binary, *args, **kwargs):
- shell = path.join(path.dirname(self.base), binary)
+ shell = os.path.join(os.path.dirname(BASE_DIR), binary)
command.Command.assert_called_with(
cmd_prefix=[],
shell=shell,
@@ -190,7 +189,7 @@ class PerfTest(unittest.TestCase):
for arg, actual in zip(args, command.Command.call_args_list):
expected = {
'cmd_prefix': [],
- 'shell': path.join(path.dirname(self.base), arg[0]),
+ 'shell': os.path.join(os.path.dirname(BASE_DIR), arg[0]),
'args': list(arg[1:]),
'timeout': kwargs.get('timeout', 60)
}
@@ -198,305 +197,324 @@ class PerfTest(unittest.TestCase):
def testOneRun(self):
self._WriteTestInput(V8_JSON)
- self._MockCommand(["."], ["x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n"])
+ self._MockCommand(['.'], ['x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n'])
self.assertEquals(0, self._CallMain())
- self._VerifyResults("test", "score", [
- {"name": "Richards", "results": ["1.234"], "stddev": ""},
- {"name": "DeltaBlue", "results": ["10657567.0"], "stddev": ""},
+ self._VerifyResults('test', 'score', [
+ {'name': 'Richards', 'results': ['1.234'], 'stddev': ''},
+ {'name': 'DeltaBlue', 'results': ['10657567.0'], 'stddev': ''},
])
self._VerifyErrors([])
- self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js")
+ self._VerifyMock(
+ os.path.join('out', 'x64.release', 'd7'), '--flag', 'run.js')
def testOneRunWithTestFlags(self):
test_input = dict(V8_JSON)
- test_input["test_flags"] = ["2", "test_name"]
+ test_input['test_flags'] = ['2', 'test_name']
self._WriteTestInput(test_input)
- self._MockCommand(["."], ["Richards: 1.234\nDeltaBlue: 10657567"])
+ self._MockCommand(['.'], ['Richards: 1.234\nDeltaBlue: 10657567'])
self.assertEquals(0, self._CallMain())
- self._VerifyResults("test", "score", [
- {"name": "Richards", "results": ["1.234"], "stddev": ""},
- {"name": "DeltaBlue", "results": ["10657567.0"], "stddev": ""},
+ self._VerifyResults('test', 'score', [
+ {'name': 'Richards', 'results': ['1.234'], 'stddev': ''},
+ {'name': 'DeltaBlue', 'results': ['10657567.0'], 'stddev': ''},
])
self._VerifyErrors([])
- self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js",
- "--", "2", "test_name")
+ self._VerifyMock(os.path.join(
+ 'out', 'x64.release', 'd7'), '--flag', 'run.js', '--', '2', 'test_name')
def testTwoRuns_Units_SuiteName(self):
test_input = dict(V8_JSON)
- test_input["run_count"] = 2
- test_input["name"] = "v8"
- test_input["units"] = "ms"
+ test_input['run_count'] = 2
+ test_input['name'] = 'v8'
+ test_input['units'] = 'ms'
self._WriteTestInput(test_input)
- self._MockCommand([".", "."],
- ["Richards: 100\nDeltaBlue: 200\n",
- "Richards: 50\nDeltaBlue: 300\n"])
+ self._MockCommand(['.', '.'],
+ ['Richards: 100\nDeltaBlue: 200\n',
+ 'Richards: 50\nDeltaBlue: 300\n'])
self.assertEquals(0, self._CallMain())
- self._VerifyResults("v8", "ms", [
- {"name": "Richards", "results": ["50.0", "100.0"], "stddev": ""},
- {"name": "DeltaBlue", "results": ["300.0", "200.0"], "stddev": ""},
+ self._VerifyResults('v8', 'ms', [
+ {'name': 'Richards', 'results': ['50.0', '100.0'], 'stddev': ''},
+ {'name': 'DeltaBlue', 'results': ['300.0', '200.0'], 'stddev': ''},
])
self._VerifyErrors([])
- self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js")
+ self._VerifyMock(os.path.join(
+ 'out', 'x64.release', 'd7'), '--flag', 'run.js')
def testTwoRuns_SubRegexp(self):
test_input = dict(V8_JSON)
- test_input["run_count"] = 2
- del test_input["results_regexp"]
- test_input["tests"][0]["results_regexp"] = "^Richards: (.+)$"
- test_input["tests"][1]["results_regexp"] = "^DeltaBlue: (.+)$"
+ test_input['run_count'] = 2
+ del test_input['results_regexp']
+ test_input['tests'][0]['results_regexp'] = '^Richards: (.+)$'
+ test_input['tests'][1]['results_regexp'] = '^DeltaBlue: (.+)$'
self._WriteTestInput(test_input)
- self._MockCommand([".", "."],
- ["Richards: 100\nDeltaBlue: 200\n",
- "Richards: 50\nDeltaBlue: 300\n"])
+ self._MockCommand(['.', '.'],
+ ['Richards: 100\nDeltaBlue: 200\n',
+ 'Richards: 50\nDeltaBlue: 300\n'])
self.assertEquals(0, self._CallMain())
- self._VerifyResults("test", "score", [
- {"name": "Richards", "results": ["50.0", "100.0"], "stddev": ""},
- {"name": "DeltaBlue", "results": ["300.0", "200.0"], "stddev": ""},
+ self._VerifyResults('test', 'score', [
+ {'name': 'Richards', 'results': ['50.0', '100.0'], 'stddev': ''},
+ {'name': 'DeltaBlue', 'results': ['300.0', '200.0'], 'stddev': ''},
])
self._VerifyErrors([])
- self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js")
+ self._VerifyMock(os.path.join(
+ 'out', 'x64.release', 'd7'), '--flag', 'run.js')
def testNestedSuite(self):
self._WriteTestInput(V8_NESTED_SUITES_JSON)
- self._MockCommand(["delta_blue", "sub/leaf", "richards"],
- ["DeltaBlue: 200\n",
- "Simple: 1 ms.\n",
- "Simple: 2 ms.\n",
- "Simple: 3 ms.\n",
- "Richards: 100\n",
- "Richards: 50\n"])
+ self._MockCommand(['delta_blue', 'sub/leaf', 'richards'],
+ ['DeltaBlue: 200\n',
+ 'Simple: 1 ms.\n',
+ 'Simple: 2 ms.\n',
+ 'Simple: 3 ms.\n',
+ 'Richards: 100\n',
+ 'Richards: 50\n'])
self.assertEquals(0, self._CallMain())
self.assertEquals([
- {"units": "score",
- "graphs": ["test", "Richards"],
- "results": ["50.0", "100.0"],
- "stddev": ""},
- {"units": "ms",
- "graphs": ["test", "Sub", "Leaf"],
- "results": ["3.0", "2.0", "1.0"],
- "stddev": ""},
- {"units": "score",
- "graphs": ["test", "DeltaBlue"],
- "results": ["200.0"],
- "stddev": ""},
- ], self._LoadResults()["traces"])
+ {'units': 'score',
+ 'graphs': ['test', 'Richards'],
+ 'results': ['50.0', '100.0'],
+ 'stddev': ''},
+ {'units': 'ms',
+ 'graphs': ['test', 'Sub', 'Leaf'],
+ 'results': ['3.0', '2.0', '1.0'],
+ 'stddev': ''},
+ {'units': 'score',
+ 'graphs': ['test', 'DeltaBlue'],
+ 'results': ['200.0'],
+ 'stddev': ''},
+ ], self._LoadResults()['traces'])
self._VerifyErrors([])
self._VerifyMockMultiple(
- (path.join("out", "x64.release", "d7"), "--flag", "run.js"),
- (path.join("out", "x64.release", "d7"), "--flag", "run.js"),
- (path.join("out", "x64.release", "d8"), "--flag", "run.js"),
- (path.join("out", "x64.release", "d8"), "--flag", "run.js"),
- (path.join("out", "x64.release", "d8"), "--flag", "run.js"),
- (path.join("out", "x64.release", "d8"), "--flag", "--flag2", "run.js"))
+ (os.path.join('out', 'x64.release', 'd7'), '--flag', 'run.js'),
+ (os.path.join('out', 'x64.release', 'd7'), '--flag', 'run.js'),
+ (os.path.join('out', 'x64.release', 'd8'), '--flag', 'run.js'),
+ (os.path.join('out', 'x64.release', 'd8'), '--flag', 'run.js'),
+ (os.path.join('out', 'x64.release', 'd8'), '--flag', 'run.js'),
+ (os.path.join('out', 'x64.release', 'd8'),
+ '--flag', '--flag2', 'run.js'))
def testOneRunStdDevRegExp(self):
test_input = dict(V8_JSON)
- test_input["stddev_regexp"] = "^%s\-stddev: (.+)$"
+ test_input['stddev_regexp'] = '^%s\-stddev: (.+)$'
self._WriteTestInput(test_input)
- self._MockCommand(["."], ["Richards: 1.234\nRichards-stddev: 0.23\n"
- "DeltaBlue: 10657567\nDeltaBlue-stddev: 106\n"])
+ self._MockCommand(['.'], ['Richards: 1.234\nRichards-stddev: 0.23\n'
+ 'DeltaBlue: 10657567\nDeltaBlue-stddev: 106\n'])
self.assertEquals(0, self._CallMain())
- self._VerifyResults("test", "score", [
- {"name": "Richards", "results": ["1.234"], "stddev": "0.23"},
- {"name": "DeltaBlue", "results": ["10657567.0"], "stddev": "106"},
+ self._VerifyResults('test', 'score', [
+ {'name': 'Richards', 'results': ['1.234'], 'stddev': '0.23'},
+ {'name': 'DeltaBlue', 'results': ['10657567.0'], 'stddev': '106'},
])
self._VerifyErrors([])
- self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js")
+ self._VerifyMock(
+ os.path.join('out', 'x64.release', 'd7'), '--flag', 'run.js')
def testTwoRunsStdDevRegExp(self):
test_input = dict(V8_JSON)
- test_input["stddev_regexp"] = "^%s\-stddev: (.+)$"
- test_input["run_count"] = 2
+ test_input['stddev_regexp'] = '^%s\-stddev: (.+)$'
+ test_input['run_count'] = 2
self._WriteTestInput(test_input)
- self._MockCommand(["."], ["Richards: 3\nRichards-stddev: 0.7\n"
- "DeltaBlue: 6\nDeltaBlue-boom: 0.9\n",
- "Richards: 2\nRichards-stddev: 0.5\n"
- "DeltaBlue: 5\nDeltaBlue-stddev: 0.8\n"])
+ self._MockCommand(['.'], ['Richards: 3\nRichards-stddev: 0.7\n'
+ 'DeltaBlue: 6\nDeltaBlue-boom: 0.9\n',
+ 'Richards: 2\nRichards-stddev: 0.5\n'
+ 'DeltaBlue: 5\nDeltaBlue-stddev: 0.8\n'])
self.assertEquals(1, self._CallMain())
- self._VerifyResults("test", "score", [
- {"name": "Richards", "results": ["2.0", "3.0"], "stddev": "0.7"},
- {"name": "DeltaBlue", "results": ["5.0", "6.0"], "stddev": "0.8"},
+ self._VerifyResults('test', 'score', [
+ {'name': 'Richards', 'results': ['2.0', '3.0'], 'stddev': '0.7'},
+ {'name': 'DeltaBlue', 'results': ['5.0', '6.0'], 'stddev': '0.8'},
])
self._VerifyErrors(
- ["Test test/Richards should only run once since a stddev is provided "
- "by the test.",
- "Test test/DeltaBlue should only run once since a stddev is provided "
- "by the test.",
- "Regexp \"^DeltaBlue\-stddev: (.+)$\" didn't match for test "
- "test/DeltaBlue."])
- self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js")
+ ['Test test/Richards should only run once since a stddev is provided '
+ 'by the test.',
+ 'Test test/DeltaBlue should only run once since a stddev is provided '
+ 'by the test.',
+ 'Regexp "^DeltaBlue\-stddev: (.+)$" did not match for test '
+ 'test/DeltaBlue.'])
+ self._VerifyMock(
+ os.path.join('out', 'x64.release', 'd7'), '--flag', 'run.js')
def testBuildbot(self):
self._WriteTestInput(V8_JSON)
- self._MockCommand(["."], ["Richards: 1.234\nDeltaBlue: 10657567\n"])
- self.assertEquals(0, self._CallMain("--buildbot"))
- self._VerifyResults("test", "score", [
- {"name": "Richards", "results": ["1.234"], "stddev": ""},
- {"name": "DeltaBlue", "results": ["10657567.0"], "stddev": ""},
+ self._MockCommand(['.'], ['Richards: 1.234\nDeltaBlue: 10657567\n'])
+ mock.patch.object(
+ run_perf.Platform, 'ReadBuildConfig',
+ mock.MagicMock(return_value={'is_android': False})).start()
+ self.assertEquals(0, self._CallMain('--buildbot'))
+ self._VerifyResults('test', 'score', [
+ {'name': 'Richards', 'results': ['1.234'], 'stddev': ''},
+ {'name': 'DeltaBlue', 'results': ['10657567.0'], 'stddev': ''},
])
self._VerifyErrors([])
- self._VerifyMock(path.join("out", "Release", "d7"), "--flag", "run.js")
+ self._VerifyMock(os.path.join('out', 'Release', 'd7'), '--flag', 'run.js')
def testBuildbotWithTotal(self):
test_input = dict(V8_JSON)
- test_input["total"] = True
+ test_input['total'] = True
self._WriteTestInput(test_input)
- self._MockCommand(["."], ["Richards: 1.234\nDeltaBlue: 10657567\n"])
- self.assertEquals(0, self._CallMain("--buildbot"))
- self._VerifyResults("test", "score", [
- {"name": "Richards", "results": ["1.234"], "stddev": ""},
- {"name": "DeltaBlue", "results": ["10657567.0"], "stddev": ""},
- {"name": "Total", "results": ["3626.49109719"], "stddev": ""},
+ self._MockCommand(['.'], ['Richards: 1.234\nDeltaBlue: 10657567\n'])
+ mock.patch.object(
+ run_perf.Platform, 'ReadBuildConfig',
+ mock.MagicMock(return_value={'is_android': False})).start()
+ self.assertEquals(0, self._CallMain('--buildbot'))
+ self._VerifyResults('test', 'score', [
+ {'name': 'Richards', 'results': ['1.234'], 'stddev': ''},
+ {'name': 'DeltaBlue', 'results': ['10657567.0'], 'stddev': ''},
+ {'name': 'Total', 'results': ['3626.49109719'], 'stddev': ''},
])
self._VerifyErrors([])
- self._VerifyMock(path.join("out", "Release", "d7"), "--flag", "run.js")
+ self._VerifyMock(os.path.join('out', 'Release', 'd7'), '--flag', 'run.js')
def testBuildbotWithTotalAndErrors(self):
test_input = dict(V8_JSON)
- test_input["total"] = True
+ test_input['total'] = True
self._WriteTestInput(test_input)
- self._MockCommand(["."], ["x\nRichards: bla\nDeltaBlue: 10657567\ny\n"])
- self.assertEquals(1, self._CallMain("--buildbot"))
- self._VerifyResults("test", "score", [
- {"name": "Richards", "results": [], "stddev": ""},
- {"name": "DeltaBlue", "results": ["10657567.0"], "stddev": ""},
+ self._MockCommand(['.'], ['x\nRichards: bla\nDeltaBlue: 10657567\ny\n'])
+ mock.patch.object(
+ run_perf.Platform, 'ReadBuildConfig',
+ mock.MagicMock(return_value={'is_android': False})).start()
+ self.assertEquals(1, self._CallMain('--buildbot'))
+ self._VerifyResults('test', 'score', [
+ {'name': 'Richards', 'results': [], 'stddev': ''},
+ {'name': 'DeltaBlue', 'results': ['10657567.0'], 'stddev': ''},
])
self._VerifyErrors(
- ["Regexp \"^Richards: (.+)$\" "
- "returned a non-numeric for test test/Richards.",
- "Not all traces have the same number of results."])
- self._VerifyMock(path.join("out", "Release", "d7"), "--flag", "run.js")
+ ['Regexp "^Richards: (.+)$" '
+ 'returned a non-numeric for test test/Richards.',
+ 'Not all traces have the same number of results.'])
+ self._VerifyMock(os.path.join('out', 'Release', 'd7'), '--flag', 'run.js')
def testRegexpNoMatch(self):
self._WriteTestInput(V8_JSON)
- self._MockCommand(["."], ["x\nRichaards: 1.234\nDeltaBlue: 10657567\ny\n"])
+ self._MockCommand(['.'], ['x\nRichaards: 1.234\nDeltaBlue: 10657567\ny\n'])
self.assertEquals(1, self._CallMain())
- self._VerifyResults("test", "score", [
- {"name": "Richards", "results": [], "stddev": ""},
- {"name": "DeltaBlue", "results": ["10657567.0"], "stddev": ""},
+ self._VerifyResults('test', 'score', [
+ {'name': 'Richards', 'results': [], 'stddev': ''},
+ {'name': 'DeltaBlue', 'results': ['10657567.0'], 'stddev': ''},
])
self._VerifyErrors(
- ["Regexp \"^Richards: (.+)$\" didn't match for test test/Richards."])
- self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js")
+ ['Regexp "^Richards: (.+)$" did not match for test test/Richards.'])
+ self._VerifyMock(
+ os.path.join('out', 'x64.release', 'd7'), '--flag', 'run.js')
def testOneRunGeneric(self):
test_input = dict(V8_GENERIC_JSON)
self._WriteTestInput(test_input)
- self._MockCommand(["."], [
- "RESULT Infra: Constant1= 11 count\n"
- "RESULT Infra: Constant2= [10,5,10,15] count\n"
- "RESULT Infra: Constant3= {12,1.2} count\n"
- "RESULT Infra: Constant4= [10,5,error,15] count\n"])
+ self._MockCommand(['.'], [
+ 'RESULT Infra: Constant1= 11 count\n'
+ 'RESULT Infra: Constant2= [10,5,10,15] count\n'
+ 'RESULT Infra: Constant3= {12,1.2} count\n'
+ 'RESULT Infra: Constant4= [10,5,error,15] count\n'])
self.assertEquals(1, self._CallMain())
self.assertEquals([
- {"units": "count",
- "graphs": ["test", "Infra", "Constant1"],
- "results": ["11.0"],
- "stddev": ""},
- {"units": "count",
- "graphs": ["test", "Infra", "Constant2"],
- "results": ["10.0", "5.0", "10.0", "15.0"],
- "stddev": ""},
- {"units": "count",
- "graphs": ["test", "Infra", "Constant3"],
- "results": ["12.0"],
- "stddev": "1.2"},
- {"units": "count",
- "graphs": ["test", "Infra", "Constant4"],
- "results": [],
- "stddev": ""},
- ], self._LoadResults()["traces"])
- self._VerifyErrors(["Found non-numeric in test/Infra/Constant4"])
- self._VerifyMock(path.join("out", "x64.release", "cc"), "--flag", "")
+ {'units': 'count',
+ 'graphs': ['test', 'Infra', 'Constant1'],
+ 'results': ['11.0'],
+ 'stddev': ''},
+ {'units': 'count',
+ 'graphs': ['test', 'Infra', 'Constant2'],
+ 'results': ['10.0', '5.0', '10.0', '15.0'],
+ 'stddev': ''},
+ {'units': 'count',
+ 'graphs': ['test', 'Infra', 'Constant3'],
+ 'results': ['12.0'],
+ 'stddev': '1.2'},
+ {'units': 'count',
+ 'graphs': ['test', 'Infra', 'Constant4'],
+ 'results': [],
+ 'stddev': ''},
+ ], self._LoadResults()['traces'])
+ self._VerifyErrors(['Found non-numeric in test/Infra/Constant4'])
+ self._VerifyMock(os.path.join('out', 'x64.release', 'cc'), '--flag', '')
def testOneRunCrashed(self):
self._WriteTestInput(V8_JSON)
self._MockCommand(
- ["."], ["x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n"], exit_code=1)
+ ['.'], ['x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n'], exit_code=1)
self.assertEquals(1, self._CallMain())
- self._VerifyResults("test", "score", [
- {"name": "Richards", "results": [], "stddev": ""},
- {"name": "DeltaBlue", "results": [], "stddev": ""},
+ self._VerifyResults('test', 'score', [
+ {'name': 'Richards', 'results': [], 'stddev': ''},
+ {'name': 'DeltaBlue', 'results': [], 'stddev': ''},
])
self._VerifyErrors([])
- self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js")
+ self._VerifyMock(
+ os.path.join('out', 'x64.release', 'd7'), '--flag', 'run.js')
def testOneRunTimingOut(self):
test_input = dict(V8_JSON)
- test_input["timeout"] = 70
+ test_input['timeout'] = 70
self._WriteTestInput(test_input)
- self._MockCommand(["."], [""], timed_out=True)
+ self._MockCommand(['.'], [''], timed_out=True)
self.assertEquals(1, self._CallMain())
- self._VerifyResults("test", "score", [
- {"name": "Richards", "results": [], "stddev": ""},
- {"name": "DeltaBlue", "results": [], "stddev": ""},
+ self._VerifyResults('test', 'score', [
+ {'name': 'Richards', 'results': [], 'stddev': ''},
+ {'name': 'DeltaBlue', 'results': [], 'stddev': ''},
])
self._VerifyErrors([])
- self._VerifyMock(
- path.join("out", "x64.release", "d7"), "--flag", "run.js", timeout=70)
+ self._VerifyMock(os.path.join('out', 'x64.release', 'd7'),
+ '--flag', 'run.js', timeout=70)
- # Simple test that mocks out the android platform. Testing the platform would
- # require lots of complicated mocks for the android tools.
def testAndroid(self):
self._WriteTestInput(V8_JSON)
- # FIXME(machenbach): This is not test-local!
- platform = run_perf.AndroidPlatform
- platform.PreExecution = MagicMock(return_value=None)
- platform.PostExecution = MagicMock(return_value=None)
- platform.PreTests = MagicMock(return_value=None)
- platform.Run = MagicMock(
- return_value=("Richards: 1.234\nDeltaBlue: 10657567\n", None))
- run_perf.AndroidPlatform = MagicMock(return_value=platform)
- with patch.object(run_perf.Platform, 'ReadBuildConfig',
- MagicMock(return_value={'is_android': True})):
- self.assertEquals(0, self._CallMain("--arch", "arm"))
- self._VerifyResults("test", "score", [
- {"name": "Richards", "results": ["1.234"], "stddev": ""},
- {"name": "DeltaBlue", "results": ["10657567.0"], "stddev": ""},
+ mock.patch('run_perf.AndroidPlatform.PreExecution').start()
+ mock.patch('run_perf.AndroidPlatform.PostExecution').start()
+ mock.patch('run_perf.AndroidPlatform.PreTests').start()
+ mock.patch(
+ 'run_perf.AndroidPlatform.Run',
+ return_value=(
+ 'Richards: 1.234\nDeltaBlue: 10657567\n', None)).start()
+ mock.patch('testrunner.local.android._Driver', autospec=True).start()
+ mock.patch(
+ 'run_perf.Platform.ReadBuildConfig',
+ return_value={'is_android': True}).start()
+ self.assertEquals(0, self._CallMain('--arch', 'arm'))
+ self._VerifyResults('test', 'score', [
+ {'name': 'Richards', 'results': ['1.234'], 'stddev': ''},
+ {'name': 'DeltaBlue', 'results': ['10657567.0'], 'stddev': ''},
])
def testTwoRuns_Trybot(self):
test_input = dict(V8_JSON)
- test_input["run_count"] = 2
+ test_input['run_count'] = 2
self._WriteTestInput(test_input)
- self._MockCommand([".", ".", ".", "."],
- ["Richards: 100\nDeltaBlue: 200\n",
- "Richards: 200\nDeltaBlue: 20\n",
- "Richards: 50\nDeltaBlue: 200\n",
- "Richards: 100\nDeltaBlue: 20\n"])
- test_output_secondary = path.join(TEST_WORKSPACE, "results_secondary.json")
+ self._MockCommand(['.', '.', '.', '.'],
+ ['Richards: 100\nDeltaBlue: 200\n',
+ 'Richards: 200\nDeltaBlue: 20\n',
+ 'Richards: 50\nDeltaBlue: 200\n',
+ 'Richards: 100\nDeltaBlue: 20\n'])
+ test_output_secondary = os.path.join(
+ TEST_WORKSPACE, 'results_secondary.json')
self.assertEquals(0, self._CallMain(
- "--outdir-secondary", "out-secondary",
- "--json-test-results-secondary", test_output_secondary,
+ '--outdir-secondary', 'out-secondary',
+ '--json-test-results-secondary', test_output_secondary,
))
- self._VerifyResults("test", "score", [
- {"name": "Richards", "results": ["100.0", "200.0"], "stddev": ""},
- {"name": "DeltaBlue", "results": ["20.0", "20.0"], "stddev": ""},
+ self._VerifyResults('test', 'score', [
+ {'name': 'Richards', 'results': ['100.0', '200.0'], 'stddev': ''},
+ {'name': 'DeltaBlue', 'results': ['20.0', '20.0'], 'stddev': ''},
])
- self._VerifyResults("test", "score", [
- {"name": "Richards", "results": ["50.0", "100.0"], "stddev": ""},
- {"name": "DeltaBlue", "results": ["200.0", "200.0"], "stddev": ""},
+ self._VerifyResults('test', 'score', [
+ {'name': 'Richards', 'results': ['50.0', '100.0'], 'stddev': ''},
+ {'name': 'DeltaBlue', 'results': ['200.0', '200.0'], 'stddev': ''},
], test_output_secondary)
self._VerifyErrors([])
self._VerifyMockMultiple(
- (path.join("out", "x64.release", "d7"), "--flag", "run.js"),
- (path.join("out-secondary", "x64.release", "d7"), "--flag", "run.js"),
- (path.join("out", "x64.release", "d7"), "--flag", "run.js"),
- (path.join("out-secondary", "x64.release", "d7"), "--flag", "run.js"),
+ (os.path.join('out', 'x64.release', 'd7'), '--flag', 'run.js'),
+ (os.path.join('out-secondary', 'x64.release', 'd7'),
+ '--flag', 'run.js'),
+ (os.path.join('out', 'x64.release', 'd7'), '--flag', 'run.js'),
+ (os.path.join('out-secondary', 'x64.release', 'd7'),
+ '--flag', 'run.js'),
)
def testWrongBinaryWithProf(self):
test_input = dict(V8_JSON)
self._WriteTestInput(test_input)
- self._MockCommand(["."], ["x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n"])
- self.assertEquals(0, self._CallMain("--extra-flags=--prof"))
- self._VerifyResults("test", "score", [
- {"name": "Richards", "results": ["1.234"], "stddev": ""},
- {"name": "DeltaBlue", "results": ["10657567.0"], "stddev": ""},
+ self._MockCommand(['.'], ['x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n'])
+ self.assertEquals(0, self._CallMain('--extra-flags=--prof'))
+ self._VerifyResults('test', 'score', [
+ {'name': 'Richards', 'results': ['1.234'], 'stddev': ''},
+ {'name': 'DeltaBlue', 'results': ['10657567.0'], 'stddev': ''},
])
self._VerifyErrors([])
- self._VerifyMock(path.join("out", "x64.release", "d7"),
- "--flag", "--prof", "run.js")
+ self._VerifyMock(os.path.join('out', 'x64.release', 'd7'),
+ '--flag', '--prof', 'run.js')
def testUnzip(self):
def Gen():
@@ -510,18 +528,18 @@ class PerfTest(unittest.TestCase):
### System tests
def _RunPerf(self, mocked_d8, test_json):
- output_json = path.join(TEST_WORKSPACE, "output.json")
+ output_json = os.path.join(TEST_WORKSPACE, 'output.json')
args = [
- sys.executable, RUN_PERF,
- "--binary-override-path", os.path.join(TEST_DATA, mocked_d8),
- "--json-test-results", output_json,
+ os.sys.executable, RUN_PERF,
+ '--binary-override-path', os.path.join(TEST_DATA, mocked_d8),
+ '--json-test-results', output_json,
os.path.join(TEST_DATA, test_json),
]
subprocess.check_output(args)
return self._LoadResults(output_json)
def testNormal(self):
- results = self._RunPerf("d8_mocked1.py", "test1.json")
+ results = self._RunPerf('d8_mocked1.py', 'test1.json')
self.assertEquals([], results['errors'])
self.assertEquals([
{
@@ -539,7 +557,7 @@ class PerfTest(unittest.TestCase):
], results['traces'])
def testResultsProcessor(self):
- results = self._RunPerf("d8_mocked2.py", "test2.json")
+ results = self._RunPerf('d8_mocked2.py', 'test2.json')
self.assertEquals([], results['errors'])
self.assertEquals([
{
@@ -557,7 +575,7 @@ class PerfTest(unittest.TestCase):
], results['traces'])
def testResultsProcessorNested(self):
- results = self._RunPerf("d8_mocked2.py", "test3.json")
+ results = self._RunPerf('d8_mocked2.py', 'test3.json')
self.assertEquals([], results['errors'])
self.assertEquals([
{
diff --git a/deps/v8/tools/unittests/run_tests_test.py b/deps/v8/tools/unittests/run_tests_test.py
index ffe440447d..93b10f5fd9 100755
--- a/deps/v8/tools/unittests/run_tests_test.py
+++ b/deps/v8/tools/unittests/run_tests_test.py
@@ -513,7 +513,7 @@ class SystemTest(unittest.TestCase):
self.assertIn(
'Done running sweet/bananas default: FAIL', result.stdout, result)
self.assertIn('Test had no allocation output', result.stdout, result)
- self.assertIn('--predictable --verify_predictable', result.stdout, result)
+ self.assertIn('--predictable --verify-predictable', result.stdout, result)
self.assertEqual(1, result.returncode, result)
def testSlowArch(self):
diff --git a/deps/v8/tools/unittests/testdata/expected_test_results1.json b/deps/v8/tools/unittests/testdata/expected_test_results1.json
index 7f6742e4cc..bba3f04e96 100644
--- a/deps/v8/tools/unittests/testdata/expected_test_results1.json
+++ b/deps/v8/tools/unittests/testdata/expected_test_results1.json
@@ -16,6 +16,7 @@
"--random-seed=123",
"--nohard-abort"
],
+ "framework_name": "standard_runner",
"name": "sweet/strawberries",
"random_seed": 123,
"result": "FAIL",
@@ -23,7 +24,8 @@
"stderr": "",
"stdout": "--test strawberries --random-seed=123 --nohard-abort\n",
"target_name": "d8_mocked.py",
- "variant": "default"
+ "variant": "default",
+ "variant_flags": []
},
{
"command": "/usr/bin/python out/Release/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort",
@@ -38,6 +40,7 @@
"--random-seed=123",
"--nohard-abort"
],
+ "framework_name": "standard_runner",
"name": "sweet/strawberries",
"random_seed": 123,
"result": "FAIL",
@@ -45,7 +48,8 @@
"stderr": "",
"stdout": "--test strawberries --random-seed=123 --nohard-abort\n",
"target_name": "d8_mocked.py",
- "variant": "default"
+ "variant": "default",
+ "variant_flags": []
},
{
"command": "/usr/bin/python out/Release/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort",
@@ -60,6 +64,7 @@
"--random-seed=123",
"--nohard-abort"
],
+ "framework_name": "standard_runner",
"name": "sweet/strawberries",
"random_seed": 123,
"result": "FAIL",
@@ -67,7 +72,8 @@
"stderr": "",
"stdout": "--test strawberries --random-seed=123 --nohard-abort\n",
"target_name": "d8_mocked.py",
- "variant": "default"
+ "variant": "default",
+ "variant_flags": []
}
],
"slowest_tests": [
diff --git a/deps/v8/tools/unittests/testdata/expected_test_results2.json b/deps/v8/tools/unittests/testdata/expected_test_results2.json
index 95224befdd..bbbb90f4ac 100644
--- a/deps/v8/tools/unittests/testdata/expected_test_results2.json
+++ b/deps/v8/tools/unittests/testdata/expected_test_results2.json
@@ -15,6 +15,7 @@
"--random-seed=123",
"--nohard-abort"
],
+ "framework_name": "standard_runner",
"name": "sweet/bananaflakes",
"random_seed": 123,
"result": "FAIL",
@@ -22,7 +23,8 @@
"stderr": "",
"stdout": "bananaflakes --random-seed=123 --nohard-abort\n",
"target_name": "d8_mocked.py",
- "variant": "default"
+ "variant": "default",
+ "variant_flags": []
},
{
"command": "/usr/bin/python out/Release/d8_mocked.py bananaflakes --random-seed=123 --nohard-abort",
@@ -36,6 +38,7 @@
"--random-seed=123",
"--nohard-abort"
],
+ "framework_name": "standard_runner",
"name": "sweet/bananaflakes",
"random_seed": 123,
"result": "PASS",
@@ -43,7 +46,8 @@
"stderr": "",
"stdout": "bananaflakes --random-seed=123 --nohard-abort\n",
"target_name": "d8_mocked.py",
- "variant": "default"
+ "variant": "default",
+ "variant_flags": []
}
],
"slowest_tests": [