author     Myles Borins <mylesborins@google.com>  2018-04-10 21:39:51 -0400
committer  Myles Borins <mylesborins@google.com>  2018-04-11 13:22:42 -0400
commit     12a1b9b8049462e47181a298120243dc83e81c55 (patch)
tree       8605276308c8b4e3597516961266bae1af57557a /deps/v8/tools/unittests
parent     78cd8263354705b767ef8c6a651740efe4931ba0 (diff)
download   node-new-12a1b9b8049462e47181a298120243dc83e81c55.tar.gz
deps: update V8 to 6.6.346.23
PR-URL: https://github.com/nodejs/node/pull/19201
Reviewed-By: Ali Ijaz Sheikh <ofrobots@google.com>
Reviewed-By: Myles Borins <myles.borins@gmail.com>
Reviewed-By: Matteo Collina <matteo.collina@gmail.com>
Reviewed-By: Colin Ihrig <cjihrig@gmail.com>
Diffstat (limited to 'deps/v8/tools/unittests')
-rwxr-xr-x  deps/v8/tools/unittests/run_tests_test.py                         61
-rw-r--r--  deps/v8/tools/unittests/testdata/expected_test_results1.json      30
-rw-r--r--  deps/v8/tools/unittests/testdata/expected_test_results2.json      20
-rw-r--r--  deps/v8/tools/unittests/testdata/testroot1/test/sweet/testcfg.py   8
-rw-r--r--  deps/v8/tools/unittests/testdata/testroot2/test/sweet/testcfg.py   8
5 files changed, 74 insertions(+), 53 deletions(-)
diff --git a/deps/v8/tools/unittests/run_tests_test.py b/deps/v8/tools/unittests/run_tests_test.py
index f4ff3fe1f7..4fb6aaff13 100755
--- a/deps/v8/tools/unittests/run_tests_test.py
+++ b/deps/v8/tools/unittests/run_tests_test.py
@@ -101,6 +101,8 @@ def run_tests(basedir, *args, **kwargs):
sys_args = ['--command-prefix', sys.executable] + list(args)
if kwargs.get('infra_staging', False):
sys_args.append('--infra-staging')
+ else:
+ sys_args.append('--no-infra-staging')
code = standard_runner.StandardTestRunner(
basedir=basedir).execute(sys_args)
return Result(stdout.getvalue(), stderr.getvalue(), code)
@@ -145,7 +147,9 @@ class SystemTest(unittest.TestCase):
sys.path.append(TOOLS_ROOT)
global standard_runner
from testrunner import standard_runner
+ from testrunner.local import command
from testrunner.local import pool
+ command.setup_testing()
pool.setup_testing()
@classmethod
@@ -170,10 +174,11 @@ class SystemTest(unittest.TestCase):
'sweet/bananas',
'sweet/raspberries',
)
- self.assertIn('Running 4 tests', result.stdout, result)
+ self.assertIn('Running 2 base tests', result.stdout, result)
self.assertIn('Done running sweet/bananas: pass', result.stdout, result)
- self.assertIn('Total time:', result.stderr, result)
- self.assertIn('sweet/bananas', result.stderr, result)
+ # TODO(majeski): Implement for test processors
+ # self.assertIn('Total time:', result.stderr, result)
+ # self.assertIn('sweet/bananas', result.stderr, result)
self.assertEqual(0, result.returncode, result)
def testShardedProc(self):
@@ -199,6 +204,7 @@ class SystemTest(unittest.TestCase):
self.assertIn('Done running sweet/raspberries', result.stdout, result)
self.assertEqual(0, result.returncode, result)
+ @unittest.skip("incompatible with test processors")
def testSharded(self):
"""Test running a particular shard."""
with temp_base() as basedir:
@@ -222,7 +228,7 @@ class SystemTest(unittest.TestCase):
def testFailProc(self):
self.testFail(infra_staging=True)
- def testFail(self, infra_staging=False):
+ def testFail(self, infra_staging=True):
"""Test running only failing tests in two variants."""
with temp_base() as basedir:
result = run_tests(
@@ -269,7 +275,7 @@ class SystemTest(unittest.TestCase):
def testFailWithRerunAndJSONProc(self):
self.testFailWithRerunAndJSON(infra_staging=True)
- def testFailWithRerunAndJSON(self, infra_staging=False):
+ def testFailWithRerunAndJSON(self, infra_staging=True):
"""Test re-running a failing test and output to json."""
with temp_base() as basedir:
json_path = os.path.join(basedir, 'out.json')
@@ -303,12 +309,13 @@ class SystemTest(unittest.TestCase):
# flags field of the test result.
# After recent changes we report all flags, including the file names.
# This is redundant to the command. Needs investigation.
+ self.maxDiff = None
self.check_cleaned_json_output('expected_test_results1.json', json_path)
def testFlakeWithRerunAndJSONProc(self):
self.testFlakeWithRerunAndJSON(infra_staging=True)
- def testFlakeWithRerunAndJSON(self, infra_staging=False):
+ def testFlakeWithRerunAndJSON(self, infra_staging=True):
"""Test re-running a failing test and output to json."""
with temp_base(baseroot='testroot2') as basedir:
json_path = os.path.join(basedir, 'out.json')
@@ -334,6 +341,7 @@ class SystemTest(unittest.TestCase):
'Done running sweet/bananaflakes: pass', result.stdout, result)
self.assertIn('All tests succeeded', result.stdout, result)
self.assertEqual(0, result.returncode, result)
+ self.maxDiff = None
self.check_cleaned_json_output('expected_test_results2.json', json_path)
def testAutoDetect(self):
@@ -374,7 +382,7 @@ class SystemTest(unittest.TestCase):
def testSkipsProc(self):
self.testSkips(infra_staging=True)
- def testSkips(self, infra_staging=False):
+ def testSkips(self, infra_staging=True):
"""Test skipping tests in status file for a specific variant."""
with temp_base() as basedir:
result = run_tests(
@@ -390,12 +398,12 @@ class SystemTest(unittest.TestCase):
else:
self.assertIn('Running 1 base tests', result.stdout, result)
self.assertIn('0 tests ran', result.stdout, result)
- self.assertEqual(0, result.returncode, result)
+ self.assertEqual(2, result.returncode, result)
def testDefaultProc(self):
self.testDefault(infra_staging=True)
- def testDefault(self, infra_staging=False):
+ def testDefault(self, infra_staging=True):
"""Test using default test suites, though no tests are run since they don't
exist in a test setting.
"""
@@ -410,14 +418,14 @@ class SystemTest(unittest.TestCase):
else:
self.assertIn('Running 0 base tests', result.stdout, result)
self.assertIn('0 tests ran', result.stdout, result)
- self.assertEqual(0, result.returncode, result)
+ self.assertEqual(2, result.returncode, result)
def testNoBuildConfig(self):
"""Test failing run when build config is not found."""
with temp_base() as basedir:
result = run_tests(basedir)
self.assertIn('Failed to load build config', result.stdout, result)
- self.assertEqual(1, result.returncode, result)
+ self.assertEqual(5, result.returncode, result)
def testGNOption(self):
"""Test using gn option, but no gn build folder is found."""
@@ -433,7 +441,7 @@ class SystemTest(unittest.TestCase):
result = run_tests(basedir, '--mode=Release')
self.assertIn('execution mode (release) for release is inconsistent '
'with build config (debug)', result.stdout, result)
- self.assertEqual(1, result.returncode, result)
+ self.assertEqual(5, result.returncode, result)
def testInconsistentArch(self):
"""Test failing run when attempting to wrongly override the arch."""
@@ -442,13 +450,13 @@ class SystemTest(unittest.TestCase):
self.assertIn(
'--arch value (ia32) inconsistent with build config (x64).',
result.stdout, result)
- self.assertEqual(1, result.returncode, result)
+ self.assertEqual(5, result.returncode, result)
def testWrongVariant(self):
"""Test using a bogus variant."""
with temp_base() as basedir:
result = run_tests(basedir, '--mode=Release', '--variants=meh')
- self.assertEqual(1, result.returncode, result)
+ self.assertEqual(5, result.returncode, result)
def testModeFromBuildConfig(self):
"""Test auto-detection of mode from build config."""
@@ -457,6 +465,7 @@ class SystemTest(unittest.TestCase):
self.assertIn('Running tests for x64.release', result.stdout, result)
self.assertEqual(0, result.returncode, result)
+ @unittest.skip("not available with test processors")
def testReport(self):
"""Test the report feature.
@@ -475,6 +484,7 @@ class SystemTest(unittest.TestCase):
result.stdout, result)
self.assertEqual(1, result.returncode, result)
+ @unittest.skip("not available with test processors")
def testWarnUnusedRules(self):
"""Test the unused-rules feature."""
with temp_base() as basedir:
@@ -489,6 +499,7 @@ class SystemTest(unittest.TestCase):
self.assertIn( 'Unused rule: regress/', result.stdout, result)
self.assertEqual(1, result.returncode, result)
+ @unittest.skip("not available with test processors")
def testCatNoSources(self):
"""Test printing sources, but the suite's tests have none available."""
with temp_base() as basedir:
@@ -506,7 +517,7 @@ class SystemTest(unittest.TestCase):
def testPredictableProc(self):
self.testPredictable(infra_staging=True)
- def testPredictable(self, infra_staging=False):
+ def testPredictable(self, infra_staging=True):
"""Test running a test in verify-predictable mode.
The test will fail because of missing allocation output. We verify that and
@@ -547,7 +558,10 @@ class SystemTest(unittest.TestCase):
# timeout was used.
self.assertEqual(0, result.returncode, result)
- def testRandomSeedStressWithDefault(self):
+ def testRandomSeedStressWithDefaultProc(self):
+ self.testRandomSeedStressWithDefault(infra_staging=True)
+
+ def testRandomSeedStressWithDefault(self, infra_staging=True):
"""Test using random-seed-stress feature has the right number of tests."""
with temp_base() as basedir:
result = run_tests(
@@ -557,8 +571,13 @@ class SystemTest(unittest.TestCase):
'--variants=default',
'--random-seed-stress-count=2',
'sweet/bananas',
+ infra_staging=infra_staging,
)
- self.assertIn('Running 2 tests', result.stdout, result)
+ if infra_staging:
+ self.assertIn('Running 1 base tests', result.stdout, result)
+ self.assertIn('2 tests ran', result.stdout, result)
+ else:
+ self.assertIn('Running 2 tests', result.stdout, result)
self.assertEqual(0, result.returncode, result)
def testRandomSeedStressWithSeed(self):
@@ -573,7 +592,8 @@ class SystemTest(unittest.TestCase):
'--random-seed=123',
'sweet/strawberries',
)
- self.assertIn('Running 2 tests', result.stdout, result)
+ self.assertIn('Running 1 base tests', result.stdout, result)
+ self.assertIn('2 tests ran', result.stdout, result)
# We use a failing test so that the command is printed and we can verify
# that the right random seed was passed.
self.assertIn('--random-seed=123', result.stdout, result)
@@ -598,7 +618,8 @@ class SystemTest(unittest.TestCase):
)
# Both tests are either marked as running in only default or only
# slow variant.
- self.assertIn('Running 2 tests', result.stdout, result)
+ self.assertIn('Running 2 base tests', result.stdout, result)
+ self.assertIn('2 tests ran', result.stdout, result)
self.assertEqual(0, result.returncode, result)
def testStatusFilePresubmit(self):
@@ -611,7 +632,7 @@ class SystemTest(unittest.TestCase):
def testDotsProgressProc(self):
self.testDotsProgress(infra_staging=True)
- def testDotsProgress(self, infra_staging=False):
+ def testDotsProgress(self, infra_staging=True):
with temp_base() as basedir:
result = run_tests(
basedir,
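
Note on the changes to run_tests_test.py above: the recurring pattern is a thin *Proc twin per test that re-runs it against the staged test-processor pipeline, while the shared run_tests helper now passes --no-infra-staging explicitly when staging is off. The following is only a rough sketch of that pattern; the test name comes from the mock suite in this diff, but the method name and its body are illustrative, not code from the commit.

# Sketch of the delegation pattern used in run_tests_test.py.
# 'testExample' is a placeholder; assertions are kept minimal on purpose.
def testExampleProc(self):
  self.testExample(infra_staging=True)

def testExample(self, infra_staging=True):
  """Runs one mocked test under either the old or the staged pipeline."""
  with temp_base() as basedir:
    result = run_tests(
        basedir,
        '--mode=Release',
        '--progress=verbose',
        '--variants=default',
        'sweet/bananas',
        infra_staging=infra_staging,  # forwarded as --infra-staging / --no-infra-staging
    )
    self.assertEqual(0, result.returncode, result)
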
diff --git a/deps/v8/tools/unittests/testdata/expected_test_results1.json b/deps/v8/tools/unittests/testdata/expected_test_results1.json
index 172b87a5d6..e889ecabce 100644
--- a/deps/v8/tools/unittests/testdata/expected_test_results1.json
+++ b/deps/v8/tools/unittests/testdata/expected_test_results1.json
@@ -4,15 +4,15 @@
"mode": "release",
"results": [
{
- "command": "/usr/bin/python out/Release/d8_mocked.py --random-seed=123 strawberries --nohard-abort",
+ "command": "/usr/bin/python out/Release/d8_mocked.py strawberries --random-seed=123 --nohard-abort",
"duration": 1,
"exit_code": 1,
"expected": [
"PASS"
],
"flags": [
- "--random-seed=123",
"strawberries",
+ "--random-seed=123",
"--nohard-abort"
],
"name": "sweet/strawberries",
@@ -20,20 +20,20 @@
"result": "FAIL",
"run": 1,
"stderr": "",
- "stdout": "--random-seed=123 strawberries --nohard-abort\n",
+ "stdout": "strawberries --random-seed=123 --nohard-abort\n",
"target_name": "d8_mocked.py",
"variant": "default"
},
{
- "command": "/usr/bin/python out/Release/d8_mocked.py --random-seed=123 strawberries --nohard-abort",
+ "command": "/usr/bin/python out/Release/d8_mocked.py strawberries --random-seed=123 --nohard-abort",
"duration": 1,
"exit_code": 1,
"expected": [
"PASS"
],
"flags": [
- "--random-seed=123",
"strawberries",
+ "--random-seed=123",
"--nohard-abort"
],
"name": "sweet/strawberries",
@@ -41,20 +41,20 @@
"result": "FAIL",
"run": 2,
"stderr": "",
- "stdout": "--random-seed=123 strawberries --nohard-abort\n",
+ "stdout": "strawberries --random-seed=123 --nohard-abort\n",
"target_name": "d8_mocked.py",
"variant": "default"
},
{
- "command": "/usr/bin/python out/Release/d8_mocked.py --random-seed=123 strawberries --nohard-abort",
+ "command": "/usr/bin/python out/Release/d8_mocked.py strawberries --random-seed=123 --nohard-abort",
"duration": 1,
"exit_code": 1,
"expected": [
"PASS"
],
"flags": [
- "--random-seed=123",
"strawberries",
+ "--random-seed=123",
"--nohard-abort"
],
"name": "sweet/strawberries",
@@ -62,40 +62,40 @@
"result": "FAIL",
"run": 3,
"stderr": "",
- "stdout": "--random-seed=123 strawberries --nohard-abort\n",
+ "stdout": "strawberries --random-seed=123 --nohard-abort\n",
"target_name": "d8_mocked.py",
"variant": "default"
}
],
"slowest_tests": [
{
- "command": "/usr/bin/python out/Release/d8_mocked.py --random-seed=123 strawberries --nohard-abort",
+ "command": "/usr/bin/python out/Release/d8_mocked.py strawberries --random-seed=123 --nohard-abort",
"duration": 1,
"flags": [
- "--random-seed=123",
"strawberries",
+ "--random-seed=123",
"--nohard-abort"
],
"marked_slow": true,
"name": "sweet/strawberries"
},
{
- "command": "/usr/bin/python out/Release/d8_mocked.py --random-seed=123 strawberries --nohard-abort",
+ "command": "/usr/bin/python out/Release/d8_mocked.py strawberries --random-seed=123 --nohard-abort",
"duration": 1,
"flags": [
- "--random-seed=123",
"strawberries",
+ "--random-seed=123",
"--nohard-abort"
],
"marked_slow": true,
"name": "sweet/strawberries"
},
{
- "command": "/usr/bin/python out/Release/d8_mocked.py --random-seed=123 strawberries --nohard-abort",
+ "command": "/usr/bin/python out/Release/d8_mocked.py strawberries --random-seed=123 --nohard-abort",
"duration": 1,
"flags": [
- "--random-seed=123",
"strawberries",
+ "--random-seed=123",
"--nohard-abort"
],
"marked_slow": true,
diff --git a/deps/v8/tools/unittests/testdata/expected_test_results2.json b/deps/v8/tools/unittests/testdata/expected_test_results2.json
index 7fcfe47f71..cdb4766e95 100644
--- a/deps/v8/tools/unittests/testdata/expected_test_results2.json
+++ b/deps/v8/tools/unittests/testdata/expected_test_results2.json
@@ -4,15 +4,15 @@
"mode": "release",
"results": [
{
- "command": "/usr/bin/python out/Release/d8_mocked.py --random-seed=123 bananaflakes --nohard-abort",
+ "command": "/usr/bin/python out/Release/d8_mocked.py bananaflakes --random-seed=123 --nohard-abort",
"duration": 1,
"exit_code": 1,
"expected": [
"PASS"
],
"flags": [
- "--random-seed=123",
"bananaflakes",
+ "--random-seed=123",
"--nohard-abort"
],
"name": "sweet/bananaflakes",
@@ -20,20 +20,20 @@
"result": "FAIL",
"run": 1,
"stderr": "",
- "stdout": "--random-seed=123 bananaflakes --nohard-abort\n",
+ "stdout": "bananaflakes --random-seed=123 --nohard-abort\n",
"target_name": "d8_mocked.py",
"variant": "default"
},
{
- "command": "/usr/bin/python out/Release/d8_mocked.py --random-seed=123 bananaflakes --nohard-abort",
+ "command": "/usr/bin/python out/Release/d8_mocked.py bananaflakes --random-seed=123 --nohard-abort",
"duration": 1,
"exit_code": 0,
"expected": [
"PASS"
],
"flags": [
- "--random-seed=123",
"bananaflakes",
+ "--random-seed=123",
"--nohard-abort"
],
"name": "sweet/bananaflakes",
@@ -41,29 +41,29 @@
"result": "PASS",
"run": 2,
"stderr": "",
- "stdout": "--random-seed=123 bananaflakes --nohard-abort\n",
+ "stdout": "bananaflakes --random-seed=123 --nohard-abort\n",
"target_name": "d8_mocked.py",
"variant": "default"
}
],
"slowest_tests": [
{
- "command": "/usr/bin/python out/Release/d8_mocked.py --random-seed=123 bananaflakes --nohard-abort",
+ "command": "/usr/bin/python out/Release/d8_mocked.py bananaflakes --random-seed=123 --nohard-abort",
"duration": 1,
"flags": [
- "--random-seed=123",
"bananaflakes",
+ "--random-seed=123",
"--nohard-abort"
],
"marked_slow": false,
"name": "sweet/bananaflakes"
},
{
- "command": "/usr/bin/python out/Release/d8_mocked.py --random-seed=123 bananaflakes --nohard-abort",
+ "command": "/usr/bin/python out/Release/d8_mocked.py bananaflakes --random-seed=123 --nohard-abort",
"duration": 1,
"flags": [
- "--random-seed=123",
"bananaflakes",
+ "--random-seed=123",
"--nohard-abort"
],
"marked_slow": false,
diff --git a/deps/v8/tools/unittests/testdata/testroot1/test/sweet/testcfg.py b/deps/v8/tools/unittests/testdata/testroot1/test/sweet/testcfg.py
index 115471ac72..1fcf2864b6 100644
--- a/deps/v8/tools/unittests/testdata/testroot1/test/sweet/testcfg.py
+++ b/deps/v8/tools/unittests/testdata/testroot1/test/sweet/testcfg.py
@@ -10,7 +10,7 @@ from testrunner.local import testsuite
from testrunner.objects import testcase
class TestSuite(testsuite.TestSuite):
- def ListTests(self, context):
+ def ListTests(self):
return map(
self._create_test,
['bananas', 'apples', 'cherries', 'strawberries', 'raspberries'],
@@ -24,8 +24,8 @@ class TestCase(testcase.TestCase):
def get_shell(self):
return 'd8_mocked.py'
- def _get_files_params(self, ctx):
+ def _get_files_params(self):
return [self.name]
-def GetSuite(name, root):
- return TestSuite(name, root)
+def GetSuite(*args, **kwargs):
+ return TestSuite(*args, **kwargs)
diff --git a/deps/v8/tools/unittests/testdata/testroot2/test/sweet/testcfg.py b/deps/v8/tools/unittests/testdata/testroot2/test/sweet/testcfg.py
index 9407769b35..a986af5c2f 100644
--- a/deps/v8/tools/unittests/testdata/testroot2/test/sweet/testcfg.py
+++ b/deps/v8/tools/unittests/testdata/testroot2/test/sweet/testcfg.py
@@ -10,7 +10,7 @@ from testrunner.local import testsuite
from testrunner.objects import testcase
class TestSuite(testsuite.TestSuite):
- def ListTests(self, context):
+ def ListTests(self):
return map(
self._create_test,
['bananaflakes'],
@@ -24,8 +24,8 @@ class TestCase(testcase.TestCase):
def get_shell(self):
return 'd8_mocked.py'
- def _get_files_params(self, ctx):
+ def _get_files_params(self):
return [self.name]
-def GetSuite(name, root):
- return TestSuite(name, root)
+def GetSuite(*args, **kwargs):
+ return TestSuite(*args, **kwargs)
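
For reference, a suite configuration written against the updated API shown above (ListTests and _get_files_params without context arguments, GetSuite forwarding all arguments) looks roughly like the sketch below. The test names are placeholders, and the hook wiring TestSuite to TestCase is omitted because it is not part of this diff.

# Minimal sketch of a testcfg.py under the new testrunner API.
from testrunner.local import testsuite
from testrunner.objects import testcase


class TestSuite(testsuite.TestSuite):
  def ListTests(self):
    # No 'context' parameter any more; tests are built from plain names.
    return map(self._create_test, ['pears', 'plums'])


class TestCase(testcase.TestCase):
  def get_shell(self):
    return 'd8_mocked.py'

  def _get_files_params(self):
    # No 'ctx' parameter any more.
    return [self.name]


def GetSuite(*args, **kwargs):
  # Forward whatever the runner passes instead of pinning (name, root).
  return TestSuite(*args, **kwargs)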