-rw-r--r--  pylint/lint/run.py                         |  2
-rw-r--r--  tests/regrtest_data/fail_on.py             | 12
-rw-r--r--  tests/regrtest_data/fail_on_info_only.py   | 11
-rw-r--r--  tests/test_self.py                         | 32
4 files changed, 56 insertions, 1 deletion
diff --git a/pylint/lint/run.py b/pylint/lint/run.py
index 6d674e429..1f6d6b71c 100644
--- a/pylint/lint/run.py
+++ b/pylint/lint/run.py
@@ -398,7 +398,7 @@ group are mutually exclusive.",
                     # We need to make sure we return a failing exit code in this case.
                     # So we use self.linter.msg_status if that is non-zero, otherwise we just return 1.
                     sys.exit(self.linter.msg_status or 1)
-                elif score_value and score_value >= linter.config.fail_under:
+                elif score_value >= linter.config.fail_under:
                     sys.exit(0)
                 else:
                     sys.exit(self.linter.msg_status)
diff --git a/tests/regrtest_data/fail_on.py b/tests/regrtest_data/fail_on.py
new file mode 100644
index 000000000..6f22e5013
--- /dev/null
+++ b/tests/regrtest_data/fail_on.py
@@ -0,0 +1,12 @@
+"""
+    Pylint score: -1.67
+"""
+import nonexistent
+# pylint: disable=broad-except
+
+
+def loop():
+    count = 0
+    for _ in range(5):
+        count += 1
+        print(count)
diff --git a/tests/regrtest_data/fail_on_info_only.py b/tests/regrtest_data/fail_on_info_only.py
new file mode 100644
index 000000000..c6baffed0
--- /dev/null
+++ b/tests/regrtest_data/fail_on_info_only.py
@@ -0,0 +1,11 @@
+"""
+    Pylint score: -1.67
+"""
+# pylint: disable=broad-except
+
+def loop():
+    """Run a loop."""
+    count = 0
+    for _ in range(5):
+        count += 1
+        print(count)
diff --git a/tests/test_self.py b/tests/test_self.py
index a921248c8..ef70cdf33 100644
--- a/tests/test_self.py
+++ b/tests/test_self.py
@@ -1136,6 +1136,38 @@ class TestRunTC:
         output_file = "thisdirectorydoesnotexit/output.txt"
         self._runtest([path, f"--output={output_file}"], code=32)
 
+    @pytest.mark.parametrize("args, expected", [
+        ([], 0),
+        (["--enable=C"], 0),
+        (["--fail-on=superfluous-parens"], 0),
+        (["--fail-on=import-error"], 6),
+        (["--fail-on=unused-import"], 6),
+        (["--fail-on=unused-import", "--enable=C"], 22),
+        (["--fail-on=missing-function-docstring"], 22),
+        (["--fail-on=useless-suppression"], 6),
+        (["--fail-on=useless-suppression", "--enable=C"], 22),
+    ])
+    def test_fail_on_exit_code(self, args, expected):
+        path = join(HERE, "regrtest_data", "fail_on.py")
+        # We set fail-under to be something very low so that even with the warnings
+        # and errors that are generated they don't affect the exit code.
+        self._runtest([path, "--fail-under=-10"] + args, code=expected)
+
+    @pytest.mark.parametrize("args, expected", [
+        ([], 0),
+        (["--enable=C"], 0),
+        (["--fail-on=superfluous-parens"], 0),
+        (["--fail-on=import-error"], 0),
+        (["--fail-on=unused-import"], 0),
+        (["--fail-on=unused-import", "--enable=C"], 0),
+        (["--fail-on=missing-function-docstring"], 0),
+        (["--fail-on=useless-suppression"], 1),
+        (["--fail-on=useless-suppression", "--enable=C"], 1),
+    ])
+    def test_fail_on_info_only_exit_code(self, args, expected):
+        path = join(HERE, "regrtest_data", "fail_on_info_only.py")
+        self._runtest([path] + args, code=expected)
+
     @pytest.mark.parametrize(
         "output_format, expected_output",
         [
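A note on the run.py hunk above: the old guard `score_value and score_value >= linter.config.fail_under` short-circuits when the score is exactly 0, because 0 is falsy in Python, so a zero-score run fell through to `sys.exit(self.linter.msg_status)` even when it met the --fail-under threshold. The following is a minimal sketch of that difference, not pylint code; it assumes `score_value` is always numeric at this point (a `None` score would make the new comparison raise TypeError):

def old_guard(score_value, fail_under):
    # Short-circuits on a falsy score: 0 and 0.0 never reach the comparison.
    return bool(score_value and score_value >= fail_under)

def new_guard(score_value, fail_under):
    # Plain comparison: a score of 0 is judged against fail_under like any other.
    return score_value >= fail_under

assert old_guard(0.0, -10) is False  # old: exits with msg_status despite a passing score
assert new_guard(0.0, -10) is True   # new: exits 0, as --fail-under=-10 intends
assert old_guard(-1.67, -10) and new_guard(-1.67, -10)  # non-zero scores agree

The expected codes in test_fail_on_exit_code follow pylint's bit-encoded exit status (1 fatal, 2 error, 4 warning, 8 refactor, 16 convention, 32 usage error): 6 is error plus warning (2 | 4), and 22 adds convention (2 | 4 | 16) once --enable=C or a convention-level --fail-on enables those messages. The info-only variant exits 1 because info messages set no status bit, so the `msg_status or 1` branch in the hunk above returns 1 when --fail-on=useless-suppression trips.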