author     Steven Knight <knight@baldmt.com>   2006-02-12 21:55:13 +0000
committer  Steven Knight <knight@baldmt.com>   2006-02-12 21:55:13 +0000
commit     0ccc7750121efbf4ef8c94502591bf347dd828f4 (patch)
tree       a826079889d55dddca0e911b01fbba8bde08a616
parent     19563d915729176a6349021295bef4f4f830b15d (diff)
download   scons-0ccc7750121efbf4ef8c94502591bf347dd828f4.tar.gz
Add a -t (timing) option to runtest.py. (Baptiste Lepilleur) Add sections to the README to describe better how to change and execute SCons in-place.
-rw-r--r--  README      | 272
-rw-r--r--  config      |   6
-rw-r--r--  runtest.py  |  28
3 files changed, 287 insertions, 19 deletions
diff --git a/README b/README
index 5a5aeb7a..d4d27575 100644
--- a/README
+++ b/README
@@ -2,21 +2,38 @@
SCons - a software construction tool
-Welcome to the SCons development tree. The real purpose of this tree is
-to package SCons for production distribution in a variety of formats,
+Welcome to the SCons development tree. The real purpose of this tree
+is to package SCons for production distribution in a variety of formats,
not just to hack SCons code.
-To that extent, the normal development cycle (enforced by Aegis) is not
-to test the code directly, but to package SCons, unpack the package,
-"install" SCons in a test subdirectory, and then to run the tests
-against the unpacked and installed software. This helps eliminate
-problems caused by, for example, failure to update the list of files to
-be packaged.
+If all you want to do is install and run SCons, it will be easier for you
+to download and install the scons-{version}.tar.gz or scons-{version}.zip
+package rather than to work with the packaging logic in this tree.
-Note that if all you want to do is install and run SCons, it
-will probably be easier for you to download and install the
-scons-{version}.tar.gz or scons-{version}.zip package rather than to
-work with the packaging logic in this tree.
+To the extent that this tree is about building SCons packages, the
+*full* development cycle (enforced by Aegis) is not to test the code
+directly, but to package SCons, unpack the package, "install" SCons in
+a test subdirectory, and then to run the tests against the unpacked and
+installed software. This helps eliminate problems caused by, for example,
+failure to update the list of files to be packaged.
+
+For just working on an individual change to the SCons source, however,
+you don't need to build or install SCons; you *can* edit and execute
+SCons in-place. See the following sections for more information:
+
+ MAKING CHANGES
+ How to edit and execute SCons in-place.
+
+ DEBUGGING
+ Tips for debugging problems in SCons.
+
+ TESTING
+ How to use the automated regression tests.
+
+ DEVELOPMENT WORKFLOW
+ An example of how to put the edit/execute/test pieces
+ together in a reasonable development workflow.
LATEST VERSION
@@ -86,6 +103,14 @@ In this case, your options are:
INSTALLATION
============
+ NOTE: You don't need to build SCons packages or install SCons if
+ you just want to work on developing a patch. See the sections
+ about MAKING CHANGES and TESTING below if you just want to submit
+ a bug fix or some new functionality. See the sections below about
+ BUILDING PACKAGES and TESTING PACKAGES if your enhancement involves
+ changing the way in which SCons is packaged and/or installed on an
+ end-user system.
+
Assuming your system satisfies the installation requirements in the
previous section, install SCons from this package by first populating
the build/scons/ subdirectory. (For an easier way to install SCons,
@@ -201,6 +226,117 @@ $HOME--that is, the scons script itself $HOME/bin and the associated
library in $HOME/lib/scons, for example.
+MAKING CHANGES
+==============
+
+Because SCons is implemented in a scripting language, you don't need to
+build it in order to make changes and test them.
+
+Virtually all of the SCons functionality exists in the "build engine,"
+the src/engine/SCons subdirectory hierarchy that contains all of the
+modules that make up SCons. The src/script/scons.py wrapper script exists
+mainly to find the appropriate build engine library and then execute it.
+
+In order to make your own changes locally and test them by hand, simply
+edit modules in the local src/engine/SCons and set the SCONS_LIB_DIR
+environment variable to point to that directory. Here is one way you
+can set up environment variables to do this on a UNIX or Linux system:
+
+ $ export MYSCONS=`pwd`/src
+ $ export SCONS_LIB_DIR=$MYSCONS
+ $ python $MYSCONS/script/scons.py [arguments]
+
+Or on Windows:
+
+ C:\scons>set MYSCONS=%cd%\src
+ C:\scons>set SCONS_LIB_DIR=%MYSCONS%
+ C:\scons>python %MYSCONS%\script\scons.py [arguments]
+
+You can use the -C option to have SCons change directory to another
+location where you already have a build configuration set up (for example,
+if the SCons configuration for your project seems to be blocked by
+an SCons bug, and you want to see if a patch you make actually fixes
+that bug):
+
+ $ python $MYSCONS/script/scons.py -C /some/other/location [arguments]
+
+Lastly, if you want to be able to just execute your modified version
+of SCons from the command line, you can make it executable and add its
+directory to your $PATH like so:
+
+ $ chmod 755 src/script/scons.py
+ $ export PATH=$PATH:`pwd`/src/script
+
+You should then be able to run this version of SCons by just typing
+"scons.py" at your UNIX or Linux command line.
+
+Note that the regular SCons development process makes heavy use of
+automated testing. See the TESTING and DEVELOPMENT WORKFLOW sections
+below for more information about the automated regression tests and how
+they can be used in a development cycle to validate that your changes
+don't break existing functionality.
+
+
+DEBUGGING
+=========
+
+Python comes with a good interactive debugger. When debugging changes
+by hand (i.e., when not using the automated tests), you can invoke SCons
+under control of the Python debugger by specifying the --debug=pdb option:
+
+ $ scons --debug=pdb [arguments]
+ > /home/knight/SCons/src/engine/SCons/Script/Main.py(927)_main()
+ -> default_warnings = [ SCons.Warnings.CorruptSConsignWarning,
+ (Pdb)
+
+Once in the debugger, you can set breakpoints at lines in files in the
+build engine modules by providing the path name of the file relative to
+the src/engine subdirectory (that is, including SCons/ as the first
+directory component):
+
+ (Pdb) b SCons/Tool/msvc.py:158
+
+The debugger also supports single stepping, stepping into functions,
+printing variables, etc.
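
For example, a short illustrative session might look like the following
(the variable name "env" is just a placeholder; print whatever is in scope
at your breakpoint):

    (Pdb) n          # execute the next line
    (Pdb) s          # step into the next function call
    (Pdb) p env      # print the value of a variable
    (Pdb) c          # continue until the next breakpoint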
+
+Trying to debug problems found by running the automated tests (see the
+TESTING section, below) is more difficult, because the test automation
+harness re-invokes SCons and captures output. Consequently, there isn't an
+easy way to invoke the Python debugger in a useful way on any particular
+SCons call within a test script.
+
+The most effective technique for debugging problems that occur during an
+automated test is the good old tried-and-true approach of adding
+statements to print tracing information. But note that you can't just
+use a "print" statement, or even "sys.stdout.write()", because those
+change the SCons output, and the automated tests usually look for matches
+of specific output strings to decide if a given SCons invocation passes
+the test.
+
+To deal with this, SCons supports a Trace() function that (by default)
+will print messages to your console screen ("/dev/tty" on UNIX or Linux,
+"con" on Windows). By adding Trace() calls to the SCons source code:
+
+ def sample_method(self, value):
+ from SCons.Debug import Trace
+ Trace('called sample_method(%s, %s)\n' % (self, value))
+
+You can then run automated tests that print any arbitrary information
+you wish about what's going on inside SCons, without interfering with
+the test automation.
+
+The Trace() function can also redirect its output to a file, rather than
+the screen:
+
+ def sample_method(self, value):
+ from SCons.Debug import Trace
+ Trace('called sample_method(%s, %s)\n' % (self, value),
+ file='trace.out')
+
+Where the Trace() function sends its output is stateful: once you use the
+"file=" argument, all subsequent calls to Trace() send their output to
+the same file, until another call with a "file=" argument is reached.
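
For example, in the following illustrative sketch (based on the behavior
described above), the first message goes to the screen and both of the
later messages go to trace.out:

    from SCons.Debug import Trace
    Trace('this goes to the screen\n')
    Trace('this goes to trace.out\n', file='trace.out')
    Trace('this also goes to trace.out\n')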
+
+
TESTING
=======
@@ -221,8 +357,21 @@ You may specifically list one or more tests to be run:
$ python runtest.py test/option-j.py test/Program.py
-Alternatively, the runtest.py script takes a -a option that searches
-the tree for all of the tests and runs them:
+You can also use the -f option to execute just the tests listed in a specified
+text file:
+
+ $ cat testlist.txt
+ test/option-j.py
+ test/Program.py
+ $ python runtest.py -f testlist.txt
+
+One test must be listed per line, and any lines that begin with '#'
+will be ignored (allowing you, for example, to comment out tests that
+are currently passing and then uncomment all of the tests in the file
+for a final validation run).
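
For example (reusing the hypothetical test list from above), commenting out
the first entry makes runtest.py execute only test/Program.py:

    $ cat testlist.txt
    # test/option-j.py
    test/Program.py
    $ python runtest.py -f testlist.txt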
+
+The runtest.py script also takes a -a option that searches the tree for
+all of the tests and runs them:
$ python runtest.py -a
@@ -237,6 +386,101 @@ unpacked packages in the build/test-*/ subdirectories. See the "TESTING
PACKAGES" section below.
+DEVELOPMENT WORKFLOW
+====================
+
+ CAVEAT: The point of this section isn't to describe one dogmatic
+ workflow. Just running the test suite can be time-consuming, and
+ getting a patch to pass all of the tests can be more so. If you're
+ genuinely blocked, it may make more sense to submit a patch with
+ a note about which tests still fail, and how. Someone else may be
+ able to take your "initial draft" and figure out how to improve it
+ to fix the rest of the tests. So there's plenty of room for use of
+ good judgement.
+
+The various techniques described in the above sections can be combined
+to create simple and effective workflows that allow you to validate
+that patches you submit to SCons don't break existing functionality and
+have adequate testing, thereby increasing the speed with which they can
+be integrated.
+
+For example, suppose your project's SCons configuration is blocked by
+an SCons bug, and you decide you want to fix it and submit the patch.
+Here's one possible way to go about doing that (using UNIX/Linux as the
+development platform; Windows users can translate as appropriate):
+
+ -- Change to the top of your checked-out SCons tree and set
+ $SCONS_LIB_DIR to point to its build engine:
+
+ $ export SCONS_LIB_DIR=`pwd`/src
+
+ -- Confirm that the bug still exists in this version of SCons
+ by using the -C option to run the broken build:
+
+ $ python src/script/scons.py -C /home/me/broken_project .
+
+ -- Fix the bug in SCons by editing appropriate module files
+ underneath src/engine/SCons.
+
+ -- Confirm that you've fixed the bug affecting your project:
+
+ $ python src/script/scons.py -C /home/me/broken_project .
+
+ -- Test to see if your fix had any unintended side effects
+ that break existing functionality:
+
+ $ python runtest.py -a
+
+ Be patient; there are more than 500 test scripts in the
+ whole suite.
+
+ If any test scripts fail, they will be listed in a summary at
+ the end of the run. Some test scripts may also report NO RESULT
+ because (for example) your local system is the wrong type or
+ doesn't have some installed utilities necessary to run the
+ script. In general, you can ignore the NO RESULT list.
+
+ -- Cut-and-paste the list of failed tests into a file:
+
+ $ cat > failed.txt
+ test/failed-test-1.py
+ test/failed-test-2.py
+ test/failed-test-3.py
+ ^D
+ $
+
+ -- Now debug the test failures and fix them, either by changing
+ SCons, or by making necessary changes to the tests (if, for
+ example, you have a strong reason to change functionality, or
+ if you find that the bug really is in the test script itself).
+ After each change, use the runtest.py -f option to examine the
+ effects of the change on the subset of tests that originally
+ failed:
+
+ $ [edit]
+ $ python runtest.py -f failed.txt
+
+ Repeat this until all of the tests that originally failed
+ now pass.
+
+ -- Now you need to go back and validate that any changes you
+ made while getting the tests to pass didn't break the fix you
+ originally put in, or introduce any *additional* unintended side
+ effects that broke other tests:
+
+ $ python src/script/scons.py -C /home/me/broken_project .
+ $ python runtest.py -a
+
+ If you find any newly-broken tests, add them to your "failed.txt"
+ file and go back to the previous step.
+
+Of course, the above is only one suggested workflow. In practice, there's
+a lot of room for judgment and experience to make things go quicker.
+For example, if you're making a change to just the Java support, you
+might start looking for regressions by just running the test/Java/*.py
+tests instead of running all of "runtest.py -a".
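
For example, on a UNIX or Linux shell that expands the wildcard, that
narrower regression check might look like:

    $ python runtest.py test/Java/*.py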
+
+
BUILDING PACKAGES
=================
diff --git a/config b/config
index 17aa2d33..0e83aad3 100644
--- a/config
+++ b/config
@@ -247,7 +247,7 @@ diff_command =
test $? -le 1";
/*
- * We use a runtest.pl script to execute tests. This takes care of
+ * We use a runtest.py script to execute tests. This takes care of
* massaging environment variables and the like to test against the
* unpacked package in the current directory.
*
@@ -258,9 +258,9 @@ diff_command =
* is set appropriately during a baseline test. So we just use the
* proper aesub variable to comment out the expanded $spe.
*/
-test_command = "python1.5 ${Source runtest.py Absolute} -p tar-gz -v ${SUBSTitute '\\.[CD][0-9]+$' '' ${VERsion}} -q ${File_Name}";
+test_command = "python1.5 ${Source runtest.py Absolute} -p tar-gz -t -v ${SUBSTitute '\\.[CD][0-9]+$' '' ${VERsion}} -q ${File_Name}";
-batch_test_command = "python1.5 ${Source runtest.py Absolute} -p tar-gz -v ${SUBSTitute '\\.[CD][0-9]+$' '' ${VERsion}} -o ${Output} --aegis ${File_Names} ${COMment $spe}";
+batch_test_command = "python1.5 ${Source runtest.py Absolute} -p tar-gz -t -v ${SUBSTitute '\\.[CD][0-9]+$' '' ${VERsion}} -o ${Output} --aegis ${File_Names} ${COMment $spe}";
new_test_filename = "test/CHANGETHIS.py";
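
The config change above makes the Aegis test commands pass the new -t flag
to runtest.py. Outside of Aegis, the same timing output can be requested
directly on the command line (an illustrative invocation; any test scripts
will do):

    $ python runtest.py -t test/option-j.py test/Program.py

With -t, runtest.py prints a "Test execution time" line after each test and
a total execution time for the whole run at the end.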
diff --git a/runtest.py b/runtest.py
index 964b4410..e96ed831 100644
--- a/runtest.py
+++ b/runtest.py
@@ -58,6 +58,8 @@
# command line it will execute before
# executing it. This suppresses that print.
#
+# -t Print the execution time of each test.
+#
# -X The scons "script" is an executable; don't
# feed it to Python.
#
@@ -84,6 +86,7 @@ import re
import stat
import string
import sys
+import time
all = 0
debug = ''
@@ -97,6 +100,7 @@ scons_exec = None
outputfile = None
testlistfile = None
version = ''
+print_time = lambda fmt, time: None
if os.name == 'java':
python = os.path.join(sys.prefix, 'jython')
@@ -134,17 +138,18 @@ Options:
zip .zip distribution
--passed Summarize which tests passed.
-q, --quiet Don't print the test being executed.
+ -t, --time Print test execution time.
-v version Specify the SCons version.
-X Test script is executable, don't feed to Python.
-x SCRIPT, --exec SCRIPT Test SCRIPT.
--xml Print results in SCons XML format.
"""
-opts, args = getopt.getopt(sys.argv[1:], "adf:ho:P:p:qv:Xx:",
+opts, args = getopt.getopt(sys.argv[1:], "adf:ho:P:p:qv:Xx:t",
['all', 'aegis',
'debug', 'file=', 'help', 'output=',
'package=', 'passed', 'python=', 'quiet',
- 'version=', 'exec=',
+ 'version=', 'exec=', 'time',
'xml'])
for o, a in opts:
@@ -171,6 +176,8 @@ for o, a in opts:
python = a
elif o == '-q' or o == '--quiet':
printcommand = 0
+ elif o == '-t' or o == '--time':
+ print_time = lambda fmt, time: sys.stdout.write(fmt % time)
elif o == '-v' or o == '--version':
version = a
elif o == '-X':
@@ -291,8 +298,10 @@ class XML(PopenExecutor):
f.write(' <exit_status>%s</exit_status>\n' % self.status)
f.write(' <stdout>%s</stdout>\n' % self.stdout)
f.write(' <stderr>%s</stderr>\n' % self.stderr)
+ f.write(' <time>%.1f</time>\n' % self.test_time)
f.write(' </test>\n')
def footer(self, f):
+ f.write(' <time>%.1f</time>\n' % self.total_time)
f.write(' </results>\n')
format_class = {
@@ -494,6 +503,15 @@ class Unbuffered:
sys.stdout = Unbuffered(sys.stdout)
+# time.clock() is the suggested interface for doing benchmarking timings,
+# but time.time() does a better job on Linux systems, so let that be
+# the non-Windows default.
+if sys.platform == 'win32':
+ time_func = time.clock
+else:
+ time_func = time.time
+
+total_start_time = time_func()
for t in tests:
t.command_args = [python, '-tt']
if debug:
@@ -502,7 +520,13 @@ for t in tests:
t.command_str = string.join(map(escape, t.command_args), " ")
if printcommand:
sys.stdout.write(t.command_str + "\n")
+ test_start_time = time_func()
t.execute()
+ t.test_time = time_func() - test_start_time
+ print_time("Test execution time: %.1f seconds\n", t.test_time)
+if len(tests) > 0:
+ tests[0].total_time = time_func() - total_start_time
+ print_time("Total execution time for all tests: %.1f seconds\n", tests[0].total_time)
passed = filter(lambda t: t.status == 0, tests)
fail = filter(lambda t: t.status == 1, tests)