author     Gary Oberbrunner <garyo@oberbrunner.com>  2013-09-22 13:08:12 -0400
committer  Gary Oberbrunner <garyo@oberbrunner.com>  2013-09-22 13:08:12 -0400
commit     895990cbd5007876709b93e6d3db2b6c069382ea
tree       b95b2144ccf82d8227cec025af152f4eadfa7282
parent     f094b77fc68787c61efdc9ca095098c88bf52373
download   scons-895990cbd5007876709b93e6d3db2b6c069382ea.tar.gz
Result of raw 2to3 run (2to3-2.7); checkpoint for python3 conversion.
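The diff below is the mechanical output of the 2to3 fixer, with no hand editing. As a rough sketch of the kind of invocation that produces such a tree-wide rewrite (the exact directories and options are an assumption, not recorded in the commit):

    2to3-2.7 --write --nobackups QMTest bench bin src test timings www

--write rewrites each file in place and --nobackups suppresses the .bak copies; a "raw" run like this applies 2to3's default fixer set (print statements, except-as clauses, octal literals, unicode/StringIO renames, and so on), which is what the changes below reflect.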
-rw-r--r--  QMTest/TestCmd.py  32
-rw-r--r--  QMTest/TestCmdTests.py  136
-rw-r--r--  QMTest/TestCommon.py  66
-rw-r--r--  QMTest/TestSCons.py  52
-rw-r--r--  QMTest/TestSConsMSVS.py  2
-rw-r--r--  QMTest/TestSCons_time.py  4
-rw-r--r--  QMTest/TestSConsign.py  2
-rw-r--r--  QMTest/scons_tdb.py  18
-rw-r--r--  bench/bench.py  8
-rw-r--r--  bench/env.__setitem__.py  16
-rw-r--r--  bench/is_types.py  12
-rw-r--r--  bench/timeit.py  24
-rw-r--r--  bin/Command.py  6
-rw-r--r--  bin/SConsDoc.py  28
-rw-r--r--  bin/SConsExamples.py  20
-rw-r--r--  bin/calibrate.py  8
-rw-r--r--  bin/caller-tree.py  10
-rw-r--r--  bin/docs-create-example-outputs.py  6
-rw-r--r--  bin/docs-update-generated.py  2
-rw-r--r--  bin/docs-validate.py  8
-rw-r--r--  bin/install_python.py  6
-rw-r--r--  bin/install_scons.py  10
-rw-r--r--  bin/linecount.py  30
-rw-r--r--  bin/memlogs.py  8
-rw-r--r--  bin/memoicmp.py  24
-rw-r--r--  bin/objcounts.py  13
-rw-r--r--  bin/scons-diff.py  14
-rw-r--r--  bin/scons-proc.py  14
-rw-r--r--  bin/scons-test.py  60
-rw-r--r--  bin/scons-unzip.py  2
-rw-r--r--  bin/scons_dev_master.py  6
-rwxr-xr-x  bin/svn-bisect.py  14
-rw-r--r--  bin/update-release-info.py  30
-rwxr-xr-x  bin/xmlagenda.py  2
-rw-r--r--  review.py  3611
-rwxr-xr-x [-rw-r--r--]  runtest.py  24
-rw-r--r--  src/engine/SCons/Action.py  47
-rw-r--r--  src/engine/SCons/ActionTests.py  12
-rw-r--r--  src/engine/SCons/Builder.py  20
-rw-r--r--  src/engine/SCons/BuilderTests.py  28
-rw-r--r--  src/engine/SCons/CacheDirTests.py  2
-rw-r--r--  src/engine/SCons/Debug.py  4
-rw-r--r--  src/engine/SCons/Defaults.py  11
-rw-r--r--  src/engine/SCons/DefaultsTests.py  2
-rw-r--r--  src/engine/SCons/Environment.py  59
-rw-r--r--  src/engine/SCons/EnvironmentTests.py  44
-rw-r--r--  src/engine/SCons/ErrorsTests.py  12
-rw-r--r--  src/engine/SCons/ExecutorTests.py  2
-rw-r--r--  src/engine/SCons/Job.py  4
-rw-r--r--  src/engine/SCons/Memoize.py  4
-rw-r--r--  src/engine/SCons/MemoizeTests.py  4
-rw-r--r--  src/engine/SCons/Node/FS.py  23
-rw-r--r--  src/engine/SCons/Node/FSTests.py  34
-rw-r--r--  src/engine/SCons/Node/__init__.py  12
-rw-r--r--  src/engine/SCons/Options/__init__.py  10
-rw-r--r--  src/engine/SCons/Platform/__init__.py  4
-rw-r--r--  src/engine/SCons/Platform/aix.py  2
-rw-r--r--  src/engine/SCons/Platform/cygwin.py  2
-rw-r--r--  src/engine/SCons/Platform/darwin.py  2
-rw-r--r--  src/engine/SCons/Platform/hpux.py  2
-rw-r--r--  src/engine/SCons/Platform/irix.py  2
-rw-r--r--  src/engine/SCons/Platform/os2.py  2
-rw-r--r--  src/engine/SCons/Platform/posix.py  9
-rw-r--r--  src/engine/SCons/Platform/sunos.py  2
-rw-r--r--  src/engine/SCons/Platform/win32.py  4
-rw-r--r--  src/engine/SCons/SConf.py  10
-rw-r--r--  src/engine/SCons/SConfTests.py  2
-rw-r--r--  src/engine/SCons/SConsign.py  16
-rw-r--r--  src/engine/SCons/Scanner/C.py  2
-rw-r--r--  src/engine/SCons/Scanner/Fortran.py  3
-rw-r--r--  src/engine/SCons/Scanner/LaTeX.py  4
-rw-r--r--  src/engine/SCons/Scanner/Prog.py  3
-rw-r--r--  src/engine/SCons/Scanner/ProgTests.py  4
-rw-r--r--  src/engine/SCons/Scanner/ScannerTests.py  2
-rw-r--r--  src/engine/SCons/Scanner/__init__.py  5
-rw-r--r--  src/engine/SCons/Script/Interactive.py  12
-rw-r--r--  src/engine/SCons/Script/Main.py  61
-rw-r--r--  src/engine/SCons/Script/SConsOptions.py  8
-rw-r--r--  src/engine/SCons/Script/SConscript.py  22
-rw-r--r--  src/engine/SCons/Script/__init__.py  8
-rw-r--r--  src/engine/SCons/Subst.py  8
-rw-r--r--  src/engine/SCons/SubstTests.py  48
-rw-r--r--  src/engine/SCons/Taskmaster.py  44
-rw-r--r--  src/engine/SCons/TaskmasterTests.py  10
-rw-r--r--  src/engine/SCons/Tool/FortranCommon.py  2
-rw-r--r--  src/engine/SCons/Tool/GettextCommon.py  12
-rw-r--r--  src/engine/SCons/Tool/MSCommon/common.py  6
-rw-r--r--  src/engine/SCons/Tool/MSCommon/netframework.py  4
-rw-r--r--  src/engine/SCons/Tool/MSCommon/sdk.py  8
-rw-r--r--  src/engine/SCons/Tool/MSCommon/vc.py  24
-rw-r--r--  src/engine/SCons/Tool/MSCommon/vs.py  6
-rw-r--r--  src/engine/SCons/Tool/__init__.py  36
-rw-r--r--  src/engine/SCons/Tool/aixcc.py  2
-rw-r--r--  src/engine/SCons/Tool/aixf77.py  2
-rw-r--r--  src/engine/SCons/Tool/aixlink.py  4
-rw-r--r--  src/engine/SCons/Tool/applelink.py  2
-rw-r--r--  src/engine/SCons/Tool/cvf.py  2
-rw-r--r--  src/engine/SCons/Tool/cyglink.py  2
-rw-r--r--  src/engine/SCons/Tool/docbook/docbook-xsl-1.76.1/extensions/docbook.py  10
-rw-r--r--  src/engine/SCons/Tool/docbook/docbook-xsl-1.76.1/extensions/xslt.py  12
-rw-r--r--  src/engine/SCons/Tool/dvipdf.py  2
-rw-r--r--  src/engine/SCons/Tool/f03.py  2
-rw-r--r--  src/engine/SCons/Tool/f95.py  2
-rw-r--r--  src/engine/SCons/Tool/filesystem.py  2
-rw-r--r--  src/engine/SCons/Tool/gcc.py  2
-rw-r--r--  src/engine/SCons/Tool/gfortran.py  2
-rw-r--r--  src/engine/SCons/Tool/gnulink.py  2
-rw-r--r--  src/engine/SCons/Tool/gs.py  2
-rw-r--r--  src/engine/SCons/Tool/hpcc.py  2
-rw-r--r--  src/engine/SCons/Tool/hplink.py  2
-rw-r--r--  src/engine/SCons/Tool/icc.py  2
-rw-r--r--  src/engine/SCons/Tool/ifl.py  2
-rw-r--r--  src/engine/SCons/Tool/ifort.py  2
-rw-r--r--  src/engine/SCons/Tool/install.py  26
-rw-r--r--  src/engine/SCons/Tool/intelc.py  19
-rw-r--r--  src/engine/SCons/Tool/latex.py  4
-rw-r--r--  src/engine/SCons/Tool/link.py  18
-rw-r--r--  src/engine/SCons/Tool/midl.py  2
-rw-r--r--  src/engine/SCons/Tool/msgfmt.py  2
-rw-r--r--  src/engine/SCons/Tool/msginit.py  4
-rw-r--r--  src/engine/SCons/Tool/msgmerge.py  2
-rw-r--r--  src/engine/SCons/Tool/mslib.py  2
-rw-r--r--  src/engine/SCons/Tool/mslink.py  12
-rw-r--r--  src/engine/SCons/Tool/mssdk.py  2
-rw-r--r--  src/engine/SCons/Tool/msvc.py  2
-rw-r--r--  src/engine/SCons/Tool/msvs.py  44
-rw-r--r--  src/engine/SCons/Tool/msvsTests.py  2
-rw-r--r--  src/engine/SCons/Tool/packaging/__init__.py  10
-rw-r--r--  src/engine/SCons/Tool/packaging/ipk.py  2
-rw-r--r--  src/engine/SCons/Tool/packaging/msi.py  8
-rw-r--r--  src/engine/SCons/Tool/packaging/rpm.py  12
-rw-r--r--  src/engine/SCons/Tool/pdflatex.py  2
-rw-r--r--  src/engine/SCons/Tool/pdftex.py  2
-rw-r--r--  src/engine/SCons/Tool/qt.py  12
-rw-r--r--  src/engine/SCons/Tool/rpmutils.py  6
-rw-r--r--  src/engine/SCons/Tool/sgicc.py  2
-rw-r--r--  src/engine/SCons/Tool/sgilink.py  2
-rw-r--r--  src/engine/SCons/Tool/suncc.py  2
-rw-r--r--  src/engine/SCons/Tool/sunf77.py  2
-rw-r--r--  src/engine/SCons/Tool/sunf90.py  2
-rw-r--r--  src/engine/SCons/Tool/sunf95.py  2
-rw-r--r--  src/engine/SCons/Tool/sunlink.py  2
-rw-r--r--  src/engine/SCons/Tool/tex.py  94
-rw-r--r--  src/engine/SCons/Tool/textfile.py  5
-rw-r--r--  src/engine/SCons/Tool/xgettext.py  8
-rw-r--r--  src/engine/SCons/Util.py  39
-rw-r--r--  src/engine/SCons/UtilTests.py  28
-rw-r--r--  src/engine/SCons/Variables/EnumVariableTests.py  4
-rw-r--r--  src/engine/SCons/Variables/PathVariableTests.py  16
-rw-r--r--  src/engine/SCons/Variables/VariablesTests.py  2
-rw-r--r--  src/engine/SCons/Variables/__init__.py  18
-rw-r--r--  src/engine/SCons/compat/__init__.py  2
-rw-r--r--  src/engine/SCons/compat/_scons_subprocess.py  48
-rw-r--r--  src/engine/SCons/cpp.py  11
-rw-r--r--  src/engine/SCons/cppTests.py  2
-rw-r--r--  src/engine/SCons/dblite.py  42
-rw-r--r--  src/script/scons-time.py  47
-rw-r--r--  src/script/sconsign.py  40
-rw-r--r--  src/test_files.py  10
-rw-r--r--  src/test_interrupts.py  6
-rw-r--r--  src/test_pychecker.py  2
-rw-r--r--  src/test_setup.py  6
-rw-r--r--  src/test_strings.py  8
-rw-r--r--  test/AS/nasm.py  2
-rw-r--r--  test/Actions/unicode-signature.py  2
-rw-r--r--  test/AddOption/help.py  16
-rw-r--r--  test/Batch/action-changed.py  4
-rw-r--r--  test/Chmod.py  78
-rw-r--r--  test/Configure/ConfigureDryRunError.py  2
-rw-r--r--  test/Configure/config-h.py  52
-rw-r--r--  test/Configure/implicit-cache.py  10
-rw-r--r--  test/Copy-Action.py  4
-rw-r--r--  test/Deprecated/Options/Options.py  2
-rw-r--r--  test/Deprecated/SourceCode/BitKeeper/BitKeeper.py  6
-rw-r--r--  test/Deprecated/SourceCode/Subversion.py  2
-rw-r--r--  test/GetBuildFailures/parallel.py  8
-rw-r--r--  test/Glob/glob-libpath.py  4
-rw-r--r--  test/Install/Install.py  2
-rw-r--r--  test/Interactive/version.py  2
-rw-r--r--  test/Java/multi-step.py  2
-rw-r--r--  test/MSVC/batch-longlines.py  122
-rw-r--r--  test/MSVC/msvc.py  6
-rw-r--r--  test/QT/QTFLAGS.py  2
-rw-r--r--  test/QT/copied-env.py  4
-rw-r--r--  test/QT/warnings.py  4
-rw-r--r--  test/SConsignFile/use-dbhash.py  2
-rw-r--r--  test/SConsignFile/use-dbm.py  2
-rw-r--r--  test/SConsignFile/use-dumbdbm.py  2
-rw-r--r--  test/SConsignFile/use-gdbm.py  2
-rw-r--r--  test/SHELL.py  2
-rw-r--r--  test/Scanner/unicode.py  10
-rw-r--r--  test/TEMPFILEPREFIX.py  2
-rw-r--r--  test/TEX/TEX.py  8
-rw-r--r--  test/Value.py  2
-rw-r--r--  test/Variables/Variables.py  2
-rw-r--r--  test/WhereIs.py  4
-rw-r--r--  test/Win32/bad-drive.py  4
-rw-r--r--  test/Win32/default-drive.py  2
-rw-r--r--  test/ZIP/ZIP.py  2
-rw-r--r--  test/ZIP/ZIPROOT.py  196
-rw-r--r--  test/gnutools.py  2
-rw-r--r--  test/import.py  4
-rw-r--r--  test/long-lines/signature.py  2
-rw-r--r--  test/option--random.py  4
-rw-r--r--  test/option-v.py  4
-rw-r--r--  test/option/debug-count.py  8
-rw-r--r--  test/option/debug-time.py  10
-rw-r--r--  test/option/help-options.py  4
-rw-r--r--  test/option/profile.py  2
-rw-r--r--  test/scons-time/run/config/python.py  2
-rw-r--r--  test/scons-time/run/option/python.py  2
-rw-r--r--  test/sconsign/nonwritable.py  4
-rw-r--r--  test/sconsign/script/SConsignFile.py  4
-rw-r--r--  test/sconsign/script/Signatures.py  4
-rw-r--r--  test/sconsign/script/no-SConsignFile.py  4
-rw-r--r--  test/site_scons/sysdirs.py  142
-rw-r--r--  test/spaces.py  2
-rw-r--r--  test/subdivide.py  4
-rw-r--r--  test/update-release-info/update-release-info.py  2
-rw-r--r--  timings/ElectricCloud/TimeSCons-run.py  2
-rwxr-xr-x  www/gen_sched_table.py  12
221 files changed, 3292 insertions, 3282 deletions
diff --git a/QMTest/TestCmd.py b/QMTest/TestCmd.py
index 38e9cd3c..4f08d1fe 100644
--- a/QMTest/TestCmd.py
+++ b/QMTest/TestCmd.py
@@ -285,7 +285,7 @@ version.
# PARTICULAR PURPOSE. THE CODE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS,
# AND THERE IS NO OBLIGATION WHATSOEVER TO PROVIDE MAINTENANCE,
# SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
-from __future__ import division
+
__author__ = "Steven Knight <knight at baldmt dot com>"
__revision__ = "TestCmd.py 1.3.D001 2010/06/03 12:58:27 knight"
@@ -355,7 +355,7 @@ except NameError:
return isinstance(e, (str, UserString))
else:
def is_String(e):
- return isinstance(e, (str, unicode, UserString))
+ return isinstance(e, (str, UserString))
tempfile.template = 'testcmd.'
if os.name in ('posix', 'nt'):
@@ -490,17 +490,17 @@ def match_re(lines = None, res = None):
if not is_List(res):
res = res.split("\n")
if len(lines) != len(res):
- print "match_re: expected %d lines, found %d"%(len(res), len(lines))
+ print("match_re: expected %d lines, found %d"%(len(res), len(lines)))
return
for i in range(len(lines)):
s = "^" + res[i] + "$"
try:
expr = re.compile(s)
- except re.error, e:
+ except re.error as e:
msg = "Regular expression error in %s: %s"
raise re.error(msg % (repr(s), e.args[0]))
if not expr.search(lines[i]):
- print "match_re: mismatch at line %d:\n search re='%s'\n line='%s'"%(i,s,lines[i])
+ print("match_re: mismatch at line %d:\n search re='%s'\n line='%s'"%(i,s,lines[i]))
return
return 1
@@ -514,7 +514,7 @@ def match_re_dotall(lines = None, res = None):
s = "^" + res + "$"
try:
expr = re.compile(s, re.DOTALL)
- except re.error, e:
+ except re.error as e:
msg = "Regular expression error in %s: %s"
raise re.error(msg % (repr(s), e.args[0]))
return expr.match(lines)
@@ -564,7 +564,7 @@ def diff_re(a, b, fromfile='', tofile='',
s = "^" + aline + "$"
try:
expr = re.compile(s)
- except re.error, e:
+ except re.error as e:
msg = "Regular expression error in %s: %s"
raise re.error(msg % (repr(s), e.args[0]))
if not expr.search(bline):
@@ -640,7 +640,7 @@ else:
st = os.stat(f)
except OSError:
continue
- if stat.S_IMODE(st[stat.ST_MODE]) & 0111:
+ if stat.S_IMODE(st[stat.ST_MODE]) & 0o111:
return f
return None
@@ -716,7 +716,7 @@ class Popen(subprocess.Popen):
(errCode, written) = WriteFile(x, input)
except ValueError:
return self._close('stdin')
- except (subprocess.pywintypes.error, Exception), why:
+ except (subprocess.pywintypes.error, Exception) as why:
if why.args[0] in (109, errno.ESHUTDOWN):
return self._close('stdin')
raise
@@ -737,7 +737,7 @@ class Popen(subprocess.Popen):
(errCode, read) = ReadFile(x, nAvail, None)
except ValueError:
return self._close(which)
- except (subprocess.pywintypes.error, Exception), why:
+ except (subprocess.pywintypes.error, Exception) as why:
if why.args[0] in (109, errno.ESHUTDOWN):
return self._close(which)
raise
@@ -756,7 +756,7 @@ class Popen(subprocess.Popen):
try:
written = os.write(self.stdin.fileno(), input)
- except OSError, why:
+ except OSError as why:
if why.args[0] == errno.EPIPE: #broken pipe
return self._close('stdin')
raise
@@ -950,7 +950,7 @@ class TestCmd(object):
condition = self.condition
if self._preserve[condition]:
for dir in self._dirlist:
- print unicode("Preserved directory " + dir + "\n"),
+ print(str("Preserved directory " + dir + "\n"), end=' ')
else:
list = self._dirlist[:]
list.reverse()
@@ -1016,10 +1016,10 @@ class TestCmd(object):
if diff_function is None:
diff_function = self.simple_diff
if name is not None:
- print self.banner(name)
+ print(self.banner(name))
args = (a.splitlines(), b.splitlines()) + args
for line in diff_function(*args, **kw):
- print line
+ print(line)
def diff_stderr(self, a, b, *args, **kw):
"""Compare actual and expected file contents.
@@ -1645,12 +1645,12 @@ class TestCmd(object):
def do_chmod(fname):
try: st = os.stat(fname)
except OSError: pass
- else: os.chmod(fname, stat.S_IMODE(st[stat.ST_MODE]|0200))
+ else: os.chmod(fname, stat.S_IMODE(st[stat.ST_MODE]|0o200))
else:
def do_chmod(fname):
try: st = os.stat(fname)
except OSError: pass
- else: os.chmod(fname, stat.S_IMODE(st[stat.ST_MODE]&~0200))
+ else: os.chmod(fname, stat.S_IMODE(st[stat.ST_MODE]&~0o200))
if os.path.isfile(top):
do_chmod(top)
diff --git a/QMTest/TestCmdTests.py b/QMTest/TestCmdTests.py
index 1044ed1e..96e0cbf6 100644
--- a/QMTest/TestCmdTests.py
+++ b/QMTest/TestCmdTests.py
@@ -26,13 +26,13 @@ import os
import shutil
import signal
import stat
-import StringIO
+import io
import sys
import tempfile
import time
import types
import unittest
-import UserList
+import collections
# Strip the current directory so we get the right TestCmd.py module.
sys.path = sys.path[1:]
@@ -131,11 +131,11 @@ class TestCmdTestCase(unittest.TestCase):
run_env.write(t.scriptout_path, textout)
run_env.write(t.scripterr_path, texterr)
- os.chmod(t.script_path, 0644) # XXX UNIX-specific
- os.chmod(t.scriptx_path, 0755) # XXX UNIX-specific
- os.chmod(t.script1_path, 0644) # XXX UNIX-specific
- os.chmod(t.scriptout_path, 0644) # XXX UNIX-specific
- os.chmod(t.scripterr_path, 0644) # XXX UNIX-specific
+ os.chmod(t.script_path, 0o644) # XXX UNIX-specific
+ os.chmod(t.scriptx_path, 0o755) # XXX UNIX-specific
+ os.chmod(t.script1_path, 0o644) # XXX UNIX-specific
+ os.chmod(t.scriptout_path, 0o644) # XXX UNIX-specific
+ os.chmod(t.scripterr_path, 0o644) # XXX UNIX-specific
t.orig_cwd = os.getcwd()
@@ -220,8 +220,8 @@ class cleanup_TestCase(TestCmdTestCase):
test = TestCmd.TestCmd(workdir = '')
wdir = test.workdir
test.write('file2', "Test file #2\n")
- os.chmod(test.workpath('file2'), 0400)
- os.chmod(wdir, 0500)
+ os.chmod(test.workpath('file2'), 0o400)
+ os.chmod(wdir, 0o500)
test.cleanup()
assert not os.path.exists(wdir)
@@ -286,35 +286,35 @@ class chmod_TestCase(TestCmdTestCase):
test.chmod(['sub', 'file2'], stat.S_IWRITE)
file1_mode = stat.S_IMODE(os.stat(wdir_file1)[stat.ST_MODE])
- assert file1_mode == 0444, '0%o' % file1_mode
+ assert file1_mode == 0o444, '0%o' % file1_mode
file2_mode = stat.S_IMODE(os.stat(wdir_sub_file2)[stat.ST_MODE])
- assert file2_mode == 0666, '0%o' % file2_mode
+ assert file2_mode == 0o666, '0%o' % file2_mode
test.chmod('file1', stat.S_IWRITE)
test.chmod(wdir_sub_file2, stat.S_IREAD)
file1_mode = stat.S_IMODE(os.stat(wdir_file1)[stat.ST_MODE])
- assert file1_mode == 0666, '0%o' % file1_mode
+ assert file1_mode == 0o666, '0%o' % file1_mode
file2_mode = stat.S_IMODE(os.stat(wdir_sub_file2)[stat.ST_MODE])
- assert file2_mode == 0444, '0%o' % file2_mode
+ assert file2_mode == 0o444, '0%o' % file2_mode
else:
- test.chmod(wdir_file1, 0700)
- test.chmod(['sub', 'file2'], 0760)
+ test.chmod(wdir_file1, 0o700)
+ test.chmod(['sub', 'file2'], 0o760)
file1_mode = stat.S_IMODE(os.stat(wdir_file1)[stat.ST_MODE])
- assert file1_mode == 0700, '0%o' % file1_mode
+ assert file1_mode == 0o700, '0%o' % file1_mode
file2_mode = stat.S_IMODE(os.stat(wdir_sub_file2)[stat.ST_MODE])
- assert file2_mode == 0760, '0%o' % file2_mode
+ assert file2_mode == 0o760, '0%o' % file2_mode
- test.chmod('file1', 0765)
- test.chmod(wdir_sub_file2, 0567)
+ test.chmod('file1', 0o765)
+ test.chmod(wdir_sub_file2, 0o567)
file1_mode = stat.S_IMODE(os.stat(wdir_file1)[stat.ST_MODE])
- assert file1_mode == 0765, '0%o' % file1_mode
+ assert file1_mode == 0o765, '0%o' % file1_mode
file2_mode = stat.S_IMODE(os.stat(wdir_sub_file2)[stat.ST_MODE])
- assert file2_mode == 0567, '0%o' % file2_mode
+ assert file2_mode == 0o567, '0%o' % file2_mode
@@ -1032,14 +1032,14 @@ class match_exact_TestCase(TestCmdTestCase):
assert test.match_exact("abcde\n", "abcde\n")
assert not test.match_exact(["12345\n", "abcde\n"], ["1[0-9]*5\n", "a.*e\n"])
assert test.match_exact(["12345\n", "abcde\n"], ["12345\n", "abcde\n"])
- assert not test.match_exact(UserList.UserList(["12345\n", "abcde\n"]),
+ assert not test.match_exact(collections.UserList(["12345\n", "abcde\n"]),
["1[0-9]*5\n", "a.*e\n"])
- assert test.match_exact(UserList.UserList(["12345\n", "abcde\n"]),
+ assert test.match_exact(collections.UserList(["12345\n", "abcde\n"]),
["12345\n", "abcde\n"])
assert not test.match_exact(["12345\n", "abcde\n"],
- UserList.UserList(["1[0-9]*5\n", "a.*e\n"]))
+ collections.UserList(["1[0-9]*5\n", "a.*e\n"]))
assert test.match_exact(["12345\n", "abcde\n"],
- UserList.UserList(["12345\n", "abcde\n"]))
+ collections.UserList(["12345\n", "abcde\n"]))
assert not test.match_exact("12345\nabcde\n", "1[0-9]*5\na.*e\n")
assert test.match_exact("12345\nabcde\n", "12345\nabcde\n")
lines = ["vwxyz\n", "67890\n"]
@@ -1098,26 +1098,26 @@ sys.exit(0)
["1.*j\n"])
assert test.match_re_dotall(["12345\n", "abcde\n", "fghij\n"],
["12345\n", "abcde\n", "fghij\n"])
- assert test.match_re_dotall(UserList.UserList(["12345\n",
+ assert test.match_re_dotall(collections.UserList(["12345\n",
"abcde\n",
"fghij\n"]),
["1[0-9]*5\n", "a.*e\n", "f.*j\n"])
- assert test.match_re_dotall(UserList.UserList(["12345\n",
+ assert test.match_re_dotall(collections.UserList(["12345\n",
"abcde\n",
"fghij\n"]),
["1.*j\n"])
- assert test.match_re_dotall(UserList.UserList(["12345\n",
+ assert test.match_re_dotall(collections.UserList(["12345\n",
"abcde\n",
"fghij\n"]),
["12345\n", "abcde\n", "fghij\n"])
assert test.match_re_dotall(["12345\n", "abcde\n", "fghij\n"],
- UserList.UserList(["1[0-9]*5\n",
+ collections.UserList(["1[0-9]*5\n",
"a.*e\n",
"f.*j\n"]))
assert test.match_re_dotall(["12345\n", "abcde\n", "fghij\n"],
- UserList.UserList(["1.*j\n"]))
+ collections.UserList(["1.*j\n"]))
assert test.match_re_dotall(["12345\n", "abcde\n", "fghij\n"],
- UserList.UserList(["12345\n",
+ collections.UserList(["12345\n",
"abcde\n",
"fghij\n"]))
assert test.match_re_dotall("12345\nabcde\nfghij\n",
@@ -1176,14 +1176,14 @@ sys.exit(0)
assert test.match_re("abcde\n", "abcde\n")
assert test.match_re(["12345\n", "abcde\n"], ["1[0-9]*5\n", "a.*e\n"])
assert test.match_re(["12345\n", "abcde\n"], ["12345\n", "abcde\n"])
- assert test.match_re(UserList.UserList(["12345\n", "abcde\n"]),
+ assert test.match_re(collections.UserList(["12345\n", "abcde\n"]),
["1[0-9]*5\n", "a.*e\n"])
- assert test.match_re(UserList.UserList(["12345\n", "abcde\n"]),
+ assert test.match_re(collections.UserList(["12345\n", "abcde\n"]),
["12345\n", "abcde\n"])
assert test.match_re(["12345\n", "abcde\n"],
- UserList.UserList(["1[0-9]*5\n", "a.*e\n"]))
+ collections.UserList(["1[0-9]*5\n", "a.*e\n"]))
assert test.match_re(["12345\n", "abcde\n"],
- UserList.UserList(["12345\n", "abcde\n"]))
+ collections.UserList(["12345\n", "abcde\n"]))
assert test.match_re("12345\nabcde\n", "1[0-9]*5\na.*e\n")
assert test.match_re("12345\nabcde\n", "12345\nabcde\n")
lines = ["vwxyz\n", "67890\n"]
@@ -1463,7 +1463,7 @@ class preserve_TestCase(TestCmdTestCase):
def test_preserve(self):
"""Test preserve()"""
def cleanup_test(test, cond=None, stdout=""):
- io = StringIO.StringIO()
+ io = io.StringIO()
save = sys.stdout
sys.stdout = io
try:
@@ -1603,7 +1603,7 @@ class read_TestCase(TestCmdTestCase):
_file_matches(wdir_foo_file3, test.read(['foo', 'file3']),
"Test\nfile\n#3.\n")
_file_matches(wdir_foo_file3,
- test.read(UserList.UserList(['foo', 'file3'])),
+ test.read(collections.UserList(['foo', 'file3'])),
"Test\nfile\n#3.\n")
_file_matches(wdir_file4, test.read('file4', mode = 'r'),
"Test\nfile\n#4.\n")
@@ -1862,8 +1862,8 @@ class run_verbose_TestCase(TestCmdTestCase):
workdir = '',
verbose = 1)
- sys.stdout = StringIO.StringIO()
- sys.stderr = StringIO.StringIO()
+ sys.stdout = io.StringIO()
+ sys.stderr = io.StringIO()
test.run(arguments = ['arg1 arg2'])
o = sys.stdout.getvalue()
@@ -1876,8 +1876,8 @@ class run_verbose_TestCase(TestCmdTestCase):
workdir = '',
verbose = 1)
- sys.stdout = StringIO.StringIO()
- sys.stderr = StringIO.StringIO()
+ sys.stdout = io.StringIO()
+ sys.stderr = io.StringIO()
testx.run(arguments = ['arg1 arg2'])
expect = '"%s" "arg1 arg2"\n' % t.scriptx_path
@@ -1913,8 +1913,8 @@ class run_verbose_TestCase(TestCmdTestCase):
workdir = '',
verbose = 2)
- sys.stdout = StringIO.StringIO()
- sys.stderr = StringIO.StringIO()
+ sys.stdout = io.StringIO()
+ sys.stderr = io.StringIO()
test.run(arguments = ['arg1 arg2'])
@@ -1934,8 +1934,8 @@ class run_verbose_TestCase(TestCmdTestCase):
workdir = '',
verbose = 2)
- sys.stdout = StringIO.StringIO()
- sys.stderr = StringIO.StringIO()
+ sys.stdout = io.StringIO()
+ sys.stderr = io.StringIO()
testx.run(arguments = ['arg1 arg2'])
@@ -1958,8 +1958,8 @@ class run_verbose_TestCase(TestCmdTestCase):
workdir = '',
verbose = 2)
- sys.stdout = StringIO.StringIO()
- sys.stderr = StringIO.StringIO()
+ sys.stdout = io.StringIO()
+ sys.stderr = io.StringIO()
test.run(arguments = ['arg1 arg2'])
@@ -1978,8 +1978,8 @@ class run_verbose_TestCase(TestCmdTestCase):
workdir = '',
verbose = 3)
- sys.stdout = StringIO.StringIO()
- sys.stderr = StringIO.StringIO()
+ sys.stdout = io.StringIO()
+ sys.stderr = io.StringIO()
test.run(arguments = ['arg1 arg2'])
@@ -2002,8 +2002,8 @@ class run_verbose_TestCase(TestCmdTestCase):
interpreter = 'python',
workdir = '')
- sys.stdout = StringIO.StringIO()
- sys.stderr = StringIO.StringIO()
+ sys.stdout = io.StringIO()
+ sys.stderr = io.StringIO()
test.run(arguments = ['arg1 arg2'])
@@ -2022,8 +2022,8 @@ class run_verbose_TestCase(TestCmdTestCase):
testx = TestCmd.TestCmd(program = t.scriptx,
workdir = '')
- sys.stdout = StringIO.StringIO()
- sys.stderr = StringIO.StringIO()
+ sys.stdout = io.StringIO()
+ sys.stderr = io.StringIO()
testx.run(arguments = ['arg1 arg2'])
@@ -2048,8 +2048,8 @@ class run_verbose_TestCase(TestCmdTestCase):
workdir = '',
verbose = 1)
- sys.stdout = StringIO.StringIO()
- sys.stderr = StringIO.StringIO()
+ sys.stdout = io.StringIO()
+ sys.stderr = io.StringIO()
test.run(arguments = ['arg1 arg2'])
o = sys.stdout.getvalue()
@@ -2062,8 +2062,8 @@ class run_verbose_TestCase(TestCmdTestCase):
workdir = '',
verbose = 1)
- sys.stdout = StringIO.StringIO()
- sys.stderr = StringIO.StringIO()
+ sys.stdout = io.StringIO()
+ sys.stderr = io.StringIO()
testx.run(arguments = ['arg1 arg2'])
expect = '"%s" "arg1 arg2"\n' % t.scriptx_path
@@ -2347,7 +2347,7 @@ while 1:
logfp.close()
""" % t.recv_out_path
t.run_env.write(t.recv_script_path, text)
- os.chmod(t.recv_script_path, 0644) # XXX UNIX-specific
+ os.chmod(t.recv_script_path, 0o644) # XXX UNIX-specific
return t
def test_start(self):
@@ -2765,11 +2765,11 @@ class subdir_TestCase(TestCmdTestCase):
assert test.subdir('bar') == 1
assert test.subdir(['foo', 'succeed']) == 1
if os.name != "nt":
- os.chmod(test.workpath('foo'), 0500)
+ os.chmod(test.workpath('foo'), 0o500)
assert test.subdir(['foo', 'fail']) == 0
assert test.subdir(['sub', 'dir', 'ectory'], 'sub') == 1
assert test.subdir('one',
- UserList.UserList(['one', 'two']),
+ collections.UserList(['one', 'two']),
['one', 'two', 'three']) == 3
assert os.path.isdir(test.workpath('foo'))
assert os.path.isdir(test.workpath('bar'))
@@ -2962,7 +2962,7 @@ class unlink_TestCase(TestCmdTestCase):
test.unlink(['foo', 'file3a'])
assert not os.path.exists(wdir_foo_file3a)
- test.unlink(UserList.UserList(['foo', 'file3b']))
+ test.unlink(collections.UserList(['foo', 'file3b']))
assert not os.path.exists(wdir_foo_file3b)
test.unlink([test.workdir, 'foo', 'file4'])
@@ -2971,8 +2971,8 @@ class unlink_TestCase(TestCmdTestCase):
# Make it so we can't unlink file5.
# For UNIX, remove write permission from the dir and the file.
# For Windows, open the file.
- os.chmod(test.workdir, 0500)
- os.chmod(wdir_file5, 0400)
+ os.chmod(test.workdir, 0o500)
+ os.chmod(wdir_file5, 0o400)
f = open(wdir_file5, 'r')
try:
@@ -2983,8 +2983,8 @@ class unlink_TestCase(TestCmdTestCase):
except:
raise
finally:
- os.chmod(test.workdir, 0700)
- os.chmod(wdir_file5, 0600)
+ os.chmod(test.workdir, 0o700)
+ os.chmod(wdir_file5, 0o600)
f.close()
@@ -3208,11 +3208,11 @@ class executable_TestCase(TestCmdTestCase):
def make_executable(fname):
st = os.stat(fname)
- os.chmod(fname, stat.S_IMODE(st[stat.ST_MODE]|0100))
+ os.chmod(fname, stat.S_IMODE(st[stat.ST_MODE]|0o100))
def make_non_executable(fname):
st = os.stat(fname)
- os.chmod(fname, stat.S_IMODE(st[stat.ST_MODE]&~0100))
+ os.chmod(fname, stat.S_IMODE(st[stat.ST_MODE]&~0o100))
test.executable(test.workdir, 0)
# XXX skip these tests if euid == 0?
@@ -3282,7 +3282,7 @@ class write_TestCase(TestCmdTestCase):
test.write('file9', "Test file #9.\r\n", mode = 'wb')
if os.name != "nt":
- os.chmod(test.workdir, 0500)
+ os.chmod(test.workdir, 0o500)
try:
test.write('file10', "Test file #10 (should not get created).\n")
except IOError: # expect "Permission denied"
diff --git a/QMTest/TestCommon.py b/QMTest/TestCommon.py
index 4e90e168..5a397ffc 100644
--- a/QMTest/TestCommon.py
+++ b/QMTest/TestCommon.py
@@ -281,9 +281,9 @@ class TestCommon(TestCmd):
existing, missing = separate_files(files)
unwritable = [x for x in existing if not is_writable(x)]
if missing:
- print "Missing files: `%s'" % "', `".join(missing)
+ print("Missing files: `%s'" % "', `".join(missing))
if unwritable:
- print "Unwritable files: `%s'" % "', `".join(unwritable)
+ print("Unwritable files: `%s'" % "', `".join(unwritable))
self.fail_test(missing + unwritable)
def must_contain(self, file, required, mode = 'rb', find = None):
@@ -298,11 +298,11 @@ class TestCommon(TestCmd):
return None
contains = find(file_contents, required)
if not contains:
- print "File `%s' does not contain required string." % file
- print self.banner('Required string ')
- print required
- print self.banner('%s contents ' % file)
- print file_contents
+ print("File `%s' does not contain required string." % file)
+ print(self.banner('Required string '))
+ print(required)
+ print(self.banner('%s contents ' % file))
+ print(file_contents)
self.fail_test(not contains)
def must_contain_all_lines(self, output, lines, title=None, find=None):
@@ -437,7 +437,7 @@ class TestCommon(TestCmd):
files = [is_List(x) and os.path.join(*x) or x for x in files]
missing = [x for x in files if not os.path.exists(x) and not os.path.islink(x) ]
if missing:
- print "Missing files: `%s'" % "', `".join(missing)
+ print("Missing files: `%s'" % "', `".join(missing))
self.fail_test(missing)
def must_exist_one_of(self, files):
@@ -457,7 +457,7 @@ class TestCommon(TestCmd):
if glob.glob(xpath):
return
missing.append(xpath)
- print "Missing one of: `%s'" % "', `".join(missing)
+ print("Missing one of: `%s'" % "', `".join(missing))
self.fail_test(missing)
def must_match(self, file, expect, mode = 'rb', match=None):
@@ -474,7 +474,7 @@ class TestCommon(TestCmd):
except KeyboardInterrupt:
raise
except:
- print "Unexpected contents of `%s'" % file
+ print("Unexpected contents of `%s'" % file)
self.diff(expect, file_contents, 'contents ')
raise
@@ -490,11 +490,11 @@ class TestCommon(TestCmd):
return None
contains = find(file_contents, banned)
if contains:
- print "File `%s' contains banned string." % file
- print self.banner('Banned string ')
- print banned
- print self.banner('%s contents ' % file)
- print file_contents
+ print("File `%s' contains banned string." % file)
+ print(self.banner('Banned string '))
+ print(banned)
+ print(self.banner('%s contents ' % file))
+ print(file_contents)
self.fail_test(contains)
def must_not_contain_any_line(self, output, lines, title=None, find=None):
@@ -541,7 +541,7 @@ class TestCommon(TestCmd):
files = [is_List(x) and os.path.join(*x) or x for x in files]
existing = [x for x in files if os.path.exists(x) or os.path.islink(x)]
if existing:
- print "Unexpected files exist: `%s'" % "', `".join(existing)
+ print("Unexpected files exist: `%s'" % "', `".join(existing))
self.fail_test(existing)
def must_not_exist_any_of(self, files):
@@ -561,7 +561,7 @@ class TestCommon(TestCmd):
if glob.glob(xpath):
existing.append(xpath)
if existing:
- print "Unexpected files exist: `%s'" % "', `".join(existing)
+ print("Unexpected files exist: `%s'" % "', `".join(existing))
self.fail_test(existing)
def must_not_be_writable(self, *files):
@@ -575,9 +575,9 @@ class TestCommon(TestCmd):
existing, missing = separate_files(files)
writable = list(filter(is_writable, existing))
if missing:
- print "Missing files: `%s'" % "', `".join(missing)
+ print("Missing files: `%s'" % "', `".join(missing))
if writable:
- print "Writable files: `%s'" % "', `".join(writable)
+ print("Writable files: `%s'" % "', `".join(writable))
self.fail_test(missing + writable)
def _complete(self, actual_stdout, expected_stdout,
@@ -590,23 +590,23 @@ class TestCommon(TestCmd):
expect = ''
if status != 0:
expect = " (expected %s)" % str(status)
- print "%s returned %s%s" % (self.program, _status(self), expect)
- print self.banner('STDOUT ')
- print actual_stdout
- print self.banner('STDERR ')
- print actual_stderr
+ print("%s returned %s%s" % (self.program, _status(self), expect))
+ print(self.banner('STDOUT '))
+ print(actual_stdout)
+ print(self.banner('STDERR '))
+ print(actual_stderr)
self.fail_test()
if (expected_stdout is not None
and not match(actual_stdout, expected_stdout)):
self.diff(expected_stdout, actual_stdout, 'STDOUT ')
if actual_stderr:
- print self.banner('STDERR ')
- print actual_stderr
+ print(self.banner('STDERR '))
+ print(actual_stderr)
self.fail_test()
if (expected_stderr is not None
and not match(actual_stderr, expected_stderr)):
- print self.banner('STDOUT ')
- print actual_stdout
+ print(self.banner('STDOUT '))
+ print(actual_stdout)
self.diff(expected_stderr, actual_stderr, 'STDERR ')
self.fail_test()
@@ -626,15 +626,15 @@ class TestCommon(TestCmd):
universal_newlines, **kw)
except KeyboardInterrupt:
raise
- except Exception, e:
- print self.banner('STDOUT ')
+ except Exception as e:
+ print(self.banner('STDOUT '))
try:
- print self.stdout()
+ print(self.stdout())
except IndexError:
pass
- print self.banner('STDERR ')
+ print(self.banner('STDERR '))
try:
- print self.stderr()
+ print(self.stderr())
except IndexError:
pass
cmd_args = self.command_args(program, interpreter, arguments)
diff --git a/QMTest/TestSCons.py b/QMTest/TestSCons.py
index 84cc60f0..72624e62 100644
--- a/QMTest/TestSCons.py
+++ b/QMTest/TestSCons.py
@@ -13,7 +13,7 @@ attributes defined in this subclass.
"""
# __COPYRIGHT__
-from __future__ import division
+
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
@@ -356,7 +356,7 @@ class TestSCons(TestCommon):
# raised so as to not mask possibly serious disk or
# network issues.
continue
- if stat.S_IMODE(st[stat.ST_MODE]) & 0111:
+ if stat.S_IMODE(st[stat.ST_MODE]) & 0o111:
return os.path.normpath(f)
else:
import SCons.Environment
@@ -503,9 +503,9 @@ class TestSCons(TestCommon):
self.pass_test()
else:
# test failed; have to do this by hand...
- print self.banner('STDOUT ')
- print self.stdout()
- print self.diff(warning, stderr, 'STDERR ')
+ print(self.banner('STDOUT '))
+ print(self.stdout())
+ print(self.diff(warning, stderr, 'STDERR '))
self.fail_test()
return warning
@@ -574,7 +574,7 @@ class TestSCons(TestCommon):
We stick the requested file name and line number in the right
places, abstracting out the version difference.
"""
- exec 'import traceback; x = traceback.format_stack()[-1]'
+ exec('import traceback; x = traceback.format_stack()[-1]')
x = x.lstrip()
x = x.replace('<string>', file)
x = x.replace('line 1,', 'line %s,' % line)
@@ -721,7 +721,7 @@ class TestSCons(TestCommon):
home = os.path.normpath('%s/..'%jar)
if os.path.isdir(home):
return home
- print("Could not determine JAVA_HOME: %s is not a directory" % home)
+ print(("Could not determine JAVA_HOME: %s is not a directory" % home))
self.fail_test()
def java_where_jar(self, version=None):
@@ -1087,27 +1087,27 @@ SConscript( sconscript )
if doCheckLog and lastEnd != len(logfile):
raise NoMatch(lastEnd)
- except NoMatch, m:
- print "Cannot match log file against log regexp."
- print "log file: "
- print "------------------------------------------------------"
- print logfile[m.pos:]
- print "------------------------------------------------------"
- print "log regexp: "
- print "------------------------------------------------------"
- print log
- print "------------------------------------------------------"
+ except NoMatch as m:
+ print("Cannot match log file against log regexp.")
+ print("log file: ")
+ print("------------------------------------------------------")
+ print(logfile[m.pos:])
+ print("------------------------------------------------------")
+ print("log regexp: ")
+ print("------------------------------------------------------")
+ print(log)
+ print("------------------------------------------------------")
self.fail_test()
if doCheckStdout:
exp_stdout = self.wrap_stdout(".*", rdstr)
if not self.match_re_dotall(self.stdout(), exp_stdout):
- print "Unexpected stdout: "
- print "-----------------------------------------------------"
- print repr(self.stdout())
- print "-----------------------------------------------------"
- print repr(exp_stdout)
- print "-----------------------------------------------------"
+ print("Unexpected stdout: ")
+ print("-----------------------------------------------------")
+ print(repr(self.stdout()))
+ print("-----------------------------------------------------")
+ print(repr(exp_stdout))
+ print("-----------------------------------------------------")
self.fail_test()
def get_python_version(self):
@@ -1241,7 +1241,7 @@ class TimeSCons(TestSCons):
self.variables = kw.get('variables')
default_calibrate_variables = []
if self.variables is not None:
- for variable, value in self.variables.items():
+ for variable, value in list(self.variables.items()):
value = os.environ.get(variable, value)
try:
value = int(value)
@@ -1297,7 +1297,7 @@ class TimeSCons(TestSCons):
"""
if 'options' not in kw and self.variables:
options = []
- for variable, value in self.variables.items():
+ for variable, value in list(self.variables.items()):
options.append('%s=%s' % (variable, value))
kw['options'] = ' '.join(options)
if self.calibrate:
@@ -1323,7 +1323,7 @@ class TimeSCons(TestSCons):
self.elapsed_time(),
"seconds",
sort=0)
- for name, args in stats.items():
+ for name, args in list(stats.items()):
self.trace(name, trace, **args)
def uptime(self):
diff --git a/QMTest/TestSConsMSVS.py b/QMTest/TestSConsMSVS.py
index c78b4523..478438ad 100644
--- a/QMTest/TestSConsMSVS.py
+++ b/QMTest/TestSConsMSVS.py
@@ -1039,7 +1039,7 @@ print "self._msvs_versions =", str(SCons.Tool.MSCommon.query_versions())
try:
host = _ARCH_TO_CANONICAL[host_platform]
- except KeyError, e:
+ except KeyError as e:
# Default to x86 for all other platforms
host = 'x86'
diff --git a/QMTest/TestSCons_time.py b/QMTest/TestSCons_time.py
index abe8ccf1..ba7fbd8d 100644
--- a/QMTest/TestSCons_time.py
+++ b/QMTest/TestSCons_time.py
@@ -225,7 +225,7 @@ class TestSCons_time(TestCommon):
def write_fake_aegis_py(self, name):
name = self.workpath(name)
self.write(name, aegis_py)
- os.chmod(name, 0755)
+ os.chmod(name, 0o755)
return name
def write_fake_scons_py(self):
@@ -235,7 +235,7 @@ class TestSCons_time(TestCommon):
def write_fake_svn_py(self, name):
name = self.workpath(name)
self.write(name, svn_py)
- os.chmod(name, 0755)
+ os.chmod(name, 0o755)
return name
def write_sample_directory(self, archive, dir, files):
diff --git a/QMTest/TestSConsign.py b/QMTest/TestSConsign.py
index 700c242c..665059c5 100644
--- a/QMTest/TestSConsign.py
+++ b/QMTest/TestSConsign.py
@@ -68,7 +68,7 @@ class TestSConsign(TestSCons):
elif os.path.exists(self.script_path('sconsign')):
sconsign = 'sconsign'
else:
- print "Can find neither 'sconsign.py' nor 'sconsign' scripts."
+ print("Can find neither 'sconsign.py' nor 'sconsign' scripts.")
self.no_result()
self.set_sconsign(sconsign)
diff --git a/QMTest/scons_tdb.py b/QMTest/scons_tdb.py
index 6be4696b..845e99cb 100644
--- a/QMTest/scons_tdb.py
+++ b/QMTest/scons_tdb.py
@@ -20,7 +20,7 @@
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-from __future__ import division
+
"""
QMTest classes to support SCons' testing and Aegis-inspired workflow.
@@ -92,7 +92,7 @@ def get_explicit_arguments(e):
# Determine which subset of the 'arguments' have been set
# explicitly.
explicit_arguments = {}
- for name, field in arguments.items():
+ for name, field in list(arguments.items()):
# Do not record computed fields.
if field.IsComputed():
continue
@@ -337,14 +337,14 @@ class AegisChangeStream(AegisStream):
# We'd like to use the _FormatStatistics() method to do
# this, but it's wrapped around the list in Result.outcomes,
# so it's simpler to just do it ourselves.
- print " %6d tests total\n" % self._num_tests
+ print(" %6d tests total\n" % self._num_tests)
for outcome in AegisTest.aegis_outcomes:
if self._outcome_counts[outcome] != 0:
- print " %6d (%3.0f%%) tests %s" % (
+ print(" %6d (%3.0f%%) tests %s" % (
self._outcome_counts[outcome],
self._percent(outcome),
outcome
- )
+ ))
class AegisBaselineStream(AegisStream):
def WriteResult(self, result):
@@ -368,19 +368,19 @@ class AegisBaselineStream(AegisStream):
# this, but it's wrapped around the list in Result.outcomes,
# so it's simpler to just do it ourselves.
if self._outcome_counts[AegisTest.FAIL]:
- print " %6d (%3.0f%%) tests as expected" % (
+ print(" %6d (%3.0f%%) tests as expected" % (
self._outcome_counts[AegisTest.FAIL],
self._percent(AegisTest.FAIL),
- )
+ ))
non_fail_outcomes = list(AegisTest.aegis_outcomes[:])
non_fail_outcomes.remove(AegisTest.FAIL)
for outcome in non_fail_outcomes:
if self._outcome_counts[outcome] != 0:
- print " %6d (%3.0f%%) tests unexpected %s" % (
+ print(" %6d (%3.0f%%) tests unexpected %s" % (
self._outcome_counts[outcome],
self._percent(outcome),
outcome,
- )
+ ))
class AegisBatchStream(FileResultStream):
def __init__(self, arguments):
diff --git a/bench/bench.py b/bench/bench.py
index 74dbf121..1a98d8c2 100644
--- a/bench/bench.py
+++ b/bench/bench.py
@@ -23,7 +23,7 @@
#
# This will allow (as much as possible) us to time just the code itself,
# not Python function call overhead.
-from __future__ import division
+
import getopt
import sys
@@ -94,7 +94,7 @@ exec(open(args[0], 'rU').read())
try:
FunctionList
except NameError:
- function_names = sorted([x for x in locals().keys() if x[:4] == FunctionPrefix])
+ function_names = sorted([x for x in list(locals().keys()) if x[:4] == FunctionPrefix])
l = [locals()[f] for f in function_names]
FunctionList = [f for f in l if isinstance(f, types.FunctionType)]
@@ -113,12 +113,12 @@ def display(label, results):
total = 0.0
for r in results:
total += r
- print " %8.3f" % ((total * 1e6) / len(results)), ':', label
+ print(" %8.3f" % ((total * 1e6) / len(results)), ':', label)
for func in FunctionList:
if func.__doc__: d = ' (' + func.__doc__ + ')'
else: d = ''
- print func.__name__ + d + ':'
+ print(func.__name__ + d + ':')
for label, args, kw in Data:
r = timer(func, *args, **kw)
diff --git a/bench/env.__setitem__.py b/bench/env.__setitem__.py
index b17b59e4..284653e8 100644
--- a/bench/env.__setitem__.py
+++ b/bench/env.__setitem__.py
@@ -33,15 +33,15 @@ class Timing(object):
def times(num=1000000, init='', title='Results:', **statements):
# time each statement
timings = []
- for n, s in statements.items():
+ for n, s in list(statements.items()):
t = Timing(n, num, init, s)
t.timeit()
timings.append(t)
- print
- print title
+ print()
+ print(title)
for i in sorted([(i.getResult(),i.name) for i in timings]):
- print " %9.3f s %s" % i
+ print(" %9.3f s %s" % i)
# Import the necessary local SCons.* modules used by some of our
# alternative implementations below, first manipulating sys.path so
@@ -287,7 +287,7 @@ else:
# that the timer will use to get at these classes.
class_names = []
-for n in locals().keys():
+for n in list(locals().keys()):
#if n.startswith('env_'):
if n[:4] == 'env_':
class_names.append(n)
@@ -339,9 +339,9 @@ def run_it(title, init):
s['init'] = init
times(**s)
-print 'Environment __setitem__ benchmark using',
-print 'Python', sys.version.split()[0],
-print 'on', sys.platform, os.name
+print('Environment __setitem__ benchmark using', end=' ')
+print('Python', sys.version.split()[0], end=' ')
+print('on', sys.platform, os.name)
run_it('Results for re-adding an existing variable name 100 times:',
common_imports + """
diff --git a/bench/is_types.py b/bench/is_types.py
index 69c029fc..b6da0d25 100644
--- a/bench/is_types.py
+++ b/bench/is_types.py
@@ -17,11 +17,11 @@ InstanceType = types.InstanceType
DictType = dict
ListType = list
StringType = str
-try: unicode
+try: str
except NameError:
UnicodeType = None
else:
- UnicodeType = unicode
+ UnicodeType = str
# The original implementations, pretty straightforward checks for the
@@ -36,7 +36,7 @@ def original_is_List(e):
if UnicodeType is not None:
def original_is_String(e):
- return isinstance(e, (str,unicode,UserString))
+ return isinstance(e, (str,UserString))
else:
def original_is_String(e):
return isinstance(e, (str,UserString))
@@ -58,7 +58,7 @@ def checkInstanceType_is_List(e):
if UnicodeType is not None:
def checkInstanceType_is_String(e):
return isinstance(e, str) \
- or isinstance(e, unicode) \
+ or isinstance(e, str) \
or (isinstance(e, types.InstanceType) and isinstance(e, UserString))
else:
def checkInstanceType_is_String(e):
@@ -84,7 +84,7 @@ if UnicodeType is not None:
def cache_type_e_is_String(e):
t = type(e)
return t is str \
- or t is unicode \
+ or t is str \
or (t is types.InstanceType and isinstance(e, UserString))
else:
def cache_type_e_is_String(e):
@@ -136,7 +136,7 @@ if UnicodeType is not None:
t = type(obj)
if t is types.InstanceType:
t = instanceTypeMap.get(obj.__class__, t)
- elif t is unicode:
+ elif t is str:
t = str
return t
else:
diff --git a/bench/timeit.py b/bench/timeit.py
index c5fef126..28400109 100644
--- a/bench/timeit.py
+++ b/bench/timeit.py
@@ -46,7 +46,7 @@ be aware of it. The baseline overhead can be measured by invoking the
program without arguments. The baseline overhead differs between
Python versions!
"""
-from __future__ import division
+
try:
import gc
@@ -122,7 +122,7 @@ class Timer(object):
self.src = src # Save for traceback display
code = compile(src, dummy_src_name, "exec")
ns = {}
- exec code in globals(), ns
+ exec(code, globals(), ns)
self.inner = ns["inner"]
def print_exc(self, file=None):
@@ -216,9 +216,9 @@ def main(args=None):
opts, args = getopt.getopt(args, "n:s:r:tcvh",
["number=", "setup=", "repeat=",
"time", "clock", "verbose", "help"])
- except getopt.error, err:
- print err
- print "use -h/--help for command line help"
+ except getopt.error as err:
+ print(err)
+ print("use -h/--help for command line help")
return 2
timer = default_timer
stmt = "\n".join(args) or "pass"
@@ -245,7 +245,7 @@ def main(args=None):
precision = precision + 1
verbose = precision + 1
if o in ("-h", "--help"):
- print __doc__,
+ print(__doc__, end=' ')
return 0
setup = "\n".join(setup) or "pass"
# Include the current directory, so that local imports work (sys.path
@@ -264,7 +264,7 @@ def main(args=None):
t.print_exc()
return 1
if verbose:
- print "%d loops -> %.*g secs" % (number, precision, x)
+ print("%d loops -> %.*g secs" % (number, precision, x))
if x >= 0.2:
break
try:
@@ -274,18 +274,18 @@ def main(args=None):
return 1
best = min(r)
if verbose:
- print "raw times:", ' '.join(["%.*g" % (precision, x) for x in r])
- print "%d loops," % number,
+ print("raw times:", ' '.join(["%.*g" % (precision, x) for x in r]))
+ print("%d loops," % number, end=' ')
usec = best * 1e6 / number
if usec < 1000:
- print "best of %d: %.*g usec per loop" % (repeat, precision, usec)
+ print("best of %d: %.*g usec per loop" % (repeat, precision, usec))
else:
msec = usec / 1000
if msec < 1000:
- print "best of %d: %.*g msec per loop" % (repeat, precision, msec)
+ print("best of %d: %.*g msec per loop" % (repeat, precision, msec))
else:
sec = msec / 1000
- print "best of %d: %.*g sec per loop" % (repeat, precision, sec)
+ print("best of %d: %.*g sec per loop" % (repeat, precision, sec))
return None
if __name__ == "__main__":
diff --git a/bin/Command.py b/bin/Command.py
index 8702f51d..768caedc 100644
--- a/bin/Command.py
+++ b/bin/Command.py
@@ -109,18 +109,18 @@ Usage: script-template.py [-hnq]
try:
try:
opts, args = getopt.getopt(argv[1:], short_options, long_options)
- except getopt.error, msg:
+ except getopt.error as msg:
raise Usage(msg)
for o, a in opts:
if o in ('-h', '--help'):
- print helpstr
+ print(helpstr)
sys.exit(0)
elif o in ('-n', '--no-exec'):
Command.execute = Command.do_not_execute
elif o in ('-q', '--quiet'):
Command.display = Command.do_not_display
- except Usage, err:
+ except Usage as err:
sys.stderr.write(err.msg)
sys.stderr.write('use -h to get help')
return 2
diff --git a/bin/SConsDoc.py b/bin/SConsDoc.py
index dc9d161d..453e241d 100644
--- a/bin/SConsDoc.py
+++ b/bin/SConsDoc.py
@@ -210,12 +210,12 @@ class Libxml2ValidityHandler:
def error(self, msg, data):
if data != ARG:
- raise Exception, "Error handler did not receive correct argument"
+ raise Exception("Error handler did not receive correct argument")
self.errors.append(msg)
def warning(self, msg, data):
if data != ARG:
- raise Exception, "Warning handler did not receive correct argument"
+ raise Exception("Warning handler did not receive correct argument")
self.warnings.append(msg)
@@ -332,16 +332,16 @@ if not has_libxml2:
xmlschema = etree.XMLSchema(xmlschema_context)
try:
doc = etree.parse(fpath)
- except Exception, e:
- print "ERROR: %s fails to parse:"%fpath
- print e
+ except Exception as e:
+ print("ERROR: %s fails to parse:"%fpath)
+ print(e)
return False
doc.xinclude()
try:
xmlschema.assertValid(doc)
- except Exception, e:
- print "ERROR: %s fails to validate:" % fpath
- print e
+ except Exception as e:
+ print("ERROR: %s fails to validate:" % fpath)
+ print(e)
return False
return True
@@ -477,8 +477,8 @@ else:
if err or eh.errors:
for e in eh.errors:
- print e.rstrip("\n")
- print "%s fails to validate" % fpath
+ print(e.rstrip("\n"))
+ print("%s fails to validate" % fpath)
return False
return True
@@ -599,7 +599,7 @@ class SConsDocTree:
# Create xpath context
self.xpath_context = self.doc.xpathNewContext()
# Register namespaces
- for key, val in self.nsmap.iteritems():
+ for key, val in self.nsmap.items():
self.xpath_context.xpathRegisterNs(key, val)
def __del__(self):
@@ -637,8 +637,8 @@ def validate_all_xml(dpaths, xsdfile=default_xsd):
fails = []
for idx, fp in enumerate(fpaths):
fpath = os.path.join(path, fp)
- print "%.2f%s (%d/%d) %s" % (float(idx+1)*100.0/float(len(fpaths)),
- perc, idx+1, len(fpaths),fp)
+ print("%.2f%s (%d/%d) %s" % (float(idx+1)*100.0/float(len(fpaths)),
+ perc, idx+1, len(fpaths),fp))
if not tf.validateXml(fp, xmlschema_context):
fails.append(fp)
@@ -810,7 +810,7 @@ def importfile(path):
file = open(path, 'r')
try:
module = imp.load_module(name, file, path, (ext, 'r', kind))
- except ImportError, e:
+ except ImportError as e:
sys.stderr.write("Could not import %s: %s\n" % (path, e))
return None
file.close()
diff --git a/bin/SConsExamples.py b/bin/SConsExamples.py
index 9823a052..e3a7502a 100644
--- a/bin/SConsExamples.py
+++ b/bin/SConsExamples.py
@@ -265,7 +265,7 @@ def ensureExampleOutputsExist(dpath):
os.mkdir(generated_examples)
examples = readAllExampleInfos(dpath)
- for key, value in examples.iteritems():
+ for key, value in examples.items():
# Process all scons_output tags
for o in value.outputs:
cpath = os.path.join(generated_examples,
@@ -303,10 +303,10 @@ def createAllExampleOutputs(dpath):
examples = readAllExampleInfos(dpath)
total = len(examples)
idx = 0
- for key, value in examples.iteritems():
+ for key, value in examples.items():
# Process all scons_output tags
- print "%.2f%s (%d/%d) %s" % (float(idx + 1) * 100.0 / float(total),
- perc, idx + 1, total, key)
+ print("%.2f%s (%d/%d) %s" % (float(idx + 1) * 100.0 / float(total),
+ perc, idx + 1, total, key))
create_scons_output(value)
# Process all scons_example_file tags
@@ -344,7 +344,7 @@ def collectSConsExampleNames(fpath):
if n not in suffixes:
suffixes[n] = []
else:
- print "Error: Example in file '%s' is missing a name!" % fpath
+ print("Error: Example in file '%s' is missing a name!" % fpath)
failed_suffixes = True
for o in stf.findAll(t.root, "scons_output", SConsDoc.dbxid,
@@ -353,11 +353,11 @@ def collectSConsExampleNames(fpath):
if stf.hasAttribute(o, 'example'):
n = stf.getAttribute(o, 'example')
else:
- print "Error: scons_output in file '%s' is missing an example name!" % fpath
+ print("Error: scons_output in file '%s' is missing an example name!" % fpath)
failed_suffixes = True
if n not in suffixes:
- print "Error: scons_output in file '%s' is referencing non-existent example '%s'!" % (fpath, n)
+ print("Error: scons_output in file '%s' is referencing non-existent example '%s'!" % (fpath, n))
failed_suffixes = True
continue
@@ -365,13 +365,13 @@ def collectSConsExampleNames(fpath):
if stf.hasAttribute(o, 'suffix'):
s = stf.getAttribute(o, 'suffix')
else:
- print "Error: scons_output in file '%s' (example '%s') is missing a suffix!" % (fpath, n)
+ print("Error: scons_output in file '%s' (example '%s') is missing a suffix!" % (fpath, n))
failed_suffixes = True
if s not in suffixes[n]:
suffixes[n].append(s)
else:
- print "Error: scons_output in file '%s' (example '%s') is using a duplicate suffix '%s'!" % (fpath, n, s)
+ print("Error: scons_output in file '%s' (example '%s') is using a duplicate suffix '%s'!" % (fpath, n, s))
failed_suffixes = True
return names, failed_suffixes
@@ -392,7 +392,7 @@ def exampleNamesAreUnique(dpath):
unique = False
i = allnames.intersection(names)
if i:
- print "Not unique in %s are: %s" % (fpath, ', '.join(i))
+ print("Not unique in %s are: %s" % (fpath, ', '.join(i)))
unique = False
allnames |= names
diff --git a/bin/calibrate.py b/bin/calibrate.py
index 8ed2ecea..31c04e80 100644
--- a/bin/calibrate.py
+++ b/bin/calibrate.py
@@ -20,7 +20,7 @@
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-from __future__ import division
+
import optparse
import os
@@ -48,7 +48,7 @@ def main(argv=None):
for arg in args:
if len(args) > 1:
- print arg + ':'
+ print(arg + ':')
command = [sys.executable, 'runtest.py']
if opts.package:
@@ -67,9 +67,9 @@ def main(argv=None):
try:
elapsed = float(em.group(1))
except AttributeError:
- print output
+ print(output)
raise
- print "run %3d: %7.3f: %s" % (run, elapsed, ' '.join(vm.groups()))
+ print("run %3d: %7.3f: %s" % (run, elapsed, ' '.join(vm.groups())))
if opts.min < elapsed and elapsed < opts.max:
good += 1
else:
diff --git a/bin/caller-tree.py b/bin/caller-tree.py
index 03c1616a..327e6a11 100644
--- a/bin/caller-tree.py
+++ b/bin/caller-tree.py
@@ -74,19 +74,19 @@ for line in sys.stdin.readlines():
stack = []
def print_entry(e, level, calls):
- print '%-72s%6s' % ((' '*2*level) + e.file_line_func, calls)
+ print('%-72s%6s' % ((' '*2*level) + e.file_line_func, calls))
if e in stack:
- print (' '*2*(level+1))+'RECURSION'
- print
+ print((' '*2*(level+1))+'RECURSION')
+ print()
elif e.called_by:
stack.append(e)
for c in e.called_by:
print_entry(c[0], level+1, c[1])
stack.pop()
else:
- print
+ print()
-for e in [ e for e in AllCalls.values() if not e.calls ]:
+for e in [ e for e in list(AllCalls.values()) if not e.calls ]:
print_entry(e, 0, '')
# Local Variables:
diff --git a/bin/docs-create-example-outputs.py b/bin/docs-create-example-outputs.py
index 30dc0eef..b2dfbea6 100644
--- a/bin/docs-create-example-outputs.py
+++ b/bin/docs-create-example-outputs.py
@@ -9,11 +9,11 @@ import sys
import SConsExamples
if __name__ == "__main__":
- print "Checking whether all example names are unique..."
+ print("Checking whether all example names are unique...")
if SConsExamples.exampleNamesAreUnique(os.path.join('doc','user')):
- print "OK"
+ print("OK")
else:
- print "Not all example names and suffixes are unique! Please correct the errors listed above and try again."
+ print("Not all example names and suffixes are unique! Please correct the errors listed above and try again.")
sys.exit(0)
SConsExamples.createAllExampleOutputs(os.path.join('doc','user'))
diff --git a/bin/docs-update-generated.py b/bin/docs-update-generated.py
index 66b22c0b..2b419a47 100644
--- a/bin/docs-update-generated.py
+++ b/bin/docs-update-generated.py
@@ -38,7 +38,7 @@ def generate_all():
try:
os.makedirs(gen_folder)
except:
- print "Couldn't create destination folder %s! Exiting..." % gen_folder
+ print("Couldn't create destination folder %s! Exiting..." % gen_folder)
return
# Call scons-proc.py
os.system('python %s -b %s -f %s -t %s -v %s %s' %
diff --git a/bin/docs-validate.py b/bin/docs-validate.py
index c445c3f9..e5d06591 100644
--- a/bin/docs-validate.py
+++ b/bin/docs-validate.py
@@ -10,9 +10,9 @@ import SConsDoc
if __name__ == "__main__":
if len(sys.argv)>1:
if SConsDoc.validate_all_xml((sys.argv[1],)):
- print "OK"
+ print("OK")
else:
- print "Validation failed! Please correct the errors above and try again."
+ print("Validation failed! Please correct the errors above and try again.")
else:
if SConsDoc.validate_all_xml(['src',
os.path.join('doc','design'),
@@ -22,6 +22,6 @@ if __name__ == "__main__":
os.path.join('doc','reference'),
os.path.join('doc','user')
]):
- print "OK"
+ print("OK")
else:
- print "Validation failed! Please correct the errors above and try again."
+ print("Validation failed! Please correct the errors above and try again.")
diff --git a/bin/install_python.py b/bin/install_python.py
index 86807af6..dca37d0f 100644
--- a/bin/install_python.py
+++ b/bin/install_python.py
@@ -48,7 +48,7 @@ Usage: install_python.py [-ahnq] [-d DIR] [-p PREFIX] [VERSION ...]
try:
try:
opts, args = getopt.getopt(argv[1:], short_options, long_options)
- except getopt.error, msg:
+ except getopt.error as msg:
raise Usage(msg)
for o, a in opts:
@@ -57,7 +57,7 @@ Usage: install_python.py [-ahnq] [-d DIR] [-p PREFIX] [VERSION ...]
elif o in ('-d', '--downloads'):
downloads_dir = a
elif o in ('-h', '--help'):
- print helpstr
+ print(helpstr)
sys.exit(0)
elif o in ('-n', '--no-exec'):
CommandRunner.execute = CommandRunner.do_not_execute
@@ -65,7 +65,7 @@ Usage: install_python.py [-ahnq] [-d DIR] [-p PREFIX] [VERSION ...]
prefix = a
elif o in ('-q', '--quiet'):
CommandRunner.display = CommandRunner.do_not_display
- except Usage, err:
+ except Usage as err:
sys.stderr.write(str(err.msg) + '\n')
sys.stderr.write('use -h to get help\n')
return 2
diff --git a/bin/install_scons.py b/bin/install_scons.py
index 00129f65..b732193f 100644
--- a/bin/install_scons.py
+++ b/bin/install_scons.py
@@ -23,7 +23,7 @@ import os
import shutil
import sys
import tarfile
-import urllib
+import urllib.request, urllib.parse, urllib.error
from Command import CommandRunner, Usage
@@ -129,7 +129,7 @@ Usage: install_scons.py [-ahnq] [-d DIR] [-p PREFIX] [VERSION ...]
try:
try:
opts, args = getopt.getopt(argv[1:], short_options, long_options)
- except getopt.error, msg:
+ except getopt.error as msg:
raise Usage(msg)
for o, a in opts:
@@ -138,7 +138,7 @@ Usage: install_scons.py [-ahnq] [-d DIR] [-p PREFIX] [VERSION ...]
elif o in ('-d', '--downloads'):
downloads_dir = a
elif o in ('-h', '--help'):
- print helpstr
+ print(helpstr)
sys.exit(0)
elif o in ('-n', '--no-exec'):
CommandRunner.execute = CommandRunner.do_not_execute
@@ -146,7 +146,7 @@ Usage: install_scons.py [-ahnq] [-d DIR] [-p PREFIX] [VERSION ...]
prefix = a
elif o in ('-q', '--quiet'):
CommandRunner.display = CommandRunner.do_not_display
- except Usage, err:
+ except Usage as err:
sys.stderr.write(str(err.msg) + '\n')
sys.stderr.write('use -h to get help\n')
return 2
@@ -171,7 +171,7 @@ Usage: install_scons.py [-ahnq] [-d DIR] [-p PREFIX] [VERSION ...]
if not os.path.exists(tar_gz):
if not os.path.exists(downloads_dir):
cmd.run('mkdir %(downloads_dir)s')
- cmd.run((urllib.urlretrieve, tar_gz_url, tar_gz),
+ cmd.run((urllib.request.urlretrieve, tar_gz_url, tar_gz),
'wget -O %(tar_gz)s %(tar_gz_url)s')
def extract(tar_gz):
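
The urllib fixer drives the import change in install_scons.py: Python 3 splits the old urllib module into urllib.request, urllib.parse and urllib.error, so 2to3 imports all three (even where only one is used) and rewrites call sites such as urllib.urlretrieve. A hedged sketch of the renamed call; the URL and filename are placeholders and the download is left commented out:

    import urllib.request

    # Python 2:  urllib.urlretrieve(url, filename)
    url = "https://example.com/scons-1.2.0.tar.gz"   # placeholder URL
    filename = "scons-1.2.0.tar.gz"
    # urllib.request.urlretrieve(url, filename)      # commented to avoid a real download
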
diff --git a/bin/linecount.py b/bin/linecount.py
index 6f49dcad..2d478f0c 100644
--- a/bin/linecount.py
+++ b/bin/linecount.py
@@ -21,7 +21,7 @@
# in each category, the number of non-blank lines, and the number of
# non-comment lines. The last figure (non-comment) lines is the most
# interesting one for most purposes.
-from __future__ import division
+
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
@@ -98,23 +98,23 @@ all_tests = Collection('all tests', src_tests.files + test_tests.files)
def ratio(over, under):
return "%.2f" % (float(len(over)) / float(len(under)))
-print fmt % ('', '', '', '', '', 'non-blank')
-print fmt % ('', 'files', 'lines', 'non-blank', 'non-comment', 'non-comment')
-print
-print fmt % src_Tests_py_tests.printables()
-print fmt % src_test_tests.printables()
-print
-print fmt % src_tests.printables()
-print fmt % test_tests.printables()
-print
-print fmt % all_tests.printables()
-print fmt % sources.printables()
-print
-print fmt % ('ratio:',
+print(fmt % ('', '', '', '', '', 'non-blank'))
+print(fmt % ('', 'files', 'lines', 'non-blank', 'non-comment', 'non-comment'))
+print()
+print(fmt % src_Tests_py_tests.printables())
+print(fmt % src_test_tests.printables())
+print()
+print(fmt % src_tests.printables())
+print(fmt % test_tests.printables())
+print()
+print(fmt % all_tests.printables())
+print(fmt % sources.printables())
+print()
+print(fmt % ('ratio:',
ratio(all_tests, sources),
ratio(all_tests.lines(), sources.lines()),
ratio(all_tests.non_blank(), sources.non_blank()),
ratio(all_tests.non_comment(), sources.non_comment()),
ratio(all_tests.non_blank_non_comment(),
sources.non_blank_non_comment())
- )
+ ))
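
linecount.py loses its `from __future__ import division` line because Python 3 uses true division by default; the ratio() helper already forces float operands, so its output is unchanged. A one-line contrast, purely illustrative:

    # Python 2 without the __future__ import: 3 / 2 == 1 (floor division)
    # Python 3 (and Python 2 with the import): true division
    assert 3 / 2 == 1.5
    assert 3 // 2 == 1   # explicit floor division is spelled // in both versions
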
diff --git a/bin/memlogs.py b/bin/memlogs.py
index 9d957c9f..0409dfee 100644
--- a/bin/memlogs.py
+++ b/bin/memlogs.py
@@ -27,20 +27,20 @@ import sys
filenames = sys.argv[1:]
if not filenames:
- print """Usage: memlogs.py file [...]
+ print("""Usage: memlogs.py file [...]
Summarizes the --debug=memory numbers from one or more build logs.
-"""
+""")
sys.exit(0)
fmt = "%12s %12s %12s %12s %s"
-print fmt % ("pre-read", "post-read", "pre-build", "post-build", "")
+print(fmt % ("pre-read", "post-read", "pre-build", "post-build", ""))
for fname in sys.argv[1:]:
lines = [l for l in open(fname).readlines() if l[:7] == 'Memory ']
t = tuple([l.split()[-1] for l in lines]) + (fname,)
- print fmt % t
+ print(fmt % t)
# Local Variables:
# tab-width:4
diff --git a/bin/memoicmp.py b/bin/memoicmp.py
index 812af665..5021c412 100644
--- a/bin/memoicmp.py
+++ b/bin/memoicmp.py
@@ -23,20 +23,20 @@ def memoize_cmp(filea, fileb):
ma = memoize_output(filea)
mb = memoize_output(fileb)
- print 'All output: %s / %s [delta]'%(filea, fileb)
- print '----------HITS---------- ---------MISSES---------'
+ print('All output: %s / %s [delta]'%(filea, fileb))
+ print('----------HITS---------- ---------MISSES---------')
cfmt='%7d/%-7d [%d]'
ma_o = []
mb_o = []
mab = []
- for k in ma.keys():
- if k in mb.keys():
+ for k in list(ma.keys()):
+ if k in list(mb.keys()):
if k not in mab:
mab.append(k)
else:
ma_o.append(k)
- for k in mb.keys():
- if k in ma.keys():
+ for k in list(mb.keys()):
+ if k in list(ma.keys()):
if k not in mab:
mab.append(k)
else:
@@ -49,26 +49,26 @@ def memoize_cmp(filea, fileb):
for k in mab:
hits = cfmt%(ma[k][0], mb[k][0], mb[k][0]-ma[k][0])
miss = cfmt%(ma[k][1], mb[k][1], mb[k][1]-ma[k][1])
- print '%-24s %-24s %s'%(hits, miss, k)
+ print('%-24s %-24s %s'%(hits, miss, k))
for k in ma_o:
hits = '%7d/ --'%(ma[k][0])
miss = '%7d/ --'%(ma[k][1])
- print '%-24s %-24s %s'%(hits, miss, k)
+ print('%-24s %-24s %s'%(hits, miss, k))
for k in mb_o:
hits = ' -- /%-7d'%(mb[k][0])
miss = ' -- /%-7d'%(mb[k][1])
- print '%-24s %-24s %s'%(hits, miss, k)
+ print('%-24s %-24s %s'%(hits, miss, k))
- print '-'*(24+24+1+20)
+ print('-'*(24+24+1+20))
if __name__ == "__main__":
if len(sys.argv) != 3:
- print """Usage: %s file1 file2
+ print("""Usage: %s file1 file2
-Compares --debug=memomize output from file1 against file2."""%sys.argv[0]
+Compares --debug=memomize output from file1 against file2."""%sys.argv[0])
sys.exit(1)
memoize_cmp(sys.argv[1], sys.argv[2])
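
In memoicmp.py the dict fixer wraps every .keys() call in list(), because Python 3 returns a view rather than a list. 2to3 is deliberately conservative here; a membership test such as `k in mb` works directly on the dict, so hunks like this are candidates for manual cleanup after the mechanical pass. A small sketch with throwaway data:

    counts = {"hit": 3, "miss": 1}        # throwaway example data

    # What 2to3 emits -- always correct, sometimes wasteful:
    for k in list(counts.keys()):
        pass

    # Idiomatic Python 3: iterate the dict directly, test membership without .keys()
    for k in counts:
        pass
    assert "hit" in counts
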
diff --git a/bin/objcounts.py b/bin/objcounts.py
index 06620126..25b985b8 100644
--- a/bin/objcounts.py
+++ b/bin/objcounts.py
@@ -27,10 +27,10 @@ import sys
filenames = sys.argv[1:]
if len(sys.argv) != 3:
- print """Usage: objcounts.py file1 file2
+ print("""Usage: objcounts.py file1 file2
Compare the --debug=object counts from two build logs.
-"""
+""")
sys.exit(0)
def fetch_counts(fname):
@@ -47,7 +47,7 @@ c1 = fetch_counts(sys.argv[1])
c2 = fetch_counts(sys.argv[2])
common = {}
-for k in c1.keys():
+for k in list(c1.keys()):
try:
common[k] = (c1[k], c2[k])
except KeyError:
@@ -58,7 +58,7 @@ for k in c1.keys():
if not '.' in k:
s = '.'+k
l = len(s)
- for k2 in c2.keys():
+ for k2 in list(c2.keys()):
if k2[-l:] == s:
common[k2] = (c1[k], c2[k2])
del c1[k]
@@ -81,10 +81,9 @@ def diffstr(c1, c2):
return " %5s/%-5s %-8s" % (c1, c2, d)
def printline(c1, c2, classname):
- print \
- diffstr(c1[2], c2[2]) + \
+ print(diffstr(c1[2], c2[2]) + \
diffstr(c1[3], c2[3]) + \
- ' ' + classname
+ ' ' + classname)
for k in sorted(common.keys()):
c = common[k]
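
The printline() hunk in objcounts.py shows another rough edge of the raw conversion: the backslash continuations from the old print statement survive inside the new print() call, where the open parenthesis already lets the expression span lines. Harmless, but normally removed by hand. A sketch of the tidier form; diffstr here is a simplified stand-in for the real helper and the arguments are example values:

    def diffstr(a, b):
        """Hypothetical stand-in for objcounts.py's diffstr helper."""
        return " %5s/%-5s" % (a, b)

    def printline(c1, c2, classname):
        # No backslashes needed: the parentheses keep the expression open.
        print(diffstr(c1[2], c2[2]) +
              diffstr(c1[3], c2[3]) +
              ' ' + classname)

    printline((0, 0, 1, 2), (0, 0, 3, 4), "Node.FS.File")
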
diff --git a/bin/scons-diff.py b/bin/scons-diff.py
index 52bd51b1..5181fa17 100644
--- a/bin/scons-diff.py
+++ b/bin/scons-diff.py
@@ -43,7 +43,7 @@ def diff_line(left, right):
opts = ' ' + ' '.join(diff_options)
else:
opts = ''
- print 'diff%s %s %s' % (opts, left, right)
+ print('diff%s %s %s' % (opts, left, right))
for o, a in opts:
if o in ('-c', '-u'):
@@ -51,7 +51,7 @@ for o, a in opts:
context = int(a)
diff_options.append(o)
elif o in ('-h', '--help'):
- print Usage
+ print(Usage)
sys.exit(0)
elif o in ('-n'):
diff_options.append(o)
@@ -161,9 +161,9 @@ def diff_file(left, right):
else:
if text:
diff_line(left, right)
- print text,
+ print(text, end=' ')
elif report_same:
- print 'Files %s and %s are identical' % (left, right)
+ print('Files %s and %s are identical' % (left, right))
def diff_dir(left, right):
llist = os.listdir(left)
@@ -173,16 +173,16 @@ def diff_dir(left, right):
u[l] = 1
for r in rlist:
u[r] = 1
- for x in sorted([ x for x in u.keys() if x[-4:] != '.pyc' ]):
+ for x in sorted([ x for x in list(u.keys()) if x[-4:] != '.pyc' ]):
if x in llist:
if x in rlist:
do_diff(os.path.join(left, x),
os.path.join(right, x),
recursive)
else:
- print 'Only in %s: %s' % (left, x)
+ print('Only in %s: %s' % (left, x))
else:
- print 'Only in %s: %s' % (right, x)
+ print('Only in %s: %s' % (right, x))
do_diff(left, right, True)
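
scons-diff.py's `print text,` becomes `print(text, end=' ')`: the Python 2 trailing comma suppressed the newline, and the fixer reproduces that with the end keyword. 2to3 always uses end=' '; when the string already ends with its own newline, end='' is usually the cleaner manual fix, since the single space reproduces Python 2's softspace behaviour only approximately. An illustrative sketch with placeholder text:

    line = "Only in left: SConstruct\n"   # placeholder diff text

    # Python 2:  print line,              (newline suppressed by the trailing comma)
    print(line, end=' ')                  # what 2to3 emits: always appends a space
    print(line, end='')                   # often the intended form: add nothing
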
diff --git a/bin/scons-proc.py b/bin/scons-proc.py
index 9567db8c..19be4c37 100644
--- a/bin/scons-proc.py
+++ b/bin/scons-proc.py
@@ -106,7 +106,7 @@ Link_Entities_Header = """\
class SCons_XML(object):
def __init__(self, entries, **kw):
self.values = entries
- for k, v in kw.items():
+ for k, v in list(kw.items()):
setattr(self, k, v)
def fopen(self, name):
@@ -344,25 +344,25 @@ def write_output_files(h, buildersfiles, functionsfiles,
processor_class = SCons_XML
# Step 1: Creating entity files for builders, functions,...
-print "Generating entity files..."
+print("Generating entity files...")
h = parse_docs(args, False)
write_output_files(h, buildersfiles, functionsfiles, toolsfiles,
variablesfiles, SCons_XML.write_mod)
# Step 2: Validating all input files
-print "Validating files against SCons XSD..."
+print("Validating files against SCons XSD...")
if SConsDoc.validate_all_xml(['src']):
- print "OK"
+ print("OK")
else:
- print "Validation failed! Please correct the errors above and try again."
+ print("Validation failed! Please correct the errors above and try again.")
# Step 3: Creating actual documentation snippets, using the
# fully resolved and updated entities from the *.mod files.
-print "Updating documentation for builders, tools and functions..."
+print("Updating documentation for builders, tools and functions...")
h = parse_docs(args, True)
write_output_files(h, buildersfiles, functionsfiles, toolsfiles,
variablesfiles, SCons_XML.write)
-print "Done"
+print("Done")
# Local Variables:
# tab-width:4
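
The same conservatism shows up in scons-proc.py: `kw.items()` is wrapped in list() even though the loop only calls setattr() and never mutates kw, so the Python 3 view would have been fine as-is. The copy only matters when the dict changes size during iteration; a sketch with example data:

    settings = {"builders": 1, "tools": 2}

    # Safe either way when the dict is not modified:
    for k, v in settings.items():
        pass

    # list() is required only if the loop mutates the dict:
    for k, v in list(settings.items()):
        if v == 1:
            del settings[k]               # would raise RuntimeError on a live view
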
diff --git a/bin/scons-test.py b/bin/scons-test.py
index 046cf4b0..788fc6d1 100644
--- a/bin/scons-test.py
+++ b/bin/scons-test.py
@@ -60,7 +60,7 @@ for o, a in opts:
outdir = a
elif o == '-v' or o == '--verbose':
def printname(x):
- print x
+ print(x)
elif o == '--xml':
format = o
@@ -148,34 +148,34 @@ else:
if format == '--xml':
- print "<scons_test_run>"
- print " <sys>"
+ print("<scons_test_run>")
+ print(" <sys>")
sys_keys = ['byteorder', 'exec_prefix', 'executable', 'maxint', 'maxunicode', 'platform', 'prefix', 'version', 'version_info']
for k in sys_keys:
- print " <%s>%s</%s>" % (k, sys.__dict__[k], k)
- print " </sys>"
+ print(" <%s>%s</%s>" % (k, sys.__dict__[k], k))
+ print(" </sys>")
fmt = '%a %b %d %H:%M:%S %Y'
- print " <time>"
- print " <gmtime>%s</gmtime>" % time.strftime(fmt, time.gmtime())
- print " <localtime>%s</localtime>" % time.strftime(fmt, time.localtime())
- print " </time>"
+ print(" <time>")
+ print(" <gmtime>%s</gmtime>" % time.strftime(fmt, time.gmtime()))
+ print(" <localtime>%s</localtime>" % time.strftime(fmt, time.localtime()))
+ print(" </time>")
- print " <tempdir>%s</tempdir>" % tempdir
+ print(" <tempdir>%s</tempdir>" % tempdir)
def print_version_info(tag, module):
- print " <%s>" % tag
- print " <version>%s</version>" % module.__version__
- print " <build>%s</build>" % module.__build__
- print " <buildsys>%s</buildsys>" % module.__buildsys__
- print " <date>%s</date>" % module.__date__
- print " <developer>%s</developer>" % module.__developer__
- print " </%s>" % tag
-
- print " <scons>"
+ print(" <%s>" % tag)
+ print(" <version>%s</version>" % module.__version__)
+ print(" <build>%s</build>" % module.__build__)
+ print(" <buildsys>%s</buildsys>" % module.__buildsys__)
+ print(" <date>%s</date>" % module.__date__)
+ print(" <developer>%s</developer>" % module.__developer__)
+ print(" </%s>" % tag)
+
+ print(" <scons>")
print_version_info("script", scons)
print_version_info("engine", SCons)
- print " </scons>"
+ print(" </scons>")
environ_keys = [
'PATH',
@@ -213,32 +213,32 @@ if format == '--xml':
'USER',
]
- print " <environment>"
+ print(" <environment>")
for key in sorted(environ_keys):
value = os.environ.get(key)
if value:
- print " <variable>"
- print " <name>%s</name>" % key
- print " <value>%s</value>" % value
- print " </variable>"
- print " </environment>"
+ print(" <variable>")
+ print(" <name>%s</name>" % key)
+ print(" <value>%s</value>" % value)
+ print(" </variable>")
+ print(" </environment>")
command = '"%s" runtest.py -q -o - --xml %s' % (sys.executable, runtest_args)
#print command
os.system(command)
- print "</scons_test_run>"
+ print("</scons_test_run>")
else:
def print_version_info(tag, module):
- print "\t%s: v%s.%s, %s, by %s on %s" % (tag,
+ print("\t%s: v%s.%s, %s, by %s on %s" % (tag,
module.__version__,
module.__build__,
module.__date__,
module.__developer__,
- module.__buildsys__)
+ module.__buildsys__))
- print "SCons by Steven Knight et al.:"
+ print("SCons by Steven Knight et al.:")
print_version_info("script", scons)
print_version_info("engine", SCons)
diff --git a/bin/scons-unzip.py b/bin/scons-unzip.py
index d4ec4bff..75d3281d 100644
--- a/bin/scons-unzip.py
+++ b/bin/scons-unzip.py
@@ -32,7 +32,7 @@ for o, a in opts:
outdir = a
elif o == '-v' or o == '--verbose':
def printname(x):
- print x
+ print(x)
if len(args) != 1:
sys.stderr.write("scons-unzip.py: \n")
diff --git a/bin/scons_dev_master.py b/bin/scons_dev_master.py
index 3c41ac06..71034ad5 100644
--- a/bin/scons_dev_master.py
+++ b/bin/scons_dev_master.py
@@ -131,12 +131,12 @@ Usage: scons_dev_master.py [-hnqy] [--password PASSWORD] [--username USER]
try:
try:
opts, args = getopt.getopt(argv[1:], short_options, long_options)
- except getopt.error, msg:
+ except getopt.error as msg:
raise Usage(msg)
for o, a in opts:
if o in ('-h', '--help'):
- print helpstr
+ print(helpstr)
sys.exit(0)
elif o in ('-n', '--no-exec'):
CommandRunner.execute = CommandRunner.do_not_execute
@@ -148,7 +148,7 @@ Usage: scons_dev_master.py [-hnqy] [--password PASSWORD] [--username USER]
username = a
elif o in ('-y', '--yes', '--assume-yes'):
yesflag = o
- except Usage, err:
+ except Usage as err:
sys.stderr.write(str(err.msg) + '\n')
sys.stderr.write('use -h to get help\n')
return 2
diff --git a/bin/svn-bisect.py b/bin/svn-bisect.py
index 77bda58f..f2623660 100755
--- a/bin/svn-bisect.py
+++ b/bin/svn-bisect.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python
# -*- Python -*-
-from __future__ import division
+
import sys
from math import log, ceil
@@ -28,22 +28,22 @@ script = script_args[2:]
# print an error message and quit
def error(s):
- print >>sys.stderr, "******", s, "******"
+ print("******", s, "******", file=sys.stderr)
sys.exit(1)
# update to the specified version and run test
def testfail(revision):
"Return true if test fails"
- print "Updating to revision", revision
+ print("Updating to revision", revision)
if subprocess.call(["svn","up","-qr",str(revision)]) != 0:
m = "SVN did not update properly to revision %d"
raise RuntimeError(m % revision)
return subprocess.call(script,shell=False) != 0
# confirm that the endpoints are different
-print "****** Checking upper bracket", upper
+print("****** Checking upper bracket", upper)
upperfails = testfail(upper)
-print "****** Checking lower bracket", lower
+print("****** Checking lower bracket", lower)
lowerfails = testfail(lower)
if upperfails == lowerfails:
error("Upper and lower revisions must bracket the failure")
@@ -51,7 +51,7 @@ if upperfails == lowerfails:
# binary search for transition
msg = "****** max %d revisions to test (bug bracketed by [%d,%d])"
while upper-lower > 1:
- print msg % (ceil(log(upper-lower,2)), lower, upper)
+ print(msg % (ceil(log(upper-lower,2)), lower, upper))
mid = (lower + upper)//2
midfails = testfail(mid)
@@ -64,7 +64,7 @@ while upper-lower > 1:
# show which revision was first to fail
if upperfails != lowerfails: lower = upper
-print "The error was caused by revision", lower
+print("The error was caused by revision", lower)
# Local Variables:
# tab-width:4
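
svn-bisect.py demonstrates the remaining print variant: `print >>sys.stderr, ...` becomes an ordinary call with the file keyword argument. A minimal sketch (the message text is an example):

    import sys

    # Python 2:  print >>sys.stderr, "******", msg, "******"
    msg = "Upper and lower revisions must bracket the failure"
    print("******", msg, "******", file=sys.stderr)
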
diff --git a/bin/update-release-info.py b/bin/update-release-info.py
index 338bba91..f60c187d 100644
--- a/bin/update-release-info.py
+++ b/bin/update-release-info.py
@@ -73,14 +73,14 @@ if len(sys.argv) < 2:
else:
mode = sys.argv[1]
if mode not in ['develop', 'release', 'post']:
- print("""ERROR: `%s' as a parameter is invalid; it must be one of
-\tdevelop, release, or post. The default is develop.""" % mode)
+ print(("""ERROR: `%s' as a parameter is invalid; it must be one of
+\tdevelop, release, or post. The default is develop.""" % mode))
sys.exit(1)
# Get configuration information
config = dict()
-exec open('ReleaseConfig').read() in globals(), config
+exec(open('ReleaseConfig').read(), globals(), config)
try:
version_tuple = config['version_tuple']
@@ -90,9 +90,9 @@ except KeyError:
print('''ERROR: Config file must contain at least version_tuple,
\tunsupported_python_version, and deprecated_python_version.''')
sys.exit(1)
-if DEBUG: print 'version tuple', version_tuple
-if DEBUG: print 'unsupported Python version', unsupported_version
-if DEBUG: print 'deprecated Python version', deprecated_version
+if DEBUG: print('version tuple', version_tuple)
+if DEBUG: print('unsupported Python version', unsupported_version)
+if DEBUG: print('deprecated Python version', deprecated_version)
try:
release_date = config['release_date']
@@ -102,9 +102,9 @@ else:
if len(release_date) == 3:
release_date = release_date + time.localtime()[3:6]
if len(release_date) != 6:
- print '''ERROR: Invalid release date''', release_date
+ print('''ERROR: Invalid release date''', release_date)
sys.exit(1)
-if DEBUG: print 'release date', release_date
+if DEBUG: print('release date', release_date)
if mode == 'develop' and version_tuple[3] != 'alpha':
version_tuple == version_tuple[:3] + ('alpha', 0)
@@ -119,11 +119,11 @@ if len(version_tuple) > 3:
version_type = version_tuple[3]
else:
version_type = 'final'
-if DEBUG: print 'version string', version_string
+if DEBUG: print('version string', version_string)
if version_type not in ['alpha', 'beta', 'candidate', 'final']:
- print("""ERROR: `%s' is not a valid release type in version tuple;
-\tit must be one of alpha, beta, candidate, or final""" % version_type)
+ print(("""ERROR: `%s' is not a valid release type in version tuple;
+\tit must be one of alpha, beta, candidate, or final""" % version_type))
sys.exit(1)
try:
@@ -133,13 +133,13 @@ except KeyError:
month_year = 'MONTH YEAR'
else:
month_year = time.strftime('%B %Y', release_date + (0,0,0))
-if DEBUG: print 'month year', month_year
+if DEBUG: print('month year', month_year)
try:
copyright_years = config['copyright_years']
except KeyError:
copyright_years = ', '.join(map(str, list(range(2001, release_date[0] + 1))))
-if DEBUG: print 'copyright years', copyright_years
+if DEBUG: print('copyright years', copyright_years)
class UpdateFile(object):
"""
@@ -218,7 +218,7 @@ class UpdateFile(object):
XXX
'''
if self.file is not None and self.content != self.orig:
- print 'Updating ' + self.file + '...'
+ print('Updating ' + self.file + '...')
open(self.file, 'w').write(self.content)
if mode == 'post':
@@ -332,7 +332,7 @@ t.replace_assign('deprecated_python_version', str(deprecated_version))
# Update doc/user/main.{in,xml}
-docyears = ', '.join(map(str, iter(range(2004, release_date[0] + 1))))
+docyears = ', '.join(map(str, iter(list(range(2004, release_date[0] + 1)))))
t = UpdateFile(os.path.join('doc', 'user', 'main.in'))
if DEBUG: t.file = '/tmp/main.in'
## TODO debug these
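
update-release-info.py covers the exec fixer: the Python 2 statement form `exec code in globals(), config` becomes a call to the exec() built-in with explicit globals and locals arguments. The doubled parentheses that appear around some converted print calls in the same file (`print((...))`) are another artefact of the mechanical pass and are harmless. A sketch of the exec form, using an inline string in place of the ReleaseConfig file:

    config = {}
    release_config = "version_tuple = (2, 3, 0, 'alpha', 0)"   # stands in for open('ReleaseConfig').read()

    # Python 2:  exec release_config in globals(), config
    exec(release_config, globals(), config)
    assert config["version_tuple"] == (2, 3, 0, 'alpha', 0)
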
diff --git a/bin/xmlagenda.py b/bin/xmlagenda.py
index b3cd5205..40f5ca14 100755
--- a/bin/xmlagenda.py
+++ b/bin/xmlagenda.py
@@ -89,7 +89,7 @@ for issue in issues:
writer.writerow(['','','','','','',''])
for member in team: writer.writerow(['','',member,'','','',''])
-print "Exported %d issues to editlist.csv. Ready to upload to Google."%len(issues)
+print("Exported %d issues to editlist.csv. Ready to upload to Google."%len(issues))
# Local Variables:
# tab-width:4
diff --git a/review.py b/review.py
index 23e4b10d..72e187e3 100644
--- a/review.py
+++ b/review.py
@@ -1,1805 +1,1806 @@
-#!/usr/bin/env python
-#
-# Copyright 2007 Google Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Tool for uploading diffs from a version control system to the codereview app.
-
-Usage summary: upload.py [options] [-- diff_options] [path...]
-
-Diff options are passed to the diff command of the underlying system.
-
-Supported version control systems:
- Git
- Mercurial
- Subversion
-
-It is important for Git/Mercurial users to specify a tree/node/branch to diff
-against by using the '--rev' option.
-"""
-# This code is derived from appcfg.py in the App Engine SDK (open source),
-# and from ASPN recipe #146306.
-
-import ConfigParser
-import cookielib
-import fnmatch
-import getpass
-import logging
-import mimetypes
-import optparse
-import os
-import re
-import socket
-import subprocess
-import sys
-import urllib
-import urllib2
-import urlparse
-
-# The md5 module was deprecated in Python 2.5.
-try:
- from hashlib import md5
-except ImportError:
- from md5 import md5
-
-try:
- import readline
-except ImportError:
- pass
-
-try:
- import keyring
-except ImportError:
- keyring = None
-
-# The logging verbosity:
-# 0: Errors only.
-# 1: Status messages.
-# 2: Info logs.
-# 3: Debug logs.
-verbosity = 1
-
-# The account type used for authentication.
-# This line could be changed by the review server (see handler for
-# upload.py).
-AUTH_ACCOUNT_TYPE = "GOOGLE"
-
-# URL of the default review server. As for AUTH_ACCOUNT_TYPE, this line could be
-# changed by the review server (see handler for upload.py).
-DEFAULT_REVIEW_SERVER = "codereview.appspot.com"
-
-# Max size of patch or base file.
-MAX_UPLOAD_SIZE = 900 * 1024
-
-# Constants for version control names. Used by GuessVCSName.
-VCS_GIT = "Git"
-VCS_MERCURIAL = "Mercurial"
-VCS_SUBVERSION = "Subversion"
-VCS_UNKNOWN = "Unknown"
-
-# whitelist for non-binary filetypes which do not start with "text/"
-# .mm (Objective-C) shows up as application/x-freemind on my Linux box.
-TEXT_MIMETYPES = ['application/javascript', 'application/x-javascript',
- 'application/xml', 'application/x-freemind',
- 'application/x-sh']
-
-VCS_ABBREVIATIONS = {
- VCS_MERCURIAL.lower(): VCS_MERCURIAL,
- "hg": VCS_MERCURIAL,
- VCS_SUBVERSION.lower(): VCS_SUBVERSION,
- "svn": VCS_SUBVERSION,
- VCS_GIT.lower(): VCS_GIT,
-}
-
-# The result of parsing Subversion's [auto-props] setting.
-svn_auto_props_map = None
-
-def GetEmail(prompt):
- """Prompts the user for their email address and returns it.
-
- The last used email address is saved to a file and offered up as a suggestion
- to the user. If the user presses enter without typing in anything the last
- used email address is used. If the user enters a new address, it is saved
- for next time we prompt.
-
- """
- last_email_file_name = os.path.expanduser("~/.last_codereview_email_address")
- last_email = ""
- if os.path.exists(last_email_file_name):
- try:
- last_email_file = open(last_email_file_name, "r")
- last_email = last_email_file.readline().strip("\n")
- last_email_file.close()
- prompt += " [%s]" % last_email
- except IOError, e:
- pass
- email = raw_input(prompt + ": ").strip()
- if email:
- try:
- last_email_file = open(last_email_file_name, "w")
- last_email_file.write(email)
- last_email_file.close()
- except IOError, e:
- pass
- else:
- email = last_email
- return email
-
-
-def StatusUpdate(msg):
- """Print a status message to stdout.
-
- If 'verbosity' is greater than 0, print the message.
-
- Args:
- msg: The string to print.
- """
- if verbosity > 0:
- print msg
-
-
-def ErrorExit(msg):
- """Print an error message to stderr and exit."""
- print >>sys.stderr, msg
- sys.exit(1)
-
-
-class ClientLoginError(urllib2.HTTPError):
- """Raised to indicate there was an error authenticating with ClientLogin."""
-
- def __init__(self, url, code, msg, headers, args):
- urllib2.HTTPError.__init__(self, url, code, msg, headers, None)
- self.args = args
- self.reason = args["Error"]
- self.info = args.get("Info", None)
-
-
-class AbstractRpcServer(object):
- """Provides a common interface for a simple RPC server."""
-
- def __init__(self, host, auth_function, host_override=None, extra_headers={},
- save_cookies=False, account_type=AUTH_ACCOUNT_TYPE):
- """Creates a new HttpRpcServer.
-
- Args:
- host: The host to send requests to.
- auth_function: A function that takes no arguments and returns an
- (email, password) tuple when called. Will be called if authentication
- is required.
- host_override: The host header to send to the server (defaults to host).
- extra_headers: A dict of extra headers to append to every request.
- save_cookies: If True, save the authentication cookies to local disk.
- If False, use an in-memory cookiejar instead. Subclasses must
- implement this functionality. Defaults to False.
- account_type: Account type used for authentication. Defaults to
- AUTH_ACCOUNT_TYPE.
- """
- self.host = host
- if (not self.host.startswith("http://") and
- not self.host.startswith("https://")):
- self.host = "http://" + self.host
- self.host_override = host_override
- self.auth_function = auth_function
- self.authenticated = False
- self.extra_headers = extra_headers
- self.save_cookies = save_cookies
- self.account_type = account_type
- self.opener = self._GetOpener()
- if self.host_override:
- logging.info("Server: %s; Host: %s", self.host, self.host_override)
- else:
- logging.info("Server: %s", self.host)
-
- def _GetOpener(self):
- """Returns an OpenerDirector for making HTTP requests.
-
- Returns:
- A urllib2.OpenerDirector object.
- """
- raise NotImplementedError()
-
- def _CreateRequest(self, url, data=None):
- """Creates a new urllib request."""
- logging.debug("Creating request for: '%s' with payload:\n%s", url, data)
- req = urllib2.Request(url, data=data)
- if self.host_override:
- req.add_header("Host", self.host_override)
- for key, value in self.extra_headers.iteritems():
- req.add_header(key, value)
- return req
-
- def _GetAuthToken(self, email, password):
- """Uses ClientLogin to authenticate the user, returning an auth token.
-
- Args:
- email: The user's email address
- password: The user's password
-
- Raises:
- ClientLoginError: If there was an error authenticating with ClientLogin.
- HTTPError: If there was some other form of HTTP error.
-
- Returns:
- The authentication token returned by ClientLogin.
- """
- account_type = self.account_type
- if self.host.endswith(".google.com"):
- # Needed for use inside Google.
- account_type = "HOSTED"
- req = self._CreateRequest(
- url="https://www.google.com/accounts/ClientLogin",
- data=urllib.urlencode({
- "Email": email,
- "Passwd": password,
- "service": "ah",
- "source": "rietveld-codereview-upload",
- "accountType": account_type,
- }),
- )
- try:
- response = self.opener.open(req)
- response_body = response.read()
- response_dict = dict(x.split("=")
- for x in response_body.split("\n") if x)
- return response_dict["Auth"]
- except urllib2.HTTPError, e:
- if e.code == 403:
- body = e.read()
- response_dict = dict(x.split("=", 1) for x in body.split("\n") if x)
- raise ClientLoginError(req.get_full_url(), e.code, e.msg,
- e.headers, response_dict)
- else:
- raise
-
- def _GetAuthCookie(self, auth_token):
- """Fetches authentication cookies for an authentication token.
-
- Args:
- auth_token: The authentication token returned by ClientLogin.
-
- Raises:
- HTTPError: If there was an error fetching the authentication cookies.
- """
- # This is a dummy value to allow us to identify when we're successful.
- continue_location = "http://localhost/"
- args = {"continue": continue_location, "auth": auth_token}
- req = self._CreateRequest("%s/_ah/login?%s" %
- (self.host, urllib.urlencode(args)))
- try:
- response = self.opener.open(req)
- except urllib2.HTTPError, e:
- response = e
- if (response.code != 302 or
- response.info()["location"] != continue_location):
- raise urllib2.HTTPError(req.get_full_url(), response.code, response.msg,
- response.headers, response.fp)
- self.authenticated = True
-
- def _Authenticate(self):
- """Authenticates the user.
-
- The authentication process works as follows:
- 1) We get a username and password from the user
- 2) We use ClientLogin to obtain an AUTH token for the user
- (see http://code.google.com/apis/accounts/AuthForInstalledApps.html).
- 3) We pass the auth token to /_ah/login on the server to obtain an
- authentication cookie. If login was successful, it tries to redirect
- us to the URL we provided.
-
- If we attempt to access the upload API without first obtaining an
- authentication cookie, it returns a 401 response (or a 302) and
- directs us to authenticate ourselves with ClientLogin.
- """
- for i in range(3):
- credentials = self.auth_function()
- try:
- auth_token = self._GetAuthToken(credentials[0], credentials[1])
- except ClientLoginError, e:
- print >>sys.stderr, ''
- if e.reason == "BadAuthentication":
- if e.info == "InvalidSecondFactor":
- print >>sys.stderr, (
- "Use an application-specific password instead "
- "of your regular account password.\n"
- "See http://www.google.com/"
- "support/accounts/bin/answer.py?answer=185833")
- else:
- print >>sys.stderr, "Invalid username or password."
- elif e.reason == "CaptchaRequired":
- print >>sys.stderr, (
- "Please go to\n"
- "https://www.google.com/accounts/DisplayUnlockCaptcha\n"
- "and verify you are a human. Then try again.\n"
- "If you are using a Google Apps account the URL is:\n"
- "https://www.google.com/a/yourdomain.com/UnlockCaptcha")
- elif e.reason == "NotVerified":
- print >>sys.stderr, "Account not verified."
- elif e.reason == "TermsNotAgreed":
- print >>sys.stderr, "User has not agreed to TOS."
- elif e.reason == "AccountDeleted":
- print >>sys.stderr, "The user account has been deleted."
- elif e.reason == "AccountDisabled":
- print >>sys.stderr, "The user account has been disabled."
- break
- elif e.reason == "ServiceDisabled":
- print >>sys.stderr, ("The user's access to the service has been "
- "disabled.")
- elif e.reason == "ServiceUnavailable":
- print >>sys.stderr, "The service is not available; try again later."
- else:
- # Unknown error.
- raise
- print >>sys.stderr, ''
- continue
- self._GetAuthCookie(auth_token)
- return
-
- def Send(self, request_path, payload=None,
- content_type="application/octet-stream",
- timeout=None,
- extra_headers=None,
- **kwargs):
- """Sends an RPC and returns the response.
-
- Args:
- request_path: The path to send the request to, eg /api/appversion/create.
- payload: The body of the request, or None to send an empty request.
- content_type: The Content-Type header to use.
- timeout: timeout in seconds; default None i.e. no timeout.
- (Note: for large requests on OS X, the timeout doesn't work right.)
- extra_headers: Dict containing additional HTTP headers that should be
- included in the request (string header names mapped to their values),
- or None to not include any additional headers.
- kwargs: Any keyword arguments are converted into query string parameters.
-
- Returns:
- The response body, as a string.
- """
- # TODO: Don't require authentication. Let the server say
- # whether it is necessary.
- if not self.authenticated:
- self._Authenticate()
-
- old_timeout = socket.getdefaulttimeout()
- socket.setdefaulttimeout(timeout)
- try:
- tries = 0
- while True:
- tries += 1
- args = dict(kwargs)
- url = "%s%s" % (self.host, request_path)
- if args:
- url += "?" + urllib.urlencode(args)
- req = self._CreateRequest(url=url, data=payload)
- req.add_header("Content-Type", content_type)
- if extra_headers:
- for header, value in extra_headers.items():
- req.add_header(header, value)
- try:
- f = self.opener.open(req)
- response = f.read()
- f.close()
- return response
- except urllib2.HTTPError, e:
- if tries > 3:
- raise
- elif e.code == 401 or e.code == 302:
- self._Authenticate()
-## elif e.code >= 500 and e.code < 600:
-## # Server Error - try again.
-## continue
- elif e.code == 301:
- # Handle permanent redirect manually.
- url = e.info()["location"]
- url_loc = urlparse.urlparse(url)
- self.host = '%s://%s' % (url_loc[0], url_loc[1])
- else:
- raise
- finally:
- socket.setdefaulttimeout(old_timeout)
-
-
-class HttpRpcServer(AbstractRpcServer):
- """Provides a simplified RPC-style interface for HTTP requests."""
-
- def _Authenticate(self):
- """Save the cookie jar after authentication."""
- super(HttpRpcServer, self)._Authenticate()
- if self.save_cookies:
- StatusUpdate("Saving authentication cookies to %s" % self.cookie_file)
- self.cookie_jar.save()
-
- def _GetOpener(self):
- """Returns an OpenerDirector that supports cookies and ignores redirects.
-
- Returns:
- A urllib2.OpenerDirector object.
- """
- opener = urllib2.OpenerDirector()
- opener.add_handler(urllib2.ProxyHandler())
- opener.add_handler(urllib2.UnknownHandler())
- opener.add_handler(urllib2.HTTPHandler())
- opener.add_handler(urllib2.HTTPDefaultErrorHandler())
- opener.add_handler(urllib2.HTTPSHandler())
- opener.add_handler(urllib2.HTTPErrorProcessor())
- if self.save_cookies:
- self.cookie_file = os.path.expanduser("~/.codereview_upload_cookies")
- self.cookie_jar = cookielib.MozillaCookieJar(self.cookie_file)
- if os.path.exists(self.cookie_file):
- try:
- self.cookie_jar.load()
- self.authenticated = True
- StatusUpdate("Loaded authentication cookies from %s" %
- self.cookie_file)
- except (cookielib.LoadError, IOError):
- # Failed to load cookies - just ignore them.
- pass
- else:
- # Create an empty cookie file with mode 600
- fd = os.open(self.cookie_file, os.O_CREAT, 0600)
- os.close(fd)
- # Always chmod the cookie file
- os.chmod(self.cookie_file, 0600)
- else:
- # Don't save cookies across runs of update.py.
- self.cookie_jar = cookielib.CookieJar()
- opener.add_handler(urllib2.HTTPCookieProcessor(self.cookie_jar))
- return opener
-
-
-parser = optparse.OptionParser(
- usage="%prog [options] [-- diff_options] [path...]")
-parser.add_option("-y", "--assume_yes", action="store_true",
- dest="assume_yes", default=False,
- help="Assume that the answer to yes/no questions is 'yes'.")
-# Logging
-group = parser.add_option_group("Logging options")
-group.add_option("-q", "--quiet", action="store_const", const=0,
- dest="verbose", help="Print errors only.")
-group.add_option("-v", "--verbose", action="store_const", const=2,
- dest="verbose", default=1,
- help="Print info level logs.")
-group.add_option("--noisy", action="store_const", const=3,
- dest="verbose", help="Print all logs.")
-# Review server
-group = parser.add_option_group("Review server options")
-group.add_option("-s", "--server", action="store", dest="server",
- default=DEFAULT_REVIEW_SERVER,
- metavar="SERVER",
- help=("The server to upload to. The format is host[:port]. "
- "Defaults to '%default'."))
-group.add_option("-e", "--email", action="store", dest="email",
- metavar="EMAIL", default=None,
- help="The username to use. Will prompt if omitted.")
-group.add_option("-H", "--host", action="store", dest="host",
- metavar="HOST", default=None,
- help="Overrides the Host header sent with all RPCs.")
-group.add_option("--no_cookies", action="store_false",
- dest="save_cookies", default=True,
- help="Do not save authentication cookies to local disk.")
-group.add_option("--account_type", action="store", dest="account_type",
- metavar="TYPE", default=AUTH_ACCOUNT_TYPE,
- choices=["GOOGLE", "HOSTED"],
- help=("Override the default account type "
- "(defaults to '%default', "
- "valid choices are 'GOOGLE' and 'HOSTED')."))
-# Issue
-group = parser.add_option_group("Issue options")
-group.add_option("-d", "--description", action="store", dest="description",
- metavar="DESCRIPTION", default=None,
- help="Optional description when creating an issue.")
-group.add_option("-f", "--description_file", action="store",
- dest="description_file", metavar="DESCRIPTION_FILE",
- default=None,
- help="Optional path of a file that contains "
- "the description when creating an issue.")
-group.add_option("-r", "--reviewers", action="store", dest="reviewers",
- metavar="REVIEWERS", default=None,
- help="Add reviewers (comma separated email addresses).")
-group.add_option("--cc", action="store", dest="cc",
- metavar="CC", default="dev@scons.tigris.org",
- help="Add CC (comma separated email addresses).")
-group.add_option("--private", action="store_true", dest="private",
- default=False,
- help="Make the issue restricted to reviewers and those CCed")
-# Upload options
-group = parser.add_option_group("Patch options")
-group.add_option("-m", "--message", action="store", dest="message",
- metavar="MESSAGE", default=None,
- help="A message to identify the patch. "
- "Will prompt if omitted.")
-group.add_option("-i", "--issue", type="int", action="store",
- metavar="ISSUE", default=None,
- help="Issue number to which to add. Defaults to new issue.")
-group.add_option("--base_url", action="store", dest="base_url", default=None,
- help="Base repository URL (listed as \"Base URL\" when "
- "viewing issue). If omitted, will be guessed automatically "
- "for SVN repos and left blank for others.")
-group.add_option("--download_base", action="store_true",
- dest="download_base", default=False,
- help="Base files will be downloaded by the server "
- "(side-by-side diffs may not work on files with CRs).")
-group.add_option("--rev", action="store", dest="revision",
- metavar="REV", default=None,
- help="Base revision/branch/tree to diff against. Use "
- "rev1:rev2 range to review already committed changeset.")
-group.add_option("--send_mail", action="store_true",
- dest="send_mail", default=True,
- help="Send notification email to reviewers.")
-group.add_option("--vcs", action="store", dest="vcs",
- metavar="VCS", default="svn",
- help=("Version control system (optional, usually upload.py "
- "already guesses the right VCS)."))
-group.add_option("--emulate_svn_auto_props", action="store_true",
- dest="emulate_svn_auto_props", default=False,
- help=("Emulate Subversion's auto properties feature."))
-
-
-def GetRpcServer(server, email=None, host_override=None, save_cookies=True,
- account_type=AUTH_ACCOUNT_TYPE):
- """Returns an instance of an AbstractRpcServer.
-
- Args:
- server: String containing the review server URL.
- email: String containing user's email address.
- host_override: If not None, string containing an alternate hostname to use
- in the host header.
- save_cookies: Whether authentication cookies should be saved to disk.
- account_type: Account type for authentication, either 'GOOGLE'
- or 'HOSTED'. Defaults to AUTH_ACCOUNT_TYPE.
-
- Returns:
- A new AbstractRpcServer, on which RPC calls can be made.
- """
-
- rpc_server_class = HttpRpcServer
-
- # If this is the dev_appserver, use fake authentication.
- host = (host_override or server).lower()
- if re.match(r'(http://)?localhost([:/]|$)', host):
- if email is None:
- email = "test@example.com"
- logging.info("Using debug user %s. Override with --email" % email)
- server = rpc_server_class(
- server,
- lambda: (email, "password"),
- host_override=host_override,
- extra_headers={"Cookie":
- 'dev_appserver_login="%s:False"' % email},
- save_cookies=save_cookies,
- account_type=account_type)
- # Don't try to talk to ClientLogin.
- server.authenticated = True
- return server
-
- def GetUserCredentials():
- """Prompts the user for a username and password."""
- # Create a local alias to the email variable to avoid Python's crazy
- # scoping rules.
- local_email = email
- if local_email is None:
- local_email = GetEmail("Email (login for uploading to %s)" % server)
- password = None
- if keyring:
- password = keyring.get_password(host, local_email)
- if password is not None:
- print "Using password from system keyring."
- else:
- password = getpass.getpass("Password for %s: " % local_email)
- if keyring:
- answer = raw_input("Store password in system keyring?(y/N) ").strip()
- if answer == "y":
- keyring.set_password(host, local_email, password)
- return (local_email, password)
-
- return rpc_server_class(server,
- GetUserCredentials,
- host_override=host_override,
- save_cookies=save_cookies)
-
-
-def EncodeMultipartFormData(fields, files):
- """Encode form fields for multipart/form-data.
-
- Args:
- fields: A sequence of (name, value) elements for regular form fields.
- files: A sequence of (name, filename, value) elements for data to be
- uploaded as files.
- Returns:
- (content_type, body) ready for httplib.HTTP instance.
-
- Source:
- http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/146306
- """
- BOUNDARY = '-M-A-G-I-C---B-O-U-N-D-A-R-Y-'
- CRLF = '\r\n'
- lines = []
- for (key, value) in fields:
- lines.append('--' + BOUNDARY)
- lines.append('Content-Disposition: form-data; name="%s"' % key)
- lines.append('')
- if isinstance(value, unicode):
- value = value.encode('utf-8')
- lines.append(value)
- for (key, filename, value) in files:
- lines.append('--' + BOUNDARY)
- lines.append('Content-Disposition: form-data; name="%s"; filename="%s"' %
- (key, filename))
- lines.append('Content-Type: %s' % GetContentType(filename))
- lines.append('')
- if isinstance(value, unicode):
- value = value.encode('utf-8')
- lines.append(value)
- lines.append('--' + BOUNDARY + '--')
- lines.append('')
- body = CRLF.join(lines)
- content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
- return content_type, body
-
-
-def GetContentType(filename):
- """Helper to guess the content-type from the filename."""
- return mimetypes.guess_type(filename)[0] or 'application/octet-stream'
-
-
-# Use a shell for subcommands on Windows to get a PATH search.
-use_shell = sys.platform.startswith("win")
-
-def RunShellWithReturnCodeAndStderr(command, print_output=False,
- universal_newlines=True,
- env=os.environ):
- """Executes a command and returns the output from stdout, stderr and the return code.
-
- Args:
- command: Command to execute.
- print_output: If True, the output is printed to stdout.
- If False, both stdout and stderr are ignored.
- universal_newlines: Use universal_newlines flag (default: True).
-
- Returns:
- Tuple (stdout, stderr, return code)
- """
- logging.info("Running %s", command)
- env = env.copy()
- env['LC_MESSAGES'] = 'C'
- p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
- shell=use_shell, universal_newlines=universal_newlines,
- env=env)
- if print_output:
- output_array = []
- while True:
- line = p.stdout.readline()
- if not line:
- break
- print line.strip("\n")
- output_array.append(line)
- output = "".join(output_array)
- else:
- output = p.stdout.read()
- p.wait()
- errout = p.stderr.read()
- if print_output and errout:
- print >>sys.stderr, errout
- p.stdout.close()
- p.stderr.close()
- return output, errout, p.returncode
-
-def RunShellWithReturnCode(command, print_output=False,
- universal_newlines=True,
- env=os.environ):
- """Executes a command and returns the output from stdout and the return code."""
- out, err, retcode = RunShellWithReturnCodeAndStderr(command, print_output,
- universal_newlines, env)
- return out, retcode
-
-def RunShell(command, silent_ok=False, universal_newlines=True,
- print_output=False, env=os.environ):
- data, retcode = RunShellWithReturnCode(command, print_output,
- universal_newlines, env)
- if retcode:
- ErrorExit("Got error status from %s:\n%s" % (command, data))
- if not silent_ok and not data:
- ErrorExit("No output from %s" % command)
- return data
-
-
-class VersionControlSystem(object):
- """Abstract base class providing an interface to the VCS."""
-
- def __init__(self, options):
- """Constructor.
-
- Args:
- options: Command line options.
- """
- self.options = options
-
- def PostProcessDiff(self, diff):
- """Return the diff with any special post processing this VCS needs, e.g.
- to include an svn-style "Index:"."""
- return diff
-
- def GenerateDiff(self, args):
- """Return the current diff as a string.
-
- Args:
- args: Extra arguments to pass to the diff command.
- """
- raise NotImplementedError(
- "abstract method -- subclass %s must override" % self.__class__)
-
- def GetUnknownFiles(self):
- """Return a list of files unknown to the VCS."""
- raise NotImplementedError(
- "abstract method -- subclass %s must override" % self.__class__)
-
- def CheckForUnknownFiles(self):
- """Show an "are you sure?" prompt if there are unknown files."""
- unknown_files = self.GetUnknownFiles()
- if unknown_files:
- print "The following files are not added to version control:"
- for line in unknown_files:
- print line
- prompt = "Are you sure to continue?(y/N) "
- answer = raw_input(prompt).strip()
- if answer != "y":
- ErrorExit("User aborted")
-
- def GetBaseFile(self, filename):
- """Get the content of the upstream version of a file.
-
- Returns:
- A tuple (base_content, new_content, is_binary, status)
- base_content: The contents of the base file.
- new_content: For text files, this is empty. For binary files, this is
- the contents of the new file, since the diff output won't contain
- information to reconstruct the current file.
- is_binary: True iff the file is binary.
- status: The status of the file.
- """
-
- raise NotImplementedError(
- "abstract method -- subclass %s must override" % self.__class__)
-
-
- def GetBaseFiles(self, diff):
- """Helper that calls GetBase file for each file in the patch.
-
- Returns:
- A dictionary that maps from filename to GetBaseFile's tuple. Filenames
- are retrieved based on lines that start with "Index:" or
- "Property changes on:".
- """
- files = {}
- for line in diff.splitlines(True):
- if line.startswith('Index:') or line.startswith('Property changes on:'):
- unused, filename = line.split(':', 1)
- # On Windows if a file has property changes its filename uses '\'
- # instead of '/'.
- filename = filename.strip().replace('\\', '/')
- files[filename] = self.GetBaseFile(filename)
- return files
-
-
- def UploadBaseFiles(self, issue, rpc_server, patch_list, patchset, options,
- files):
- """Uploads the base files (and if necessary, the current ones as well)."""
-
- def UploadFile(filename, file_id, content, is_binary, status, is_base):
- """Uploads a file to the server."""
- file_too_large = False
- if is_base:
- type = "base"
- else:
- type = "current"
- if len(content) > MAX_UPLOAD_SIZE:
- print ("Not uploading the %s file for %s because it's too large." %
- (type, filename))
- file_too_large = True
- content = ""
- checksum = md5(content).hexdigest()
- if options.verbose > 0 and not file_too_large:
- print "Uploading %s file for %s" % (type, filename)
- url = "/%d/upload_content/%d/%d" % (int(issue), int(patchset), file_id)
- form_fields = [("filename", filename),
- ("status", status),
- ("checksum", checksum),
- ("is_binary", str(is_binary)),
- ("is_current", str(not is_base)),
- ]
- if file_too_large:
- form_fields.append(("file_too_large", "1"))
- if options.email:
- form_fields.append(("user", options.email))
- ctype, body = EncodeMultipartFormData(form_fields,
- [("data", filename, content)])
- response_body = rpc_server.Send(url, body,
- content_type=ctype)
- if not response_body.startswith("OK"):
- StatusUpdate(" --> %s" % response_body)
- sys.exit(1)
-
- patches = dict()
- [patches.setdefault(v, k) for k, v in patch_list]
- for filename in patches.keys():
- base_content, new_content, is_binary, status = files[filename]
- file_id_str = patches.get(filename)
- if file_id_str.find("nobase") != -1:
- base_content = None
- file_id_str = file_id_str[file_id_str.rfind("_") + 1:]
- file_id = int(file_id_str)
- if base_content != None:
- UploadFile(filename, file_id, base_content, is_binary, status, True)
- if new_content != None:
- UploadFile(filename, file_id, new_content, is_binary, status, False)
-
- def IsImage(self, filename):
- """Returns true if the filename has an image extension."""
- mimetype = mimetypes.guess_type(filename)[0]
- if not mimetype:
- return False
- return mimetype.startswith("image/")
-
- def IsBinary(self, filename):
- """Returns true if the guessed mimetyped isnt't in text group."""
- mimetype = mimetypes.guess_type(filename)[0]
- if not mimetype:
- return False # e.g. README, "real" binaries usually have an extension
- # special case for text files which don't start with text/
- if mimetype in TEXT_MIMETYPES:
- return False
- return not mimetype.startswith("text/")
-
-
-class SubversionVCS(VersionControlSystem):
- """Implementation of the VersionControlSystem interface for Subversion."""
-
- def __init__(self, options):
- super(SubversionVCS, self).__init__(options)
- if self.options.revision:
- match = re.match(r"(\d+)(:(\d+))?", self.options.revision)
- if not match:
- ErrorExit("Invalid Subversion revision %s." % self.options.revision)
- self.rev_start = match.group(1)
- self.rev_end = match.group(3)
- else:
- self.rev_start = self.rev_end = None
- # Cache output from "svn list -r REVNO dirname".
- # Keys: dirname, Values: 2-tuple (ouput for start rev and end rev).
- self.svnls_cache = {}
- # Base URL is required to fetch files deleted in an older revision.
- # Result is cached to not guess it over and over again in GetBaseFile().
- required = self.options.download_base or self.options.revision is not None
- self.svn_base = self._GuessBase(required)
-
- def GuessBase(self, required):
- """Wrapper for _GuessBase."""
- return self.svn_base
-
- def _GuessBase(self, required):
- """Returns base URL for current diff.
-
- Args:
- required: If true, exits if the url can't be guessed, otherwise None is
- returned.
- """
- info = RunShell(["svn", "info"])
- for line in info.splitlines():
- if line.startswith("URL: "):
- url = line.split()[1]
- scheme, netloc, path, params, query, fragment = urlparse.urlparse(url)
- guess = ""
- if netloc == "svn.python.org" and scheme == "svn+ssh":
- path = "projects" + path
- scheme = "http"
- guess = "Python "
- elif netloc.endswith(".googlecode.com"):
- scheme = "http"
- guess = "Google Code "
- path = path + "/"
- base = urlparse.urlunparse((scheme, netloc, path, params,
- query, fragment))
- logging.info("Guessed %sbase = %s", guess, base)
- return base
- if required:
- ErrorExit("Can't find URL in output from svn info")
- return None
-
- def GenerateDiff(self, args):
- cmd = ["svn", "diff"]
- if self.options.revision:
- cmd += ["-r", self.options.revision]
- cmd.extend(args)
- data = RunShell(cmd)
- count = 0
- for line in data.splitlines():
- if line.startswith("Index:") or line.startswith("Property changes on:"):
- count += 1
- logging.info(line)
- if not count:
- ErrorExit("No valid patches found in output from svn diff")
- return data
-
- def _CollapseKeywords(self, content, keyword_str):
- """Collapses SVN keywords."""
- # svn cat translates keywords but svn diff doesn't. As a result of this
- # behavior patching.PatchChunks() fails with a chunk mismatch error.
- # This part was originally written by the Review Board development team
- # who had the same problem (http://reviews.review-board.org/r/276/).
- # Mapping of keywords to known aliases
- svn_keywords = {
- # Standard keywords
- 'Date': ['Date', 'LastChangedDate'],
- 'Revision': ['Revision', 'LastChangedRevision', 'Rev'],
- 'Author': ['Author', 'LastChangedBy'],
- 'HeadURL': ['HeadURL', 'URL'],
- 'Id': ['Id'],
-
- # Aliases
- 'LastChangedDate': ['LastChangedDate', 'Date'],
- 'LastChangedRevision': ['LastChangedRevision', 'Rev', 'Revision'],
- 'LastChangedBy': ['LastChangedBy', 'Author'],
- 'URL': ['URL', 'HeadURL'],
- }
-
- def repl(m):
- if m.group(2):
- return "$%s::%s$" % (m.group(1), " " * len(m.group(3)))
- return "$%s$" % m.group(1)
- keywords = [keyword
- for name in keyword_str.split(" ")
- for keyword in svn_keywords.get(name, [])]
- return re.sub(r"\$(%s):(:?)([^\$]+)\$" % '|'.join(keywords), repl, content)
-
- def GetUnknownFiles(self):
- status = RunShell(["svn", "status", "--ignore-externals"], silent_ok=True)
- unknown_files = []
- for line in status.split("\n"):
- if line and line[0] == "?":
- unknown_files.append(line)
- return unknown_files
-
- def ReadFile(self, filename):
- """Returns the contents of a file."""
- file = open(filename, 'rb')
- result = ""
- try:
- result = file.read()
- finally:
- file.close()
- return result
-
- def GetStatus(self, filename):
- """Returns the status of a file."""
- if not self.options.revision:
- status = RunShell(["svn", "status", "--ignore-externals", filename])
- if not status:
- ErrorExit("svn status returned no output for %s" % filename)
- status_lines = status.splitlines()
- # If file is in a cl, the output will begin with
- # "\n--- Changelist 'cl_name':\n". See
- # http://svn.collab.net/repos/svn/trunk/notes/changelist-design.txt
- if (len(status_lines) == 3 and
- not status_lines[0] and
- status_lines[1].startswith("--- Changelist")):
- status = status_lines[2]
- else:
- status = status_lines[0]
- # If we have a revision to diff against we need to run "svn list"
- # for the old and the new revision and compare the results to get
- # the correct status for a file.
- else:
- dirname, relfilename = os.path.split(filename)
- if dirname not in self.svnls_cache:
- cmd = ["svn", "list", "-r", self.rev_start, dirname or "."]
- out, err, returncode = RunShellWithReturnCodeAndStderr(cmd)
- if returncode:
- # Directory might not yet exist at start revison
- # svn: Unable to find repository location for 'abc' in revision nnn
- if re.match('^svn: Unable to find repository location for .+ in revision \d+', err):
- old_files = ()
- else:
- ErrorExit("Failed to get status for %s:\n%s" % (filename, err))
- else:
- old_files = out.splitlines()
- args = ["svn", "list"]
- if self.rev_end:
- args += ["-r", self.rev_end]
- cmd = args + [dirname or "."]
- out, returncode = RunShellWithReturnCode(cmd)
- if returncode:
- ErrorExit("Failed to run command %s" % cmd)
- self.svnls_cache[dirname] = (old_files, out.splitlines())
- old_files, new_files = self.svnls_cache[dirname]
- if relfilename in old_files and relfilename not in new_files:
- status = "D "
- elif relfilename in old_files and relfilename in new_files:
- status = "M "
- else:
- status = "A "
- return status
-
- def GetBaseFile(self, filename):
- status = self.GetStatus(filename)
- base_content = None
- new_content = None
-
- # If a file is copied its status will be "A +", which signifies
- # "addition-with-history". See "svn st" for more information. We need to
- # upload the original file or else diff parsing will fail if the file was
- # edited.
- if status[0] == "A" and status[3] != "+":
- # We'll need to upload the new content if we're adding a binary file
- # since diff's output won't contain it.
- mimetype = RunShell(["svn", "propget", "svn:mime-type", filename],
- silent_ok=True)
- base_content = ""
- is_binary = bool(mimetype) and not mimetype.startswith("text/")
- if is_binary and self.IsImage(filename):
- new_content = self.ReadFile(filename)
- elif (status[0] in ("M", "D", "R") or
- (status[0] == "A" and status[3] == "+") or # Copied file.
- (status[0] == " " and status[1] == "M")): # Property change.
- args = []
- if self.options.revision:
- url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
- else:
- # Don't change filename, it's needed later.
- url = filename
- args += ["-r", "BASE"]
- cmd = ["svn"] + args + ["propget", "svn:mime-type", url]
- mimetype, returncode = RunShellWithReturnCode(cmd)
- if returncode:
- # File does not exist in the requested revision.
- # Reset mimetype, it contains an error message.
- mimetype = ""
- else:
- mimetype = mimetype.strip()
- get_base = False
- is_binary = (bool(mimetype) and
- not mimetype.startswith("text/") and
- not mimetype in TEXT_MIMETYPES)
- if status[0] == " ":
- # Empty base content just to force an upload.
- base_content = ""
- elif is_binary:
- if self.IsImage(filename):
- get_base = True
- if status[0] == "M":
- if not self.rev_end:
- new_content = self.ReadFile(filename)
- else:
- url = "%s/%s@%s" % (self.svn_base, filename, self.rev_end)
- new_content = RunShell(["svn", "cat", url],
- universal_newlines=True, silent_ok=True)
- else:
- base_content = ""
- else:
- get_base = True
-
- if get_base:
- if is_binary:
- universal_newlines = False
- else:
- universal_newlines = True
- if self.rev_start:
- # "svn cat -r REV delete_file.txt" doesn't work. cat requires
- # the full URL with "@REV" appended instead of using "-r" option.
- url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
- base_content = RunShell(["svn", "cat", url],
- universal_newlines=universal_newlines,
- silent_ok=True)
- else:
- base_content, ret_code = RunShellWithReturnCode(
- ["svn", "cat", filename], universal_newlines=universal_newlines)
- if ret_code and status[0] == "R":
- # It's a replaced file without local history (see issue208).
- # The base file needs to be fetched from the server.
- url = "%s/%s" % (self.svn_base, filename)
- base_content = RunShell(["svn", "cat", url],
- universal_newlines=universal_newlines,
- silent_ok=True)
- elif ret_code:
- ErrorExit("Got error status from 'svn cat %s'" % filename)
- if not is_binary:
- args = []
- if self.rev_start:
- url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
- else:
- url = filename
- args += ["-r", "BASE"]
- cmd = ["svn"] + args + ["propget", "svn:keywords", url]
- keywords, returncode = RunShellWithReturnCode(cmd)
- if keywords and not returncode:
- base_content = self._CollapseKeywords(base_content, keywords)
- else:
- StatusUpdate("svn status returned unexpected output: %s" % status)
- sys.exit(1)
- return base_content, new_content, is_binary, status[0:5]
-
-
-class GitVCS(VersionControlSystem):
- """Implementation of the VersionControlSystem interface for Git."""
-
- def __init__(self, options):
- super(GitVCS, self).__init__(options)
- # Map of filename -> (hash before, hash after) of base file.
- # Hashes for "no such file" are represented as None.
- self.hashes = {}
- # Map of new filename -> old filename for renames.
- self.renames = {}
-
- def PostProcessDiff(self, gitdiff):
- """Converts the diff output to include an svn-style "Index:" line as well
- as record the hashes of the files, so we can upload them along with our
- diff."""
- # Special used by git to indicate "no such content".
- NULL_HASH = "0"*40
-
- def IsFileNew(filename):
- return filename in self.hashes and self.hashes[filename][0] is None
-
- def AddSubversionPropertyChange(filename):
- """Add svn's property change information into the patch if given file is
- new file.
-
- We use Subversion's auto-props setting to retrieve its property.
- See http://svnbook.red-bean.com/en/1.1/ch07.html#svn-ch-7-sect-1.3.2 for
- Subversion's [auto-props] setting.
- """
- if self.options.emulate_svn_auto_props and IsFileNew(filename):
- svnprops = GetSubversionPropertyChanges(filename)
- if svnprops:
- svndiff.append("\n" + svnprops + "\n")
-
- svndiff = []
- filecount = 0
- filename = None
- for line in gitdiff.splitlines():
- match = re.match(r"diff --git a/(.*) b/(.*)$", line)
- if match:
- # Add auto property here for previously seen file.
- if filename is not None:
- AddSubversionPropertyChange(filename)
- filecount += 1
- # Intentionally use the "after" filename so we can show renames.
- filename = match.group(2)
- svndiff.append("Index: %s\n" % filename)
- if match.group(1) != match.group(2):
- self.renames[match.group(2)] = match.group(1)
- else:
- # The "index" line in a git diff looks like this (long hashes elided):
- # index 82c0d44..b2cee3f 100755
- # We want to save the left hash, as that identifies the base file.
- match = re.match(r"index (\w+)\.\.(\w+)", line)
- if match:
- before, after = (match.group(1), match.group(2))
- if before == NULL_HASH:
- before = None
- if after == NULL_HASH:
- after = None
- self.hashes[filename] = (before, after)
- svndiff.append(line + "\n")
- if not filecount:
- ErrorExit("No valid patches found in output from git diff")
- # Add auto property for the last seen file.
- assert filename is not None
- AddSubversionPropertyChange(filename)
- return "".join(svndiff)
-
- def GenerateDiff(self, extra_args):
- extra_args = extra_args[:]
- if self.options.revision:
- if ":" in self.options.revision:
- extra_args = self.options.revision.split(":", 1) + extra_args
- else:
- extra_args = [self.options.revision] + extra_args
-
- # --no-ext-diff is broken in some versions of Git, so try to work around
- # this by overriding the environment (but there is still a problem if the
- # git config key "diff.external" is used).
- env = os.environ.copy()
- if 'GIT_EXTERNAL_DIFF' in env: del env['GIT_EXTERNAL_DIFF']
- return RunShell(["git", "diff", "--no-ext-diff", "--full-index", "-M"]
- + extra_args, env=env)
-
- def GetUnknownFiles(self):
- status = RunShell(["git", "ls-files", "--exclude-standard", "--others"],
- silent_ok=True)
- return status.splitlines()
-
- def GetFileContent(self, file_hash, is_binary):
- """Returns the content of a file identified by its git hash."""
- data, retcode = RunShellWithReturnCode(["git", "show", file_hash],
- universal_newlines=not is_binary)
- if retcode:
- ErrorExit("Got error status from 'git show %s'" % file_hash)
- return data
-
- def GetBaseFile(self, filename):
- hash_before, hash_after = self.hashes.get(filename, (None,None))
- base_content = None
- new_content = None
- is_binary = self.IsBinary(filename)
- status = None
-
- if filename in self.renames:
- status = "A +" # Match svn attribute name for renames.
- if filename not in self.hashes:
- # If a rename doesn't change the content, we never get a hash.
- base_content = RunShell(["git", "show", "HEAD:" + filename])
- elif not hash_before:
- status = "A"
- base_content = ""
- elif not hash_after:
- status = "D"
- else:
- status = "M"
-
- is_image = self.IsImage(filename)
-
- # Grab the before/after content if we need it.
- # We should include file contents if it's text or it's an image.
- if not is_binary or is_image:
- # Grab the base content if we don't have it already.
- if base_content is None and hash_before:
- base_content = self.GetFileContent(hash_before, is_binary)
- # Only include the "after" file if it's an image; otherwise it
- # it is reconstructed from the diff.
- if is_image and hash_after:
- new_content = self.GetFileContent(hash_after, is_binary)
-
- return (base_content, new_content, is_binary, status)
-
-
-class MercurialVCS(VersionControlSystem):
- """Implementation of the VersionControlSystem interface for Mercurial."""
-
- def __init__(self, options, repo_dir):
- super(MercurialVCS, self).__init__(options)
- # Absolute path to repository (we can be in a subdir)
- self.repo_dir = os.path.normpath(repo_dir)
- # Compute the subdir
- cwd = os.path.normpath(os.getcwd())
- assert cwd.startswith(self.repo_dir)
- self.subdir = cwd[len(self.repo_dir):].lstrip(r"\/")
- if self.options.revision:
- self.base_rev = self.options.revision
- else:
- self.base_rev = RunShell(["hg", "parent", "-q"]).split(':')[1].strip()
-
- def _GetRelPath(self, filename):
- """Get relative path of a file according to the current directory,
- given its logical path in the repo."""
- assert filename.startswith(self.subdir), (filename, self.subdir)
- return filename[len(self.subdir):].lstrip(r"\/")
-
- def GenerateDiff(self, extra_args):
- cmd = ["hg", "diff", "--git", "-r", self.base_rev] + extra_args
- data = RunShell(cmd, silent_ok=True)
- svndiff = []
- filecount = 0
- for line in data.splitlines():
- m = re.match("diff --git a/(\S+) b/(\S+)", line)
- if m:
- # Modify line to make it look like as it comes from svn diff.
- # With this modification no changes on the server side are required
- # to make upload.py work with Mercurial repos.
- # NOTE: for proper handling of moved/copied files, we have to use
- # the second filename.
- filename = m.group(2)
- svndiff.append("Index: %s" % filename)
- svndiff.append("=" * 67)
- filecount += 1
- logging.info(line)
- else:
- svndiff.append(line)
- if not filecount:
- ErrorExit("No valid patches found in output from hg diff")
- return "\n".join(svndiff) + "\n"
-
- def GetUnknownFiles(self):
- """Return a list of files unknown to the VCS."""
- args = []
- status = RunShell(["hg", "status", "--rev", self.base_rev, "-u", "."],
- silent_ok=True)
- unknown_files = []
- for line in status.splitlines():
- st, fn = line.split(" ", 1)
- if st == "?":
- unknown_files.append(fn)
- return unknown_files
-
- def GetBaseFile(self, filename):
- # "hg status" and "hg cat" both take a path relative to the current subdir
- # rather than to the repo root, but "hg diff" has given us the full path
- # to the repo root.
- base_content = ""
- new_content = None
- is_binary = False
- oldrelpath = relpath = self._GetRelPath(filename)
- # "hg status -C" returns two lines for moved/copied files, one otherwise
- out = RunShell(["hg", "status", "-C", "--rev", self.base_rev, relpath])
- out = out.splitlines()
- # HACK: strip error message about missing file/directory if it isn't in
- # the working copy
- if out[0].startswith('%s: ' % relpath):
- out = out[1:]
- status, _ = out[0].split(' ', 1)
- if len(out) > 1 and status == "A":
- # Moved/copied => considered as modified, use old filename to
- # retrieve base contents
- oldrelpath = out[1].strip()
- status = "M"
- if ":" in self.base_rev:
- base_rev = self.base_rev.split(":", 1)[0]
- else:
- base_rev = self.base_rev
- if status != "A":
- base_content = RunShell(["hg", "cat", "-r", base_rev, oldrelpath],
- silent_ok=True)
- is_binary = "\0" in base_content # Mercurial's heuristic
- if status != "R":
- new_content = open(relpath, "rb").read()
- is_binary = is_binary or "\0" in new_content
- if is_binary and base_content:
- # Fetch again without converting newlines
- base_content = RunShell(["hg", "cat", "-r", base_rev, oldrelpath],
- silent_ok=True, universal_newlines=False)
- if not is_binary or not self.IsImage(relpath):
- new_content = None
- return base_content, new_content, is_binary, status
-
-
-# NOTE: The SplitPatch function is duplicated in engine.py, keep them in sync.
-def SplitPatch(data):
- """Splits a patch into separate pieces for each file.
-
- Args:
- data: A string containing the output of svn diff.
-
- Returns:
- A list of 2-tuple (filename, text) where text is the svn diff output
- pertaining to filename.
- """
- patches = []
- filename = None
- diff = []
- for line in data.splitlines(True):
- new_filename = None
- if line.startswith('Index:'):
- unused, new_filename = line.split(':', 1)
- new_filename = new_filename.strip()
- elif line.startswith('Property changes on:'):
- unused, temp_filename = line.split(':', 1)
- # When a file is modified, paths use '/' between directories, however
- # when a property is modified '\' is used on Windows. Make them the same
- # otherwise the file shows up twice.
- temp_filename = temp_filename.strip().replace('\\', '/')
- if temp_filename != filename:
- # File has property changes but no modifications, create a new diff.
- new_filename = temp_filename
- if new_filename:
- if filename and diff:
- patches.append((filename, ''.join(diff)))
- filename = new_filename
- diff = [line]
- continue
- if diff is not None:
- diff.append(line)
- if filename and diff:
- patches.append((filename, ''.join(diff)))
- return patches
-
-
-def UploadSeparatePatches(issue, rpc_server, patchset, data, options):
- """Uploads a separate patch for each file in the diff output.
-
- Returns a list of [patch_key, filename] for each file.
- """
- patches = SplitPatch(data)
- rv = []
- for patch in patches:
- if len(patch[1]) > MAX_UPLOAD_SIZE:
- print ("Not uploading the patch for " + patch[0] +
- " because the file is too large.")
- continue
- form_fields = [("filename", patch[0])]
- if not options.download_base:
- form_fields.append(("content_upload", "1"))
- files = [("data", "data.diff", patch[1])]
- ctype, body = EncodeMultipartFormData(form_fields, files)
- url = "/%d/upload_patch/%d" % (int(issue), int(patchset))
- print "Uploading patch for " + patch[0]
- response_body = rpc_server.Send(url, body, content_type=ctype)
- lines = response_body.splitlines()
- if not lines or lines[0] != "OK":
- StatusUpdate(" --> %s" % response_body)
- sys.exit(1)
- rv.append([lines[1], patch[0]])
- return rv
-
-
-def GuessVCSName():
- """Helper to guess the version control system.
-
- This examines the current directory, guesses which VersionControlSystem
- we're using, and returns an string indicating which VCS is detected.
-
- Returns:
- A pair (vcs, output). vcs is a string indicating which VCS was detected
- and is one of VCS_GIT, VCS_MERCURIAL, VCS_SUBVERSION, or VCS_UNKNOWN.
- output is a string containing any interesting output from the vcs
- detection routine, or None if there is nothing interesting.
- """
- def RunDetectCommand(vcs_type, command):
- """Helper to detect VCS by executing command.
-
- Returns:
- A pair (vcs, output) or None. Throws exception on error.
- """
- try:
- out, returncode = RunShellWithReturnCode(command)
- if returncode == 0:
- return (vcs_type, out.strip())
- except OSError, (errcode, message):
- if errcode != errno.ENOENT: # command not found code
- raise
-
- # Mercurial has a command to get the base directory of a repository
- # Try running it, but don't die if we don't have hg installed.
- # NOTE: we try Mercurial first as it can sit on top of an SVN working copy.
- res = RunDetectCommand(VCS_MERCURIAL, ["hg", "root"])
- if res != None:
- return res
-
- # Subversion has a .svn in all working directories.
- if os.path.isdir('.svn'):
- logging.info("Guessed VCS = Subversion")
- return (VCS_SUBVERSION, None)
-
- # Git has a command to test if you're in a git tree.
- # Try running it, but don't die if we don't have git installed.
- res = RunDetectCommand(VCS_GIT, ["git", "rev-parse",
- "--is-inside-work-tree"])
- if res != None:
- return res
-
- return (VCS_UNKNOWN, None)
-
-
-def GuessVCS(options):
- """Helper to guess the version control system.
-
- This verifies any user-specified VersionControlSystem (by command line
- or environment variable). If the user didn't specify one, this examines
- the current directory, guesses which VersionControlSystem we're using,
- and returns an instance of the appropriate class. Exit with an error
- if we can't figure it out.
-
- Returns:
- A VersionControlSystem instance. Exits if the VCS can't be guessed.
- """
- vcs = options.vcs
- if not vcs:
- vcs = os.environ.get("CODEREVIEW_VCS")
- if vcs:
- v = VCS_ABBREVIATIONS.get(vcs.lower())
- if v is None:
- ErrorExit("Unknown version control system %r specified." % vcs)
- (vcs, extra_output) = (v, None)
- else:
- (vcs, extra_output) = GuessVCSName()
-
- if vcs == VCS_MERCURIAL:
- if extra_output is None:
- extra_output = RunShell(["hg", "root"]).strip()
- return MercurialVCS(options, extra_output)
- elif vcs == VCS_SUBVERSION:
- return SubversionVCS(options)
- elif vcs == VCS_GIT:
- return GitVCS(options)
-
- ErrorExit(("Could not guess version control system. "
- "Are you in a working copy directory?"))
-
-
-def CheckReviewer(reviewer):
- """Validate a reviewer -- either a nickname or an email addres.
-
- Args:
- reviewer: A nickname or an email address.
-
- Calls ErrorExit() if it is an invalid email address.
- """
- if "@" not in reviewer:
- return # Assume nickname
- parts = reviewer.split("@")
- if len(parts) > 2:
- ErrorExit("Invalid email address: %r" % reviewer)
- assert len(parts) == 2
- if "." not in parts[1]:
- ErrorExit("Invalid email address: %r" % reviewer)
-
-
-def LoadSubversionAutoProperties():
- """Returns the content of [auto-props] section of Subversion's config file as
- a dictionary.
-
- Returns:
- A dictionary whose key-value pair corresponds the [auto-props] section's
- key-value pair.
- In following cases, returns empty dictionary:
- - config file doesn't exist, or
- - 'enable-auto-props' is not set to 'true-like-value' in [miscellany].
- """
- if os.name == 'nt':
- subversion_config = os.environ.get("APPDATA") + "\\Subversion\\config"
- else:
- subversion_config = os.path.expanduser("~/.subversion/config")
- if not os.path.exists(subversion_config):
- return {}
- config = ConfigParser.ConfigParser()
- config.read(subversion_config)
- if (config.has_section("miscellany") and
- config.has_option("miscellany", "enable-auto-props") and
- config.getboolean("miscellany", "enable-auto-props") and
- config.has_section("auto-props")):
- props = {}
- for file_pattern in config.options("auto-props"):
- props[file_pattern] = ParseSubversionPropertyValues(
- config.get("auto-props", file_pattern))
- return props
- else:
- return {}
-
-def ParseSubversionPropertyValues(props):
- """Parse the given property value which comes from [auto-props] section and
- returns a list whose element is a (svn_prop_key, svn_prop_value) pair.
-
- See the following doctest for example.
-
- >>> ParseSubversionPropertyValues('svn:eol-style=LF')
- [('svn:eol-style', 'LF')]
- >>> ParseSubversionPropertyValues('svn:mime-type=image/jpeg')
- [('svn:mime-type', 'image/jpeg')]
- >>> ParseSubversionPropertyValues('svn:eol-style=LF;svn:executable')
- [('svn:eol-style', 'LF'), ('svn:executable', '*')]
- """
- key_value_pairs = []
- for prop in props.split(";"):
- key_value = prop.split("=")
- assert len(key_value) <= 2
- if len(key_value) == 1:
- # If value is not given, use '*' as a Subversion's convention.
- key_value_pairs.append((key_value[0], "*"))
- else:
- key_value_pairs.append((key_value[0], key_value[1]))
- return key_value_pairs
-
-
-def GetSubversionPropertyChanges(filename):
- """Return a Subversion's 'Property changes on ...' string, which is used in
- the patch file.
-
- Args:
- filename: filename whose property might be set by [auto-props] config.
-
- Returns:
- A string like 'Property changes on |filename| ...' if given |filename|
- matches any entries in [auto-props] section. None, otherwise.
- """
- global svn_auto_props_map
- if svn_auto_props_map is None:
- svn_auto_props_map = LoadSubversionAutoProperties()
-
- all_props = []
- for file_pattern, props in svn_auto_props_map.items():
- if fnmatch.fnmatch(filename, file_pattern):
- all_props.extend(props)
- if all_props:
- return FormatSubversionPropertyChanges(filename, all_props)
- return None
-
-
-def FormatSubversionPropertyChanges(filename, props):
- """Returns Subversion's 'Property changes on ...' strings using given filename
- and properties.
-
- Args:
- filename: filename
- props: A list whose element is a (svn_prop_key, svn_prop_value) pair.
-
- Returns:
- A string which can be used in the patch file for Subversion.
-
- See the following doctest for example.
-
- >>> print FormatSubversionPropertyChanges('foo.cc', [('svn:eol-style', 'LF')])
- Property changes on: foo.cc
- ___________________________________________________________________
- Added: svn:eol-style
- + LF
- <BLANKLINE>
- """
- prop_changes_lines = [
- "Property changes on: %s" % filename,
- "___________________________________________________________________"]
- for key, value in props:
- prop_changes_lines.append("Added: " + key)
- prop_changes_lines.append(" + " + value)
- return "\n".join(prop_changes_lines) + "\n"
-
-
-def RealMain(argv, data=None):
- """The real main function.
-
- Args:
- argv: Command line arguments.
- data: Diff contents. If None (default) the diff is generated by
- the VersionControlSystem implementation returned by GuessVCS().
-
- Returns:
- A 2-tuple (issue id, patchset id).
- The patchset id is None if the base files are not uploaded by this
- script (applies only to SVN checkouts).
- """
- options, args = parser.parse_args(argv[1:])
- global verbosity
- verbosity = options.verbose
- if verbosity >= 3:
- logging.getLogger().setLevel(logging.DEBUG)
- elif verbosity >= 2:
- logging.getLogger().setLevel(logging.INFO)
-
- vcs = GuessVCS(options)
-
- base = options.base_url
- if isinstance(vcs, SubversionVCS):
- # Guessing the base field is only supported for Subversion.
- # Note: Fetching base files may become deprecated in future releases.
- guessed_base = vcs.GuessBase(options.download_base)
- if base:
- if guessed_base and base != guessed_base:
- print "Using base URL \"%s\" from --base_url instead of \"%s\"" % \
- (base, guessed_base)
- else:
- base = guessed_base
-
- if not base and options.download_base:
- options.download_base = True
- logging.info("Enabled upload of base file")
- if not options.assume_yes:
- vcs.CheckForUnknownFiles()
- if data is None:
- data = vcs.GenerateDiff(args)
- data = vcs.PostProcessDiff(data)
- files = vcs.GetBaseFiles(data)
- if verbosity >= 1:
- print "Upload server:", options.server, "(change with -s/--server)"
- if options.issue:
- prompt = "Message describing this patch set: "
- else:
- prompt = "New issue subject: "
- message = options.message or raw_input(prompt).strip()
- if not message:
- ErrorExit("A non-empty message is required")
- rpc_server = GetRpcServer(options.server,
- options.email,
- options.host,
- options.save_cookies,
- options.account_type)
- form_fields = [("subject", message)]
- if base:
- b = urlparse.urlparse(base)
- username, netloc = urllib.splituser(b.netloc)
- if username:
- logging.info("Removed username from base URL")
- base = urlparse.urlunparse((b.scheme, netloc, b.path, b.params,
- b.query, b.fragment))
- form_fields.append(("base", base))
- if options.issue:
- form_fields.append(("issue", str(options.issue)))
- if options.email:
- form_fields.append(("user", options.email))
- if options.reviewers:
- for reviewer in options.reviewers.split(','):
- CheckReviewer(reviewer)
- form_fields.append(("reviewers", options.reviewers))
- if options.cc:
- for cc in options.cc.split(','):
- CheckReviewer(cc)
- form_fields.append(("cc", options.cc))
- description = options.description
- if options.description_file:
- if options.description:
- ErrorExit("Can't specify description and description_file")
- file = open(options.description_file, 'r')
- description = file.read()
- file.close()
- if description:
- form_fields.append(("description", description))
- # Send a hash of all the base file so the server can determine if a copy
- # already exists in an earlier patchset.
- base_hashes = ""
- for file, info in files.iteritems():
- if not info[0] is None:
- checksum = md5(info[0]).hexdigest()
- if base_hashes:
- base_hashes += "|"
- base_hashes += checksum + ":" + file
- form_fields.append(("base_hashes", base_hashes))
- if options.private:
- if options.issue:
- print "Warning: Private flag ignored when updating an existing issue."
- else:
- form_fields.append(("private", "1"))
- # If we're uploading base files, don't send the email before the uploads, so
- # that it contains the file status.
- if options.send_mail and options.download_base:
- form_fields.append(("send_mail", "1"))
- if not options.download_base:
- form_fields.append(("content_upload", "1"))
- if len(data) > MAX_UPLOAD_SIZE:
- print "Patch is large, so uploading file patches separately."
- uploaded_diff_file = []
- form_fields.append(("separate_patches", "1"))
- else:
- uploaded_diff_file = [("data", "data.diff", data)]
- ctype, body = EncodeMultipartFormData(form_fields, uploaded_diff_file)
- response_body = rpc_server.Send("/upload", body, content_type=ctype)
- patchset = None
- if not options.download_base or not uploaded_diff_file:
- lines = response_body.splitlines()
- if len(lines) >= 2:
- msg = lines[0]
- patchset = lines[1].strip()
- patches = [x.split(" ", 1) for x in lines[2:]]
- else:
- msg = response_body
- else:
- msg = response_body
- StatusUpdate(msg)
- if not response_body.startswith("Issue created.") and \
- not response_body.startswith("Issue updated."):
- sys.exit(0)
- issue = msg[msg.rfind("/")+1:]
-
- if not uploaded_diff_file:
- result = UploadSeparatePatches(issue, rpc_server, patchset, data, options)
- if not options.download_base:
- patches = result
-
- if not options.download_base:
- vcs.UploadBaseFiles(issue, rpc_server, patches, patchset, options, files)
- if options.send_mail:
- rpc_server.Send("/" + issue + "/mail", payload="")
- return issue, patchset
-
-
-def main():
- try:
- logging.basicConfig(format=("%(asctime).19s %(levelname)s %(filename)s:"
- "%(lineno)s %(message)s "))
- os.environ['LC_ALL'] = 'C'
- RealMain(sys.argv)
- except KeyboardInterrupt:
- print
- StatusUpdate("Interrupted.")
- sys.exit(1)
-
-
-if __name__ == "__main__":
- main()
-
+#!/usr/bin/env python
+#
+# Copyright 2007 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Tool for uploading diffs from a version control system to the codereview app.
+
+Usage summary: upload.py [options] [-- diff_options] [path...]
+
+Diff options are passed to the diff command of the underlying system.
+
+Supported version control systems:
+ Git
+ Mercurial
+ Subversion
+
+It is important for Git/Mercurial users to specify a tree/node/branch to diff
+against by using the '--rev' option.
+"""
+# This code is derived from appcfg.py in the App Engine SDK (open source),
+# and from ASPN recipe #146306.
+
+import configparser
+import http.cookiejar
+import fnmatch
+import getpass
+import logging
+import mimetypes
+import optparse
+import os
+import re
+import socket
+import subprocess
+import sys
+import urllib.request, urllib.parse, urllib.error
+
+# The md5 module was deprecated in Python 2.5.
+try:
+ from hashlib import md5
+except ImportError:
+ from md5 import md5
+
+try:
+ import readline
+except ImportError:
+ pass
+
+try:
+ import keyring
+except ImportError:
+ keyring = None
+
+# The logging verbosity:
+# 0: Errors only.
+# 1: Status messages.
+# 2: Info logs.
+# 3: Debug logs.
+verbosity = 1
+
+# The account type used for authentication.
+# This line could be changed by the review server (see handler for
+# upload.py).
+AUTH_ACCOUNT_TYPE = "GOOGLE"
+
+# URL of the default review server. As for AUTH_ACCOUNT_TYPE, this line could be
+# changed by the review server (see handler for upload.py).
+DEFAULT_REVIEW_SERVER = "codereview.appspot.com"
+
+# Max size of patch or base file.
+MAX_UPLOAD_SIZE = 900 * 1024
+
+# Constants for version control names. Used by GuessVCSName.
+VCS_GIT = "Git"
+VCS_MERCURIAL = "Mercurial"
+VCS_SUBVERSION = "Subversion"
+VCS_UNKNOWN = "Unknown"
+
+# whitelist for non-binary filetypes which do not start with "text/"
+# .mm (Objective-C) shows up as application/x-freemind on my Linux box.
+TEXT_MIMETYPES = ['application/javascript', 'application/x-javascript',
+ 'application/xml', 'application/x-freemind',
+ 'application/x-sh']
+
+VCS_ABBREVIATIONS = {
+ VCS_MERCURIAL.lower(): VCS_MERCURIAL,
+ "hg": VCS_MERCURIAL,
+ VCS_SUBVERSION.lower(): VCS_SUBVERSION,
+ "svn": VCS_SUBVERSION,
+ VCS_GIT.lower(): VCS_GIT,
+}
+
+# The result of parsing Subversion's [auto-props] setting.
+svn_auto_props_map = None
+
+def GetEmail(prompt):
+ """Prompts the user for their email address and returns it.
+
+ The last used email address is saved to a file and offered up as a suggestion
+ to the user. If the user presses enter without typing in anything the last
+ used email address is used. If the user enters a new address, it is saved
+ for next time we prompt.
+
+ """
+ last_email_file_name = os.path.expanduser("~/.last_codereview_email_address")
+ last_email = ""
+ if os.path.exists(last_email_file_name):
+ try:
+ last_email_file = open(last_email_file_name, "r")
+ last_email = last_email_file.readline().strip("\n")
+ last_email_file.close()
+ prompt += " [%s]" % last_email
+ except IOError as e:
+ pass
+ email = input(prompt + ": ").strip()
+ if email:
+ try:
+ last_email_file = open(last_email_file_name, "w")
+ last_email_file.write(email)
+ last_email_file.close()
+ except IOError as e:
+ pass
+ else:
+ email = last_email
+ return email
+
+
+def StatusUpdate(msg):
+ """Print a status message to stdout.
+
+ If 'verbosity' is greater than 0, print the message.
+
+ Args:
+ msg: The string to print.
+ """
+ if verbosity > 0:
+ print(msg)
+
+
+def ErrorExit(msg):
+ """Print an error message to stderr and exit."""
+ print(msg, file=sys.stderr)
+ sys.exit(1)
+
+
+class ClientLoginError(urllib.error.HTTPError):
+ """Raised to indicate there was an error authenticating with ClientLogin."""
+
+ def __init__(self, url, code, msg, headers, args):
+ urllib.error.HTTPError.__init__(self, url, code, msg, headers, None)
+ self.args = args
+ self.reason = args["Error"]
+ self.info = args.get("Info", None)
+
+
+class AbstractRpcServer(object):
+ """Provides a common interface for a simple RPC server."""
+
+ def __init__(self, host, auth_function, host_override=None, extra_headers={},
+ save_cookies=False, account_type=AUTH_ACCOUNT_TYPE):
+ """Creates a new HttpRpcServer.
+
+ Args:
+ host: The host to send requests to.
+ auth_function: A function that takes no arguments and returns an
+ (email, password) tuple when called. Will be called if authentication
+ is required.
+ host_override: The host header to send to the server (defaults to host).
+ extra_headers: A dict of extra headers to append to every request.
+ save_cookies: If True, save the authentication cookies to local disk.
+ If False, use an in-memory cookiejar instead. Subclasses must
+ implement this functionality. Defaults to False.
+ account_type: Account type used for authentication. Defaults to
+ AUTH_ACCOUNT_TYPE.
+ """
+ self.host = host
+ if (not self.host.startswith("http://") and
+ not self.host.startswith("https://")):
+ self.host = "http://" + self.host
+ self.host_override = host_override
+ self.auth_function = auth_function
+ self.authenticated = False
+ self.extra_headers = extra_headers
+ self.save_cookies = save_cookies
+ self.account_type = account_type
+ self.opener = self._GetOpener()
+ if self.host_override:
+ logging.info("Server: %s; Host: %s", self.host, self.host_override)
+ else:
+ logging.info("Server: %s", self.host)
+
+ def _GetOpener(self):
+ """Returns an OpenerDirector for making HTTP requests.
+
+ Returns:
+      A urllib.request.OpenerDirector object.
+ """
+ raise NotImplementedError()
+
+ def _CreateRequest(self, url, data=None):
+ """Creates a new urllib request."""
+ logging.debug("Creating request for: '%s' with payload:\n%s", url, data)
+ req = urllib.request.Request(url, data=data)
+ if self.host_override:
+ req.add_header("Host", self.host_override)
+ for key, value in self.extra_headers.items():
+ req.add_header(key, value)
+ return req
+
+ def _GetAuthToken(self, email, password):
+ """Uses ClientLogin to authenticate the user, returning an auth token.
+
+ Args:
+ email: The user's email address
+ password: The user's password
+
+ Raises:
+ ClientLoginError: If there was an error authenticating with ClientLogin.
+ HTTPError: If there was some other form of HTTP error.
+
+ Returns:
+ The authentication token returned by ClientLogin.
+ """
+ account_type = self.account_type
+ if self.host.endswith(".google.com"):
+ # Needed for use inside Google.
+ account_type = "HOSTED"
+ req = self._CreateRequest(
+ url="https://www.google.com/accounts/ClientLogin",
+ data=urllib.parse.urlencode({
+ "Email": email,
+ "Passwd": password,
+ "service": "ah",
+ "source": "rietveld-codereview-upload",
+ "accountType": account_type,
+ }),
+ )
+ try:
+ response = self.opener.open(req)
+ response_body = response.read()
+ response_dict = dict(x.split("=")
+ for x in response_body.split("\n") if x)
+ return response_dict["Auth"]
+ except urllib.error.HTTPError as e:
+ if e.code == 403:
+ body = e.read()
+ response_dict = dict(x.split("=", 1) for x in body.split("\n") if x)
+ raise ClientLoginError(req.get_full_url(), e.code, e.msg,
+ e.headers, response_dict)
+ else:
+ raise
+
+ def _GetAuthCookie(self, auth_token):
+ """Fetches authentication cookies for an authentication token.
+
+ Args:
+ auth_token: The authentication token returned by ClientLogin.
+
+ Raises:
+ HTTPError: If there was an error fetching the authentication cookies.
+ """
+ # This is a dummy value to allow us to identify when we're successful.
+ continue_location = "http://localhost/"
+ args = {"continue": continue_location, "auth": auth_token}
+ req = self._CreateRequest("%s/_ah/login?%s" %
+ (self.host, urllib.parse.urlencode(args)))
+ try:
+ response = self.opener.open(req)
+ except urllib.error.HTTPError as e:
+ response = e
+ if (response.code != 302 or
+ response.info()["location"] != continue_location):
+ raise urllib.error.HTTPError(req.get_full_url(), response.code, response.msg,
+ response.headers, response.fp)
+ self.authenticated = True
+
+ def _Authenticate(self):
+ """Authenticates the user.
+
+ The authentication process works as follows:
+ 1) We get a username and password from the user
+ 2) We use ClientLogin to obtain an AUTH token for the user
+ (see http://code.google.com/apis/accounts/AuthForInstalledApps.html).
+ 3) We pass the auth token to /_ah/login on the server to obtain an
+ authentication cookie. If login was successful, it tries to redirect
+ us to the URL we provided.
+
+ If we attempt to access the upload API without first obtaining an
+ authentication cookie, it returns a 401 response (or a 302) and
+ directs us to authenticate ourselves with ClientLogin.
+ """
+ for i in range(3):
+ credentials = self.auth_function()
+ try:
+ auth_token = self._GetAuthToken(credentials[0], credentials[1])
+ except ClientLoginError as e:
+ print('', file=sys.stderr)
+ if e.reason == "BadAuthentication":
+ if e.info == "InvalidSecondFactor":
+ print((
+ "Use an application-specific password instead "
+ "of your regular account password.\n"
+ "See http://www.google.com/"
+ "support/accounts/bin/answer.py?answer=185833"), file=sys.stderr)
+ else:
+ print("Invalid username or password.", file=sys.stderr)
+ elif e.reason == "CaptchaRequired":
+ print((
+ "Please go to\n"
+ "https://www.google.com/accounts/DisplayUnlockCaptcha\n"
+ "and verify you are a human. Then try again.\n"
+ "If you are using a Google Apps account the URL is:\n"
+ "https://www.google.com/a/yourdomain.com/UnlockCaptcha"), file=sys.stderr)
+ elif e.reason == "NotVerified":
+ print("Account not verified.", file=sys.stderr)
+ elif e.reason == "TermsNotAgreed":
+ print("User has not agreed to TOS.", file=sys.stderr)
+ elif e.reason == "AccountDeleted":
+ print("The user account has been deleted.", file=sys.stderr)
+ elif e.reason == "AccountDisabled":
+ print("The user account has been disabled.", file=sys.stderr)
+ break
+ elif e.reason == "ServiceDisabled":
+ print(("The user's access to the service has been "
+ "disabled."), file=sys.stderr)
+ elif e.reason == "ServiceUnavailable":
+ print("The service is not available; try again later.", file=sys.stderr)
+ else:
+ # Unknown error.
+ raise
+ print('', file=sys.stderr)
+ continue
+ self._GetAuthCookie(auth_token)
+ return
+
+ def Send(self, request_path, payload=None,
+ content_type="application/octet-stream",
+ timeout=None,
+ extra_headers=None,
+ **kwargs):
+ """Sends an RPC and returns the response.
+
+ Args:
+ request_path: The path to send the request to, eg /api/appversion/create.
+ payload: The body of the request, or None to send an empty request.
+ content_type: The Content-Type header to use.
+ timeout: timeout in seconds; default None i.e. no timeout.
+ (Note: for large requests on OS X, the timeout doesn't work right.)
+ extra_headers: Dict containing additional HTTP headers that should be
+ included in the request (string header names mapped to their values),
+ or None to not include any additional headers.
+ kwargs: Any keyword arguments are converted into query string parameters.
+
+ Returns:
+ The response body, as a string.
+ """
+ # TODO: Don't require authentication. Let the server say
+ # whether it is necessary.
+ if not self.authenticated:
+ self._Authenticate()
+
+ old_timeout = socket.getdefaulttimeout()
+ socket.setdefaulttimeout(timeout)
+ try:
+ tries = 0
+ while True:
+ tries += 1
+ args = dict(kwargs)
+ url = "%s%s" % (self.host, request_path)
+ if args:
+ url += "?" + urllib.parse.urlencode(args)
+ req = self._CreateRequest(url=url, data=payload)
+ req.add_header("Content-Type", content_type)
+ if extra_headers:
+ for header, value in list(extra_headers.items()):
+ req.add_header(header, value)
+ try:
+ f = self.opener.open(req)
+ response = f.read()
+ f.close()
+ return response
+ except urllib.error.HTTPError as e:
+ if tries > 3:
+ raise
+ elif e.code == 401 or e.code == 302:
+ self._Authenticate()
+## elif e.code >= 500 and e.code < 600:
+## # Server Error - try again.
+## continue
+ elif e.code == 301:
+ # Handle permanent redirect manually.
+ url = e.info()["location"]
+ url_loc = urllib.parse.urlparse(url)
+ self.host = '%s://%s' % (url_loc[0], url_loc[1])
+ else:
+ raise
+ finally:
+ socket.setdefaulttimeout(old_timeout)
+
+
+class HttpRpcServer(AbstractRpcServer):
+ """Provides a simplified RPC-style interface for HTTP requests."""
+
+ def _Authenticate(self):
+ """Save the cookie jar after authentication."""
+ super(HttpRpcServer, self)._Authenticate()
+ if self.save_cookies:
+ StatusUpdate("Saving authentication cookies to %s" % self.cookie_file)
+ self.cookie_jar.save()
+
+ def _GetOpener(self):
+ """Returns an OpenerDirector that supports cookies and ignores redirects.
+
+ Returns:
+ A urllib2.OpenerDirector object.
+ """
+ opener = urllib.request.OpenerDirector()
+ opener.add_handler(urllib.request.ProxyHandler())
+ opener.add_handler(urllib.request.UnknownHandler())
+ opener.add_handler(urllib.request.HTTPHandler())
+ opener.add_handler(urllib.request.HTTPDefaultErrorHandler())
+ opener.add_handler(urllib.request.HTTPSHandler())
+    opener.add_handler(urllib.request.HTTPErrorProcessor())
+ if self.save_cookies:
+ self.cookie_file = os.path.expanduser("~/.codereview_upload_cookies")
+ self.cookie_jar = http.cookiejar.MozillaCookieJar(self.cookie_file)
+ if os.path.exists(self.cookie_file):
+ try:
+ self.cookie_jar.load()
+ self.authenticated = True
+ StatusUpdate("Loaded authentication cookies from %s" %
+ self.cookie_file)
+ except (http.cookiejar.LoadError, IOError):
+ # Failed to load cookies - just ignore them.
+ pass
+ else:
+ # Create an empty cookie file with mode 600
+ fd = os.open(self.cookie_file, os.O_CREAT, 0o600)
+ os.close(fd)
+ # Always chmod the cookie file
+ os.chmod(self.cookie_file, 0o600)
+ else:
+      # Don't save cookies across runs of upload.py.
+ self.cookie_jar = http.cookiejar.CookieJar()
+ opener.add_handler(urllib.request.HTTPCookieProcessor(self.cookie_jar))
+ return opener
+
+
+parser = optparse.OptionParser(
+ usage="%prog [options] [-- diff_options] [path...]")
+parser.add_option("-y", "--assume_yes", action="store_true",
+ dest="assume_yes", default=False,
+ help="Assume that the answer to yes/no questions is 'yes'.")
+# Logging
+group = parser.add_option_group("Logging options")
+group.add_option("-q", "--quiet", action="store_const", const=0,
+ dest="verbose", help="Print errors only.")
+group.add_option("-v", "--verbose", action="store_const", const=2,
+ dest="verbose", default=1,
+ help="Print info level logs.")
+group.add_option("--noisy", action="store_const", const=3,
+ dest="verbose", help="Print all logs.")
+# Review server
+group = parser.add_option_group("Review server options")
+group.add_option("-s", "--server", action="store", dest="server",
+ default=DEFAULT_REVIEW_SERVER,
+ metavar="SERVER",
+ help=("The server to upload to. The format is host[:port]. "
+ "Defaults to '%default'."))
+group.add_option("-e", "--email", action="store", dest="email",
+ metavar="EMAIL", default=None,
+ help="The username to use. Will prompt if omitted.")
+group.add_option("-H", "--host", action="store", dest="host",
+ metavar="HOST", default=None,
+ help="Overrides the Host header sent with all RPCs.")
+group.add_option("--no_cookies", action="store_false",
+ dest="save_cookies", default=True,
+ help="Do not save authentication cookies to local disk.")
+group.add_option("--account_type", action="store", dest="account_type",
+ metavar="TYPE", default=AUTH_ACCOUNT_TYPE,
+ choices=["GOOGLE", "HOSTED"],
+ help=("Override the default account type "
+ "(defaults to '%default', "
+ "valid choices are 'GOOGLE' and 'HOSTED')."))
+# Issue
+group = parser.add_option_group("Issue options")
+group.add_option("-d", "--description", action="store", dest="description",
+ metavar="DESCRIPTION", default=None,
+ help="Optional description when creating an issue.")
+group.add_option("-f", "--description_file", action="store",
+ dest="description_file", metavar="DESCRIPTION_FILE",
+ default=None,
+ help="Optional path of a file that contains "
+ "the description when creating an issue.")
+group.add_option("-r", "--reviewers", action="store", dest="reviewers",
+ metavar="REVIEWERS", default=None,
+ help="Add reviewers (comma separated email addresses).")
+group.add_option("--cc", action="store", dest="cc",
+ metavar="CC", default="dev@scons.tigris.org",
+ help="Add CC (comma separated email addresses).")
+group.add_option("--private", action="store_true", dest="private",
+ default=False,
+ help="Make the issue restricted to reviewers and those CCed")
+# Upload options
+group = parser.add_option_group("Patch options")
+group.add_option("-m", "--message", action="store", dest="message",
+ metavar="MESSAGE", default=None,
+ help="A message to identify the patch. "
+ "Will prompt if omitted.")
+group.add_option("-i", "--issue", type="int", action="store",
+ metavar="ISSUE", default=None,
+ help="Issue number to which to add. Defaults to new issue.")
+group.add_option("--base_url", action="store", dest="base_url", default=None,
+ help="Base repository URL (listed as \"Base URL\" when "
+ "viewing issue). If omitted, will be guessed automatically "
+ "for SVN repos and left blank for others.")
+group.add_option("--download_base", action="store_true",
+ dest="download_base", default=False,
+ help="Base files will be downloaded by the server "
+ "(side-by-side diffs may not work on files with CRs).")
+group.add_option("--rev", action="store", dest="revision",
+ metavar="REV", default=None,
+ help="Base revision/branch/tree to diff against. Use "
+ "rev1:rev2 range to review already committed changeset.")
+group.add_option("--send_mail", action="store_true",
+ dest="send_mail", default=True,
+ help="Send notification email to reviewers.")
+group.add_option("--vcs", action="store", dest="vcs",
+ metavar="VCS", default="svn",
+ help=("Version control system (optional, usually upload.py "
+ "already guesses the right VCS)."))
+group.add_option("--emulate_svn_auto_props", action="store_true",
+ dest="emulate_svn_auto_props", default=False,
+ help=("Emulate Subversion's auto properties feature."))
+
+
+def GetRpcServer(server, email=None, host_override=None, save_cookies=True,
+ account_type=AUTH_ACCOUNT_TYPE):
+ """Returns an instance of an AbstractRpcServer.
+
+ Args:
+ server: String containing the review server URL.
+ email: String containing user's email address.
+ host_override: If not None, string containing an alternate hostname to use
+ in the host header.
+ save_cookies: Whether authentication cookies should be saved to disk.
+ account_type: Account type for authentication, either 'GOOGLE'
+ or 'HOSTED'. Defaults to AUTH_ACCOUNT_TYPE.
+
+ Returns:
+ A new AbstractRpcServer, on which RPC calls can be made.
+ """
+
+ rpc_server_class = HttpRpcServer
+
+ # If this is the dev_appserver, use fake authentication.
+ host = (host_override or server).lower()
+ if re.match(r'(http://)?localhost([:/]|$)', host):
+ if email is None:
+ email = "test@example.com"
+ logging.info("Using debug user %s. Override with --email" % email)
+ server = rpc_server_class(
+ server,
+ lambda: (email, "password"),
+ host_override=host_override,
+ extra_headers={"Cookie":
+ 'dev_appserver_login="%s:False"' % email},
+ save_cookies=save_cookies,
+ account_type=account_type)
+ # Don't try to talk to ClientLogin.
+ server.authenticated = True
+ return server
+
+ def GetUserCredentials():
+ """Prompts the user for a username and password."""
+ # Create a local alias to the email variable to avoid Python's crazy
+ # scoping rules.
+ local_email = email
+ if local_email is None:
+ local_email = GetEmail("Email (login for uploading to %s)" % server)
+ password = None
+ if keyring:
+ password = keyring.get_password(host, local_email)
+ if password is not None:
+ print("Using password from system keyring.")
+ else:
+ password = getpass.getpass("Password for %s: " % local_email)
+ if keyring:
+ answer = input("Store password in system keyring?(y/N) ").strip()
+ if answer == "y":
+ keyring.set_password(host, local_email, password)
+ return (local_email, password)
+
+ return rpc_server_class(server,
+ GetUserCredentials,
+ host_override=host_override,
+ save_cookies=save_cookies)
+
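+# Illustrative use (as done elsewhere in this script): build the server with
+# GetRpcServer(options.server, options.email, options.host,
+# options.save_cookies, options.account_type) and then call
+# rpc_server.Send("/upload", body, content_type=ctype).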
+
+def EncodeMultipartFormData(fields, files):
+ """Encode form fields for multipart/form-data.
+
+ Args:
+ fields: A sequence of (name, value) elements for regular form fields.
+ files: A sequence of (name, filename, value) elements for data to be
+ uploaded as files.
+ Returns:
+    (content_type, body) ready for an http.client.HTTPConnection instance.
+
+ Source:
+ http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/146306
+ """
+ BOUNDARY = '-M-A-G-I-C---B-O-U-N-D-A-R-Y-'
+ CRLF = '\r\n'
+ lines = []
+ for (key, value) in fields:
+ lines.append('--' + BOUNDARY)
+ lines.append('Content-Disposition: form-data; name="%s"' % key)
+ lines.append('')
+ if isinstance(value, str):
+ value = value.encode('utf-8')
+ lines.append(value)
+ for (key, filename, value) in files:
+ lines.append('--' + BOUNDARY)
+ lines.append('Content-Disposition: form-data; name="%s"; filename="%s"' %
+ (key, filename))
+ lines.append('Content-Type: %s' % GetContentType(filename))
+ lines.append('')
+ if isinstance(value, str):
+ value = value.encode('utf-8')
+ lines.append(value)
+ lines.append('--' + BOUNDARY + '--')
+ lines.append('')
+ body = CRLF.join(lines)
+ content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
+ return content_type, body
+
+
+def GetContentType(filename):
+ """Helper to guess the content-type from the filename."""
+ return mimetypes.guess_type(filename)[0] or 'application/octet-stream'
+
+
+# Use a shell for subcommands on Windows to get a PATH search.
+use_shell = sys.platform.startswith("win")
+
+def RunShellWithReturnCodeAndStderr(command, print_output=False,
+ universal_newlines=True,
+ env=os.environ):
+ """Executes a command and returns the output from stdout, stderr and the return code.
+
+ Args:
+ command: Command to execute.
+ print_output: If True, the output is printed to stdout.
+ If False, both stdout and stderr are ignored.
+ universal_newlines: Use universal_newlines flag (default: True).
+
+ Returns:
+ Tuple (stdout, stderr, return code)
+ """
+ logging.info("Running %s", command)
+ env = env.copy()
+ env['LC_MESSAGES'] = 'C'
+ p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
+ shell=use_shell, universal_newlines=universal_newlines,
+ env=env)
+ if print_output:
+ output_array = []
+ while True:
+ line = p.stdout.readline()
+ if not line:
+ break
+ print(line.strip("\n"))
+ output_array.append(line)
+ output = "".join(output_array)
+ else:
+ output = p.stdout.read()
+ p.wait()
+ errout = p.stderr.read()
+ if print_output and errout:
+ print(errout, file=sys.stderr)
+ p.stdout.close()
+ p.stderr.close()
+ return output, errout, p.returncode
+
+def RunShellWithReturnCode(command, print_output=False,
+ universal_newlines=True,
+ env=os.environ):
+ """Executes a command and returns the output from stdout and the return code."""
+ out, err, retcode = RunShellWithReturnCodeAndStderr(command, print_output,
+ universal_newlines, env)
+ return out, retcode
+
+def RunShell(command, silent_ok=False, universal_newlines=True,
+ print_output=False, env=os.environ):
+ data, retcode = RunShellWithReturnCode(command, print_output,
+ universal_newlines, env)
+ if retcode:
+ ErrorExit("Got error status from %s:\n%s" % (command, data))
+ if not silent_ok and not data:
+ ErrorExit("No output from %s" % command)
+ return data
+
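+# Typical call patterns used throughout this script (illustrative):
+#   out = RunShell(["svn", "info"])                       # exits on error
+#   out, code = RunShellWithReturnCode(["svn", "info"])   # caller checks code
+#   out, err, code = RunShellWithReturnCodeAndStderr(["svn", "info"])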
+
+class VersionControlSystem(object):
+ """Abstract base class providing an interface to the VCS."""
+
+ def __init__(self, options):
+ """Constructor.
+
+ Args:
+ options: Command line options.
+ """
+ self.options = options
+
+ def PostProcessDiff(self, diff):
+ """Return the diff with any special post processing this VCS needs, e.g.
+ to include an svn-style "Index:"."""
+ return diff
+
+ def GenerateDiff(self, args):
+ """Return the current diff as a string.
+
+ Args:
+ args: Extra arguments to pass to the diff command.
+ """
+ raise NotImplementedError(
+ "abstract method -- subclass %s must override" % self.__class__)
+
+ def GetUnknownFiles(self):
+ """Return a list of files unknown to the VCS."""
+ raise NotImplementedError(
+ "abstract method -- subclass %s must override" % self.__class__)
+
+ def CheckForUnknownFiles(self):
+ """Show an "are you sure?" prompt if there are unknown files."""
+ unknown_files = self.GetUnknownFiles()
+ if unknown_files:
+ print("The following files are not added to version control:")
+ for line in unknown_files:
+ print(line)
+ prompt = "Are you sure to continue?(y/N) "
+ answer = input(prompt).strip()
+ if answer != "y":
+ ErrorExit("User aborted")
+
+ def GetBaseFile(self, filename):
+ """Get the content of the upstream version of a file.
+
+ Returns:
+ A tuple (base_content, new_content, is_binary, status)
+ base_content: The contents of the base file.
+ new_content: For text files, this is empty. For binary files, this is
+ the contents of the new file, since the diff output won't contain
+ information to reconstruct the current file.
+ is_binary: True iff the file is binary.
+ status: The status of the file.
+ """
+
+ raise NotImplementedError(
+ "abstract method -- subclass %s must override" % self.__class__)
+
+
+ def GetBaseFiles(self, diff):
+ """Helper that calls GetBase file for each file in the patch.
+
+ Returns:
+ A dictionary that maps from filename to GetBaseFile's tuple. Filenames
+ are retrieved based on lines that start with "Index:" or
+ "Property changes on:".
+ """
+ files = {}
+ for line in diff.splitlines(True):
+ if line.startswith('Index:') or line.startswith('Property changes on:'):
+ unused, filename = line.split(':', 1)
+ # On Windows if a file has property changes its filename uses '\'
+ # instead of '/'.
+ filename = filename.strip().replace('\\', '/')
+ files[filename] = self.GetBaseFile(filename)
+ return files
+
+
+ def UploadBaseFiles(self, issue, rpc_server, patch_list, patchset, options,
+ files):
+ """Uploads the base files (and if necessary, the current ones as well)."""
+
+ def UploadFile(filename, file_id, content, is_binary, status, is_base):
+ """Uploads a file to the server."""
+ file_too_large = False
+ if is_base:
+ type = "base"
+ else:
+ type = "current"
+ if len(content) > MAX_UPLOAD_SIZE:
+ print(("Not uploading the %s file for %s because it's too large." %
+ (type, filename)))
+ file_too_large = True
+ content = ""
+ checksum = md5(content).hexdigest()
+ if options.verbose > 0 and not file_too_large:
+ print("Uploading %s file for %s" % (type, filename))
+ url = "/%d/upload_content/%d/%d" % (int(issue), int(patchset), file_id)
+ form_fields = [("filename", filename),
+ ("status", status),
+ ("checksum", checksum),
+ ("is_binary", str(is_binary)),
+ ("is_current", str(not is_base)),
+ ]
+ if file_too_large:
+ form_fields.append(("file_too_large", "1"))
+ if options.email:
+ form_fields.append(("user", options.email))
+ ctype, body = EncodeMultipartFormData(form_fields,
+ [("data", filename, content)])
+ response_body = rpc_server.Send(url, body,
+ content_type=ctype)
+ if not response_body.startswith("OK"):
+ StatusUpdate(" --> %s" % response_body)
+ sys.exit(1)
+
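+    # patch_list is a list of [patch_key, filename] pairs; invert it into a
+    # filename -> patch_key map (setdefault keeps the first key seen per file).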
+ patches = dict()
+ [patches.setdefault(v, k) for k, v in patch_list]
+ for filename in list(patches.keys()):
+ base_content, new_content, is_binary, status = files[filename]
+ file_id_str = patches.get(filename)
+ if file_id_str.find("nobase") != -1:
+ base_content = None
+ file_id_str = file_id_str[file_id_str.rfind("_") + 1:]
+ file_id = int(file_id_str)
+ if base_content != None:
+ UploadFile(filename, file_id, base_content, is_binary, status, True)
+ if new_content != None:
+ UploadFile(filename, file_id, new_content, is_binary, status, False)
+
+ def IsImage(self, filename):
+ """Returns true if the filename has an image extension."""
+ mimetype = mimetypes.guess_type(filename)[0]
+ if not mimetype:
+ return False
+ return mimetype.startswith("image/")
+
+ def IsBinary(self, filename):
+ """Returns true if the guessed mimetyped isnt't in text group."""
+ mimetype = mimetypes.guess_type(filename)[0]
+ if not mimetype:
+ return False # e.g. README, "real" binaries usually have an extension
+ # special case for text files which don't start with text/
+ if mimetype in TEXT_MIMETYPES:
+ return False
+ return not mimetype.startswith("text/")
+
+
+class SubversionVCS(VersionControlSystem):
+ """Implementation of the VersionControlSystem interface for Subversion."""
+
+ def __init__(self, options):
+ super(SubversionVCS, self).__init__(options)
+ if self.options.revision:
+ match = re.match(r"(\d+)(:(\d+))?", self.options.revision)
+ if not match:
+ ErrorExit("Invalid Subversion revision %s." % self.options.revision)
+ self.rev_start = match.group(1)
+ self.rev_end = match.group(3)
+ else:
+ self.rev_start = self.rev_end = None
+ # Cache output from "svn list -r REVNO dirname".
+    # Keys: dirname, Values: 2-tuple (output for start rev and end rev).
+ self.svnls_cache = {}
+ # Base URL is required to fetch files deleted in an older revision.
+ # Result is cached to not guess it over and over again in GetBaseFile().
+ required = self.options.download_base or self.options.revision is not None
+ self.svn_base = self._GuessBase(required)
+
+ def GuessBase(self, required):
+ """Wrapper for _GuessBase."""
+ return self.svn_base
+
+ def _GuessBase(self, required):
+ """Returns base URL for current diff.
+
+ Args:
+      required: If True, exit with an error if the URL can't be guessed;
+        otherwise None is returned.
+ """
+ info = RunShell(["svn", "info"])
+ for line in info.splitlines():
+ if line.startswith("URL: "):
+ url = line.split()[1]
+ scheme, netloc, path, params, query, fragment = urllib.parse.urlparse(url)
+ guess = ""
+ if netloc == "svn.python.org" and scheme == "svn+ssh":
+ path = "projects" + path
+ scheme = "http"
+ guess = "Python "
+ elif netloc.endswith(".googlecode.com"):
+ scheme = "http"
+ guess = "Google Code "
+ path = path + "/"
+ base = urllib.parse.urlunparse((scheme, netloc, path, params,
+ query, fragment))
+ logging.info("Guessed %sbase = %s", guess, base)
+ return base
+ if required:
+ ErrorExit("Can't find URL in output from svn info")
+ return None
+
+ def GenerateDiff(self, args):
+ cmd = ["svn", "diff"]
+ if self.options.revision:
+ cmd += ["-r", self.options.revision]
+ cmd.extend(args)
+ data = RunShell(cmd)
+ count = 0
+ for line in data.splitlines():
+ if line.startswith("Index:") or line.startswith("Property changes on:"):
+ count += 1
+ logging.info(line)
+ if not count:
+ ErrorExit("No valid patches found in output from svn diff")
+ return data
+
+ def _CollapseKeywords(self, content, keyword_str):
+ """Collapses SVN keywords."""
+ # svn cat translates keywords but svn diff doesn't. As a result of this
+ # behavior patching.PatchChunks() fails with a chunk mismatch error.
+ # This part was originally written by the Review Board development team
+ # who had the same problem (http://reviews.review-board.org/r/276/).
+ # Mapping of keywords to known aliases
+ svn_keywords = {
+ # Standard keywords
+ 'Date': ['Date', 'LastChangedDate'],
+ 'Revision': ['Revision', 'LastChangedRevision', 'Rev'],
+ 'Author': ['Author', 'LastChangedBy'],
+ 'HeadURL': ['HeadURL', 'URL'],
+ 'Id': ['Id'],
+
+ # Aliases
+ 'LastChangedDate': ['LastChangedDate', 'Date'],
+ 'LastChangedRevision': ['LastChangedRevision', 'Rev', 'Revision'],
+ 'LastChangedBy': ['LastChangedBy', 'Author'],
+ 'URL': ['URL', 'HeadURL'],
+ }
+
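+    # Illustrative effect (not in the original): with keyword_str "Id", a line
+    # containing "$Id: file.c 123 author $" collapses to "$Id$", while the
+    # fixed-length form "$Id:: ... $" is padded with spaces to keep its width.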
+ def repl(m):
+ if m.group(2):
+ return "$%s::%s$" % (m.group(1), " " * len(m.group(3)))
+ return "$%s$" % m.group(1)
+ keywords = [keyword
+ for name in keyword_str.split(" ")
+ for keyword in svn_keywords.get(name, [])]
+ return re.sub(r"\$(%s):(:?)([^\$]+)\$" % '|'.join(keywords), repl, content)
+
+ def GetUnknownFiles(self):
+ status = RunShell(["svn", "status", "--ignore-externals"], silent_ok=True)
+ unknown_files = []
+ for line in status.split("\n"):
+ if line and line[0] == "?":
+ unknown_files.append(line)
+ return unknown_files
+
+ def ReadFile(self, filename):
+ """Returns the contents of a file."""
+ file = open(filename, 'rb')
+ result = ""
+ try:
+ result = file.read()
+ finally:
+ file.close()
+ return result
+
+ def GetStatus(self, filename):
+ """Returns the status of a file."""
+ if not self.options.revision:
+ status = RunShell(["svn", "status", "--ignore-externals", filename])
+ if not status:
+ ErrorExit("svn status returned no output for %s" % filename)
+ status_lines = status.splitlines()
+ # If file is in a cl, the output will begin with
+ # "\n--- Changelist 'cl_name':\n". See
+ # http://svn.collab.net/repos/svn/trunk/notes/changelist-design.txt
+ if (len(status_lines) == 3 and
+ not status_lines[0] and
+ status_lines[1].startswith("--- Changelist")):
+ status = status_lines[2]
+ else:
+ status = status_lines[0]
+ # If we have a revision to diff against we need to run "svn list"
+ # for the old and the new revision and compare the results to get
+ # the correct status for a file.
+ else:
+ dirname, relfilename = os.path.split(filename)
+ if dirname not in self.svnls_cache:
+ cmd = ["svn", "list", "-r", self.rev_start, dirname or "."]
+ out, err, returncode = RunShellWithReturnCodeAndStderr(cmd)
+ if returncode:
+          # Directory might not yet exist at start revision
+          # svn: Unable to find repository location for 'abc' in revision nnn
+          if re.match(r'^svn: Unable to find repository location for .+ in revision \d+', err):
+ old_files = ()
+ else:
+ ErrorExit("Failed to get status for %s:\n%s" % (filename, err))
+ else:
+ old_files = out.splitlines()
+ args = ["svn", "list"]
+ if self.rev_end:
+ args += ["-r", self.rev_end]
+ cmd = args + [dirname or "."]
+ out, returncode = RunShellWithReturnCode(cmd)
+ if returncode:
+ ErrorExit("Failed to run command %s" % cmd)
+ self.svnls_cache[dirname] = (old_files, out.splitlines())
+ old_files, new_files = self.svnls_cache[dirname]
+ if relfilename in old_files and relfilename not in new_files:
+ status = "D "
+ elif relfilename in old_files and relfilename in new_files:
+ status = "M "
+ else:
+ status = "A "
+ return status
+
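With a revision range, the status letter is derived purely from membership in the two cached "svn list" snapshots. A hypothetical sketch of that three-way decision (the file names are invented):

    old_files = {"keep.c", "gone.c"}    # from "svn list -r <start>"
    new_files = {"keep.c", "added.c"}   # from "svn list -r <end>"

    def status_for(name):
        # Deleted if only in the old listing, modified if in both, added otherwise.
        if name in old_files and name not in new_files:
            return "D"
        if name in old_files and name in new_files:
            return "M"
        return "A"

    print([(n, status_for(n)) for n in ("keep.c", "gone.c", "added.c")])
    # [('keep.c', 'M'), ('gone.c', 'D'), ('added.c', 'A')]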
+ def GetBaseFile(self, filename):
+ status = self.GetStatus(filename)
+ base_content = None
+ new_content = None
+
+ # If a file is copied its status will be "A +", which signifies
+ # "addition-with-history". See "svn st" for more information. We need to
+ # upload the original file or else diff parsing will fail if the file was
+ # edited.
+ if status[0] == "A" and status[3] != "+":
+ # We'll need to upload the new content if we're adding a binary file
+ # since diff's output won't contain it.
+ mimetype = RunShell(["svn", "propget", "svn:mime-type", filename],
+ silent_ok=True)
+ base_content = ""
+ is_binary = bool(mimetype) and not mimetype.startswith("text/")
+ if is_binary and self.IsImage(filename):
+ new_content = self.ReadFile(filename)
+ elif (status[0] in ("M", "D", "R") or
+ (status[0] == "A" and status[3] == "+") or # Copied file.
+ (status[0] == " " and status[1] == "M")): # Property change.
+ args = []
+ if self.options.revision:
+ url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
+ else:
+ # Don't change filename, it's needed later.
+ url = filename
+ args += ["-r", "BASE"]
+ cmd = ["svn"] + args + ["propget", "svn:mime-type", url]
+ mimetype, returncode = RunShellWithReturnCode(cmd)
+ if returncode:
+ # File does not exist in the requested revision.
+ # Reset mimetype, it contains an error message.
+ mimetype = ""
+ else:
+ mimetype = mimetype.strip()
+ get_base = False
+ is_binary = (bool(mimetype) and
+ not mimetype.startswith("text/") and
+ not mimetype in TEXT_MIMETYPES)
+ if status[0] == " ":
+ # Empty base content just to force an upload.
+ base_content = ""
+ elif is_binary:
+ if self.IsImage(filename):
+ get_base = True
+ if status[0] == "M":
+ if not self.rev_end:
+ new_content = self.ReadFile(filename)
+ else:
+ url = "%s/%s@%s" % (self.svn_base, filename, self.rev_end)
+ new_content = RunShell(["svn", "cat", url],
+ universal_newlines=True, silent_ok=True)
+ else:
+ base_content = ""
+ else:
+ get_base = True
+
+ if get_base:
+ if is_binary:
+ universal_newlines = False
+ else:
+ universal_newlines = True
+ if self.rev_start:
+ # "svn cat -r REV delete_file.txt" doesn't work. cat requires
+ # the full URL with "@REV" appended instead of using "-r" option.
+ url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
+ base_content = RunShell(["svn", "cat", url],
+ universal_newlines=universal_newlines,
+ silent_ok=True)
+ else:
+ base_content, ret_code = RunShellWithReturnCode(
+ ["svn", "cat", filename], universal_newlines=universal_newlines)
+ if ret_code and status[0] == "R":
+ # It's a replaced file without local history (see issue208).
+ # The base file needs to be fetched from the server.
+ url = "%s/%s" % (self.svn_base, filename)
+ base_content = RunShell(["svn", "cat", url],
+ universal_newlines=universal_newlines,
+ silent_ok=True)
+ elif ret_code:
+ ErrorExit("Got error status from 'svn cat %s'" % filename)
+ if not is_binary:
+ args = []
+ if self.rev_start:
+ url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
+ else:
+ url = filename
+ args += ["-r", "BASE"]
+ cmd = ["svn"] + args + ["propget", "svn:keywords", url]
+ keywords, returncode = RunShellWithReturnCode(cmd)
+ if keywords and not returncode:
+ base_content = self._CollapseKeywords(base_content, keywords)
+ else:
+ StatusUpdate("svn status returned unexpected output: %s" % status)
+ sys.exit(1)
+ return base_content, new_content, is_binary, status[0:5]
+
+
+class GitVCS(VersionControlSystem):
+ """Implementation of the VersionControlSystem interface for Git."""
+
+ def __init__(self, options):
+ super(GitVCS, self).__init__(options)
+ # Map of filename -> (hash before, hash after) of base file.
+ # Hashes for "no such file" are represented as None.
+ self.hashes = {}
+ # Map of new filename -> old filename for renames.
+ self.renames = {}
+
+ def PostProcessDiff(self, gitdiff):
+ """Converts the diff output to include an svn-style "Index:" line as well
+ as record the hashes of the files, so we can upload them along with our
+ diff."""
+    # Special hash used by git to indicate "no such content".
+ NULL_HASH = "0"*40
+
+ def IsFileNew(filename):
+ return filename in self.hashes and self.hashes[filename][0] is None
+
+ def AddSubversionPropertyChange(filename):
+ """Add svn's property change information into the patch if given file is
+ new file.
+
+ We use Subversion's auto-props setting to retrieve its property.
+ See http://svnbook.red-bean.com/en/1.1/ch07.html#svn-ch-7-sect-1.3.2 for
+ Subversion's [auto-props] setting.
+ """
+ if self.options.emulate_svn_auto_props and IsFileNew(filename):
+ svnprops = GetSubversionPropertyChanges(filename)
+ if svnprops:
+ svndiff.append("\n" + svnprops + "\n")
+
+ svndiff = []
+ filecount = 0
+ filename = None
+ for line in gitdiff.splitlines():
+ match = re.match(r"diff --git a/(.*) b/(.*)$", line)
+ if match:
+ # Add auto property here for previously seen file.
+ if filename is not None:
+ AddSubversionPropertyChange(filename)
+ filecount += 1
+ # Intentionally use the "after" filename so we can show renames.
+ filename = match.group(2)
+ svndiff.append("Index: %s\n" % filename)
+ if match.group(1) != match.group(2):
+ self.renames[match.group(2)] = match.group(1)
+ else:
+ # The "index" line in a git diff looks like this (long hashes elided):
+ # index 82c0d44..b2cee3f 100755
+ # We want to save the left hash, as that identifies the base file.
+ match = re.match(r"index (\w+)\.\.(\w+)", line)
+ if match:
+ before, after = (match.group(1), match.group(2))
+ if before == NULL_HASH:
+ before = None
+ if after == NULL_HASH:
+ after = None
+ self.hashes[filename] = (before, after)
+ svndiff.append(line + "\n")
+ if not filecount:
+ ErrorExit("No valid patches found in output from git diff")
+ # Add auto property for the last seen file.
+ assert filename is not None
+ AddSubversionPropertyChange(filename)
+ return "".join(svndiff)
+
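PostProcessDiff() keys everything off two git header lines: "diff --git a/... b/..." (file name and renames) and "index <before>..<after>" (blob hashes, with forty zeros meaning "no such content"). A self-contained sketch of that parsing over a fabricated two-line header:

    import re

    NULL_HASH = "0" * 40
    header = ["diff --git a/old.py b/new.py",
              "index " + NULL_HASH + "..b2cee3f 100644"]

    hashes, renames, filename = {}, {}, None
    for line in header:
        m = re.match(r"diff --git a/(.*) b/(.*)$", line)
        if m:
            filename = m.group(2)                  # use the "after" name
            if m.group(1) != m.group(2):
                renames[filename] = m.group(1)
            continue
        m = re.match(r"index (\w+)\.\.(\w+)", line)
        if m:
            before = None if m.group(1) == NULL_HASH else m.group(1)
            after = None if m.group(2) == NULL_HASH else m.group(2)
            hashes[filename] = (before, after)

    print(renames)   # {'new.py': 'old.py'}
    print(hashes)    # {'new.py': (None, 'b2cee3f')}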
+ def GenerateDiff(self, extra_args):
+ extra_args = extra_args[:]
+ if self.options.revision:
+ if ":" in self.options.revision:
+ extra_args = self.options.revision.split(":", 1) + extra_args
+ else:
+ extra_args = [self.options.revision] + extra_args
+
+ # --no-ext-diff is broken in some versions of Git, so try to work around
+ # this by overriding the environment (but there is still a problem if the
+ # git config key "diff.external" is used).
+ env = os.environ.copy()
+ if 'GIT_EXTERNAL_DIFF' in env: del env['GIT_EXTERNAL_DIFF']
+ return RunShell(["git", "diff", "--no-ext-diff", "--full-index", "-M"]
+ + extra_args, env=env)
+
+ def GetUnknownFiles(self):
+ status = RunShell(["git", "ls-files", "--exclude-standard", "--others"],
+ silent_ok=True)
+ return status.splitlines()
+
+ def GetFileContent(self, file_hash, is_binary):
+ """Returns the content of a file identified by its git hash."""
+ data, retcode = RunShellWithReturnCode(["git", "show", file_hash],
+ universal_newlines=not is_binary)
+ if retcode:
+ ErrorExit("Got error status from 'git show %s'" % file_hash)
+ return data
+
+ def GetBaseFile(self, filename):
+ hash_before, hash_after = self.hashes.get(filename, (None,None))
+ base_content = None
+ new_content = None
+ is_binary = self.IsBinary(filename)
+ status = None
+
+ if filename in self.renames:
+ status = "A +" # Match svn attribute name for renames.
+ if filename not in self.hashes:
+ # If a rename doesn't change the content, we never get a hash.
+ base_content = RunShell(["git", "show", "HEAD:" + filename])
+ elif not hash_before:
+ status = "A"
+ base_content = ""
+ elif not hash_after:
+ status = "D"
+ else:
+ status = "M"
+
+ is_image = self.IsImage(filename)
+
+ # Grab the before/after content if we need it.
+ # We should include file contents if it's text or it's an image.
+ if not is_binary or is_image:
+ # Grab the base content if we don't have it already.
+ if base_content is None and hash_before:
+ base_content = self.GetFileContent(hash_before, is_binary)
+      # Only include the "after" file if it's an image; otherwise it
+      # is reconstructed from the diff.
+ if is_image and hash_after:
+ new_content = self.GetFileContent(hash_after, is_binary)
+
+ return (base_content, new_content, is_binary, status)
+
+
+class MercurialVCS(VersionControlSystem):
+ """Implementation of the VersionControlSystem interface for Mercurial."""
+
+ def __init__(self, options, repo_dir):
+ super(MercurialVCS, self).__init__(options)
+ # Absolute path to repository (we can be in a subdir)
+ self.repo_dir = os.path.normpath(repo_dir)
+ # Compute the subdir
+ cwd = os.path.normpath(os.getcwd())
+ assert cwd.startswith(self.repo_dir)
+ self.subdir = cwd[len(self.repo_dir):].lstrip(r"\/")
+ if self.options.revision:
+ self.base_rev = self.options.revision
+ else:
+ self.base_rev = RunShell(["hg", "parent", "-q"]).split(':')[1].strip()
+
+ def _GetRelPath(self, filename):
+ """Get relative path of a file according to the current directory,
+ given its logical path in the repo."""
+ assert filename.startswith(self.subdir), (filename, self.subdir)
+ return filename[len(self.subdir):].lstrip(r"\/")
+
+ def GenerateDiff(self, extra_args):
+ cmd = ["hg", "diff", "--git", "-r", self.base_rev] + extra_args
+ data = RunShell(cmd, silent_ok=True)
+ svndiff = []
+ filecount = 0
+ for line in data.splitlines():
+ m = re.match("diff --git a/(\S+) b/(\S+)", line)
+ if m:
+        # Modify line to make it look as if it comes from svn diff.
+ # With this modification no changes on the server side are required
+ # to make upload.py work with Mercurial repos.
+ # NOTE: for proper handling of moved/copied files, we have to use
+ # the second filename.
+ filename = m.group(2)
+ svndiff.append("Index: %s" % filename)
+ svndiff.append("=" * 67)
+ filecount += 1
+ logging.info(line)
+ else:
+ svndiff.append(line)
+ if not filecount:
+ ErrorExit("No valid patches found in output from hg diff")
+ return "\n".join(svndiff) + "\n"
+
+ def GetUnknownFiles(self):
+ """Return a list of files unknown to the VCS."""
+ args = []
+ status = RunShell(["hg", "status", "--rev", self.base_rev, "-u", "."],
+ silent_ok=True)
+ unknown_files = []
+ for line in status.splitlines():
+ st, fn = line.split(" ", 1)
+ if st == "?":
+ unknown_files.append(fn)
+ return unknown_files
+
+ def GetBaseFile(self, filename):
+ # "hg status" and "hg cat" both take a path relative to the current subdir
+ # rather than to the repo root, but "hg diff" has given us the full path
+ # to the repo root.
+ base_content = ""
+ new_content = None
+ is_binary = False
+ oldrelpath = relpath = self._GetRelPath(filename)
+ # "hg status -C" returns two lines for moved/copied files, one otherwise
+ out = RunShell(["hg", "status", "-C", "--rev", self.base_rev, relpath])
+ out = out.splitlines()
+ # HACK: strip error message about missing file/directory if it isn't in
+ # the working copy
+ if out[0].startswith('%s: ' % relpath):
+ out = out[1:]
+ status, _ = out[0].split(' ', 1)
+ if len(out) > 1 and status == "A":
+ # Moved/copied => considered as modified, use old filename to
+ # retrieve base contents
+ oldrelpath = out[1].strip()
+ status = "M"
+ if ":" in self.base_rev:
+ base_rev = self.base_rev.split(":", 1)[0]
+ else:
+ base_rev = self.base_rev
+ if status != "A":
+ base_content = RunShell(["hg", "cat", "-r", base_rev, oldrelpath],
+ silent_ok=True)
+ is_binary = "\0" in base_content # Mercurial's heuristic
+ if status != "R":
+ new_content = open(relpath, "rb").read()
+ is_binary = is_binary or "\0" in new_content
+ if is_binary and base_content:
+ # Fetch again without converting newlines
+ base_content = RunShell(["hg", "cat", "-r", base_rev, oldrelpath],
+ silent_ok=True, universal_newlines=False)
+ if not is_binary or not self.IsImage(relpath):
+ new_content = None
+ return base_content, new_content, is_binary, status
+
+
+# NOTE: The SplitPatch function is duplicated in engine.py, keep them in sync.
+def SplitPatch(data):
+ """Splits a patch into separate pieces for each file.
+
+ Args:
+ data: A string containing the output of svn diff.
+
+ Returns:
+    A list of 2-tuples (filename, text) where text is the svn diff output
+ pertaining to filename.
+ """
+ patches = []
+ filename = None
+ diff = []
+ for line in data.splitlines(True):
+ new_filename = None
+ if line.startswith('Index:'):
+ unused, new_filename = line.split(':', 1)
+ new_filename = new_filename.strip()
+ elif line.startswith('Property changes on:'):
+ unused, temp_filename = line.split(':', 1)
+      # When a file is modified, paths use '/' between directories; however,
+      # when a property is modified, '\' is used on Windows. Make them the
+      # same, otherwise the file shows up twice.
+ temp_filename = temp_filename.strip().replace('\\', '/')
+ if temp_filename != filename:
+ # File has property changes but no modifications, create a new diff.
+ new_filename = temp_filename
+ if new_filename:
+ if filename and diff:
+ patches.append((filename, ''.join(diff)))
+ filename = new_filename
+ diff = [line]
+ continue
+ if diff is not None:
+ diff.append(line)
+ if filename and diff:
+ patches.append((filename, ''.join(diff)))
+ return patches
+
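A simplified, self-contained sketch of the per-file split performed above, with the property-change handling omitted and a fabricated svn-style diff as input:

    sample = ("Index: foo.c\n"
              "--- foo.c\n"
              "+++ foo.c\n"
              "@@ -1 +1 @@\n"
              "-a\n"
              "+b\n"
              "Index: bar.h\n"
              "--- bar.h\n"
              "+++ bar.h\n")

    patches, filename, chunk = [], None, []
    for line in sample.splitlines(True):
        if line.startswith("Index:"):
            if filename and chunk:
                patches.append((filename, "".join(chunk)))
            filename, chunk = line.split(":", 1)[1].strip(), [line]
            continue
        chunk.append(line)
    if filename and chunk:
        patches.append((filename, "".join(chunk)))

    print([name for name, _ in patches])   # ['foo.c', 'bar.h']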
+
+def UploadSeparatePatches(issue, rpc_server, patchset, data, options):
+ """Uploads a separate patch for each file in the diff output.
+
+ Returns a list of [patch_key, filename] for each file.
+ """
+ patches = SplitPatch(data)
+ rv = []
+ for patch in patches:
+ if len(patch[1]) > MAX_UPLOAD_SIZE:
+ print(("Not uploading the patch for " + patch[0] +
+ " because the file is too large."))
+ continue
+ form_fields = [("filename", patch[0])]
+ if not options.download_base:
+ form_fields.append(("content_upload", "1"))
+ files = [("data", "data.diff", patch[1])]
+ ctype, body = EncodeMultipartFormData(form_fields, files)
+ url = "/%d/upload_patch/%d" % (int(issue), int(patchset))
+ print("Uploading patch for " + patch[0])
+ response_body = rpc_server.Send(url, body, content_type=ctype)
+ lines = response_body.splitlines()
+ if not lines or lines[0] != "OK":
+ StatusUpdate(" --> %s" % response_body)
+ sys.exit(1)
+ rv.append([lines[1], patch[0]])
+ return rv
+
+
+def GuessVCSName():
+ """Helper to guess the version control system.
+
+ This examines the current directory, guesses which VersionControlSystem
+  we're using, and returns a string indicating which VCS is detected.
+
+ Returns:
+ A pair (vcs, output). vcs is a string indicating which VCS was detected
+ and is one of VCS_GIT, VCS_MERCURIAL, VCS_SUBVERSION, or VCS_UNKNOWN.
+ output is a string containing any interesting output from the vcs
+ detection routine, or None if there is nothing interesting.
+ """
+ def RunDetectCommand(vcs_type, command):
+ """Helper to detect VCS by executing command.
+
+ Returns:
+ A pair (vcs, output) or None. Throws exception on error.
+ """
+ try:
+ out, returncode = RunShellWithReturnCode(command)
+ if returncode == 0:
+ return (vcs_type, out.strip())
+ except OSError as xxx_todo_changeme:
+ (errcode, message) = xxx_todo_changeme.args
+ if errcode != errno.ENOENT: # command not found code
+ raise
+
+ # Mercurial has a command to get the base directory of a repository
+ # Try running it, but don't die if we don't have hg installed.
+ # NOTE: we try Mercurial first as it can sit on top of an SVN working copy.
+ res = RunDetectCommand(VCS_MERCURIAL, ["hg", "root"])
+ if res != None:
+ return res
+
+ # Subversion has a .svn in all working directories.
+ if os.path.isdir('.svn'):
+ logging.info("Guessed VCS = Subversion")
+ return (VCS_SUBVERSION, None)
+
+ # Git has a command to test if you're in a git tree.
+ # Try running it, but don't die if we don't have git installed.
+ res = RunDetectCommand(VCS_GIT, ["git", "rev-parse",
+ "--is-inside-work-tree"])
+ if res != None:
+ return res
+
+ return (VCS_UNKNOWN, None)
+
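The probe order matters: Mercurial is tried first because an hg checkout can sit on top of an SVN working copy, and the .svn check comes before git. A rough standalone sketch of the same order, where a missing binary or a non-zero exit simply means "not this VCS" (an illustration only, not the script's API):

    import os
    import subprocess

    def guess_vcs_name():
        def probe(cmd):
            try:
                return subprocess.call(cmd, stdout=subprocess.DEVNULL,
                                       stderr=subprocess.DEVNULL) == 0
            except OSError:      # binary not installed
                return False

        if probe(["hg", "root"]):
            return "Mercurial"
        if os.path.isdir(".svn"):
            return "Subversion"
        if probe(["git", "rev-parse", "--is-inside-work-tree"]):
            return "Git"
        return "Unknown"

    print(guess_vcs_name())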
+
+def GuessVCS(options):
+ """Helper to guess the version control system.
+
+ This verifies any user-specified VersionControlSystem (by command line
+ or environment variable). If the user didn't specify one, this examines
+ the current directory, guesses which VersionControlSystem we're using,
+ and returns an instance of the appropriate class. Exit with an error
+ if we can't figure it out.
+
+ Returns:
+ A VersionControlSystem instance. Exits if the VCS can't be guessed.
+ """
+ vcs = options.vcs
+ if not vcs:
+ vcs = os.environ.get("CODEREVIEW_VCS")
+ if vcs:
+ v = VCS_ABBREVIATIONS.get(vcs.lower())
+ if v is None:
+ ErrorExit("Unknown version control system %r specified." % vcs)
+ (vcs, extra_output) = (v, None)
+ else:
+ (vcs, extra_output) = GuessVCSName()
+
+ if vcs == VCS_MERCURIAL:
+ if extra_output is None:
+ extra_output = RunShell(["hg", "root"]).strip()
+ return MercurialVCS(options, extra_output)
+ elif vcs == VCS_SUBVERSION:
+ return SubversionVCS(options)
+ elif vcs == VCS_GIT:
+ return GitVCS(options)
+
+ ErrorExit(("Could not guess version control system. "
+ "Are you in a working copy directory?"))
+
+
+def CheckReviewer(reviewer):
+ """Validate a reviewer -- either a nickname or an email addres.
+
+ Args:
+ reviewer: A nickname or an email address.
+
+ Calls ErrorExit() if it is an invalid email address.
+ """
+ if "@" not in reviewer:
+ return # Assume nickname
+ parts = reviewer.split("@")
+ if len(parts) > 2:
+ ErrorExit("Invalid email address: %r" % reviewer)
+ assert len(parts) == 2
+ if "." not in parts[1]:
+ ErrorExit("Invalid email address: %r" % reviewer)
+
+
+def LoadSubversionAutoProperties():
+ """Returns the content of [auto-props] section of Subversion's config file as
+ a dictionary.
+
+ Returns:
+ A dictionary whose key-value pair corresponds the [auto-props] section's
+ key-value pair.
+ In following cases, returns empty dictionary:
+ - config file doesn't exist, or
+ - 'enable-auto-props' is not set to 'true-like-value' in [miscellany].
+ """
+ if os.name == 'nt':
+ subversion_config = os.environ.get("APPDATA") + "\\Subversion\\config"
+ else:
+ subversion_config = os.path.expanduser("~/.subversion/config")
+ if not os.path.exists(subversion_config):
+ return {}
+ config = configparser.ConfigParser()
+ config.read(subversion_config)
+ if (config.has_section("miscellany") and
+ config.has_option("miscellany", "enable-auto-props") and
+ config.getboolean("miscellany", "enable-auto-props") and
+ config.has_section("auto-props")):
+ props = {}
+ for file_pattern in config.options("auto-props"):
+ props[file_pattern] = ParseSubversionPropertyValues(
+ config.get("auto-props", file_pattern))
+ return props
+ else:
+ return {}
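The [auto-props] lookup is plain stdlib configparser work. A minimal sketch that parses an in-memory config shaped like ~/.subversion/config (the patterns and values are made up):

    import configparser

    sample = "\n".join([
        "[miscellany]",
        "enable-auto-props = yes",
        "",
        "[auto-props]",
        "*.c = svn:eol-style=native",
        "*.png = svn:mime-type=image/png",
    ])

    config = configparser.ConfigParser()
    config.read_string(sample)   # the script uses config.read() on the real file
    if (config.has_section("miscellany")
            and config.getboolean("miscellany", "enable-auto-props")
            and config.has_section("auto-props")):
        for pattern in config.options("auto-props"):
            print(pattern, "->", config.get("auto-props", pattern))
    # *.c -> svn:eol-style=native
    # *.png -> svn:mime-type=image/png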
+
+def ParseSubversionPropertyValues(props):
+ """Parse the given property value which comes from [auto-props] section and
+ returns a list whose element is a (svn_prop_key, svn_prop_value) pair.
+
+ See the following doctest for example.
+
+ >>> ParseSubversionPropertyValues('svn:eol-style=LF')
+ [('svn:eol-style', 'LF')]
+ >>> ParseSubversionPropertyValues('svn:mime-type=image/jpeg')
+ [('svn:mime-type', 'image/jpeg')]
+ >>> ParseSubversionPropertyValues('svn:eol-style=LF;svn:executable')
+ [('svn:eol-style', 'LF'), ('svn:executable', '*')]
+ """
+ key_value_pairs = []
+ for prop in props.split(";"):
+ key_value = prop.split("=")
+ assert len(key_value) <= 2
+ if len(key_value) == 1:
+      # If no value is given, use '*', following Subversion's convention.
+ key_value_pairs.append((key_value[0], "*"))
+ else:
+ key_value_pairs.append((key_value[0], key_value[1]))
+ return key_value_pairs
+
+
+def GetSubversionPropertyChanges(filename):
+ """Return a Subversion's 'Property changes on ...' string, which is used in
+ the patch file.
+
+ Args:
+ filename: filename whose property might be set by [auto-props] config.
+
+ Returns:
+    A string like 'Property changes on |filename| ...' if the given |filename|
+    matches any entries in the [auto-props] section. Otherwise, None.
+ """
+ global svn_auto_props_map
+ if svn_auto_props_map is None:
+ svn_auto_props_map = LoadSubversionAutoProperties()
+
+ all_props = []
+ for file_pattern, props in list(svn_auto_props_map.items()):
+ if fnmatch.fnmatch(filename, file_pattern):
+ all_props.extend(props)
+ if all_props:
+ return FormatSubversionPropertyChanges(filename, all_props)
+ return None
+
+
+def FormatSubversionPropertyChanges(filename, props):
+ """Returns Subversion's 'Property changes on ...' strings using given filename
+ and properties.
+
+ Args:
+ filename: filename
+ props: A list whose element is a (svn_prop_key, svn_prop_value) pair.
+
+ Returns:
+ A string which can be used in the patch file for Subversion.
+
+ See the following doctest for example.
+
+ >>> print FormatSubversionPropertyChanges('foo.cc', [('svn:eol-style', 'LF')])
+ Property changes on: foo.cc
+ ___________________________________________________________________
+ Added: svn:eol-style
+ + LF
+ <BLANKLINE>
+ """
+ prop_changes_lines = [
+ "Property changes on: %s" % filename,
+ "___________________________________________________________________"]
+ for key, value in props:
+ prop_changes_lines.append("Added: " + key)
+ prop_changes_lines.append(" + " + value)
+ return "\n".join(prop_changes_lines) + "\n"
+
+
+def RealMain(argv, data=None):
+ """The real main function.
+
+ Args:
+ argv: Command line arguments.
+ data: Diff contents. If None (default) the diff is generated by
+ the VersionControlSystem implementation returned by GuessVCS().
+
+ Returns:
+ A 2-tuple (issue id, patchset id).
+ The patchset id is None if the base files are not uploaded by this
+ script (applies only to SVN checkouts).
+ """
+ options, args = parser.parse_args(argv[1:])
+ global verbosity
+ verbosity = options.verbose
+ if verbosity >= 3:
+ logging.getLogger().setLevel(logging.DEBUG)
+ elif verbosity >= 2:
+ logging.getLogger().setLevel(logging.INFO)
+
+ vcs = GuessVCS(options)
+
+ base = options.base_url
+ if isinstance(vcs, SubversionVCS):
+ # Guessing the base field is only supported for Subversion.
+ # Note: Fetching base files may become deprecated in future releases.
+ guessed_base = vcs.GuessBase(options.download_base)
+ if base:
+ if guessed_base and base != guessed_base:
+ print("Using base URL \"%s\" from --base_url instead of \"%s\"" % \
+ (base, guessed_base))
+ else:
+ base = guessed_base
+
+ if not base and options.download_base:
+ options.download_base = True
+ logging.info("Enabled upload of base file")
+ if not options.assume_yes:
+ vcs.CheckForUnknownFiles()
+ if data is None:
+ data = vcs.GenerateDiff(args)
+ data = vcs.PostProcessDiff(data)
+ files = vcs.GetBaseFiles(data)
+ if verbosity >= 1:
+ print("Upload server:", options.server, "(change with -s/--server)")
+ if options.issue:
+ prompt = "Message describing this patch set: "
+ else:
+ prompt = "New issue subject: "
+ message = options.message or input(prompt).strip()
+ if not message:
+ ErrorExit("A non-empty message is required")
+ rpc_server = GetRpcServer(options.server,
+ options.email,
+ options.host,
+ options.save_cookies,
+ options.account_type)
+ form_fields = [("subject", message)]
+ if base:
+ b = urllib.parse.urlparse(base)
+ username, netloc = urllib.parse.splituser(b.netloc)
+ if username:
+ logging.info("Removed username from base URL")
+ base = urllib.parse.urlunparse((b.scheme, netloc, b.path, b.params,
+ b.query, b.fragment))
+ form_fields.append(("base", base))
+ if options.issue:
+ form_fields.append(("issue", str(options.issue)))
+ if options.email:
+ form_fields.append(("user", options.email))
+ if options.reviewers:
+ for reviewer in options.reviewers.split(','):
+ CheckReviewer(reviewer)
+ form_fields.append(("reviewers", options.reviewers))
+ if options.cc:
+ for cc in options.cc.split(','):
+ CheckReviewer(cc)
+ form_fields.append(("cc", options.cc))
+ description = options.description
+ if options.description_file:
+ if options.description:
+ ErrorExit("Can't specify description and description_file")
+ file = open(options.description_file, 'r')
+ description = file.read()
+ file.close()
+ if description:
+ form_fields.append(("description", description))
+  # Send a hash of all the base files so the server can determine if a copy
+ # already exists in an earlier patchset.
+ base_hashes = ""
+ for file, info in files.items():
+ if not info[0] is None:
+ checksum = md5(info[0]).hexdigest()
+ if base_hashes:
+ base_hashes += "|"
+ base_hashes += checksum + ":" + file
+ form_fields.append(("base_hashes", base_hashes))
+ if options.private:
+ if options.issue:
+ print("Warning: Private flag ignored when updating an existing issue.")
+ else:
+ form_fields.append(("private", "1"))
+ # If we're uploading base files, don't send the email before the uploads, so
+ # that it contains the file status.
+ if options.send_mail and options.download_base:
+ form_fields.append(("send_mail", "1"))
+ if not options.download_base:
+ form_fields.append(("content_upload", "1"))
+ if len(data) > MAX_UPLOAD_SIZE:
+ print("Patch is large, so uploading file patches separately.")
+ uploaded_diff_file = []
+ form_fields.append(("separate_patches", "1"))
+ else:
+ uploaded_diff_file = [("data", "data.diff", data)]
+ ctype, body = EncodeMultipartFormData(form_fields, uploaded_diff_file)
+ response_body = rpc_server.Send("/upload", body, content_type=ctype)
+ patchset = None
+ if not options.download_base or not uploaded_diff_file:
+ lines = response_body.splitlines()
+ if len(lines) >= 2:
+ msg = lines[0]
+ patchset = lines[1].strip()
+ patches = [x.split(" ", 1) for x in lines[2:]]
+ else:
+ msg = response_body
+ else:
+ msg = response_body
+ StatusUpdate(msg)
+ if not response_body.startswith("Issue created.") and \
+ not response_body.startswith("Issue updated."):
+ sys.exit(0)
+ issue = msg[msg.rfind("/")+1:]
+
+ if not uploaded_diff_file:
+ result = UploadSeparatePatches(issue, rpc_server, patchset, data, options)
+ if not options.download_base:
+ patches = result
+
+ if not options.download_base:
+ vcs.UploadBaseFiles(issue, rpc_server, patches, patchset, options, files)
+ if options.send_mail:
+ rpc_server.Send("/" + issue + "/mail", payload="")
+ return issue, patchset
+
+
+def main():
+ try:
+ logging.basicConfig(format=("%(asctime).19s %(levelname)s %(filename)s:"
+ "%(lineno)s %(message)s "))
+ os.environ['LC_ALL'] = 'C'
+ RealMain(sys.argv)
+ except KeyboardInterrupt:
+ print()
+ StatusUpdate("Interrupted.")
+ sys.exit(1)
+
+
+if __name__ == "__main__":
+ main()
+
diff --git a/runtest.py b/runtest.py
index 6beb4ba7..65d7851b 100644..100755
--- a/runtest.py
+++ b/runtest.py
@@ -92,10 +92,10 @@ import time
try:
import threading
- import Queue # 2to3: rename to queue
+ import queue # 2to3: rename to queue
threading_ok = True
except ImportError:
- print "Can't import threading or queue"
+ print("Can't import threading or queue")
threading_ok = False
cwd = os.getcwd()
@@ -187,12 +187,12 @@ class PassThroughOptionParser(OptionParser):
def _process_long_opt(self, rargs, values):
try:
OptionParser._process_long_opt(self, rargs, values)
- except BadOptionError, err:
+ except BadOptionError as err:
self.largs.append(err.opt_str)
def _process_short_opts(self, rargs, values):
try:
OptionParser._process_short_opts(self, rargs, values)
- except BadOptionError, err:
+ except BadOptionError as err:
self.largs.append(err.opt_str)
parser = PassThroughOptionParser(add_help_option=False)
@@ -240,7 +240,7 @@ for o, a in opts:
a = os.path.join(cwd, a)
testlistfile = a
elif o in ['-h', '--help']:
- print helpstr
+ print(helpstr)
sys.exit(0)
elif o in ['-j', '--jobs']:
jobs = int(a)
@@ -343,7 +343,7 @@ else:
st = os.stat(f)
except OSError:
continue
- if stat.S_IMODE(st[stat.ST_MODE]) & 0111:
+ if stat.S_IMODE(st[stat.ST_MODE]) & 0o111:
return f
return None
@@ -590,7 +590,7 @@ else:
base = os.path.join(base, os.path.split(url)[1])
if printcommand:
- print command
+ print(command)
if execute_tests:
os.system(command)
else:
@@ -843,9 +843,9 @@ def run_test(t, io_lock, async=True):
if suppress_stdout or suppress_stderr:
sys.stdout.write(header)
if not suppress_stdout and t.stdout:
- print t.stdout
+ print(t.stdout)
if not suppress_stderr and t.stderr:
- print t.stderr
+ print(t.stderr)
print_time_func("Test execution time: %.1f seconds\n", t.test_time)
if io_lock:
io_lock.release()
@@ -863,9 +863,9 @@ class RunTest(threading.Thread):
self.queue.task_done()
if jobs > 1 and threading_ok:
- print "Running tests using %d jobs"%jobs
+ print("Running tests using %d jobs"%jobs)
# Start worker threads
- queue = Queue.Queue()
+ queue = queue.Queue()
io_lock = threading.Lock()
for i in range(1, jobs):
t = RunTest(queue, io_lock)
@@ -878,7 +878,7 @@ if jobs > 1 and threading_ok:
else:
# Run tests serially
if jobs > 1:
- print "Ignoring -j%d option; no python threading module available."%jobs
+ print("Ignoring -j%d option; no python threading module available."%jobs)
for t in tests:
run_test(t, None, False)
diff --git a/src/engine/SCons/Action.py b/src/engine/SCons/Action.py
index c1eef756..e8574cb8 100644
--- a/src/engine/SCons/Action.py
+++ b/src/engine/SCons/Action.py
@@ -114,6 +114,7 @@ import SCons.Errors
import SCons.Executor
import SCons.Util
import SCons.Subst
+import collections
# we use these a lot, so try to optimize them
is_String = SCons.Util.is_String
@@ -165,12 +166,12 @@ def _callable_contents(obj):
"""
try:
# Test if obj is a method.
- return _function_contents(obj.im_func)
+ return _function_contents(obj.__func__)
except AttributeError:
try:
# Test if obj is a callable object.
- return _function_contents(obj.__call__.im_func)
+ return _function_contents(obj.__call__.__func__)
except AttributeError:
try:
@@ -190,12 +191,12 @@ def _object_contents(obj):
"""
try:
# Test if obj is a method.
- return _function_contents(obj.im_func)
+ return _function_contents(obj.__func__)
except AttributeError:
try:
# Test if obj is a callable object.
- return _function_contents(obj.__call__.im_func)
+ return _function_contents(obj.__call__.__func__)
except AttributeError:
try:
@@ -269,17 +270,17 @@ def _code_contents(code):
def _function_contents(func):
"""Return the signature contents of a function."""
- contents = [_code_contents(func.func_code)]
+ contents = [_code_contents(func.__code__)]
# The function contents depends on the value of defaults arguments
- if func.func_defaults:
- contents.append(',(' + ','.join(map(_object_contents,func.func_defaults)) + ')')
+ if func.__defaults__:
+ contents.append(',(' + ','.join(map(_object_contents,func.__defaults__)) + ')')
else:
contents.append(',()')
# The function contents depends on the closure captured cell values.
try:
- closure = func.func_closure or []
+ closure = func.__closure__ or []
except AttributeError:
# Older versions of Python do not support closures.
closure = []
@@ -328,7 +329,7 @@ def _do_create_keywords(args, kw):
cmdstrfunc = args[0]
if cmdstrfunc is None or is_String(cmdstrfunc):
kw['cmdstr'] = cmdstrfunc
- elif callable(cmdstrfunc):
+ elif isinstance(cmdstrfunc, collections.Callable):
kw['strfunction'] = cmdstrfunc
else:
raise SCons.Errors.UserError(
@@ -359,7 +360,7 @@ def _do_create_action(act, kw):
if is_List(act):
return CommandAction(act, **kw)
- if callable(act):
+ if isinstance(act, collections.Callable):
try:
gen = kw['generator']
del kw['generator']
@@ -492,7 +493,7 @@ class _ActionAction(ActionBase):
self.targets = targets
if batch_key:
- if not callable(batch_key):
+ if not isinstance(batch_key, collections.Callable):
# They have set batch_key, but not to their own
# callable. The default behavior here will batch
# *all* targets+sources using this action, separated
@@ -512,7 +513,7 @@ class _ActionAction(ActionBase):
# This code assumes s is a regular string, but should
# work if it's unicode too.
try:
- sys.stdout.write(unicode(s + "\n"))
+ sys.stdout.write(str(s + "\n"))
except UnicodeDecodeError:
sys.stdout.write(s + "\n")
@@ -553,7 +554,7 @@ class _ActionAction(ActionBase):
source = executor.get_all_sources()
t = ' and '.join(map(str, target))
l = '\n '.join(self.presub_lines(env))
- out = u"Building %s with action:\n %s\n" % (t, l)
+ out = "Building %s with action:\n %s\n" % (t, l)
sys.stdout.write(out)
cmd = None
if show and self.strfunction:
@@ -653,7 +654,7 @@ def _subproc(scons_env, cmd, error = 'ignore', **kw):
# Ensure that the ENV values are all strings:
new_env = {}
- for key, value in ENV.items():
+ for key, value in list(ENV.items()):
if is_List(value):
# If the value is a list, then we assume it is a path list,
# because that's a pretty common list-like value to stick
@@ -672,7 +673,7 @@ def _subproc(scons_env, cmd, error = 'ignore', **kw):
try:
return subprocess.Popen(cmd, **kw)
- except EnvironmentError, e:
+ except EnvironmentError as e:
if error == 'raise': raise
# return a dummy Popen instance that only returns error
class dummyPopen(object):
@@ -779,7 +780,7 @@ class CommandAction(_ActionAction):
ENV = get_default_ENV(env)
# Ensure that the ENV values are all strings:
- for key, value in ENV.items():
+ for key, value in list(ENV.items()):
if not is_String(value):
if is_List(value):
# If the value is a list, then we assume it is a
@@ -1038,7 +1039,7 @@ class FunctionAction(_ActionAction):
else:
if strfunc is None:
return None
- if callable(strfunc):
+ if isinstance(strfunc, collections.Callable):
return strfunc(target, source, env)
name = self.function_name()
tstr = array(target)
@@ -1060,11 +1061,11 @@ class FunctionAction(_ActionAction):
rsources = list(map(rfile, source))
try:
result = self.execfunction(target=target, source=rsources, env=env)
- except KeyboardInterrupt, e:
+ except KeyboardInterrupt as e:
raise
- except SystemExit, e:
+ except SystemExit as e:
raise
- except Exception, e:
+ except Exception as e:
result = e
exc_info = sys.exc_info()
@@ -1179,11 +1180,11 @@ class ActionCaller(object):
actfunc = self.parent.actfunc
try:
# "self.actfunc" is a function.
- contents = str(actfunc.func_code.co_code)
+ contents = str(actfunc.__code__.co_code)
except AttributeError:
# "self.actfunc" is a callable object.
try:
- contents = str(actfunc.__call__.im_func.func_code.co_code)
+ contents = str(actfunc.__call__.__func__.__code__.co_code)
except AttributeError:
# No __call__() method, so it might be a builtin
# or something like that. Do the best we can.
@@ -1214,7 +1215,7 @@ class ActionCaller(object):
def subst_kw(self, target, source, env):
kw = {}
- for key in self.kw.keys():
+ for key in list(self.kw.keys()):
kw[key] = self.subst(self.kw[key], target, source, env)
return kw
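The recurring callable(...) -> isinstance(..., collections.Callable) rewrite in this and the following files is 2to3's standard fix for callable() having been removed in early Python 3 (it returned in 3.2). A tiny illustration of the equivalence, using the collections.abc location that works on current Python 3; the patch itself uses collections.Callable, which was an alias at the time:

    from collections.abc import Callable

    def build(target, source, env):
        return 0

    # Both checks accept plain functions, bound methods, and objects defining __call__.
    print(callable(build), isinstance(build, Callable))   # True True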
diff --git a/src/engine/SCons/ActionTests.py b/src/engine/SCons/ActionTests.py
index 13c3b6c8..6edf373b 100644
--- a/src/engine/SCons/ActionTests.py
+++ b/src/engine/SCons/ActionTests.py
@@ -133,7 +133,7 @@ class Environment(object):
self.d['SPAWN'] = scons_env['SPAWN']
self.d['PSPAWN'] = scons_env['PSPAWN']
self.d['ESCAPE'] = scons_env['ESCAPE']
- for k, v in kw.items():
+ for k, v in list(kw.items()):
self.d[k] = v
# Just use the underlying scons_subst*() utility methods.
def subst(self, strSubst, raw=0, target=[], source=[], conv=None):
@@ -158,12 +158,12 @@ class Environment(object):
def Clone(self, **kw):
res = Environment()
res.d = SCons.Util.semi_deepcopy(self.d)
- for k, v in kw.items():
+ for k, v in list(kw.items()):
res.d[k] = v
return res
def sig_dict(self):
d = {}
- for k,v in self.items(): d[k] = v
+ for k,v in list(self.items()): d[k] = v
d['TARGETS'] = ['__t1__', '__t2__', '__t3__', '__t4__', '__t5__', '__t6__']
d['TARGET'] = d['TARGETS'][0]
d['SOURCES'] = ['__s1__', '__s2__', '__s3__', '__s4__', '__s5__', '__s6__']
@@ -270,7 +270,7 @@ def test_positional_args(pos_callback, cmd, **kw):
try:
#FUTURE a = SCons.Action.Action(cmd, [], **kw)
a = SCons.Action.Action(cmd, [], **kw)
- except SCons.Errors.UserError, e:
+ except SCons.Errors.UserError as e:
s = str(e)
m = 'Invalid command display variable'
assert s.find(m) != -1, 'Unexpected string: %s' % s
@@ -305,7 +305,7 @@ class ActionTestCase(unittest.TestCase):
# a singleton list returns the contained action
test_positional_args(cmd_action, ["string"])
- try: unicode
+ try: str
except NameError: pass
else:
a2 = eval("SCons.Action.Action(u'string')")
@@ -493,7 +493,7 @@ class _ActionActionTestCase(unittest.TestCase):
def func(): pass
try:
a = SCons.Action.Action('foo', cmdstr='string', strfunction=func)
- except SCons.Errors.UserError, e:
+ except SCons.Errors.UserError as e:
s = str(e)
m = 'Cannot have both strfunction and cmdstr args to Action()'
assert s.find(m) != -1, 'Unexpected string: %s' % s
diff --git a/src/engine/SCons/Builder.py b/src/engine/SCons/Builder.py
index 6dc9e17e..6abcbcfd 100644
--- a/src/engine/SCons/Builder.py
+++ b/src/engine/SCons/Builder.py
@@ -167,7 +167,7 @@ class DictCmdGenerator(SCons.Util.Selector):
try:
ret = SCons.Util.Selector.__call__(self, env, source, ext)
- except KeyError, e:
+ except KeyError as e:
raise UserError("Ambiguous suffixes after environment substitution: %s == %s == %s" % (e.args[0], e.args[1], e.args[2]))
if ret is None:
raise UserError("While building `%s' from `%s': Don't know how to build from a source file with suffix `%s'. Expected a suffix in this list: %s." % \
@@ -179,7 +179,7 @@ class CallableSelector(SCons.Util.Selector):
finds if it can."""
def __call__(self, env, source):
value = SCons.Util.Selector.__call__(self, env, source)
- if callable(value):
+ if isinstance(value, collections.Callable):
value = value(env, source)
return value
@@ -230,7 +230,7 @@ class OverrideWarner(collections.UserDict):
def warn(self):
if self.already_warned:
return
- for k in self.keys():
+ for k in list(self.keys()):
if k in misleading_keywords:
alt = misleading_keywords[k]
msg = "Did you mean to use `%s' instead of `%s'?" % (alt, k)
@@ -336,7 +336,7 @@ class EmitterProxy(object):
# in strings. Maybe we should change that?
while SCons.Util.is_String(emitter) and emitter in env:
emitter = env[emitter]
- if callable(emitter):
+ if isinstance(emitter, collections.Callable):
target, source = emitter(target, source, env)
elif SCons.Util.is_List(emitter):
for e in emitter:
@@ -426,7 +426,7 @@ class BuilderBase(object):
src_builder = [ src_builder ]
self.src_builder = src_builder
- def __nonzero__(self):
+ def __bool__(self):
raise InternalError("Do not test for the Node.builder attribute directly; use Node.has_builder() instead")
def get_name(self, env):
@@ -638,18 +638,18 @@ class BuilderBase(object):
def get_prefix(self, env, sources=[]):
prefix = self.prefix
- if callable(prefix):
+ if isinstance(prefix, collections.Callable):
prefix = prefix(env, sources)
return env.subst(prefix)
def set_suffix(self, suffix):
- if not callable(suffix):
+ if not isinstance(suffix, collections.Callable):
suffix = self.adjust_suffix(suffix)
self.suffix = suffix
def get_suffix(self, env, sources=[]):
suffix = self.suffix
- if callable(suffix):
+ if isinstance(suffix, collections.Callable):
suffix = suffix(env, sources)
return env.subst(suffix)
@@ -658,7 +658,7 @@ class BuilderBase(object):
src_suffix = []
elif not SCons.Util.is_List(src_suffix):
src_suffix = [ src_suffix ]
- self.src_suffix = [callable(suf) and suf or self.adjust_suffix(suf) for suf in src_suffix]
+ self.src_suffix = [isinstance(suf, collections.Callable) and suf or self.adjust_suffix(suf) for suf in src_suffix]
def get_src_suffix(self, env):
"""Get the first src_suffix in the list of src_suffixes."""
@@ -868,7 +868,7 @@ def is_a_Builder(obj):
"""
return (isinstance(obj, BuilderBase)
or isinstance(obj, CompositeBuilder)
- or callable(obj))
+ or isinstance(obj, collections.Callable))
# Local Variables:
# tab-width:4
diff --git a/src/engine/SCons/BuilderTests.py b/src/engine/SCons/BuilderTests.py
index 766b8fe1..da03a3c9 100644
--- a/src/engine/SCons/BuilderTests.py
+++ b/src/engine/SCons/BuilderTests.py
@@ -77,7 +77,7 @@ class Environment(object):
self.d['SHELL'] = scons_env['SHELL']
self.d['SPAWN'] = scons_env['SPAWN']
self.d['ESCAPE'] = scons_env['ESCAPE']
- for k, v in kw.items():
+ for k, v in list(kw.items()):
self.d[k] = v
global env_arg2nodes_called
env_arg2nodes_called = None
@@ -138,7 +138,7 @@ class Environment(object):
return list(self.d.items())
def sig_dict(self):
d = {}
- for k,v in self.items(): d[k] = v
+ for k,v in list(self.items()): d[k] = v
d['TARGETS'] = ['__t1__', '__t2__', '__t3__', '__t4__', '__t5__', '__t6__']
d['TARGET'] = d['TARGETS'][0]
d['SOURCES'] = ['__s1__', '__s2__', '__s3__', '__s4__', '__s5__', '__s6__']
@@ -305,11 +305,11 @@ class BuilderTestCase(unittest.TestCase):
#be = target.get_build_env()
#assert be['VAR'] == 'foo', be['VAR']
- try: unicode
+ try: str
except NameError:
uni = str
else:
- uni = unicode
+ uni = str
target = builder(env, target = uni('n12 n13'),
source = [uni('n14 n15')])[0]
@@ -325,7 +325,7 @@ class BuilderTestCase(unittest.TestCase):
flag = 0
try:
target = builder(env, None, source=n20)
- except SCons.Errors.UserError, e:
+ except SCons.Errors.UserError as e:
flag = 1
assert flag, "UserError should be thrown if a source node can't create a target."
@@ -341,7 +341,7 @@ class BuilderTestCase(unittest.TestCase):
suffix = '.s')
try:
builder(env, target = 'n22', source = 'n22')
- except SCons.Errors.UserError, e:
+ except SCons.Errors.UserError as e:
pass
else:
raise Exception("Did not catch expected UserError.")
@@ -1497,7 +1497,7 @@ class CompositeBuilderTestCase(unittest.TestCase):
flag = 0
try:
builder(env, target='test3', source=['test2.bar', 'test1.foo'])[0]
- except SCons.Errors.UserError, e:
+ except SCons.Errors.UserError as e:
flag = 1
assert flag, "UserError should be thrown when we call a builder with files of different suffixes."
expect = "While building `['test3']' from `test1.foo': Cannot build multiple sources with different extensions: .bar, .foo"
@@ -1528,8 +1528,8 @@ class CompositeBuilderTestCase(unittest.TestCase):
try:
tgt.build()
flag = 1
- except SCons.Errors.UserError, e:
- print e
+ except SCons.Errors.UserError as e:
+ print(e)
flag = 0
assert flag, "It should be possible to define actions in composite builders using variables."
env['FOO_SUFFIX'] = '.BAR2'
@@ -1581,7 +1581,7 @@ class CompositeBuilderTestCase(unittest.TestCase):
flag = 0
try:
builder(env, target='t5', source=['test5a.foo', 'test5b.inb'])[0]
- except SCons.Errors.UserError, e:
+ except SCons.Errors.UserError as e:
flag = 1
assert flag, "UserError should be thrown when we call a builder with files of different suffixes."
expect = "While building `['t5']' from `test5b.bar': Cannot build multiple sources with different extensions: .foo, .bar"
@@ -1590,7 +1590,7 @@ class CompositeBuilderTestCase(unittest.TestCase):
flag = 0
try:
builder(env, target='t6', source=['test6a.bar', 'test6b.ina'])[0]
- except SCons.Errors.UserError, e:
+ except SCons.Errors.UserError as e:
flag = 1
assert flag, "UserError should be thrown when we call a builder with files of different suffixes."
expect = "While building `['t6']' from `test6b.foo': Cannot build multiple sources with different extensions: .bar, .foo"
@@ -1599,7 +1599,7 @@ class CompositeBuilderTestCase(unittest.TestCase):
flag = 0
try:
builder(env, target='t4', source=['test4a.ina', 'test4b.inb'])[0]
- except SCons.Errors.UserError, e:
+ except SCons.Errors.UserError as e:
flag = 1
assert flag, "UserError should be thrown when we call a builder with files of different suffixes."
expect = "While building `['t4']' from `test4b.bar': Cannot build multiple sources with different extensions: .foo, .bar"
@@ -1608,7 +1608,7 @@ class CompositeBuilderTestCase(unittest.TestCase):
flag = 0
try:
builder(env, target='t7', source=[env.fs.File('test7')])[0]
- except SCons.Errors.UserError, e:
+ except SCons.Errors.UserError as e:
flag = 1
assert flag, "UserError should be thrown when we call a builder with files of different suffixes."
expect = "While building `['t7']': Cannot deduce file extension from source files: ['test7']"
@@ -1617,7 +1617,7 @@ class CompositeBuilderTestCase(unittest.TestCase):
flag = 0
try:
builder(env, target='t8', source=['test8.unknown'])[0]
- except SCons.Errors.UserError, e:
+ except SCons.Errors.UserError as e:
flag = 1
assert flag, "UserError should be thrown when we call a builder target with an unknown suffix."
expect = "While building `['t8']' from `['test8.unknown']': Don't know how to build from a source file with suffix `.unknown'. Expected a suffix in this list: ['.foo', '.bar']."
diff --git a/src/engine/SCons/CacheDirTests.py b/src/engine/SCons/CacheDirTests.py
index 21b435a8..269040db 100644
--- a/src/engine/SCons/CacheDirTests.py
+++ b/src/engine/SCons/CacheDirTests.py
@@ -241,7 +241,7 @@ class FileTestCase(BaseTestCase):
warn_caught = 0
try:
f7.push_to_cache()
- except SCons.Errors.BuildError, e:
+ except SCons.Errors.BuildError as e:
assert e.exc_info[0] == SCons.Warnings.CacheWriteErrorWarning
warn_caught = 1
assert warn_caught
diff --git a/src/engine/SCons/Debug.py b/src/engine/SCons/Debug.py
index 1c0c6385..363c8b7c 100644
--- a/src/engine/SCons/Debug.py
+++ b/src/engine/SCons/Debug.py
@@ -73,7 +73,7 @@ def dumpLoggedInstances(classes, file=sys.stdout):
obj = ref()
if obj is not None:
file.write(' %s:\n' % obj)
- for key, value in obj.__dict__.items():
+ for key, value in list(obj.__dict__.items()):
file.write(' %20s : %s\n' % (key, value))
@@ -143,7 +143,7 @@ def caller_trace(back=0):
# print a single caller and its callers, if any
def _dump_one_caller(key, file, level=0):
leader = ' '*level
- for v,c in sorted([(-v,c) for c,v in caller_dicts[key].items()]):
+ for v,c in sorted([(-v,c) for c,v in list(caller_dicts[key].items())]):
file.write("%s %6d %s:%d(%s)\n" % ((leader,-v) + func_shorten(c[-3:])))
if c in caller_dicts:
_dump_one_caller(c, file, level+1)
diff --git a/src/engine/SCons/Defaults.py b/src/engine/SCons/Defaults.py
index a99bcc78..fe1f87b3 100644
--- a/src/engine/SCons/Defaults.py
+++ b/src/engine/SCons/Defaults.py
@@ -31,7 +31,7 @@ from distutils.msvccompiler.
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
-from __future__ import division
+
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
@@ -50,6 +50,7 @@ import SCons.Environment
import SCons.PathList
import SCons.Subst
import SCons.Tool
+import collections
# A placeholder for a default Environment (for fetching source files
# from source code management systems and the like). This must be
@@ -221,7 +222,7 @@ def mkdir_func(dest):
for entry in dest:
try:
os.makedirs(str(entry))
- except os.error, e:
+ except os.error as e:
p = str(entry)
if (e.args[0] == errno.EEXIST or
(sys.platform=='win32' and e.args[0]==183)) \
@@ -325,9 +326,9 @@ def _stripixes(prefix, itms, suffix, stripprefixes, stripsuffixes, env, c=None):
if not itms:
return itms
- if not callable(c):
+ if not isinstance(c, collections.Callable):
env_c = env['_concat']
- if env_c != _concat and callable(env_c):
+ if env_c != _concat and isinstance(env_c, collections.Callable):
# There's a custom _concat() method in the construction
# environment, and we've allowed people to set that in
# the past (see test/custom-concat.py), so preserve the
@@ -381,7 +382,7 @@ def processDefines(defs):
else:
l.append(str(d[0]))
elif SCons.Util.is_Dict(d):
- for macro,value in d.iteritems():
+ for macro,value in d.items():
if value is not None:
l.append(str(macro) + '=' + str(value))
else:
diff --git a/src/engine/SCons/DefaultsTests.py b/src/engine/SCons/DefaultsTests.py
index fd10c129..a36ca7ca 100644
--- a/src/engine/SCons/DefaultsTests.py
+++ b/src/engine/SCons/DefaultsTests.py
@@ -69,7 +69,7 @@ class DefaultsTestCase(unittest.TestCase):
test.write(file, "test\n")
try:
mkdir_func(file)
- except os.error, e:
+ except os.error as e:
pass
else:
fail("expected os.error")
diff --git a/src/engine/SCons/Environment.py b/src/engine/SCons/Environment.py
index 55a82065..88b05538 100644
--- a/src/engine/SCons/Environment.py
+++ b/src/engine/SCons/Environment.py
@@ -58,6 +58,7 @@ import SCons.Subst
import SCons.Tool
import SCons.Util
import SCons.Warnings
+import collections
class _Null(object):
pass
@@ -127,7 +128,7 @@ future_reserved_construction_var_names = [
def copy_non_reserved_keywords(dict):
result = semi_deepcopy(dict)
- for k in result.keys():
+ for k in list(result.keys()):
if k in reserved_construction_var_names:
msg = "Ignoring attempt to set reserved variable `$%s'"
SCons.Warnings.warn(SCons.Warnings.ReservedVariableWarning, msg % k)
@@ -146,12 +147,12 @@ def _set_future_reserved(env, key, value):
def _set_BUILDERS(env, key, value):
try:
bd = env._dict[key]
- for k in bd.keys():
+ for k in list(bd.keys()):
del bd[k]
except KeyError:
bd = BuilderDict(kwbd, env)
env._dict[key] = bd
- for k, v in value.items():
+ for k, v in list(value.items()):
if not SCons.Builder.is_a_Builder(v):
raise SCons.Errors.UserError('%s is not a Builder.' % repr(v))
bd.update(value)
@@ -323,7 +324,7 @@ class BuilderDict(UserDict):
delattr(self.env, item)
def update(self, dict):
- for i, v in dict.items():
+ for i, v in list(dict.items()):
self.__setitem__(i, v)
@@ -517,7 +518,7 @@ class SubstitutionEnvironment(object):
def subst_kw(self, kw, raw=0, target=None, source=None):
nkw = {}
- for k, v in kw.items():
+ for k, v in list(kw.items()):
k = self.subst(k, raw, target, source)
if SCons.Util.is_String(v):
v = self.subst(v, raw, target, source)
@@ -591,7 +592,7 @@ class SubstitutionEnvironment(object):
out,err = p.communicate()
status = p.wait()
if err:
- sys.stderr.write(unicode(err))
+ sys.stderr.write(str(err))
if status:
raise OSError("'%s' exited %d" % (command, status))
return out
@@ -629,7 +630,7 @@ class SubstitutionEnvironment(object):
if not o: return self
overrides = {}
merges = None
- for key, value in o.items():
+ for key, value in list(o.items()):
if key == 'parse_flags':
merges = value
else:
@@ -814,7 +815,7 @@ class SubstitutionEnvironment(object):
if not unique:
self.Append(**args)
return self
- for key, value in args.items():
+ for key, value in list(args.items()):
if not value:
continue
try:
@@ -1004,7 +1005,7 @@ class Base(SubstitutionEnvironment):
# Now restore the passed-in and customized variables
# to the environment, since the values the user set explicitly
# should override any values set by the tools.
- for key, val in save.items():
+ for key, val in list(save.items()):
self._dict[key] = val
# Finally, apply any flags to be merged in
@@ -1152,7 +1153,7 @@ class Base(SubstitutionEnvironment):
in an Environment.
"""
kw = copy_non_reserved_keywords(kw)
- for key, val in kw.items():
+ for key, val in list(kw.items()):
# It would be easier on the eyes to write this using
# "continue" statements whenever we finish processing an item,
# but Python 1.5.2 apparently doesn't let you use "continue"
@@ -1205,7 +1206,7 @@ class Base(SubstitutionEnvironment):
# based on what we think the value looks like.
if SCons.Util.is_List(val):
if key == 'CPPDEFINES':
- orig = orig.items()
+ orig = list(orig.items())
orig += val
self._dict[key] = orig
else:
@@ -1216,7 +1217,7 @@ class Base(SubstitutionEnvironment):
update_dict(val)
except (AttributeError, TypeError, ValueError):
if SCons.Util.is_Dict(val):
- for k, v in val.items():
+ for k, v in list(val.items()):
orig[k] = v
else:
orig[val] = None
@@ -1262,7 +1263,7 @@ class Base(SubstitutionEnvironment):
values move to end.
"""
kw = copy_non_reserved_keywords(kw)
- for key, val in kw.items():
+ for key, val in list(kw.items()):
if SCons.Util.is_List(val):
val = _delete_duplicates(val, delete_existing)
if key not in self._dict or self._dict[key] in ('', None):
@@ -1286,7 +1287,7 @@ class Base(SubstitutionEnvironment):
tmp.append((i,))
val = tmp
if SCons.Util.is_Dict(dk):
- dk = dk.items()
+ dk = list(dk.items())
elif SCons.Util.is_String(dk):
dk = [(dk,)]
else:
@@ -1327,11 +1328,11 @@ class Base(SubstitutionEnvironment):
tmp.append((i,))
dk = tmp
if SCons.Util.is_Dict(val):
- val = val.items()
+ val = list(val.items())
elif SCons.Util.is_String(val):
val = [(val,)]
if delete_existing:
- dk = filter(lambda x, val=val: x not in val, dk)
+ dk = list(filter(lambda x, val=val: x not in val, dk))
self._dict[key] = dk + val
else:
dk = [x for x in dk if x not in val]
@@ -1340,7 +1341,7 @@ class Base(SubstitutionEnvironment):
# By elimination, val is not a list. Since dk is a
# list, wrap val in a list first.
if delete_existing:
- dk = filter(lambda x, val=val: x not in val, dk)
+ dk = list(filter(lambda x, val=val: x not in val, dk))
self._dict[key] = dk + [val]
else:
if not val in dk:
@@ -1350,7 +1351,7 @@ class Base(SubstitutionEnvironment):
if SCons.Util.is_String(dk):
dk = [dk]
elif SCons.Util.is_Dict(dk):
- dk = dk.items()
+ dk = list(dk.items())
if SCons.Util.is_String(val):
if val in dk:
val = []
@@ -1358,7 +1359,7 @@ class Base(SubstitutionEnvironment):
val = [val]
elif SCons.Util.is_Dict(val):
tmp = []
- for i,j in val.iteritems():
+ for i,j in val.items():
if j is not None:
tmp.append((i,j))
else:
@@ -1402,7 +1403,7 @@ class Base(SubstitutionEnvironment):
# so the tools can use the new variables
kw = copy_non_reserved_keywords(kw)
new = {}
- for key, value in kw.items():
+ for key, value in list(kw.items()):
new[key] = SCons.Subst.scons_subst_once(value, self, key)
clone.Replace(**new)
@@ -1469,7 +1470,7 @@ class Base(SubstitutionEnvironment):
copy_function = self._copy_from_cache
elif function == 'timestamp-match':
function = self._changed_timestamp_match
- elif not callable(function):
+ elif not isinstance(function, collections.Callable):
raise UserError("Unknown Decider value %s" % repr(function))
# We don't use AddMethod because we don't want to turn the
@@ -1602,7 +1603,7 @@ class Base(SubstitutionEnvironment):
in an Environment.
"""
kw = copy_non_reserved_keywords(kw)
- for key, val in kw.items():
+ for key, val in list(kw.items()):
# It would be easier on the eyes to write this using
# "continue" statements whenever we finish processing an item,
# but Python 1.5.2 apparently doesn't let you use "continue"
@@ -1656,7 +1657,7 @@ class Base(SubstitutionEnvironment):
update_dict(val)
except (AttributeError, TypeError, ValueError):
if SCons.Util.is_Dict(val):
- for k, v in val.items():
+ for k, v in list(val.items()):
orig[k] = v
else:
orig[val] = None
@@ -1693,7 +1694,7 @@ class Base(SubstitutionEnvironment):
values move to front.
"""
kw = copy_non_reserved_keywords(kw)
- for key, val in kw.items():
+ for key, val in list(kw.items()):
if SCons.Util.is_List(val):
val = _delete_duplicates(val, not delete_existing)
if key not in self._dict or self._dict[key] in ('', None):
@@ -1768,7 +1769,7 @@ class Base(SubstitutionEnvironment):
return os.path.join(dir, new_prefix+name+new_suffix)
def SetDefault(self, **kw):
- for k in kw.keys():
+ for k in list(kw.keys()):
if k in self._dict:
del kw[k]
self.Replace(**kw)
@@ -1830,7 +1831,7 @@ class Base(SubstitutionEnvironment):
uniq = {}
for executor in [n.get_executor() for n in nodes]:
uniq[executor] = 1
- for executor in uniq.keys():
+ for executor in list(uniq.keys()):
executor.add_pre_action(action)
return nodes
@@ -1840,7 +1841,7 @@ class Base(SubstitutionEnvironment):
uniq = {}
for executor in [n.get_executor() for n in nodes]:
uniq[executor] = 1
- for executor in uniq.keys():
+ for executor in list(uniq.keys()):
executor.add_post_action(action)
return nodes
@@ -2235,7 +2236,7 @@ class Base(SubstitutionEnvironment):
while (node != node.srcnode()):
node = node.srcnode()
return node
- sources = map( final_source, sources );
+ sources = list(map( final_source, sources ));
# remove duplicates
return list(set(sources))
@@ -2378,7 +2379,7 @@ def NoSubstitutionProxy(subject):
def __setattr__(self, name, value):
return setattr(self.__dict__['__subject'], name, value)
def executor_to_lvars(self, kwdict):
- if kwdict.has_key('executor'):
+ if 'executor' in kwdict:
kwdict['lvars'] = kwdict['executor'].get_lvars()
del kwdict['executor']
else:
diff --git a/src/engine/SCons/EnvironmentTests.py b/src/engine/SCons/EnvironmentTests.py
index 45cf8765..3af879a6 100644
--- a/src/engine/SCons/EnvironmentTests.py
+++ b/src/engine/SCons/EnvironmentTests.py
@@ -160,7 +160,7 @@ class TestEnvironmentFixture(object):
default_keys = { 'CC' : 'cc',
'CCFLAGS' : '-DNDEBUG',
'ENV' : { 'TMP' : '/tmp' } }
- for key, value in default_keys.items():
+ for key, value in list(default_keys.items()):
if key not in kw:
kw[key] = value
if 'BUILDERS' not in kw:
@@ -263,7 +263,7 @@ class SubstitutionTestCase(unittest.TestCase):
assert isinstance(nodes[0], X)
assert nodes[0].name == "Util.py UtilTests.py"
- try: unicode
+ try: str
except NameError: pass
else:
code = """if 1:
@@ -272,7 +272,7 @@ class SubstitutionTestCase(unittest.TestCase):
assert isinstance(nodes[0], X)
assert nodes[0].name == u"Util.py UtilTests.py"
\n"""
- exec code in globals(), locals()
+ exec(code, globals(), locals())
nodes = env.arg2nodes(["Util.py", "UtilTests.py"], Factory)
assert len(nodes) == 2, nodes
@@ -655,7 +655,7 @@ sys.exit(0)
cmd = '%s %s' % (python, test.workpath('fail.py'))
try:
env.backtick(cmd)
- except OSError, e:
+ except OSError as e:
assert str(e) == "'%s' exited 1" % cmd, str(e)
else:
self.fail("did not catch expected OSError")
@@ -1586,17 +1586,17 @@ def exists(env):
env['XXX'] = copy.copy(input)
try:
env.Append(XXX = append)
- except Exception, e:
- if failed == 0: print
- print " %s Append %s exception: %s" % \
- (repr(input), repr(append), e)
+ except Exception as e:
+ if failed == 0: print()
+ print(" %s Append %s exception: %s" % \
+ (repr(input), repr(append), e))
failed = failed + 1
else:
result = env['XXX']
if result != expect:
- if failed == 0: print
- print " %s Append %s => %s did not match %s" % \
- (repr(input), repr(append), repr(result), repr(expect))
+ if failed == 0: print()
+ print(" %s Append %s => %s did not match %s" % \
+ (repr(input), repr(append), repr(result), repr(expect)))
failed = failed + 1
del cases[:3]
assert failed == 0, "%d Append() cases failed" % failed
@@ -1935,7 +1935,7 @@ def generate(env):
assert x is None, x
sub2_xxx_exe = test.workpath('sub2', 'xxx.exe')
- os.chmod(sub2_xxx_exe, 0755)
+ os.chmod(sub2_xxx_exe, 0o755)
env = self.TestEnvironment(ENV = { 'PATH' : [sub1, sub2] })
@@ -1943,7 +1943,7 @@ def generate(env):
assert x == 'xxx.exe', x
sub1_xxx_exe = test.workpath('sub1', 'xxx.exe')
- os.chmod(sub1_xxx_exe, 0755)
+ os.chmod(sub1_xxx_exe, 0o755)
x = env.Detect('xxx.exe')
assert x == 'xxx.exe', x
@@ -2258,17 +2258,17 @@ f5: \
env['XXX'] = copy.copy(input)
try:
env.Prepend(XXX = prepend)
- except Exception, e:
- if failed == 0: print
- print " %s Prepend %s exception: %s" % \
- (repr(input), repr(prepend), e)
+ except Exception as e:
+ if failed == 0: print()
+ print(" %s Prepend %s exception: %s" % \
+ (repr(input), repr(prepend), e))
failed = failed + 1
else:
result = env['XXX']
if result != expect:
- if failed == 0: print
- print " %s Prepend %s => %s did not match %s" % \
- (repr(input), repr(prepend), repr(result), repr(expect))
+ if failed == 0: print()
+ print(" %s Prepend %s => %s did not match %s" % \
+ (repr(input), repr(prepend), repr(result), repr(expect)))
failed = failed + 1
del cases[:3]
assert failed == 0, "%d Prepend() cases failed" % failed
@@ -2506,10 +2506,10 @@ def generate(env):
os.mkdir(sub2_xxx_exe)
test.write(sub3_xxx_exe, "\n")
- os.chmod(sub3_xxx_exe, 0777)
+ os.chmod(sub3_xxx_exe, 0o777)
test.write(sub4_xxx_exe, "\n")
- os.chmod(sub4_xxx_exe, 0777)
+ os.chmod(sub4_xxx_exe, 0o777)
env_path = os.environ['PATH']
diff --git a/src/engine/SCons/ErrorsTests.py b/src/engine/SCons/ErrorsTests.py
index 9c8b925f..97c9d55b 100644
--- a/src/engine/SCons/ErrorsTests.py
+++ b/src/engine/SCons/ErrorsTests.py
@@ -35,7 +35,7 @@ class ErrorsTestCase(unittest.TestCase):
raise SCons.Errors.BuildError(
errstr = "foo", status=57, filename="file", exc_info=(1,2,3),
node = "n", executor="e", action="a", command="c")
- except SCons.Errors.BuildError, e:
+ except SCons.Errors.BuildError as e:
assert e.errstr == "foo"
assert e.status == 57
assert e.exitstatus == 2, e.exitstatus
@@ -50,7 +50,7 @@ class ErrorsTestCase(unittest.TestCase):
try:
raise SCons.Errors.BuildError("n", "foo", 57, 3, "file",
"e", "a", "c", (1,2,3))
- except SCons.Errors.BuildError, e:
+ except SCons.Errors.BuildError as e:
assert e.errstr == "foo", e.errstr
assert e.status == 57, e.status
assert e.exitstatus == 3, e.exitstatus
@@ -64,7 +64,7 @@ class ErrorsTestCase(unittest.TestCase):
try:
raise SCons.Errors.BuildError()
- except SCons.Errors.BuildError, e:
+ except SCons.Errors.BuildError as e:
assert e.errstr == "Unknown error"
assert e.status == 2
assert e.exitstatus == 2
@@ -80,21 +80,21 @@ class ErrorsTestCase(unittest.TestCase):
"""Test the InternalError exception."""
try:
raise SCons.Errors.InternalError("test internal error")
- except SCons.Errors.InternalError, e:
+ except SCons.Errors.InternalError as e:
assert e.args == ("test internal error",)
def test_UserError(self):
"""Test the UserError exception."""
try:
raise SCons.Errors.UserError("test user error")
- except SCons.Errors.UserError, e:
+ except SCons.Errors.UserError as e:
assert e.args == ("test user error",)
def test_ExplicitExit(self):
"""Test the ExplicitExit exception."""
try:
raise SCons.Errors.ExplicitExit("node")
- except SCons.Errors.ExplicitExit, e:
+ except SCons.Errors.ExplicitExit as e:
assert e.node == "node"
if __name__ == "__main__":
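
For orientation only (not from the patch): every hunk in ErrorsTests.py is the same mechanical change, the Python 2 "except Type, name:" form becoming "except Type as name:"; the caught object and its attributes are untouched. A toy sketch, with BuildError here a stand-in class rather than SCons.Errors.BuildError:

# Illustrative sketch only, not part of this patch.
class BuildError(Exception):
    def __init__(self, errstr="Unknown error", status=2):
        super().__init__(errstr)
        self.errstr, self.status = errstr, status

try:
    raise BuildError("foo", status=57)
except BuildError as e:          # Python 2 spelled this "except BuildError, e:"
    assert e.errstr == "foo" and e.status == 57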
diff --git a/src/engine/SCons/ExecutorTests.py b/src/engine/SCons/ExecutorTests.py
index 62689840..3bc5beeb 100644
--- a/src/engine/SCons/ExecutorTests.py
+++ b/src/engine/SCons/ExecutorTests.py
@@ -307,7 +307,7 @@ class ExecutorTestCase(unittest.TestCase):
try:
r = x.prepare()
- except SCons.Errors.StopError, e:
+ except SCons.Errors.StopError as e:
assert str(e) == "Source `s2' not found, needed by target `t1'.", e
else:
raise AssertionError("did not catch expected StopError: %s" % r)
diff --git a/src/engine/SCons/Job.py b/src/engine/SCons/Job.py
index 184f5ba3..226a34e7 100644
--- a/src/engine/SCons/Job.py
+++ b/src/engine/SCons/Job.py
@@ -278,14 +278,14 @@ else:
try:
prev_size = threading.stack_size(stack_size*1024)
- except AttributeError, e:
+ except AttributeError as e:
# Only print a warning if the stack size has been
# explicitly set.
if not explicit_stack_size is None:
msg = "Setting stack size is unsupported by this version of Python:\n " + \
e.args[0]
SCons.Warnings.warn(SCons.Warnings.StackSizeWarning, msg)
- except ValueError, e:
+ except ValueError as e:
msg = "Setting stack size failed:\n " + str(e)
SCons.Warnings.warn(SCons.Warnings.StackSizeWarning, msg)
diff --git a/src/engine/SCons/Memoize.py b/src/engine/SCons/Memoize.py
index e77aacf7..9fe68511 100644
--- a/src/engine/SCons/Memoize.py
+++ b/src/engine/SCons/Memoize.py
@@ -143,7 +143,7 @@ class Counter(object):
CounterList.append(self)
def display(self):
fmt = " %7d hits %7d misses %s()"
- print fmt % (self.hit, self.miss, self.name)
+ print(fmt % (self.hit, self.miss, self.name))
def __cmp__(self, other):
try:
return cmp(self.name, other.name)
@@ -215,7 +215,7 @@ class Memoizer(object):
def Dump(title=None):
if title:
- print title
+ print(title)
CounterList.sort()
for counter in CounterList:
counter.display()
diff --git a/src/engine/SCons/MemoizeTests.py b/src/engine/SCons/MemoizeTests.py
index 9876c27f..b6750e0d 100644
--- a/src/engine/SCons/MemoizeTests.py
+++ b/src/engine/SCons/MemoizeTests.py
@@ -30,9 +30,7 @@ import SCons.Memoize
-class FakeObject(object):
-
- __metaclass__ = SCons.Memoize.Memoized_Metaclass
+class FakeObject(object, metaclass=SCons.Memoize.Memoized_Metaclass):
memoizer_counters = []
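
For orientation only (not from the patch): the metaclass fixer moves a class-body __metaclass__ assignment into a metaclass= keyword on the class statement, which is what produced the one-line FakeObject rewrite above. A minimal sketch with a hypothetical counting metaclass:

# Illustrative sketch only, not part of this patch.
class CountingMeta(type):
    instances = 0
    def __call__(cls, *args, **kw):
        CountingMeta.instances += 1
        return super().__call__(*args, **kw)

class FakeObject(object, metaclass=CountingMeta):   # Python 2: __metaclass__ = CountingMeta
    pass

FakeObject(); FakeObject()
assert CountingMeta.instances == 2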
diff --git a/src/engine/SCons/Node/FS.py b/src/engine/SCons/Node/FS.py
index f31ca831..22dca1f9 100644
--- a/src/engine/SCons/Node/FS.py
+++ b/src/engine/SCons/Node/FS.py
@@ -54,6 +54,7 @@ import SCons.Util
import SCons.Warnings
from SCons.Debug import Trace
+import collections
do_store_info = True
print_duplicate = 0
@@ -550,7 +551,7 @@ class EntryProxy(SCons.Util.Proxy):
except KeyError:
try:
attr = SCons.Util.Proxy.__getattr__(self, name)
- except AttributeError, e:
+ except AttributeError as e:
# Raise our own AttributeError subclass with an
# overridden __str__() method that identifies the
# name of the entry that caused the exception.
@@ -1508,7 +1509,7 @@ class Dir(Base):
This clears any cached information that is invalidated by changing
the repository."""
- for node in self.entries.values():
+ for node in list(self.entries.values()):
if node != self.dir:
if node != self and isinstance(node, Dir):
node.__clearRepositoryCache(duplicate)
@@ -2055,7 +2056,7 @@ class Dir(Base):
# We use the .name attribute from the Node because the keys of
# the dir.entries dictionary are normalized (that is, all upper
# case) on case-insensitive systems like Windows.
- node_names = [ v.name for k, v in dir.entries.items()
+ node_names = [ v.name for k, v in list(dir.entries.items())
if k not in ('.', '..') ]
names.extend(node_names)
if not strings:
@@ -2420,7 +2421,7 @@ class File(Base):
fname = self.rfile().abspath
try:
contents = open(fname, "rb").read()
- except EnvironmentError, e:
+ except EnvironmentError as e:
if not e.filename:
e.filename = fname
raise
@@ -2455,7 +2456,7 @@ class File(Base):
try:
cs = SCons.Util.MD5filesignature(fname,
chunksize=SCons.Node.FS.File.md5_chunksize*1024)
- except EnvironmentError, e:
+ except EnvironmentError as e:
if not e.filename:
e.filename = fname
raise
@@ -2793,7 +2794,7 @@ class File(Base):
def _rmv_existing(self):
self.clear_memoized_values()
if print_duplicate:
- print "dup: removing existing target %s"%self
+ print("dup: removing existing target %s"%self)
e = Unlink(self, [], None)
if isinstance(e, SCons.Errors.BuildError):
raise e
@@ -2817,7 +2818,7 @@ class File(Base):
else:
try:
self._createDir()
- except SCons.Errors.StopError, drive:
+ except SCons.Errors.StopError as drive:
desc = "No drive `%s' for target `%s'." % (drive, self)
raise SCons.Errors.StopError(desc)
@@ -2835,7 +2836,7 @@ class File(Base):
def do_duplicate(self, src):
self._createDir()
if print_duplicate:
- print "dup: relinking variant '%s' from '%s'"%(self, src)
+ print("dup: relinking variant '%s' from '%s'"%(self, src))
Unlink(self, None, None)
e = Link(self, src, None)
if isinstance(e, SCons.Errors.BuildError):
@@ -2870,7 +2871,7 @@ class File(Base):
# The source file does not exist. Make sure no old
# copy remains in the variant directory.
if print_duplicate:
- print "dup: no src for %s, unlinking old variant copy"%self
+ print("dup: no src for %s, unlinking old variant copy"%self)
if Base.exists(self) or self.islink():
self.fs.unlink(self.path)
# Return None explicitly because the Base.exists() call
@@ -3196,10 +3197,10 @@ class FileFinder(object):
except KeyError:
pass
- if verbose and not callable(verbose):
+ if verbose and not isinstance(verbose, collections.Callable):
if not SCons.Util.is_String(verbose):
verbose = "find_file"
- _verbose = u' %s: ' % verbose
+ _verbose = ' %s: ' % verbose
verbose = lambda s: sys.stdout.write(_verbose + s)
filedir, filename = os.path.split(filename)
diff --git a/src/engine/SCons/Node/FSTests.py b/src/engine/SCons/Node/FSTests.py
index a60b8a4b..e8442e94 100644
--- a/src/engine/SCons/Node/FSTests.py
+++ b/src/engine/SCons/Node/FSTests.py
@@ -20,7 +20,7 @@
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
-from __future__ import division
+
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
@@ -567,13 +567,13 @@ class VariantDirTestCase(unittest.TestCase):
dp = dnode.srcnode().path
expect = os.path.normpath(srcnode_map.get(dir, dir))
if dp != expect:
- print "Dir `%s' srcnode() `%s' != expected `%s'" % (dir, dp, expect)
+ print("Dir `%s' srcnode() `%s' != expected `%s'" % (dir, dp, expect))
errors = errors + 1
fp = fnode.srcnode().path
expect = os.path.normpath(srcnode_map.get(f, f))
if fp != expect:
- print "File `%s' srcnode() `%s' != expected `%s'" % (f, fp, expect)
+ print("File `%s' srcnode() `%s' != expected `%s'" % (f, fp, expect))
errors = errors + 1
for dir in dir_list:
@@ -585,14 +585,14 @@ class VariantDirTestCase(unittest.TestCase):
tp = t[0].path
expect = os.path.normpath(alter_map.get(dir, dir))
if tp != expect:
- print "Dir `%s' alter_targets() `%s' != expected `%s'" % (dir, tp, expect)
+ print("Dir `%s' alter_targets() `%s' != expected `%s'" % (dir, tp, expect))
errors = errors + 1
t, m = fnode.alter_targets()
tp = t[0].path
expect = os.path.normpath(alter_map.get(f, f))
if tp != expect:
- print "File `%s' alter_targets() `%s' != expected `%s'" % (f, tp, expect)
+ print("File `%s' alter_targets() `%s' != expected `%s'" % (f, tp, expect))
errors = errors + 1
self.failIf(errors)
@@ -1088,7 +1088,7 @@ class FSTestCase(_tempdirTestCase):
try:
f2 = fs.File(sep.join(['f1', 'f2']), directory = d1)
- except TypeError, x:
+ except TypeError as x:
assert str(x) == ("Tried to lookup File '%s' as a Dir." %
d1_f1), x
except:
@@ -1096,7 +1096,7 @@ class FSTestCase(_tempdirTestCase):
try:
dir = fs.Dir(sep.join(['d1', 'f1']))
- except TypeError, x:
+ except TypeError as x:
assert str(x) == ("Tried to lookup File '%s' as a Dir." %
d1_f1), x
except:
@@ -1104,7 +1104,7 @@ class FSTestCase(_tempdirTestCase):
try:
f2 = fs.File('d1')
- except TypeError, x:
+ except TypeError as x:
assert str(x) == ("Tried to lookup Dir '%s' as a File." %
'd1'), x
except:
@@ -1305,7 +1305,7 @@ class FSTestCase(_tempdirTestCase):
assert f1.get_contents() == "Foo\x1aBar", f1.get_contents()
# This tests to make sure we can decode UTF-8 text files.
- test_string = u"Foo\x1aBar"
+ test_string = "Foo\x1aBar"
test.write("utf8_file", test_string.encode('utf-8'))
f1 = fs.File(test.workpath("utf8_file"))
assert eval('f1.get_text_contents() == u"Foo\x1aBar"'), \
@@ -1645,7 +1645,7 @@ class FSTestCase(_tempdirTestCase):
def unc_workpath(dirs, test=test):
import ntpath
- x = apply(test.workpath, dirs)
+ x = test.workpath(*dirs)
drive, path = ntpath.splitdrive(x)
unc, path = ntpath.splitunc(path)
path = strip_slash(path)
@@ -1911,9 +1911,9 @@ class FSTestCase(_tempdirTestCase):
del cases[:3]
result = dir.rel_path(other)
if result != expect:
- if failed == 0: print
+ if failed == 0: print()
fmt = " dir_path(%(dir)s, %(other)s) => '%(result)s' did not match '%(expect)s'"
- print fmt % locals()
+ print(fmt % locals())
failed = failed + 1
assert failed == 0, "%d rel_path() cases failed" % failed
@@ -2520,9 +2520,9 @@ class GlobTestCase(_tempdirTestCase):
fmt = lambda n: n
if r != result:
import pprint
- print "Glob(%s) expected:" % repr(input)
+ print("Glob(%s) expected:" % repr(input))
pprint.pprint(list(map(fmt, result)))
- print "Glob(%s) got:" % repr(input)
+ print("Glob(%s) got:" % repr(input))
pprint.pprint(list(map(fmt, r)))
self.fail()
@@ -3621,7 +3621,7 @@ class SpecialAttrTestCase(unittest.TestCase):
caught = None
try:
fs.Dir('ddd').get_subst_proxy().no_such_attr
- except AttributeError, e:
+ except AttributeError as e:
assert str(e) == "Dir instance 'ddd' has no attribute 'no_such_attr'", e
caught = 1
assert caught, "did not catch expected AttributeError"
@@ -3629,7 +3629,7 @@ class SpecialAttrTestCase(unittest.TestCase):
caught = None
try:
fs.Entry('eee').get_subst_proxy().no_such_attr
- except AttributeError, e:
+ except AttributeError as e:
# Gets disambiguated to File instance by get_subst_proxy().
assert str(e) == "File instance 'eee' has no attribute 'no_such_attr'", e
caught = 1
@@ -3638,7 +3638,7 @@ class SpecialAttrTestCase(unittest.TestCase):
caught = None
try:
fs.File('fff').get_subst_proxy().no_such_attr
- except AttributeError, e:
+ except AttributeError as e:
assert str(e) == "File instance 'fff' has no attribute 'no_such_attr'", e
caught = 1
assert caught, "did not catch expected AttributeError"
diff --git a/src/engine/SCons/Node/__init__.py b/src/engine/SCons/Node/__init__.py
index 992284dc..8f48d860 100644
--- a/src/engine/SCons/Node/__init__.py
+++ b/src/engine/SCons/Node/__init__.py
@@ -371,7 +371,7 @@ class Node(object):
"""
try:
self.get_executor()(self, **kw)
- except SCons.Errors.BuildError, e:
+ except SCons.Errors.BuildError as e:
e.node = self
raise
@@ -827,7 +827,7 @@ class Node(object):
"""Adds dependencies."""
try:
self._add_child(self.depends, self.depends_set, depend)
- except TypeError, e:
+ except TypeError as e:
e = e.args[0]
if SCons.Util.is_List(e):
s = list(map(str, e))
@@ -844,7 +844,7 @@ class Node(object):
"""Adds dependencies to ignore."""
try:
self._add_child(self.ignore, self.ignore_set, depend)
- except TypeError, e:
+ except TypeError as e:
e = e.args[0]
if SCons.Util.is_List(e):
s = list(map(str, e))
@@ -858,7 +858,7 @@ class Node(object):
return
try:
self._add_child(self.sources, self.sources_set, source)
- except TypeError, e:
+ except TypeError as e:
e = e.args[0]
if SCons.Util.is_List(e):
s = list(map(str, e))
@@ -1197,8 +1197,8 @@ class Node(object):
new_bkids = new.bsources + new.bdepends + new.bimplicit
new_bkidsigs = new.bsourcesigs + new.bdependsigs + new.bimplicitsigs
- osig = dict(zip(old_bkids, old_bkidsigs))
- nsig = dict(zip(new_bkids, new_bkidsigs))
+ osig = dict(list(zip(old_bkids, old_bkidsigs)))
+ nsig = dict(list(zip(new_bkids, new_bkidsigs)))
# The sources and dependencies we'll want to report are all stored
# as relative paths to this target's directory, but we want to
diff --git a/src/engine/SCons/Options/__init__.py b/src/engine/SCons/Options/__init__.py
index f6c84835..2544aecd 100644
--- a/src/engine/SCons/Options/__init__.py
+++ b/src/engine/SCons/Options/__init__.py
@@ -33,11 +33,11 @@ and will then be removed entirely (some day).
import SCons.Variables
import SCons.Warnings
-from BoolOption import BoolOption # okay
-from EnumOption import EnumOption # okay
-from ListOption import ListOption # naja
-from PackageOption import PackageOption # naja
-from PathOption import PathOption # okay
+from .BoolOption import BoolOption # okay
+from .EnumOption import EnumOption # okay
+from .ListOption import ListOption # naja
+from .PackageOption import PackageOption # naja
+from .PathOption import PathOption # okay
warned = False
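
For orientation only (not from the patch): the import fixer turns the bare intra-package imports rewritten in Options/__init__.py above, and the "import posix" style in the Platform modules that follow, into explicit relative imports, because in Python 3 a bare name is always an absolute import and a sibling module must be named relative to its package. A throw-away sketch that builds a hypothetical plat_demo package on disk to show the behaviour:

# Illustrative sketch only, not part of this patch; plat_demo is a made-up package.
import os, sys, tempfile

root = tempfile.mkdtemp()
pkgdir = os.path.join(root, "plat_demo")
os.mkdir(pkgdir)
open(os.path.join(pkgdir, "__init__.py"), "w").close()
with open(os.path.join(pkgdir, "posix.py"), "w") as f:
    f.write("NAME = 'posix'\n")
with open(os.path.join(pkgdir, "sunos.py"), "w") as f:
    f.write("from . import posix\nNAME = posix.NAME + '/sunos'\n")

sys.path.insert(0, root)
from plat_demo import sunos      # the relative import inside sunos.py finds its sibling
assert sunos.NAME == 'posix/sunos'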
diff --git a/src/engine/SCons/Platform/__init__.py b/src/engine/SCons/Platform/__init__.py
index 81a49e7e..6ef8b059 100644
--- a/src/engine/SCons/Platform/__init__.py
+++ b/src/engine/SCons/Platform/__init__.py
@@ -223,8 +223,8 @@ class TempFileMunge(object):
# purity get in the way of just being helpful, so we'll
# reach into SCons.Action directly.
if SCons.Action.print_actions:
- print("Using tempfile "+native_tmp+" for command line:\n"+
- str(cmd[0]) + " " + " ".join(args))
+ print(("Using tempfile "+native_tmp+" for command line:\n"+
+ str(cmd[0]) + " " + " ".join(args)))
return [ cmd[0], prefix + native_tmp + '\n' + rm, native_tmp ]
def Platform(name = platform_default()):
diff --git a/src/engine/SCons/Platform/aix.py b/src/engine/SCons/Platform/aix.py
index 0229112d..f6853b55 100644
--- a/src/engine/SCons/Platform/aix.py
+++ b/src/engine/SCons/Platform/aix.py
@@ -34,7 +34,7 @@ __revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import os
-import posix
+from . import posix
def get_xlc(env, xlc=None, xlc_r=None, packages=[]):
# Use the AIX package installer tool lslpp to figure out where a
diff --git a/src/engine/SCons/Platform/cygwin.py b/src/engine/SCons/Platform/cygwin.py
index a0126826..e7c8b8a3 100644
--- a/src/engine/SCons/Platform/cygwin.py
+++ b/src/engine/SCons/Platform/cygwin.py
@@ -32,7 +32,7 @@ selection method.
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
-import posix
+from . import posix
from SCons.Platform import TempFileMunge
def generate(env):
diff --git a/src/engine/SCons/Platform/darwin.py b/src/engine/SCons/Platform/darwin.py
index 005673b2..1cf4aeb5 100644
--- a/src/engine/SCons/Platform/darwin.py
+++ b/src/engine/SCons/Platform/darwin.py
@@ -32,7 +32,7 @@ selection method.
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
-import posix
+from . import posix
import os
def generate(env):
diff --git a/src/engine/SCons/Platform/hpux.py b/src/engine/SCons/Platform/hpux.py
index 43d284b5..0e0bbcf8 100644
--- a/src/engine/SCons/Platform/hpux.py
+++ b/src/engine/SCons/Platform/hpux.py
@@ -32,7 +32,7 @@ selection method.
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
-import posix
+from . import posix
def generate(env):
posix.generate(env)
diff --git a/src/engine/SCons/Platform/irix.py b/src/engine/SCons/Platform/irix.py
index 2baee0bb..2e5f2176 100644
--- a/src/engine/SCons/Platform/irix.py
+++ b/src/engine/SCons/Platform/irix.py
@@ -32,7 +32,7 @@ selection method.
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
-import posix
+from . import posix
def generate(env):
posix.generate(env)
diff --git a/src/engine/SCons/Platform/os2.py b/src/engine/SCons/Platform/os2.py
index 0fa45531..5ca26bcc 100644
--- a/src/engine/SCons/Platform/os2.py
+++ b/src/engine/SCons/Platform/os2.py
@@ -31,7 +31,7 @@ selection method.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
-import win32
+from . import win32
def generate(env):
if 'ENV' not in env:
diff --git a/src/engine/SCons/Platform/posix.py b/src/engine/SCons/Platform/posix.py
index ece48d7d..d1f6c78d 100644
--- a/src/engine/SCons/Platform/posix.py
+++ b/src/engine/SCons/Platform/posix.py
@@ -77,7 +77,7 @@ def exec_fork(l, env):
exitval = 127
try:
os.execvpe(l[0], l, env)
- except OSError, e:
+ except OSError as e:
exitval = exitvalmap.get(e[0], e[0])
sys.stderr.write("scons: %s: %s\n" % (l[0], e[1]))
os._exit(exitval)
@@ -92,7 +92,7 @@ def _get_env_command(sh, escape, cmd, args, env):
s = ' '.join(args)
if env:
l = ['env', '-'] + \
- [escape(t[0])+'='+escape(t[1]) for t in env.items()] + \
+ [escape(t[0])+'='+escape(t[1]) for t in list(env.items())] + \
[sh, '-c', escape(s)]
s = ' '.join(l)
return s
@@ -125,7 +125,8 @@ def process_cmd_output(cmd_stdout, cmd_stderr, stdout, stderr):
else:
#sys.__stderr__.write( "str(stderr) = %s\n" % str )
stderr.write(str)
- except select.error, (_errno, _strerror):
+ except select.error as xxx_todo_changeme:
+ (_errno, _strerror) = xxx_todo_changeme.args
if _errno != errno.EINTR:
raise
@@ -164,7 +165,7 @@ def exec_piped_fork(l, env, stdout, stderr):
exitval = 127
try:
os.execvpe(l[0], l, env)
- except OSError, e:
+ except OSError as e:
exitval = exitvalmap.get(e[0], e[0])
stderr.write("scons: %s: %s\n" % (l[0], e[1]))
os._exit(exitval)
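
For orientation only (not from the patch): Python 3 no longer allows tuple unpacking in an except clause, so the fixer binds the exception to a placeholder name (the xxx_todo_changeme seen above) and unpacks .args inside the handler; hand-tuned ports usually switch to the errno/strerror attributes instead. A small sketch of the generated shape:

# Illustrative sketch only, not part of this patch.
import errno, select

try:
    raise select.error(errno.EINTR, "Interrupted system call")
except select.error as xxx_todo_changeme:            # 2to3's placeholder name
    (_errno, _strerror) = xxx_todo_changeme.args     # unpacking moved into the body
    if _errno != errno.EINTR:
        raise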
diff --git a/src/engine/SCons/Platform/sunos.py b/src/engine/SCons/Platform/sunos.py
index d23d65c5..057fddfa 100644
--- a/src/engine/SCons/Platform/sunos.py
+++ b/src/engine/SCons/Platform/sunos.py
@@ -32,7 +32,7 @@ selection method.
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
-import posix
+from . import posix
def generate(env):
posix.generate(env)
diff --git a/src/engine/SCons/Platform/win32.py b/src/engine/SCons/Platform/win32.py
index 5f20685c..879817de 100644
--- a/src/engine/SCons/Platform/win32.py
+++ b/src/engine/SCons/Platform/win32.py
@@ -155,7 +155,7 @@ def piped_spawn(sh, escape, cmd, args, env, stdout, stderr):
try:
args = [sh, '/C', escape(' '.join(args)) ]
ret = spawnve(os.P_WAIT, sh, args, env)
- except OSError, e:
+ except OSError as e:
# catch any error
try:
ret = exitvalmap[e[0]]
@@ -183,7 +183,7 @@ def piped_spawn(sh, escape, cmd, args, env, stdout, stderr):
def exec_spawn(l, env):
try:
result = spawnve(os.P_WAIT, l[0], l, env)
- except OSError, e:
+ except OSError as e:
try:
result = exitvalmap[e[0]]
sys.stderr.write("scons: %s: %s\n" % (l[0], e[1]))
diff --git a/src/engine/SCons/SConf.py b/src/engine/SCons/SConf.py
index f3a35454..987f88d2 100644
--- a/src/engine/SCons/SConf.py
+++ b/src/engine/SCons/SConf.py
@@ -118,7 +118,7 @@ def CreateConfigHBuilder(env):
_stringConfigH)
sconfigHBld = SCons.Builder.Builder(action=action)
env.Append( BUILDERS={'SConfigHBuilder':sconfigHBld} )
- for k in _ac_config_hs.keys():
+ for k in list(_ac_config_hs.keys()):
env.SConfigHBuilder(k, env.Value(_ac_config_hs[k]))
class SConfWarning(SCons.Warnings.Warning):
@@ -239,7 +239,7 @@ class SConfBuildTask(SCons.Taskmaster.AlwaysTask):
# Earlier versions of Python don't have sys.excepthook...
def excepthook(type, value, tb):
traceback.print_tb(tb)
- print type, value
+ print(type, value)
excepthook(*self.exc_info())
return SCons.Taskmaster.Task.failed(self)
@@ -318,7 +318,7 @@ class SConfBuildTask(SCons.Taskmaster.AlwaysTask):
env_decider=env.decide_source):
env_decider(dependency, target, prev_ni)
return True
- if env.decide_source.func_code is not force_build.func_code:
+ if env.decide_source.__code__ is not force_build.__code__:
env.Decider(force_build)
env['PSTDOUT'] = env['PSTDERR'] = s
try:
@@ -332,7 +332,7 @@ class SConfBuildTask(SCons.Taskmaster.AlwaysTask):
except SystemExit:
exc_value = sys.exc_info()[1]
raise SCons.Errors.ExplicitExit(self.targets[0],exc_value.code)
- except Exception, e:
+ except Exception as e:
for t in self.targets:
binfo = t.get_binfo()
binfo.__class__ = SConfBuildInfo
@@ -652,7 +652,7 @@ class SConfBase(object):
"""Adds all the tests given in the tests dictionary to this SConf
instance
"""
- for name in tests.keys():
+ for name in list(tests.keys()):
self.AddTest(name, tests[name])
def _createDir( self, node ):
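
For orientation only (not from the patch): the funcattrs fixer renames func_code to __code__, which is how the SConf hunk above compares the environment's decide_source against the module-level force_build function by identity of their code objects. A minimal sketch:

# Illustrative sketch only, not part of this patch.
def force_build(dependency, target, prev_ni):
    return True

same = force_build
assert same.__code__ is force_build.__code__        # Python 2: same.func_code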
diff --git a/src/engine/SCons/SConfTests.py b/src/engine/SCons/SConfTests.py
index e6048860..ba524fd5 100644
--- a/src/engine/SCons/SConfTests.py
+++ b/src/engine/SCons/SConfTests.py
@@ -60,7 +60,7 @@ class SConfTestCase(unittest.TestCase):
# We try to reset scons' state (including all global variables)
import SCons.SConsign
SCons.SConsign.write() # simulate normal scons-finish
- for n in sys.modules.keys():
+ for n in list(sys.modules.keys()):
if n.split('.')[0] == 'SCons' and n[:12] != 'SCons.compat':
m = sys.modules[n]
if isinstance(m, ModuleType):
diff --git a/src/engine/SCons/SConsign.py b/src/engine/SCons/SConsign.py
index 6555fcb8..5ce61be2 100644
--- a/src/engine/SCons/SConsign.py
+++ b/src/engine/SCons/SConsign.py
@@ -84,7 +84,7 @@ def Get_DataBase(dir):
DB_sync_list.append(db)
return db, "c"
except TypeError:
- print "DataBase =", DataBase
+ print("DataBase =", DataBase)
raise
def Reset():
@@ -172,7 +172,7 @@ class Base(object):
pass
def merge(self):
- for key, node in self.to_be_merged.items():
+ for key, node in list(self.to_be_merged.items()):
entry = node.get_stored_info()
try:
ninfo = entry.ninfo
@@ -215,10 +215,10 @@ class DB(Base):
raise TypeError
except KeyboardInterrupt:
raise
- except Exception, e:
+ except Exception as e:
SCons.Warnings.warn(SCons.Warnings.CorruptSConsignWarning,
"Ignoring corrupt sconsign entry : %s (%s)\n"%(self.dir.tpath, e))
- for key, entry in self.entries.items():
+ for key, entry in list(self.entries.items()):
entry.convert_from_sconsign(dir, key)
if mode == "r":
@@ -245,7 +245,7 @@ class DB(Base):
# the Repository; we only write to our own .sconsign file,
# not to .sconsign files in Repositories.
path = normcase(self.dir.path)
- for key, entry in self.entries.items():
+ for key, entry in list(self.entries.items()):
entry.convert_to_sconsign()
db[path] = pickle.dumps(self.entries, 1)
@@ -274,7 +274,7 @@ class Dir(Base):
raise TypeError
if dir:
- for key, entry in self.entries.items():
+ for key, entry in list(self.entries.items()):
entry.convert_from_sconsign(dir, key)
class DirFile(Dir):
@@ -333,14 +333,14 @@ class DirFile(Dir):
fname = self.sconsign
except IOError:
return
- for key, entry in self.entries.items():
+ for key, entry in list(self.entries.items()):
entry.convert_to_sconsign()
pickle.dump(self.entries, file, 1)
file.close()
if fname != self.sconsign:
try:
mode = os.stat(self.sconsign)[0]
- os.chmod(self.sconsign, 0666)
+ os.chmod(self.sconsign, 0o666)
os.unlink(self.sconsign)
except (IOError, OSError):
# Try to carry on in the face of either OSError
diff --git a/src/engine/SCons/Scanner/C.py b/src/engine/SCons/Scanner/C.py
index 3311a095..74b01a4c 100644
--- a/src/engine/SCons/Scanner/C.py
+++ b/src/engine/SCons/Scanner/C.py
@@ -59,7 +59,7 @@ class SConsCPPScanner(SCons.cpp.PreProcessor):
def read_file(self, file):
try:
fp = open(str(file.rfile()))
- except EnvironmentError, e:
+ except EnvironmentError as e:
self.missing.append((file, self.current_file))
return ''
else:
diff --git a/src/engine/SCons/Scanner/Fortran.py b/src/engine/SCons/Scanner/Fortran.py
index 1b551306..5339ab2b 100644
--- a/src/engine/SCons/Scanner/Fortran.py
+++ b/src/engine/SCons/Scanner/Fortran.py
@@ -35,6 +35,7 @@ import SCons.Node.FS
import SCons.Scanner
import SCons.Util
import SCons.Warnings
+import collections
class F90Scanner(SCons.Scanner.Classic):
"""
@@ -109,7 +110,7 @@ class F90Scanner(SCons.Scanner.Classic):
# is actually found in a Repository or locally.
nodes = []
source_dir = node.get_dir()
- if callable(path):
+ if isinstance(path, collections.Callable):
path = path()
for dep in mods_and_includes:
n, i = self.find_include(dep, source_dir, path)
diff --git a/src/engine/SCons/Scanner/LaTeX.py b/src/engine/SCons/Scanner/LaTeX.py
index 2cb1ed58..1e0fea15 100644
--- a/src/engine/SCons/Scanner/LaTeX.py
+++ b/src/engine/SCons/Scanner/LaTeX.py
@@ -200,14 +200,14 @@ class LaTeX(SCons.Scanner.Base):
"""
def __init__(self, dictionary):
self.dictionary = {}
- for k,n in dictionary.items():
+ for k,n in list(dictionary.items()):
self.dictionary[k] = ( SCons.Scanner.FindPathDirs(n),
FindENVPathDirs(n) )
def __call__(self, env, dir=None, target=None, source=None,
argument=None):
di = {}
- for k,(c,cENV) in self.dictionary.items():
+ for k,(c,cENV) in list(self.dictionary.items()):
di[k] = ( c(env, dir=None, target=None, source=None,
argument=None) ,
cENV(env, dir=None, target=None, source=None,
diff --git a/src/engine/SCons/Scanner/Prog.py b/src/engine/SCons/Scanner/Prog.py
index 49e93a56..6e2da21e 100644
--- a/src/engine/SCons/Scanner/Prog.py
+++ b/src/engine/SCons/Scanner/Prog.py
@@ -27,6 +27,7 @@ import SCons.Node
import SCons.Node.FS
import SCons.Scanner
import SCons.Util
+import collections
# global, set by --debug=findlibs
print_find_libs = None
@@ -76,7 +77,7 @@ def scan(node, env, libpath = ()):
result = []
- if callable(libpath):
+ if isinstance(libpath, collections.Callable):
libpath = libpath()
find_file = SCons.Node.FS.find_file
diff --git a/src/engine/SCons/Scanner/ProgTests.py b/src/engine/SCons/Scanner/ProgTests.py
index 411e0358..f564c913 100644
--- a/src/engine/SCons/Scanner/ProgTests.py
+++ b/src/engine/SCons/Scanner/ProgTests.py
@@ -230,7 +230,7 @@ def suite():
suite.addTest(ProgramScannerTestCase6())
suite.addTest(ProgramScannerTestCase7())
suite.addTest(ProgramScannerTestCase8())
- try: unicode
+ try: str
except NameError: pass
else:
code = """if 1:
@@ -245,7 +245,7 @@ def suite():
assert deps_match(deps, ['d1/l2.lib', 'd1/d2/l3.lib']), map(str, deps)
suite.addTest(ProgramScannerTestCase4())
\n"""
- exec code
+ exec(code)
return suite
if __name__ == "__main__":
diff --git a/src/engine/SCons/Scanner/ScannerTests.py b/src/engine/SCons/Scanner/ScannerTests.py
index ee269222..5a4639d0 100644
--- a/src/engine/SCons/Scanner/ScannerTests.py
+++ b/src/engine/SCons/Scanner/ScannerTests.py
@@ -569,7 +569,7 @@ class ClassicCPPTestCase(unittest.TestCase):
assert n == 'path/bbb', n
assert i == 'bbb', i
- n, i = s.find_include(('<', u'ccc'), 'foo', ('path',))
+ n, i = s.find_include(('<', 'ccc'), 'foo', ('path',))
assert n == 'path/ccc', n
assert i == 'ccc', i
diff --git a/src/engine/SCons/Scanner/__init__.py b/src/engine/SCons/Scanner/__init__.py
index 562a361e..6ec8df96 100644
--- a/src/engine/SCons/Scanner/__init__.py
+++ b/src/engine/SCons/Scanner/__init__.py
@@ -33,6 +33,7 @@ import re
import SCons.Node.FS
import SCons.Util
+import collections
class _Null(object):
@@ -178,7 +179,7 @@ class Base(object):
self.node_class = node_class
self.node_factory = node_factory
self.scan_check = scan_check
- if callable(recursive):
+ if isinstance(recursive, collections.Callable):
self.recurse_nodes = recursive
elif recursive:
self.recurse_nodes = self._recurse_all_nodes
@@ -369,7 +370,7 @@ class Classic(Current):
# is actually found in a Repository or locally.
nodes = []
source_dir = node.get_dir()
- if callable(path):
+ if isinstance(path, collections.Callable):
path = path()
for include in includes:
n, i = self.find_include(include, source_dir, path)
diff --git a/src/engine/SCons/Script/Interactive.py b/src/engine/SCons/Script/Interactive.py
index ffb50962..87fe1cf1 100644
--- a/src/engine/SCons/Script/Interactive.py
+++ b/src/engine/SCons/Script/Interactive.py
@@ -120,7 +120,7 @@ class SConsInteractiveCmd(cmd.Cmd):
def __init__(self, **kw):
cmd.Cmd.__init__(self)
- for key, val in kw.items():
+ for key, val in list(kw.items()):
setattr(self, key, val)
if sys.platform == 'win32':
@@ -129,12 +129,12 @@ class SConsInteractiveCmd(cmd.Cmd):
self.shell_variable = 'SHELL'
def default(self, argv):
- print "*** Unknown command: %s" % argv[0]
+ print("*** Unknown command: %s" % argv[0])
def onecmd(self, line):
line = line.strip()
if not line:
- print self.lastcmd
+ print(self.lastcmd)
return self.emptyline()
self.lastcmd = line
if line[0] == '!':
@@ -249,7 +249,7 @@ class SConsInteractiveCmd(cmd.Cmd):
while n:
n = walker.get_next()
- for node in seen_nodes.keys():
+ for node in list(seen_nodes.keys()):
# Call node.clear() to clear most of the state
node.clear()
# node.clear() doesn't reset node.state, so call
@@ -274,7 +274,7 @@ class SConsInteractiveCmd(cmd.Cmd):
return self.do_build(['build', '--clean'] + argv[1:])
def do_EOF(self, argv):
- print
+ print()
self.do_exit(argv)
def _do_one_help(self, arg):
@@ -357,7 +357,7 @@ class SConsInteractiveCmd(cmd.Cmd):
# Doing the right thing with an argument list currently
# requires different shell= values on Windows and Linux.
p = subprocess.Popen(argv, shell=(sys.platform=='win32'))
- except EnvironmentError, e:
+ except EnvironmentError as e:
sys.stderr.write('scons: %s: %s\n' % (argv[0], e.strerror))
else:
p.wait()
diff --git a/src/engine/SCons/Script/Main.py b/src/engine/SCons/Script/Main.py
index 837c1039..9a52937e 100644
--- a/src/engine/SCons/Script/Main.py
+++ b/src/engine/SCons/Script/Main.py
@@ -68,6 +68,7 @@ import SCons.Util
import SCons.Warnings
import SCons.Script.Interactive
+import collections
def fetch_win32_parallel_msg():
# A subsidiary function that exists solely to isolate this import
@@ -104,7 +105,7 @@ class Progressor(object):
self.interval = interval
self.overwrite = overwrite
- if callable(obj):
+ if isinstance(obj, collections.Callable):
self.func = obj
elif SCons.Util.is_List(obj):
self.func = self.spinner
@@ -224,7 +225,7 @@ class BuildTask(SCons.Taskmaster.OutOfDateTask):
self.exception_set()
self.do_failed()
else:
- print "scons: Nothing to be done for `%s'." % t
+ print("scons: Nothing to be done for `%s'." % t)
SCons.Taskmaster.OutOfDateTask.executed(self)
else:
SCons.Taskmaster.OutOfDateTask.executed(self)
@@ -290,8 +291,8 @@ class BuildTask(SCons.Taskmaster.OutOfDateTask):
if self.options.debug_includes:
tree = t.render_include_tree()
if tree:
- print
- print tree
+ print()
+ print(tree)
SCons.Taskmaster.OutOfDateTask.postprocess(self)
def make_ready(self):
@@ -326,10 +327,10 @@ class CleanTask(SCons.Taskmaster.AlwaysTask):
else:
errstr = "Path '%s' exists but isn't a file or directory."
raise SCons.Errors.UserError(errstr % (pathstr))
- except SCons.Errors.UserError, e:
- print e
- except (IOError, OSError), e:
- print "scons: Could not remove '%s':" % pathstr, e.strerror
+ except SCons.Errors.UserError as e:
+ print(e)
+ except (IOError, OSError) as e:
+ print("scons: Could not remove '%s':" % pathstr, e.strerror)
def show(self):
target = self.targets[0]
@@ -348,13 +349,13 @@ class CleanTask(SCons.Taskmaster.AlwaysTask):
for t in self.targets:
try:
removed = t.remove()
- except OSError, e:
+ except OSError as e:
# An OSError may indicate something like a permissions
# issue, an IOError would indicate something like
# the file not existing. In either case, print a
# message and keep going to try to remove as many
# targets as possible.
- print "scons: Could not remove '%s':" % str(t), e.strerror
+ print("scons: Could not remove '%s':" % str(t), e.strerror)
else:
if removed:
display("Removed " + str(t))
@@ -595,7 +596,7 @@ def _scons_internal_error():
"""Handle all errors but user errors. Print out a message telling
the user what to do in this case and print a normal trace.
"""
- print 'internal error'
+ print('internal error')
traceback.print_exc()
sys.exit(2)
@@ -707,7 +708,7 @@ def _load_site_scons_dir(topdir, site_dir_name=None):
# the error checking makes it longer.
try:
m = sys.modules['SCons.Script']
- except Exception, e:
+ except Exception as e:
fmt = 'cannot import site_init.py: missing SCons.Script module %s'
raise SCons.Errors.InternalError(fmt % repr(e))
try:
@@ -715,15 +716,15 @@ def _load_site_scons_dir(topdir, site_dir_name=None):
modname = os.path.basename(pathname)[:-len(sfx)]
site_m = {"__file__": pathname, "__name__": modname, "__doc__": None}
re_special = re.compile("__[^_]+__")
- for k in m.__dict__.keys():
+ for k in list(m.__dict__.keys()):
if not re_special.match(k):
site_m[k] = m.__dict__[k]
# This is the magic.
- exec fp in site_m
+ exec(fp, site_m)
except KeyboardInterrupt:
raise
- except Exception, e:
+ except Exception as e:
fmt = '*** Error loading site_init file %s:\n'
sys.stderr.write(fmt % repr(site_init_file))
raise
@@ -733,7 +734,7 @@ def _load_site_scons_dir(topdir, site_dir_name=None):
m.__dict__[k] = site_m[k]
except KeyboardInterrupt:
raise
- except ImportError, e:
+ except ImportError as e:
fmt = '*** cannot import site init file %s:\n'
sys.stderr.write(fmt % repr(site_init_file))
raise
@@ -785,7 +786,7 @@ def _load_all_site_scons_dirs(topdir, verbose=None):
dirs=sysdirs + [topdir]
for d in dirs:
if verbose: # this is used by unit tests.
- print "Loading site dir ", d
+ print("Loading site dir ", d)
_load_site_scons_dir(d)
def test_load_all_site_scons_dirs(d):
@@ -977,7 +978,7 @@ def _main(parser):
try:
for script in scripts:
SCons.Script._SConscript._SConscript(fs, script)
- except SCons.Errors.StopError, e:
+ except SCons.Errors.StopError as e:
# We had problems reading an SConscript file, such as it
# couldn't be copied in to the VariantDir. Since we're just
# reading SConscript files and haven't started building
@@ -1034,8 +1035,8 @@ def _main(parser):
# SConscript files. Give them the options usage.
raise SConsPrintHelpException
else:
- print help_text
- print "Use scons -H for help about command-line options."
+ print(help_text)
+ print("Use scons -H for help about command-line options.")
exit_status = 0
return
@@ -1298,7 +1299,7 @@ def _exec_main(parser, values):
prof = Profile()
try:
prof.runcall(_main, parser)
- except SConsPrintHelpException, e:
+ except SConsPrintHelpException as e:
prof.dump_stats(options.profile_file)
raise e
except SystemExit:
@@ -1334,7 +1335,7 @@ def main():
parts.append("__COPYRIGHT__")
version = ''.join(parts)
- import SConsOptions
+ from . import SConsOptions
parser = SConsOptions.Parser(version)
values = SConsOptions.SConsValues(parser.get_default_values())
@@ -1342,22 +1343,22 @@ def main():
try:
_exec_main(parser, values)
- except SystemExit, s:
+ except SystemExit as s:
if s:
exit_status = s
except KeyboardInterrupt:
print("scons: Build interrupted.")
sys.exit(2)
- except SyntaxError, e:
+ except SyntaxError as e:
_scons_syntax_error(e)
except SCons.Errors.InternalError:
_scons_internal_error()
- except SCons.Errors.UserError, e:
+ except SCons.Errors.UserError as e:
_scons_user_error(e)
except SConsPrintHelpException:
parser.print_help()
exit_status = 0
- except SCons.Errors.BuildError, e:
+ except SCons.Errors.BuildError as e:
exit_status = e.exitstatus
except:
# An exception here is likely a builtin Python exception Python
@@ -1393,10 +1394,10 @@ def main():
else:
ct = last_command_end - first_command_start
scons_time = total_time - sconscript_time - ct
- print "Total build time: %f seconds"%total_time
- print "Total SConscript file execution time: %f seconds"%sconscript_time
- print "Total SCons execution time: %f seconds"%scons_time
- print "Total command execution time: %f seconds"%ct
+ print("Total build time: %f seconds"%total_time)
+ print("Total SConscript file execution time: %f seconds"%sconscript_time)
+ print("Total SCons execution time: %f seconds"%scons_time)
+ print("Total command execution time: %f seconds"%ct)
sys.exit(exit_status)
diff --git a/src/engine/SCons/Script/SConsOptions.py b/src/engine/SCons/Script/SConsOptions.py
index 645ab11b..559db971 100644
--- a/src/engine/SCons/Script/SConsOptions.py
+++ b/src/engine/SCons/Script/SConsOptions.py
@@ -161,7 +161,7 @@ class SConsValues(optparse.Values):
elif name == 'diskcheck':
try:
value = diskcheck_convert(value)
- except ValueError, v:
+ except ValueError as v:
raise SCons.Errors.UserError("Not a valid diskcheck value: %s"%v)
if 'diskcheck' not in self.__dict__:
# No --diskcheck= option was specified on the command line.
@@ -611,7 +611,7 @@ def Parser(version):
deprecated_debug_options=deprecated_debug_options):
if value in debug_options:
parser.values.debug.append(value)
- elif value in deprecated_debug_options.keys():
+ elif value in list(deprecated_debug_options.keys()):
parser.values.debug.append(value)
try:
parser.values.delayed_warnings
@@ -635,7 +635,7 @@ def Parser(version):
def opt_diskcheck(option, opt, value, parser):
try:
diskcheck_value = diskcheck_convert(value)
- except ValueError, e:
+ except ValueError as e:
raise OptionValueError("`%s' is not a valid diskcheck type" % e)
setattr(parser.values, option.dest, diskcheck_value)
@@ -802,7 +802,7 @@ def Parser(version):
tree_options = ["all", "derived", "prune", "status"]
def opt_tree(option, opt, value, parser, tree_options=tree_options):
- import Main
+ from . import Main
tp = Main.TreePrinter()
for o in value.split(','):
if o == 'all':
diff --git a/src/engine/SCons/Script/SConscript.py b/src/engine/SCons/Script/SConscript.py
index bd515d25..fefffef2 100644
--- a/src/engine/SCons/Script/SConscript.py
+++ b/src/engine/SCons/Script/SConscript.py
@@ -26,7 +26,7 @@ files.
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-from __future__ import division
+
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
@@ -113,7 +113,7 @@ def compute_exports(exports):
retval[export] = loc[export]
except KeyError:
retval[export] = glob[export]
- except KeyError, x:
+ except KeyError as x:
raise SCons.Errors.UserError("Export of non-existent variable '%s'"%x)
return retval
@@ -145,7 +145,7 @@ def Return(*vars, **kw):
for var in fvars:
for v in var.split():
retval.append(call_stack[-1].globals[v])
- except KeyError, x:
+ except KeyError as x:
raise SCons.Errors.UserError("Return of non-existent variable '%s'"%x)
if len(retval) == 1:
@@ -174,7 +174,7 @@ def _SConscript(fs, *files, **kw):
try:
SCons.Script.sconscript_reading = SCons.Script.sconscript_reading + 1
if fn == "-":
- exec sys.stdin in call_stack[-1].globals
+ exec(sys.stdin, call_stack[-1].globals)
else:
if isinstance(fn, SCons.Node.Node):
f = fn
@@ -257,7 +257,7 @@ def _SConscript(fs, *files, **kw):
pass
try:
try:
- exec _file_ in call_stack[-1].globals
+ exec(_file_, call_stack[-1].globals)
except SConscriptReturn:
pass
finally:
@@ -282,7 +282,7 @@ def _SConscript(fs, *files, **kw):
rdir._create() # Make sure there's a directory there.
try:
os.chdir(rdir.get_abspath())
- except OSError, e:
+ except OSError as e:
# We still couldn't chdir there, so raise the error,
# but only if actions are being executed.
#
@@ -467,15 +467,15 @@ class SConsEnvironment(SCons.Environment.Base):
scons_ver_string = '%d.%d.%d' % (major, minor, revision)
else:
scons_ver_string = '%d.%d' % (major, minor)
- print "SCons %s or greater required, but you have SCons %s" % \
- (scons_ver_string, SCons.__version__)
+ print("SCons %s or greater required, but you have SCons %s" % \
+ (scons_ver_string, SCons.__version__))
sys.exit(2)
def EnsurePythonVersion(self, major, minor):
"""Exit abnormally if the Python version is not late enough."""
if sys.version_info < (major, minor):
v = sys.version.split()[0]
- print "Python %d.%d or greater required, but you have Python %s" %(major,minor,v)
+ print("Python %d.%d or greater required, but you have Python %s" %(major,minor,v))
sys.exit(2)
def Exit(self, value=0):
@@ -514,7 +514,7 @@ class SConsEnvironment(SCons.Environment.Base):
globals[v] = exports[v]
else:
globals[v] = global_exports[v]
- except KeyError,x:
+ except KeyError as x:
raise SCons.Errors.UserError("Import of non-existent variable '%s'"%x)
def SConscript(self, *ls, **kw):
@@ -529,7 +529,7 @@ class SConsEnvironment(SCons.Environment.Base):
return x
ls = list(map(subst_element, ls))
subst_kw = {}
- for key, val in kw.items():
+ for key, val in list(kw.items()):
if SCons.Util.is_String(val):
val = self.subst(val)
elif SCons.Util.is_List(val):
diff --git a/src/engine/SCons/Script/__init__.py b/src/engine/SCons/Script/__init__.py
index bb7b632a..9f2837c5 100644
--- a/src/engine/SCons/Script/__init__.py
+++ b/src/engine/SCons/Script/__init__.py
@@ -66,7 +66,7 @@ if "--debug=memoizer" in _args:
except SCons.Warnings.Warning:
# Some warning was thrown. Arrange for it to be displayed
# or not after warnings are configured.
- import Main
+ from . import Main
exc_type, exc_value, tb = sys.exc_info()
Main.delayed_warnings.append((exc_type, exc_value))
del _args
@@ -85,7 +85,7 @@ import SCons.Util
import SCons.Variables
import SCons.Defaults
-import Main
+from . import Main
main = Main.main
@@ -128,7 +128,7 @@ GetBuildFailures = Main.GetBuildFailures
#repositories = Main.repositories
#
-import SConscript
+from . import SConscript
_SConscript = SConscript
call_stack = _SConscript.call_stack
@@ -364,7 +364,7 @@ GlobalDefaultBuilders = [
]
for name in GlobalDefaultEnvironmentFunctions + GlobalDefaultBuilders:
- exec "%s = _SConscript.DefaultEnvironmentCall(%s)" % (name, repr(name))
+ exec("%s = _SConscript.DefaultEnvironmentCall(%s)" % (name, repr(name)))
del name
# There are a handful of variables that used to live in the
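
For orientation only (not from the patch): exec is a function in Python 3, so the statement form used above to generate the global builder wrappers becomes a call; run at module level it still binds the generated names as globals. A toy sketch, with DefaultEnvironmentCall here a hypothetical stand-in rather than the SCons helper:

# Illustrative sketch only, not part of this patch.
def DefaultEnvironmentCall(name):
    return lambda *args, **kw: (name, args, kw)

for name in ['Program', 'Library']:
    exec("%s = DefaultEnvironmentCall(%s)" % (name, repr(name)))
del name

assert Program('hello.c')[0] == 'Program'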
diff --git a/src/engine/SCons/Subst.py b/src/engine/SCons/Subst.py
index 98097dc8..cca9bbcc 100644
--- a/src/engine/SCons/Subst.py
+++ b/src/engine/SCons/Subst.py
@@ -438,7 +438,7 @@ def scons_subst(strSubst, env, mode=SUBST_RAW, target=None, source=None, gvars={
s = eval(key, self.gvars, lvars)
except KeyboardInterrupt:
raise
- except Exception, e:
+ except Exception as e:
if e.__class__ in AllowableExceptions:
return ''
raise_exception(e, lvars['TARGETS'], s)
@@ -472,7 +472,7 @@ def scons_subst(strSubst, env, mode=SUBST_RAW, target=None, source=None, gvars={
def func(l, conv=self.conv, substitute=self.substitute, lvars=lvars):
return conv(substitute(l, lvars))
return list(map(func, s))
- elif callable(s):
+ elif isinstance(s, collections.Callable):
try:
s = s(target=lvars['TARGETS'],
source=lvars['SOURCES'],
@@ -653,7 +653,7 @@ def scons_subst_list(strSubst, env, mode=SUBST_RAW, target=None, source=None, gv
s = eval(key, self.gvars, lvars)
except KeyboardInterrupt:
raise
- except Exception, e:
+ except Exception as e:
if e.__class__ in AllowableExceptions:
return
raise_exception(e, lvars['TARGETS'], s)
@@ -681,7 +681,7 @@ def scons_subst_list(strSubst, env, mode=SUBST_RAW, target=None, source=None, gv
for a in s:
self.substitute(a, lvars, 1)
self.next_word()
- elif callable(s):
+ elif isinstance(s, collections.Callable):
try:
s = s(target=lvars['TARGETS'],
source=lvars['SOURCES'],
diff --git a/src/engine/SCons/SubstTests.py b/src/engine/SCons/SubstTests.py
index 420fd739..da210207 100644
--- a/src/engine/SCons/SubstTests.py
+++ b/src/engine/SCons/SubstTests.py
@@ -241,14 +241,14 @@ class SubstTestCase(unittest.TestCase):
expect = convert(expect)
try:
result = function(input, env, **kwargs)
- except Exception, e:
+ except Exception as e:
fmt = " input %s generated %s (%s)"
- print fmt % (repr(input), e.__class__.__name__, repr(e))
+ print(fmt % (repr(input), e.__class__.__name__, repr(e)))
failed = failed + 1
else:
if result != expect:
- if failed == 0: print
- print " input %s => %s did not match %s" % (repr(input), repr(result), repr(expect))
+ if failed == 0: print()
+ print(" input %s => %s did not match %s" % (repr(input), repr(result), repr(expect)))
failed = failed + 1
del cases[:2]
fmt = "%d %s() cases failed"
@@ -460,18 +460,18 @@ class scons_subst_TestCase(SubstTestCase):
input, eraw, ecmd, esig = subst_cases[:4]
result = scons_subst(input, env, mode=SUBST_RAW, gvars=gvars)
if result != eraw:
- if failed == 0: print
- print " input %s => RAW %s did not match %s" % (repr(input), repr(result), repr(eraw))
+ if failed == 0: print()
+ print(" input %s => RAW %s did not match %s" % (repr(input), repr(result), repr(eraw)))
failed = failed + 1
result = scons_subst(input, env, mode=SUBST_CMD, gvars=gvars)
if result != ecmd:
- if failed == 0: print
- print " input %s => CMD %s did not match %s" % (repr(input), repr(result), repr(ecmd))
+ if failed == 0: print()
+ print(" input %s => CMD %s did not match %s" % (repr(input), repr(result), repr(ecmd)))
failed = failed + 1
result = scons_subst(input, env, mode=SUBST_SIG, gvars=gvars)
if result != esig:
- if failed == 0: print
- print " input %s => SIG %s did not match %s" % (repr(input), repr(result), repr(esig))
+ if failed == 0: print()
+ print(" input %s => SIG %s did not match %s" % (repr(input), repr(result), repr(esig)))
failed = failed + 1
del subst_cases[:4]
assert failed == 0, "%d subst() mode cases failed" % failed
@@ -514,7 +514,7 @@ class scons_subst_TestCase(SubstTestCase):
class Foo(object):
pass
scons_subst('${foo.bar}', env, gvars={'foo':Foo()})
- except SCons.Errors.UserError, e:
+ except SCons.Errors.UserError as e:
expect = [
"AttributeError `bar' trying to evaluate `${foo.bar}'",
"AttributeError `Foo instance has no attribute 'bar'' trying to evaluate `${foo.bar}'",
@@ -530,7 +530,7 @@ class scons_subst_TestCase(SubstTestCase):
env = DummyEnv(self.loc)
try:
scons_subst('$foo.bar.3.0', env)
- except SCons.Errors.UserError, e:
+ except SCons.Errors.UserError as e:
expect = [
# Python 2.3, 2.4
"SyntaxError `invalid syntax (line 1)' trying to evaluate `$foo.bar.3.0'",
@@ -546,7 +546,7 @@ class scons_subst_TestCase(SubstTestCase):
env = DummyEnv(self.loc)
try:
scons_subst("${NONE[2]}", env, gvars={'NONE':None})
- except SCons.Errors.UserError, e:
+ except SCons.Errors.UserError as e:
expect = [
# Python 2.3, 2.4
"TypeError `unsubscriptable object' trying to evaluate `${NONE[2]}'",
@@ -565,7 +565,7 @@ class scons_subst_TestCase(SubstTestCase):
def func(a, b, c):
pass
scons_subst("${func(1)}", env, gvars={'func':func})
- except SCons.Errors.UserError, e:
+ except SCons.Errors.UserError as e:
expect = [
# Python 2.3, 2.4, 2.5
"TypeError `func() takes exactly 3 arguments (1 given)' trying to evaluate `${func(1)}'"
@@ -946,18 +946,18 @@ class scons_subst_list_TestCase(SubstTestCase):
input, eraw, ecmd, esig = subst_list_cases[:4]
result = scons_subst_list(input, env, mode=SUBST_RAW, gvars=gvars)
if result != eraw:
- if failed == 0: print
- print " input %s => RAW %s did not match %s" % (repr(input), repr(result), repr(eraw))
+ if failed == 0: print()
+ print(" input %s => RAW %s did not match %s" % (repr(input), repr(result), repr(eraw)))
failed = failed + 1
result = scons_subst_list(input, env, mode=SUBST_CMD, gvars=gvars)
if result != ecmd:
- if failed == 0: print
- print " input %s => CMD %s did not match %s" % (repr(input), repr(result), repr(ecmd))
+ if failed == 0: print()
+ print(" input %s => CMD %s did not match %s" % (repr(input), repr(result), repr(ecmd)))
failed = failed + 1
result = scons_subst_list(input, env, mode=SUBST_SIG, gvars=gvars)
if result != esig:
- if failed == 0: print
- print " input %s => SIG %s did not match %s" % (repr(input), repr(result), repr(esig))
+ if failed == 0: print()
+ print(" input %s => SIG %s did not match %s" % (repr(input), repr(result), repr(esig)))
failed = failed + 1
del subst_list_cases[:4]
assert failed == 0, "%d subst() mode cases failed" % failed
@@ -969,7 +969,7 @@ class scons_subst_list_TestCase(SubstTestCase):
class Foo(object):
pass
scons_subst_list('${foo.bar}', env, gvars={'foo':Foo()})
- except SCons.Errors.UserError, e:
+ except SCons.Errors.UserError as e:
expect = [
"AttributeError `bar' trying to evaluate `${foo.bar}'",
"AttributeError `Foo instance has no attribute 'bar'' trying to evaluate `${foo.bar}'",
@@ -985,7 +985,7 @@ class scons_subst_list_TestCase(SubstTestCase):
env = DummyEnv()
try:
scons_subst_list('$foo.bar.3.0', env)
- except SCons.Errors.UserError, e:
+ except SCons.Errors.UserError as e:
expect = [
"SyntaxError `invalid syntax' trying to evaluate `$foo.bar.3.0'",
"SyntaxError `invalid syntax (line 1)' trying to evaluate `$foo.bar.3.0'",
@@ -1093,8 +1093,8 @@ class scons_subst_once_TestCase(unittest.TestCase):
input, key, expect = cases[:3]
result = scons_subst_once(input, env, key)
if result != expect:
- if failed == 0: print
- print " input %s (%s) => %s did not match %s" % (repr(input), repr(key), repr(result), repr(expect))
+ if failed == 0: print()
+ print(" input %s (%s) => %s did not match %s" % (repr(input), repr(key), repr(result), repr(expect)))
failed = failed + 1
del cases[:3]
assert failed == 0, "%d subst() cases failed" % failed
diff --git a/src/engine/SCons/Taskmaster.py b/src/engine/SCons/Taskmaster.py
index 64ab84da..54d04a84 100644
--- a/src/engine/SCons/Taskmaster.py
+++ b/src/engine/SCons/Taskmaster.py
@@ -107,7 +107,7 @@ fmt = "%(considered)3d "\
def dump_stats():
for n in sorted(StatsNodes, key=lambda a: str(a)):
- print (fmt % n.stats.__dict__) + str(n)
+ print((fmt % n.stats.__dict__) + str(n))
@@ -164,7 +164,7 @@ class Task(object):
"""
global print_prepare
T = self.tm.trace
- if T: T.write(self.trace_message(u'Task.prepare()', self.node))
+ if T: T.write(self.trace_message('Task.prepare()', self.node))
# Now that it's the appropriate time, give the TaskMaster a
# chance to raise any exceptions it encountered while preparing
@@ -189,13 +189,13 @@ class Task(object):
executor.prepare()
for t in executor.get_action_targets():
if print_prepare:
- print "Preparing target %s..."%t
+ print("Preparing target %s..."%t)
for s in t.side_effects:
- print "...with side-effect %s..."%s
+ print("...with side-effect %s..."%s)
t.prepare()
for s in t.side_effects:
if print_prepare:
- print "...Preparing side-effect %s..."%s
+ print("...Preparing side-effect %s..."%s)
s.prepare()
def get_target(self):
@@ -224,7 +224,7 @@ class Task(object):
prepare(), executed() or failed().
"""
T = self.tm.trace
- if T: T.write(self.trace_message(u'Task.execute()', self.node))
+ if T: T.write(self.trace_message('Task.execute()', self.node))
try:
cached_targets = []
@@ -254,7 +254,7 @@ class Task(object):
raise
except SCons.Errors.BuildError:
raise
- except Exception, e:
+ except Exception as e:
buildError = SCons.Errors.convert_to_BuildError(e)
buildError.node = self.targets[0]
buildError.exc_info = sys.exc_info()
@@ -383,7 +383,7 @@ class Task(object):
This is the default behavior for building only what's necessary.
"""
T = self.tm.trace
- if T: T.write(self.trace_message(u'Task.make_ready_current()',
+ if T: T.write(self.trace_message('Task.make_ready_current()',
self.node))
self.out_of_date = []
@@ -393,7 +393,7 @@ class Task(object):
t.disambiguate().make_ready()
is_up_to_date = not t.has_builder() or \
(not t.always_build and t.is_up_to_date())
- except EnvironmentError, e:
+ except EnvironmentError as e:
raise SCons.Errors.BuildError(node=t, errstr=e.strerror, filename=e.filename)
if not is_up_to_date:
@@ -428,7 +428,7 @@ class Task(object):
that can be put back on the candidates list.
"""
T = self.tm.trace
- if T: T.write(self.trace_message(u'Task.postprocess()', self.node))
+ if T: T.write(self.trace_message('Task.postprocess()', self.node))
# We may have built multiple targets, some of which may have
# common parents waiting for this build. Count up how many
@@ -445,7 +445,7 @@ class Task(object):
# A node can only be in the pending_children set if it has
# some waiting_parents.
if t.waiting_parents:
- if T: T.write(self.trace_message(u'Task.postprocess()',
+ if T: T.write(self.trace_message('Task.postprocess()',
t,
'removing'))
pending_children.discard(t)
@@ -462,9 +462,9 @@ class Task(object):
if p.ref_count == 0:
self.tm.candidates.append(p)
- for p, subtract in parents.items():
+ for p, subtract in list(parents.items()):
p.ref_count = p.ref_count - subtract
- if T: T.write(self.trace_message(u'Task.postprocess()',
+ if T: T.write(self.trace_message('Task.postprocess()',
p,
'adjusted parent ref count'))
if p.ref_count == 0:
@@ -524,7 +524,7 @@ class Task(object):
except ValueError:
exc_type, exc_value = exc
exc_traceback = None
- raise exc_type, exc_value, exc_traceback
+ raise exc_type(exc_value).with_traceback(exc_traceback)
class AlwaysTask(Task):
def needs_execute(self):
@@ -748,12 +748,12 @@ class Taskmaster(object):
self.ready_exc = None
T = self.trace
- if T: T.write(u'\n' + self.trace_message('Looking for a node to evaluate'))
+ if T: T.write('\n' + self.trace_message('Looking for a node to evaluate'))
while True:
node = self.next_candidate()
if node is None:
- if T: T.write(self.trace_message('No candidate anymore.') + u'\n')
+ if T: T.write(self.trace_message('No candidate anymore.') + '\n')
return None
node = node.disambiguate()
@@ -776,7 +776,7 @@ class Taskmaster(object):
else:
S = None
- if T: T.write(self.trace_message(u' Considering node %s and its children:' % self.trace_node(node)))
+ if T: T.write(self.trace_message(' Considering node %s and its children:' % self.trace_node(node)))
if state == NODE_NO_STATE:
# Mark this node as being on the execution stack:
@@ -784,7 +784,7 @@ class Taskmaster(object):
elif state > NODE_PENDING:
# Skip this node if it has already been evaluated:
if S: S.already_handled = S.already_handled + 1
- if T: T.write(self.trace_message(u' already handled (executed)'))
+ if T: T.write(self.trace_message(' already handled (executed)'))
continue
executor = node.get_executor()
@@ -797,7 +797,7 @@ class Taskmaster(object):
self.ready_exc = (SCons.Errors.ExplicitExit, e)
if T: T.write(self.trace_message(' SystemExit'))
return node
- except Exception, e:
+ except Exception as e:
# We had a problem just trying to figure out the
# children (like a child couldn't be linked in to a
# VariantDir, or a Scanner threw something). Arrange to
@@ -815,7 +815,7 @@ class Taskmaster(object):
for child in chain(executor.get_all_prerequisites(), children):
childstate = child.get_state()
- if T: T.write(self.trace_message(u' ' + self.trace_node(child)))
+ if T: T.write(self.trace_message(' ' + self.trace_node(child)))
if childstate == NODE_NO_STATE:
children_not_visited.append(child)
@@ -874,7 +874,7 @@ class Taskmaster(object):
# count so we can be put back on the list for
# re-evaluation when they've all finished.
node.ref_count = node.ref_count + child.add_to_waiting_parents(node)
- if T: T.write(self.trace_message(u' adjusted ref count: %s, child %s' %
+ if T: T.write(self.trace_message(' adjusted ref count: %s, child %s' %
(self.trace_node(node), repr(str(child)))))
if T:
@@ -900,7 +900,7 @@ class Taskmaster(object):
# The default when we've gotten through all of the checks above:
# this node is ready to be built.
if S: S.build = S.build + 1
- if T: T.write(self.trace_message(u'Evaluating %s\n' %
+ if T: T.write(self.trace_message('Evaluating %s\n' %
self.trace_node(node)))
# For debugging only:
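Two mechanical fixers account for most of the Taskmaster.py churn: the except fixer replaces the old comma form of exception binding with the "as" form, and the unicode fixer drops u'' string prefixes because every str is already unicode in Python 3 (the prefix only became legal again in 3.3). A minimal sketch of the syntax change, with illustrative names rather than SCons code:

try:
    open('/no/such/path')
# Python 2 only:    except IOError, e:
except IOError as e:                  # valid on Python 2.6+ and Python 3
    msg = 'open failed: %s' % e       # was a u'...' literal before the run
    print(msg)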
diff --git a/src/engine/SCons/TaskmasterTests.py b/src/engine/SCons/TaskmasterTests.py
index 85ade8df..0140278a 100644
--- a/src/engine/SCons/TaskmasterTests.py
+++ b/src/engine/SCons/TaskmasterTests.py
@@ -20,7 +20,7 @@
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
-from __future__ import division
+
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
@@ -692,7 +692,7 @@ class TaskmasterTestCase(unittest.TestCase):
tm = SCons.Taskmaster.Taskmaster([n3])
try:
t = tm.next_task()
- except SCons.Errors.UserError, e:
+ except SCons.Errors.UserError as e:
assert str(e) == "Dependency cycle: n3 -> n1 -> n2 -> n3", str(e)
else:
assert 'Did not catch expected UserError'
@@ -846,7 +846,7 @@ class TaskmasterTestCase(unittest.TestCase):
exc_caught = None
try:
t.prepare()
- except MyException, e:
+ except MyException as e:
exc_caught = 1
except:
pass
@@ -899,7 +899,7 @@ class TaskmasterTestCase(unittest.TestCase):
t = tm.next_task()
try:
t.prepare()
- except Exception, e:
+ except Exception as e:
assert str(e) == "Executor.prepare() exception", e
else:
raise AssertionError("did not catch expected exception")
@@ -953,7 +953,7 @@ class TaskmasterTestCase(unittest.TestCase):
t = tm.next_task()
try:
t.execute()
- except SCons.Errors.BuildError, e:
+ except SCons.Errors.BuildError as e:
assert e.node == n4, e.node
assert e.errstr == "OtherError : ", e.errstr
assert len(e.exc_info) == 3, e.exc_info
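The deleted "from __future__ import division" line is the future fixer at work: the future behaviour is simply the default in Python 3. The practical consequence during a transition checkpoint like this one is that the module no longer requests true division if it is still executed under Python 2. A short reminder of the semantics involved:

# Python 3 (and Python 2 with the future import): / is true division.
assert 1 / 2 == 0.5
assert 7 // 2 == 3     # floor division is always spelled //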
diff --git a/src/engine/SCons/Tool/FortranCommon.py b/src/engine/SCons/Tool/FortranCommon.py
index 4c5730cf..c21128ef 100644
--- a/src/engine/SCons/Tool/FortranCommon.py
+++ b/src/engine/SCons/Tool/FortranCommon.py
@@ -61,7 +61,7 @@ def isfortran(env, source):
def _fortranEmitter(target, source, env):
node = source[0].rfile()
if not node.exists() and not node.is_derived():
- print "Could not locate " + str(node.name)
+ print("Could not locate " + str(node.name))
return ([], [])
mod_regex = """(?i)^\s*MODULE\s+(?!PROCEDURE)(\w+)"""
cre = re.compile(mod_regex,re.M)
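The bulk of the remaining hunks are the print fixer turning print statements into calls. Because print() takes any number of positional arguments (joined with a single space by default), comma-separated argument lists such as the Verbose traces later in this patch keep their Python 2 output. A small illustrative sketch:

libname = 'libfoo.so.1.2.3'
# Python 2:  print "linkname = ", libname
print("linkname = ", libname)     # -> linkname =  libfoo.so.1.2.3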
diff --git a/src/engine/SCons/Tool/GettextCommon.py b/src/engine/SCons/Tool/GettextCommon.py
index cd2f306c..ea81f0d1 100644
--- a/src/engine/SCons/Tool/GettextCommon.py
+++ b/src/engine/SCons/Tool/GettextCommon.py
@@ -196,7 +196,7 @@ class _POFileBuilder(BuilderBase):
import SCons.Util
import SCons.Node
linguas_files = None
- if env.has_key('LINGUAS_FILE') and env['LINGUAS_FILE']:
+ if 'LINGUAS_FILE' in env and env['LINGUAS_FILE']:
linguas_files = env['LINGUAS_FILE']
# This prevents endless recursion loop (we'll be invoked once for
# each target appended here, we must not extend the list again).
@@ -341,7 +341,7 @@ class RPaths(object):
def _init_po_files(target, source, env):
""" Action function for `POInit` builder. """
nop = lambda target, source, env : 0
- if env.has_key('POAUTOINIT'):
+ if 'POAUTOINIT' in env:
autoinit = env['POAUTOINIT']
else:
autoinit = False
@@ -365,7 +365,7 @@ def _init_po_files(target, source, env):
#############################################################################
def _detect_xgettext(env):
""" Detects *xgettext(1)* binary """
- if env.has_key('XGETTEXT'):
+ if 'XGETTEXT' in env:
return env['XGETTEXT']
xgettext = env.Detect('xgettext');
if xgettext:
@@ -380,7 +380,7 @@ def _xgettext_exists(env):
#############################################################################
def _detect_msginit(env):
""" Detects *msginit(1)* program. """
- if env.has_key('MSGINIT'):
+ if 'MSGINIT' in env:
return env['MSGINIT']
msginit = env.Detect('msginit');
if msginit:
@@ -395,7 +395,7 @@ def _msginit_exists(env):
#############################################################################
def _detect_msgmerge(env):
""" Detects *msgmerge(1)* program. """
- if env.has_key('MSGMERGE'):
+ if 'MSGMERGE' in env:
return env['MSGMERGE']
msgmerge = env.Detect('msgmerge');
if msgmerge:
@@ -410,7 +410,7 @@ def _msgmerge_exists(env):
#############################################################################
def _detect_msgfmt(env):
""" Detects *msgmfmt(1)* program. """
- if env.has_key('MSGFMT'):
+ if 'MSGFMT' in env:
return env['MSGFMT']
msgfmt = env.Detect('msgfmt');
if msgfmt:
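dict.has_key() no longer exists in Python 3, so the has_key fixer rewrites the tests above as membership checks with the in operator. A minimal sketch, with a plain dict standing in for the construction environment (names are illustrative):

env = {'MSGFMT': '/usr/bin/msgfmt'}      # stand-in for the SCons env
# Python 2:  if env.has_key('MSGFMT'):
if 'MSGFMT' in env:
    msgfmt = env['MSGFMT']
    print('using', msgfmt)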
diff --git a/src/engine/SCons/Tool/MSCommon/common.py b/src/engine/SCons/Tool/MSCommon/common.py
index caf2b379..dcf69c81 100644
--- a/src/engine/SCons/Tool/MSCommon/common.py
+++ b/src/engine/SCons/Tool/MSCommon/common.py
@@ -38,7 +38,7 @@ import SCons.Util
logfile = os.environ.get('SCONS_MSCOMMON_DEBUG')
if logfile == '-':
def debug(x):
- print x
+ print(x)
elif logfile:
try:
import logging
@@ -113,7 +113,7 @@ def normalize_env(env, keys, force=False):
Note: the environment is copied."""
normenv = {}
if env:
- for k in env.keys():
+ for k in list(env.keys()):
normenv[k] = copy.deepcopy(env[k]).encode('mbcs')
for k in keys:
@@ -217,7 +217,7 @@ def parse_output(output, keep = ("INCLUDE", "LIB", "LIBPATH", "PATH")):
dkeep[key].append(p)
for line in output.splitlines():
- for k,v in rdk.items():
+ for k,v in list(rdk.items()):
m = v.match(line)
if m:
add_env(m, k)
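In Python 3, keys(), values() and items() return live views instead of lists, so the raw fixer conservatively wraps them in list() wherever it cannot prove the result is only iterated. The wrapper is required when the loop mutates the dictionary, and merely redundant in read-only loops such as parse_output() above. Sketch:

counts = {'INCLUDE': 2, 'LIB': 0, 'PATH': 5}
# Deleting from the dict while iterating the live view raises RuntimeError
# in Python 3, so a snapshot is taken first:
for key, value in list(counts.items()):
    if value == 0:
        del counts[key]
print(counts)                      # {'INCLUDE': 2, 'PATH': 5}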
diff --git a/src/engine/SCons/Tool/MSCommon/netframework.py b/src/engine/SCons/Tool/MSCommon/netframework.py
index 6124e5b4..1a6478f6 100644
--- a/src/engine/SCons/Tool/MSCommon/netframework.py
+++ b/src/engine/SCons/Tool/MSCommon/netframework.py
@@ -28,7 +28,7 @@ __doc__ = """
import os
import re
-from common import read_reg, debug
+from .common import read_reg, debug
# Original value recorded by dcournapeau
_FRAMEWORKDIR_HKEY_ROOT = r'Software\Microsoft\.NETFramework\InstallRoot'
@@ -40,7 +40,7 @@ def find_framework_root():
try:
froot = read_reg(_FRAMEWORKDIR_HKEY_ROOT)
debug("Found framework install root in registry: %s" % froot)
- except WindowsError, e:
+ except WindowsError as e:
debug("Could not read reg key %s" % _FRAMEWORKDIR_HKEY_ROOT)
return None
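Python 3 removed implicit relative imports, which is why the intra-package imports in MSCommon are rewritten with an explicit leading dot. A runnable sketch that builds a throwaway package to show the two spellings (mypkg, common and debug are invented names, not part of SCons):

import os, sys, tempfile

root = tempfile.mkdtemp()
os.mkdir(os.path.join(root, 'mypkg'))
open(os.path.join(root, 'mypkg', '__init__.py'), 'w').close()
with open(os.path.join(root, 'mypkg', 'common.py'), 'w') as f:
    f.write("def debug(msg):\n    print(msg)\n")
with open(os.path.join(root, 'mypkg', 'vc.py'), 'w') as f:
    # Python 2 accepted "import common" here; Python 3 needs the dot:
    f.write("from . import common\nfrom .common import debug\n")

sys.path.insert(0, root)
import mypkg.vc
mypkg.vc.debug('explicit relative import resolved')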
diff --git a/src/engine/SCons/Tool/MSCommon/sdk.py b/src/engine/SCons/Tool/MSCommon/sdk.py
index 2bf5eef8..f71035b5 100644
--- a/src/engine/SCons/Tool/MSCommon/sdk.py
+++ b/src/engine/SCons/Tool/MSCommon/sdk.py
@@ -33,7 +33,7 @@ import os
import SCons.Errors
import SCons.Util
-import common
+from . import common
debug = common.debug
@@ -80,7 +80,7 @@ class SDKDefinition(object):
try:
sdk_dir = common.read_reg(hkey)
- except WindowsError, e:
+ except WindowsError as e:
debug('find_sdk_dir(): no SDK registry key %s' % repr(hkey))
return None
@@ -299,7 +299,7 @@ def get_cur_sdk_dir_from_reg():
try:
val = common.read_reg(_CURINSTALLED_SDK_HKEY_ROOT)
debug("Found current sdk dir in registry: %s" % val)
- except WindowsError, e:
+ except WindowsError as e:
debug("Did not find current sdk in registry")
return None
@@ -350,7 +350,7 @@ def mssdk_setup_env(env):
debug('sdk.py:mssdk_setup_env thinks msvs_version is None')
return
msvs_version = env.subst(msvs_version)
- import vs
+ from . import vs
msvs = vs.get_vs_by_version(msvs_version)
debug('sdk.py:mssdk_setup_env:msvs is :%s'%msvs)
if not msvs:
diff --git a/src/engine/SCons/Tool/MSCommon/vc.py b/src/engine/SCons/Tool/MSCommon/vc.py
index 1266ee8d..35b95d5c 100644
--- a/src/engine/SCons/Tool/MSCommon/vc.py
+++ b/src/engine/SCons/Tool/MSCommon/vc.py
@@ -42,11 +42,11 @@ from string import digits as string_digits
import SCons.Warnings
-import common
+from . import common
debug = common.debug
-import sdk
+from . import sdk
get_installed_sdks = sdk.get_installed_sdks
@@ -119,14 +119,14 @@ def get_host_target(env):
try:
host = _ARCH_TO_CANONICAL[host_platform.lower()]
- except KeyError, e:
+ except KeyError as e:
msg = "Unrecognized host architecture %s"
raise ValueError(msg % repr(host_platform))
try:
target = _ARCH_TO_CANONICAL[target_platform.lower()]
- except KeyError, e:
- all_archs = str(_ARCH_TO_CANONICAL.keys())
+ except KeyError as e:
+ all_archs = str(list(_ARCH_TO_CANONICAL.keys()))
raise ValueError("Unrecognized target architecture %s\n\tValid architectures: %s" % (target_platform, all_archs))
return (host, target,req_target_platform)
@@ -168,7 +168,7 @@ def msvc_version_to_maj_min(msvc_version):
maj = int(t[0])
min = int(t[1])
return maj, min
- except ValueError, e:
+ except ValueError as e:
raise ValueError("Unrecognized version %s (%s)" % (msvc_version,msvc_version_numeric))
def is_host_target_supported(host_target, msvc_version):
@@ -217,7 +217,7 @@ def find_vc_pdir(msvc_version):
key = root + key
try:
comps = common.read_reg(key)
- except WindowsError, e:
+ except WindowsError as e:
debug('find_vc_dir(): no VC registry key %s' % repr(key))
else:
debug('find_vc_dir(): found VC in registry: %s' % comps)
@@ -289,7 +289,7 @@ def get_installed_vcs():
installed_versions.append(ver)
else:
debug('find_vc_pdir return None for ver %s' % ver)
- except VisualCException, e:
+ except VisualCException as e:
debug('did not find VC %s: caught exception %s' % (ver, str(e)))
return installed_versions
@@ -393,7 +393,7 @@ def msvc_find_valid_batch_script(env,version):
try:
(vc_script,sdk_script) = find_batch_file(env,version,host_platform,tp)
debug('vc.py:msvc_find_valid_batch_script() vc_script:%s sdk_script:%s'%(vc_script,sdk_script))
- except VisualCException, e:
+ except VisualCException as e:
msg = str(e)
debug('Caught exception while looking for batch file (%s)' % msg)
warn_msg = "VC version %s not installed. " + \
@@ -408,7 +408,7 @@ def msvc_find_valid_batch_script(env,version):
if vc_script:
try:
d = script_env(vc_script, args=arg)
- except BatchFileExecutionError, e:
+ except BatchFileExecutionError as e:
debug('vc.py:msvc_find_valid_batch_script() use_script 3: failed running VC script %s: %s: Error:%s'%(repr(vc_script),arg,e))
vc_script=None
continue
@@ -416,7 +416,7 @@ def msvc_find_valid_batch_script(env,version):
debug('vc.py:msvc_find_valid_batch_script() use_script 4: trying sdk script: %s'%(sdk_script))
try:
d = script_env(sdk_script,args=[])
- except BatchFileExecutionError,e:
+ except BatchFileExecutionError as e:
debug('vc.py:msvc_find_valid_batch_script() use_script 5: failed running SDK script %s: Error:%s'%(repr(sdk_script),e))
continue
elif not vc_script and not sdk_script:
@@ -468,7 +468,7 @@ def msvc_setup_env(env):
SCons.Warnings.warn(SCons.Warnings.VisualCMissingWarning, warn_msg)
return None
- for k, v in d.items():
+ for k, v in list(d.items()):
debug('vc.py:msvc_setup_env() env:%s -> %s'%(k,v))
env.PrependENVPath(k, v, delete_existing=True)
diff --git a/src/engine/SCons/Tool/MSCommon/vs.py b/src/engine/SCons/Tool/MSCommon/vs.py
index d5bf2c3e..3219719e 100644
--- a/src/engine/SCons/Tool/MSCommon/vs.py
+++ b/src/engine/SCons/Tool/MSCommon/vs.py
@@ -31,7 +31,7 @@ import os
import SCons.Errors
import SCons.Util
-from common import debug, \
+from .common import debug, \
get_output, \
is_win64, \
normalize_env, \
@@ -85,7 +85,7 @@ class VisualStudio(object):
key = root + key
try:
comps = read_reg(key)
- except WindowsError, e:
+ except WindowsError as e:
debug('find_vs_dir_by_reg(): no VS registry key %s' % repr(key))
else:
debug('find_vs_dir_by_reg(): found VS in registry: %s' % comps)
@@ -536,7 +536,7 @@ def msvs_setup_env(env):
env['ENV'] = save_ENV
vars = parse_output(output, vars)
- for k, v in vars.items():
+ for k, v in list(vars.items()):
env.PrependENVPath(k, v, delete_existing=1)
def query_versions():
diff --git a/src/engine/SCons/Tool/__init__.py b/src/engine/SCons/Tool/__init__.py
index b80d6e4b..ac180a92 100644
--- a/src/engine/SCons/Tool/__init__.py
+++ b/src/engine/SCons/Tool/__init__.py
@@ -113,7 +113,7 @@ class Tool(object):
finally:
if file:
file.close()
- except ImportError, e:
+ except ImportError as e:
if str(e)!="No module named %s"%self.name:
raise SCons.Errors.EnvironmentError(e)
try:
@@ -125,7 +125,7 @@ class Tool(object):
try:
importer = zipimport.zipimporter(aPath)
return importer.load_module(self.name)
- except ImportError, e:
+ except ImportError as e:
pass
finally:
sys.path = oldpythonpath
@@ -143,7 +143,7 @@ class Tool(object):
if file:
file.close()
return module
- except ImportError, e:
+ except ImportError as e:
if str(e)!="No module named %s"%self.name:
raise SCons.Errors.EnvironmentError(e)
try:
@@ -152,10 +152,10 @@ class Tool(object):
module = importer.load_module(full_name)
setattr(SCons.Tool, self.name, module)
return module
- except ImportError, e:
+ except ImportError as e:
m = "No tool named '%s': %s" % (self.name, e)
raise SCons.Errors.EnvironmentError(m)
- except ImportError, e:
+ except ImportError as e:
m = "No tool named '%s': %s" % (self.name, e)
raise SCons.Errors.EnvironmentError(m)
@@ -254,7 +254,7 @@ def VersionShLibLinkNames(version, libname, env):
suffix_re = re.escape('.' + version + shlib_suffix)
linkname = re.sub(suffix_re, shlib_suffix, libname)
if Verbose:
- print "VersionShLibLinkNames: linkname = ",linkname
+ print("VersionShLibLinkNames: linkname = ",linkname)
linknames.append(linkname)
elif platform == 'posix':
# For libfoo.so.x.y.z, linknames libfoo.so libfoo.so.x.y libfoo.so.x
@@ -262,7 +262,7 @@ def VersionShLibLinkNames(version, libname, env):
# First linkname has no version number
linkname = re.sub(suffix_re, shlib_suffix, libname)
if Verbose:
- print "VersionShLibLinkNames: linkname = ",linkname
+ print("VersionShLibLinkNames: linkname = ",linkname)
linknames.append(linkname)
versionparts = version.split('.')
major_name = linkname + "." + versionparts[0]
@@ -271,7 +271,7 @@ def VersionShLibLinkNames(version, libname, env):
#for linkname in [major_name, minor_name]:
for linkname in [major_name, ]:
if Verbose:
- print "VersionShLibLinkNames: linkname ",linkname, ", target ",libname
+ print("VersionShLibLinkNames: linkname ",linkname, ", target ",libname)
linknames.append(linkname)
# note: no Windows case here (win32 or cygwin);
# MSVC doesn't support this type of versioned shared libs.
@@ -294,10 +294,10 @@ symlinks for the platform we are on"""
shlib_suffix = env.subst('$SHLIBSUFFIX')
shlink_flags = SCons.Util.CLVar(env.subst('$SHLINKFLAGS'))
if Verbose:
- print "VersionShLib: libname = ",libname
- print "VersionShLib: platform = ",platform
- print "VersionShLib: shlib_suffix = ",shlib_suffix
- print "VersionShLib: target = ",str(target[0])
+ print("VersionShLib: libname = ",libname)
+ print("VersionShLib: platform = ",platform)
+ print("VersionShLib: shlib_suffix = ",shlib_suffix)
+ print("VersionShLib: target = ",str(target[0]))
if version:
# set the shared library link flags
@@ -308,7 +308,7 @@ symlinks for the platform we are on"""
soname = re.sub(suffix_re, shlib_suffix, libname) + '.' + major
shlink_flags += [ '-Wl,-Bsymbolic', '-Wl,-soname=%s' % soname ]
if Verbose:
- print " soname ",soname,", shlink_flags ",shlink_flags
+ print(" soname ",soname,", shlink_flags ",shlink_flags)
elif platform == 'cygwin':
shlink_flags += [ '-Wl,-Bsymbolic',
'-Wl,--out-implib,${TARGET.base}.a' ]
@@ -317,7 +317,7 @@ symlinks for the platform we are on"""
'-compatibility_version', '%s' % version,
'-undefined', 'dynamic_lookup' ]
if Verbose:
- print "VersionShLib: shlink_flags = ",shlink_flags
+ print("VersionShLib: shlink_flags = ",shlink_flags)
envlink = env.Clone()
envlink['SHLINKFLAGS'] = shlink_flags
else:
@@ -330,7 +330,7 @@ symlinks for the platform we are on"""
libname = target[0].path
linknames = VersionShLibLinkNames(version, libname, env)
if Verbose:
- print "VerShLib: linknames ",linknames
+ print("VerShLib: linknames ",linknames)
# Here we just need the file name w/o path as the target of the link
lib_ver = target[0].name
# make symlink of adjacent names in linknames
@@ -343,7 +343,7 @@ symlinks for the platform we are on"""
pass
os.symlink(os.path.basename(linkname),lastlinkname)
if Verbose:
- print "VerShLib: made sym link of %s -> %s" % (lastlinkname,linkname)
+ print("VerShLib: made sym link of %s -> %s" % (lastlinkname,linkname))
lastlinkname = linkname
# finish chain of sym links with link to the actual library
if len(linknames)>0:
@@ -353,7 +353,7 @@ symlinks for the platform we are on"""
pass
os.symlink(lib_ver,lastlinkname)
if Verbose:
- print "VerShLib: made sym link of %s -> %s" % (linkname, lib_ver)
+ print("VerShLib: made sym link of %s -> %s" % (linkname, lib_ver))
return result
ShLibAction = SCons.Action.Action(VersionedSharedLibrary, None)
@@ -631,7 +631,7 @@ class ToolInitializer(object):
so we no longer copy and re-bind them when the construction
environment gets cloned.
"""
- for method in self.methods.values():
+ for method in list(self.methods.values()):
env.RemoveMethod(method)
def apply_tools(self, env):
diff --git a/src/engine/SCons/Tool/aixcc.py b/src/engine/SCons/Tool/aixcc.py
index 9668f799..b1da31e4 100644
--- a/src/engine/SCons/Tool/aixcc.py
+++ b/src/engine/SCons/Tool/aixcc.py
@@ -36,7 +36,7 @@ import os.path
import SCons.Platform.aix
-import cc
+from . import cc
packages = ['vac.C', 'ibmcxx.cmp']
diff --git a/src/engine/SCons/Tool/aixf77.py b/src/engine/SCons/Tool/aixf77.py
index a667e843..21786ee0 100644
--- a/src/engine/SCons/Tool/aixf77.py
+++ b/src/engine/SCons/Tool/aixf77.py
@@ -36,7 +36,7 @@ import os.path
#import SCons.Platform.aix
-import f77
+from . import f77
# It would be good to look for the AIX F77 package the same way we're now
# looking for the C and C++ packages. This should be as easy as supplying
diff --git a/src/engine/SCons/Tool/aixlink.py b/src/engine/SCons/Tool/aixlink.py
index 35125220..fc65afb9 100644
--- a/src/engine/SCons/Tool/aixlink.py
+++ b/src/engine/SCons/Tool/aixlink.py
@@ -37,8 +37,8 @@ import os.path
import SCons.Util
-import aixcc
-import link
+from . import aixcc
+from . import link
cplusplus = __import__('c++', globals(), locals(), [])
diff --git a/src/engine/SCons/Tool/applelink.py b/src/engine/SCons/Tool/applelink.py
index 19390982..ba955a4e 100644
--- a/src/engine/SCons/Tool/applelink.py
+++ b/src/engine/SCons/Tool/applelink.py
@@ -37,7 +37,7 @@ import SCons.Util
# Even though the Mac is based on the GNU toolchain, it doesn't understand
# the -rpath option, so we use the "link" tool instead of "gnulink".
-import link
+from . import link
def generate(env):
"""Add Builders and construction variables for applelink to an
diff --git a/src/engine/SCons/Tool/cvf.py b/src/engine/SCons/Tool/cvf.py
index 2a28e6a9..da2c910a 100644
--- a/src/engine/SCons/Tool/cvf.py
+++ b/src/engine/SCons/Tool/cvf.py
@@ -29,7 +29,7 @@ Tool-specific initialization for the Compaq Visual Fortran compiler.
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
-import fortran
+from . import fortran
compilers = ['f90']
diff --git a/src/engine/SCons/Tool/cyglink.py b/src/engine/SCons/Tool/cyglink.py
index 87716cf9..1685d7f9 100644
--- a/src/engine/SCons/Tool/cyglink.py
+++ b/src/engine/SCons/Tool/cyglink.py
@@ -11,7 +11,7 @@ selection method.
import SCons.Action
import SCons.Util
-import gnulink
+from . import gnulink
def shlib_generator(target, source, env, for_signature):
cmd = SCons.Util.CLVar(['$SHLINK'])
diff --git a/src/engine/SCons/Tool/docbook/docbook-xsl-1.76.1/extensions/docbook.py b/src/engine/SCons/Tool/docbook/docbook-xsl-1.76.1/extensions/docbook.py
index c0706023..ef032066 100644
--- a/src/engine/SCons/Tool/docbook/docbook-xsl-1.76.1/extensions/docbook.py
+++ b/src/engine/SCons/Tool/docbook/docbook-xsl-1.76.1/extensions/docbook.py
@@ -82,7 +82,7 @@ def adjustColumnWidths(ctx, nodeset):
relParts.append(relPart)
absParts.append(pixels)
- col = col.next
+ col = col.__next__
# Ok, now we have the relative widths and absolute widths in
# two parallel arrays.
@@ -116,7 +116,7 @@ def adjustColumnWidths(ctx, nodeset):
pixelWidth = convertLength(tableWidth)
if pixelWidth <= absTotal:
- print "Table is wider than table width"
+ print("Table is wider than table width")
else:
pixelWidth = pixelWidth - absTotal
@@ -151,7 +151,7 @@ def adjustColumnWidths(ctx, nodeset):
col.setProp("width", widths[count])
count = count+1
- col = col.next
+ col = col.__next__
return nodeset
@@ -163,10 +163,10 @@ def convertLength(length):
m = re.search('([+-]?[\d\.]+)(\S+)', length)
if m != None and m.lastindex > 1:
unit = pixelsPerInch
- if unitHash.has_key(m.group(2)):
+ if m.group(2) in unitHash:
unit = unitHash[m.group(2)]
else:
- print "Unrecognized length: " + m.group(2)
+ print("Unrecognized length: " + m.group(2))
pixels = unit * float(m.group(1))
else:
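One fixer output in docbook.py deserves a flag: the next fixer targets the iterator protocol, but the raw run also renamed the bare attribute access col.next to col.__next__. In this libxml2-style extension, next appears to be a sibling pointer on the node object rather than an iterator method, so the renamed attribute is unlikely to exist and the line probably needs reverting by hand. For contrast, a minimal sketch of the case the fixer is actually meant for (illustrative class, not from SCons):

class Countdown(object):
    def __init__(self, n):
        self.n = n
    def __iter__(self):
        return self
    def __next__(self):            # was "def next(self)" in Python 2
        if self.n <= 0:
            raise StopIteration
        self.n -= 1
        return self.n

print(list(Countdown(3)))          # [2, 1, 0]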
diff --git a/src/engine/SCons/Tool/docbook/docbook-xsl-1.76.1/extensions/xslt.py b/src/engine/SCons/Tool/docbook/docbook-xsl-1.76.1/extensions/xslt.py
index c712f65f..8554dd1c 100644
--- a/src/engine/SCons/Tool/docbook/docbook-xsl-1.76.1/extensions/xslt.py
+++ b/src/engine/SCons/Tool/docbook/docbook-xsl-1.76.1/extensions/xslt.py
@@ -18,7 +18,7 @@ try:
xmlfile = sys.argv[1]
xslfile = sys.argv[2]
except IndexError:
- print usage
+ print(usage)
sys.exit(1)
def quote(astring):
@@ -38,12 +38,12 @@ try:
while (sys.argv[count]):
try:
name, value = sys.argv[count].split("=", 2)
- if params.has_key(name):
- print "Warning: '%s' re-specified; replacing value" % name
+ if name in params:
+ print("Warning: '%s' re-specified; replacing value" % name)
params[name] = quote(value)
except ValueError:
- print "Invalid parameter specification: '" + sys.argv[count] + "'"
- print usage
+ print("Invalid parameter specification: '" + sys.argv[count] + "'")
+ print(usage)
sys.exit(1)
count = count+1
except IndexError:
@@ -70,7 +70,7 @@ result = style.applyStylesheet(doc, params)
if outfile:
style.saveResultToFilename(outfile, result, 0)
else:
- print result
+ print(result)
# Free things up
style.freeStylesheet()
diff --git a/src/engine/SCons/Tool/dvipdf.py b/src/engine/SCons/Tool/dvipdf.py
index 7c41e9c2..374b9c58 100644
--- a/src/engine/SCons/Tool/dvipdf.py
+++ b/src/engine/SCons/Tool/dvipdf.py
@@ -100,7 +100,7 @@ def generate(env):
if DVIPDFAction is None:
DVIPDFAction = SCons.Action.Action(DviPdfFunction, strfunction = DviPdfStrFunction)
- import pdf
+ from . import pdf
pdf.generate(env)
bld = env['BUILDERS']['PDF']
diff --git a/src/engine/SCons/Tool/f03.py b/src/engine/SCons/Tool/f03.py
index 3aab1c04..6c309715 100644
--- a/src/engine/SCons/Tool/f03.py
+++ b/src/engine/SCons/Tool/f03.py
@@ -36,7 +36,7 @@ __revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import SCons.Defaults
import SCons.Tool
import SCons.Util
-import fortran
+from . import fortran
from SCons.Tool.FortranCommon import add_all_to_env, add_f03_to_env
compilers = ['f03']
diff --git a/src/engine/SCons/Tool/f95.py b/src/engine/SCons/Tool/f95.py
index 5ce5e570..5baa31ee 100644
--- a/src/engine/SCons/Tool/f95.py
+++ b/src/engine/SCons/Tool/f95.py
@@ -36,7 +36,7 @@ __revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import SCons.Defaults
import SCons.Tool
import SCons.Util
-import fortran
+from . import fortran
from SCons.Tool.FortranCommon import add_all_to_env, add_f95_to_env
compilers = ['f95']
diff --git a/src/engine/SCons/Tool/filesystem.py b/src/engine/SCons/Tool/filesystem.py
index 31c8abc0..3b8ee4c8 100644
--- a/src/engine/SCons/Tool/filesystem.py
+++ b/src/engine/SCons/Tool/filesystem.py
@@ -66,7 +66,7 @@ def generate(env):
try:
env['BUILDERS']['CopyTo']
env['BUILDERS']['CopyAs']
- except KeyError, e:
+ except KeyError as e:
global copyToBuilder
if copyToBuilder is None:
copyToBuilder = SCons.Builder.Builder(
diff --git a/src/engine/SCons/Tool/gcc.py b/src/engine/SCons/Tool/gcc.py
index 71f60a3e..4f87b24f 100644
--- a/src/engine/SCons/Tool/gcc.py
+++ b/src/engine/SCons/Tool/gcc.py
@@ -33,7 +33,7 @@ selection method.
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
-import cc
+from . import cc
import os
import re
import subprocess
diff --git a/src/engine/SCons/Tool/gfortran.py b/src/engine/SCons/Tool/gfortran.py
index 4f3e7e46..7b05e683 100644
--- a/src/engine/SCons/Tool/gfortran.py
+++ b/src/engine/SCons/Tool/gfortran.py
@@ -36,7 +36,7 @@ __revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import SCons.Util
-import fortran
+from . import fortran
def generate(env):
"""Add Builders and construction variables for gfortran to an
diff --git a/src/engine/SCons/Tool/gnulink.py b/src/engine/SCons/Tool/gnulink.py
index bf71270f..ea8d7bd1 100644
--- a/src/engine/SCons/Tool/gnulink.py
+++ b/src/engine/SCons/Tool/gnulink.py
@@ -35,7 +35,7 @@ __revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import SCons.Util
-import link
+from . import link
linkers = ['g++', 'gcc']
diff --git a/src/engine/SCons/Tool/gs.py b/src/engine/SCons/Tool/gs.py
index ada169ac..c5506ce1 100644
--- a/src/engine/SCons/Tool/gs.py
+++ b/src/engine/SCons/Tool/gs.py
@@ -57,7 +57,7 @@ def generate(env):
if GhostscriptAction is None:
GhostscriptAction = SCons.Action.Action('$GSCOM', '$GSCOMSTR')
- import pdf
+ from . import pdf
pdf.generate(env)
bld = env['BUILDERS']['PDF']
diff --git a/src/engine/SCons/Tool/hpcc.py b/src/engine/SCons/Tool/hpcc.py
index 30f49648..51d2e380 100644
--- a/src/engine/SCons/Tool/hpcc.py
+++ b/src/engine/SCons/Tool/hpcc.py
@@ -34,7 +34,7 @@ __revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import SCons.Util
-import cc
+from . import cc
def generate(env):
"""Add Builders and construction variables for aCC & cc to an Environment."""
diff --git a/src/engine/SCons/Tool/hplink.py b/src/engine/SCons/Tool/hplink.py
index 17dbe057..10ef30ba 100644
--- a/src/engine/SCons/Tool/hplink.py
+++ b/src/engine/SCons/Tool/hplink.py
@@ -37,7 +37,7 @@ import os.path
import SCons.Util
-import link
+from . import link
ccLinker = None
diff --git a/src/engine/SCons/Tool/icc.py b/src/engine/SCons/Tool/icc.py
index db156422..11ea075f 100644
--- a/src/engine/SCons/Tool/icc.py
+++ b/src/engine/SCons/Tool/icc.py
@@ -33,7 +33,7 @@ selection method.
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
-import cc
+from . import cc
def generate(env):
"""Add Builders and construction variables for the OS/2 to an Environment."""
diff --git a/src/engine/SCons/Tool/ifl.py b/src/engine/SCons/Tool/ifl.py
index 30b3672e..865d2baa 100644
--- a/src/engine/SCons/Tool/ifl.py
+++ b/src/engine/SCons/Tool/ifl.py
@@ -35,7 +35,7 @@ __revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import SCons.Defaults
from SCons.Scanner.Fortran import FortranScan
-from FortranCommon import add_all_to_env
+from .FortranCommon import add_all_to_env
def generate(env):
"""Add Builders and construction variables for ifl to an Environment."""
diff --git a/src/engine/SCons/Tool/ifort.py b/src/engine/SCons/Tool/ifort.py
index 4b2fd658..638bd12d 100644
--- a/src/engine/SCons/Tool/ifort.py
+++ b/src/engine/SCons/Tool/ifort.py
@@ -36,7 +36,7 @@ __revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import SCons.Defaults
from SCons.Scanner.Fortran import FortranScan
-from FortranCommon import add_all_to_env
+from .FortranCommon import add_all_to_env
def generate(env):
"""Add Builders and construction variables for ifort to an Environment."""
diff --git a/src/engine/SCons/Tool/install.py b/src/engine/SCons/Tool/install.py
index 0c81d05d..4236e819 100644
--- a/src/engine/SCons/Tool/install.py
+++ b/src/engine/SCons/Tool/install.py
@@ -82,21 +82,21 @@ def scons_copytree(src, dst, symlinks=False):
else:
shutil.copy2(srcname, dstname)
# XXX What about devices, sockets etc.?
- except (IOError, os.error), why:
+ except (IOError, os.error) as why:
errors.append((srcname, dstname, str(why)))
# catch the CopytreeError from the recursive copytree so that we can
# continue with other files
- except CopytreeError, err:
+ except CopytreeError as err:
errors.extend(err.args[0])
try:
shutil.copystat(src, dst)
except WindowsError:
# can't copy file access times on Windows
pass
- except OSError, why:
+ except OSError as why:
errors.extend((src, dst, str(why)))
if errors:
- raise CopytreeError, errors
+ raise CopytreeError(errors)
#
@@ -174,7 +174,7 @@ def versionedLibVersion(dest, env):
version_File = version_re.findall(versioned_re.findall(libname)[-1])[-1]
if Verbose:
- print "install: version_File ", version_File
+ print("install: version_File ", version_File)
# result is False if we did not find a versioned shared library name, so return and empty list
if not result:
return (None, libname, install_dir)
@@ -188,7 +188,7 @@ def versionedLibVersion(dest, env):
if version != version_File:
#raise SCons.Errors.UserError("SHLIBVERSION '%s' does not match the version # '%s' in the filename" % (version, version_File) )
- print "SHLIBVERSION '%s' does not match the version # '%s' in the filename, proceeding based on file name" % (version, version_File)
+ print("SHLIBVERSION '%s' does not match the version # '%s' in the filename, proceeding based on file name" % (version, version_File))
version = version_File
return (version, libname, install_dir)
@@ -202,7 +202,7 @@ def versionedLibLinks(dest, source, env):
# libname includes the version number if one was given
linknames = SCons.Tool.VersionShLibLinkNames(version,libname,env)
if Verbose:
- print "versionedLibLinks: linknames ",linknames
+ print("versionedLibLinks: linknames ",linknames)
# Here we just need the file name w/o path as the target of the link
lib_ver = libname
# make symlink of adjacent names in linknames
@@ -210,7 +210,7 @@ def versionedLibLinks(dest, source, env):
linkname = linknames[count]
fulllinkname = os.path.join(install_dir, linkname)
if Verbose:
- print "full link name ",fulllinkname
+ print("full link name ",fulllinkname)
if count > 0:
try:
os.remove(lastlinkname)
@@ -218,7 +218,7 @@ def versionedLibLinks(dest, source, env):
pass
os.symlink(os.path.basename(fulllinkname),lastlinkname)
if Verbose:
- print "versionedLibLinks: made sym link of %s -> %s" % (lastlinkname,os.path.basename(fulllinkname))
+ print("versionedLibLinks: made sym link of %s -> %s" % (lastlinkname,os.path.basename(fulllinkname)))
lastlinkname = fulllinkname
# finish chain of sym links with link to the actual library
if len(linknames)>0:
@@ -228,7 +228,7 @@ def versionedLibLinks(dest, source, env):
pass
os.symlink(lib_ver,lastlinkname)
if Verbose:
- print "versionedLibLinks: made sym link of %s -> %s" % (lib_ver,lastlinkname)
+ print("versionedLibLinks: made sym link of %s -> %s" % (lib_ver,lastlinkname))
return
def installFunc(target, source, env):
@@ -298,7 +298,7 @@ def add_versioned_targets_to_INSTALLED_FILES(target, source, env):
Verbose = False
_INSTALLED_FILES.extend(target)
if Verbose:
- print "ver lib emitter ",repr(target)
+ print("ver lib emitter ",repr(target))
# see if we have a versioned shared library, if so generate side effects
version, libname, install_dir = versionedLibVersion(target[0].path, env)
@@ -307,13 +307,13 @@ def add_versioned_targets_to_INSTALLED_FILES(target, source, env):
linknames = SCons.Tool.VersionShLibLinkNames(version,libname,env)
for linkname in linknames:
if Verbose:
- print "make side effect of %s" % os.path.join(install_dir, linkname)
+ print("make side effect of %s" % os.path.join(install_dir, linkname))
fulllinkname = os.path.join(install_dir, linkname)
env.SideEffect(fulllinkname,target[0])
env.Clean(target[0],fulllinkname)
_INSTALLED_FILES.append(fulllinkname)
if Verbose:
- print "installed list ", _INSTALLED_FILES
+ print("installed list ", _INSTALLED_FILES)
_UNIQUE_INSTALLED_FILES = None
return (target, source)
diff --git a/src/engine/SCons/Tool/intelc.py b/src/engine/SCons/Tool/intelc.py
index 42010927..8b178a75 100644
--- a/src/engine/SCons/Tool/intelc.py
+++ b/src/engine/SCons/Tool/intelc.py
@@ -30,7 +30,7 @@ selection method.
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-from __future__ import division
+
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
@@ -206,17 +206,16 @@ def get_all_compiler_versions():
# Registry points to nonexistent dir. Ignore this
# version.
value = get_intel_registry_value('ProductDir', subkey, 'IA32')
- except MissingRegistryError, e:
+ except MissingRegistryError as e:
# Registry key is left dangling (potentially
# after uninstalling).
- print \
- "scons: *** Ignoring the registry key for the Intel compiler version %s.\n" \
+ print("scons: *** Ignoring the registry key for the Intel compiler version %s.\n" \
"scons: *** It seems that the compiler was uninstalled and that the registry\n" \
- "scons: *** was not cleaned up properly.\n" % subkey
+ "scons: *** was not cleaned up properly.\n" % subkey)
else:
- print "scons: *** Ignoring "+str(value)
+ print("scons: *** Ignoring "+str(value))
i = i + 1
except EnvironmentError:
@@ -424,8 +423,8 @@ def generate(env, version=None, abi=None, topdir=None, verbose=0):
bindir="bin"
libdir="lib"
if verbose:
- print "Intel C compiler: using version %s (%g), abi %s, in '%s/%s'"%\
- (repr(version), linux_ver_normalize(version),abi,topdir,bindir)
+ print("Intel C compiler: using version %s (%g), abi %s, in '%s/%s'"%\
+ (repr(version), linux_ver_normalize(version),abi,topdir,bindir))
if is_linux:
# Show the actual compiler version by running the compiler.
os.system('%s/%s/icc --version'%(topdir,bindir))
@@ -439,14 +438,14 @@ def generate(env, version=None, abi=None, topdir=None, verbose=0):
'LIB' : libdir,
'PATH' : bindir,
'LD_LIBRARY_PATH' : libdir}
- for p in paths.keys():
+ for p in list(paths.keys()):
env.PrependENVPath(p, os.path.join(topdir, paths[p]))
if is_mac:
paths={'INCLUDE' : 'include',
'LIB' : libdir,
'PATH' : bindir,
'LD_LIBRARY_PATH' : libdir}
- for p in paths.keys():
+ for p in list(paths.keys()):
env.PrependENVPath(p, os.path.join(topdir, paths[p]))
if is_windows:
# env key reg valname default subdir of top
diff --git a/src/engine/SCons/Tool/latex.py b/src/engine/SCons/Tool/latex.py
index 1c71743a..f30356b2 100644
--- a/src/engine/SCons/Tool/latex.py
+++ b/src/engine/SCons/Tool/latex.py
@@ -55,10 +55,10 @@ def generate(env):
env.AppendUnique(LATEXSUFFIXES=SCons.Tool.LaTeXSuffixes)
- import dvi
+ from . import dvi
dvi.generate(env)
- import pdf
+ from . import pdf
pdf.generate(env)
bld = env['BUILDERS']['DVI']
diff --git a/src/engine/SCons/Tool/link.py b/src/engine/SCons/Tool/link.py
index 3f20fe0e..5539f623 100644
--- a/src/engine/SCons/Tool/link.py
+++ b/src/engine/SCons/Tool/link.py
@@ -81,7 +81,7 @@ def shlib_emitter(target, source, env):
env.SideEffect(name, target[0])
env.Clean(target[0], name)
if Verbose:
- print "shlib_emitter: add side effect - ",name
+ print("shlib_emitter: add side effect - ",name)
except KeyError:
version = None
return (target, source)
@@ -104,16 +104,16 @@ def shlib_emitter_names(target, source, env):
# generate library name with the version number
version_name = target[0].name + '.' + version
if Verbose:
- print "shlib_emitter_names: target is ", version_name
- print "shlib_emitter_names: side effect: ", name
+ print("shlib_emitter_names: target is ", version_name)
+ print("shlib_emitter_names: side effect: ", name)
# add version_name to list of names to be a Side effect
version_names.append(version_name)
if Verbose:
- print "shlib_emitter_names: versionparts ",versionparts
+ print("shlib_emitter_names: versionparts ",versionparts)
for ver in versionparts[0:-1]:
name = name + '.' + ver
if Verbose:
- print "shlib_emitter_names: side effect: ", name
+ print("shlib_emitter_names: side effect: ", name)
# add name to list of names to be a Side effect
version_names.append(name)
elif platform == 'darwin':
@@ -123,8 +123,8 @@ def shlib_emitter_names(target, source, env):
suffix_re = re.escape(shlib_suffix)
version_name = re.sub(suffix_re, '.' + version + shlib_suffix, name)
if Verbose:
- print "shlib_emitter_names: target is ", version_name
- print "shlib_emitter_names: side effect: ", name
+ print("shlib_emitter_names: target is ", version_name)
+ print("shlib_emitter_names: side effect: ", name)
# add version_name to list of names to be a Side effect
version_names.append(version_name)
elif platform == 'cygwin':
@@ -134,8 +134,8 @@ def shlib_emitter_names(target, source, env):
suffix_re = re.escape(shlib_suffix)
version_name = re.sub(suffix_re, '-' + re.sub('\.', '-', version) + shlib_suffix, name)
if Verbose:
- print "shlib_emitter_names: target is ", version_name
- print "shlib_emitter_names: side effect: ", name
+ print("shlib_emitter_names: target is ", version_name)
+ print("shlib_emitter_names: side effect: ", name)
# add version_name to list of names to be a Side effect
version_names.append(version_name)
diff --git a/src/engine/SCons/Tool/midl.py b/src/engine/SCons/Tool/midl.py
index 64b927a0..7a59e334 100644
--- a/src/engine/SCons/Tool/midl.py
+++ b/src/engine/SCons/Tool/midl.py
@@ -39,7 +39,7 @@ import SCons.Defaults
import SCons.Scanner.IDL
import SCons.Util
-from MSCommon import msvc_exists
+from .MSCommon import msvc_exists
def midl_emitter(target, source, env):
"""Produces a list of outputs from the MIDL compiler"""
diff --git a/src/engine/SCons/Tool/msgfmt.py b/src/engine/SCons/Tool/msgfmt.py
index 352ba775..4fe6afd2 100644
--- a/src/engine/SCons/Tool/msgfmt.py
+++ b/src/engine/SCons/Tool/msgfmt.py
@@ -41,7 +41,7 @@ class _MOFileBuilder(BuilderBase):
import SCons.Util
from SCons.Tool.GettextCommon import _read_linguas_from_files
linguas_files = None
- if env.has_key('LINGUAS_FILE') and env['LINGUAS_FILE'] is not None:
+ if 'LINGUAS_FILE' in env and env['LINGUAS_FILE'] is not None:
linguas_files = env['LINGUAS_FILE']
# This should prevent from endless recursion.
env['LINGUAS_FILE'] = None
diff --git a/src/engine/SCons/Tool/msginit.py b/src/engine/SCons/Tool/msginit.py
index 5e9c0e4c..39f460d9 100644
--- a/src/engine/SCons/Tool/msginit.py
+++ b/src/engine/SCons/Tool/msginit.py
@@ -35,7 +35,7 @@ def _optional_no_translator_flag(env):
""" Return '--no-translator' flag if we run *msginit(1)* in non-interactive
mode."""
import SCons.Util
- if env.has_key('POAUTOINIT'):
+ if 'POAUTOINIT' in env:
autoinit = env['POAUTOINIT']
else:
autoinit = False
@@ -66,7 +66,7 @@ def _POInitBuilderWrapper(env, target=None, source=_null, **kw):
if source is _null:
if 'POTDOMAIN' in kw:
domain = kw['POTDOMAIN']
- elif env.has_key('POTDOMAIN'):
+ elif 'POTDOMAIN' in env:
domain = env['POTDOMAIN']
else:
domain = 'messages'
diff --git a/src/engine/SCons/Tool/msgmerge.py b/src/engine/SCons/Tool/msgmerge.py
index f3710ab3..11d7b48e 100644
--- a/src/engine/SCons/Tool/msgmerge.py
+++ b/src/engine/SCons/Tool/msgmerge.py
@@ -58,7 +58,7 @@ def _POUpdateBuilderWrapper(env, target=None, source=_null, **kw):
if source is _null:
if 'POTDOMAIN' in kw:
domain = kw['POTDOMAIN']
- elif env.has_key('POTDOMAIN') and env['POTDOMAIN']:
+ elif 'POTDOMAIN' in env and env['POTDOMAIN']:
domain = env['POTDOMAIN']
else:
domain = 'messages'
diff --git a/src/engine/SCons/Tool/mslib.py b/src/engine/SCons/Tool/mslib.py
index 8a4af57d..df8d877e 100644
--- a/src/engine/SCons/Tool/mslib.py
+++ b/src/engine/SCons/Tool/mslib.py
@@ -39,7 +39,7 @@ import SCons.Tool.msvs
import SCons.Tool.msvc
import SCons.Util
-from MSCommon import msvc_exists, msvc_setup_env_once
+from .MSCommon import msvc_exists, msvc_setup_env_once
def generate(env):
"""Add Builders and construction variables for lib to an Environment."""
diff --git a/src/engine/SCons/Tool/mslink.py b/src/engine/SCons/Tool/mslink.py
index 40f112b6..b56d34a3 100644
--- a/src/engine/SCons/Tool/mslink.py
+++ b/src/engine/SCons/Tool/mslink.py
@@ -44,7 +44,7 @@ import SCons.Tool.msvc
import SCons.Tool.msvs
import SCons.Util
-from MSCommon import msvc_setup_env_once, msvc_exists
+from .MSCommon import msvc_setup_env_once, msvc_exists
def pdbGenerator(env, target, source, for_signature):
try:
@@ -195,7 +195,7 @@ def RegServerFunc(target, source, env):
if ret:
raise SCons.Errors.UserError("Unable to register %s" % target[0])
else:
- print "Registered %s sucessfully" % target[0]
+ print("Registered %s sucessfully" % target[0])
return ret
return 0
@@ -212,10 +212,10 @@ def embedManifestDllCheck(target, source, env):
if os.path.exists(manifestSrc):
ret = (embedManifestDllAction) ([target[0]],None,env)
if ret:
- raise SCons.Errors.UserError, "Unable to embed manifest into %s" % (target[0])
+ raise SCons.Errors.UserError("Unable to embed manifest into %s" % (target[0]))
return ret
else:
- print '(embed: no %s.manifest found; not embedding.)'%str(target[0])
+ print('(embed: no %s.manifest found; not embedding.)'%str(target[0]))
return 0
def embedManifestExeCheck(target, source, env):
@@ -226,10 +226,10 @@ def embedManifestExeCheck(target, source, env):
if os.path.exists(manifestSrc):
ret = (embedManifestExeAction) ([target[0]],None,env)
if ret:
- raise SCons.Errors.UserError, "Unable to embed manifest into %s" % (target[0])
+ raise SCons.Errors.UserError("Unable to embed manifest into %s" % (target[0]))
return ret
else:
- print '(embed: no %s.manifest found; not embedding.)'%str(target[0])
+ print('(embed: no %s.manifest found; not embedding.)'%str(target[0]))
return 0
embedManifestDllCheckAction = SCons.Action.Action(embedManifestDllCheck, None)
diff --git a/src/engine/SCons/Tool/mssdk.py b/src/engine/SCons/Tool/mssdk.py
index 6103f30b..f3730021 100644
--- a/src/engine/SCons/Tool/mssdk.py
+++ b/src/engine/SCons/Tool/mssdk.py
@@ -33,7 +33,7 @@ It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
-from MSCommon import mssdk_exists, \
+from .MSCommon import mssdk_exists, \
mssdk_setup_env
def generate(env):
diff --git a/src/engine/SCons/Tool/msvc.py b/src/engine/SCons/Tool/msvc.py
index d42c2570..0bc296f4 100644
--- a/src/engine/SCons/Tool/msvc.py
+++ b/src/engine/SCons/Tool/msvc.py
@@ -47,7 +47,7 @@ import SCons.Util
import SCons.Warnings
import SCons.Scanner.RC
-from MSCommon import msvc_exists, msvc_setup_env_once
+from .MSCommon import msvc_exists, msvc_setup_env_once
CSuffixes = ['.c', '.C']
CXXSuffixes = ['.cc', '.cpp', '.cxx', '.c++', '.C++']
diff --git a/src/engine/SCons/Tool/msvs.py b/src/engine/SCons/Tool/msvs.py
index 06ce4861..0879a285 100644
--- a/src/engine/SCons/Tool/msvs.py
+++ b/src/engine/SCons/Tool/msvs.py
@@ -51,7 +51,7 @@ import SCons.PathList
import SCons.Util
import SCons.Warnings
-from MSCommon import msvc_exists, msvc_setup_env_once
+from .MSCommon import msvc_exists, msvc_setup_env_once
from SCons.Defaults import processDefines
##############################################################################
@@ -351,13 +351,13 @@ class _DSPGenerator(object):
config.platform = 'Win32'
self.configs[variant] = config
- print "Adding '" + self.name + ' - ' + config.variant + '|' + config.platform + "' to '" + str(dspfile) + "'"
+ print("Adding '" + self.name + ' - ' + config.variant + '|' + config.platform + "' to '" + str(dspfile) + "'")
for i in range(len(variants)):
AddConfig(self, variants[i], buildtarget[i], outdir[i], runfile[i], cmdargs)
self.platforms = []
- for key in self.configs.keys():
+ for key in list(self.configs.keys()):
platform = self.configs[key].platform
if not platform in self.platforms:
self.platforms.append(platform)
@@ -480,7 +480,7 @@ class _GenerateV6DSP(_DSPGenerator):
'Resource Files': 'r|rc|ico|cur|bmp|dlg|rc2|rct|bin|cnt|rtf|gif|jpg|jpeg|jpe',
'Other Files': ''}
- for kind in sorted(categories.keys(), key=lambda a: a.lower()):
+ for kind in sorted(list(categories.keys()), key=lambda a: a.lower()):
if not self.sources[kind]:
continue # skip empty groups
@@ -551,7 +551,7 @@ class _GenerateV6DSP(_DSPGenerator):
def Build(self):
try:
self.file = open(self.dspabs,'w')
- except IOError, detail:
+ except IOError as detail:
raise SCons.Errors.InternalError('Unable to open "' + self.dspabs + '" for writing:' + str(detail))
else:
self.PrintHeader()
@@ -744,7 +744,7 @@ class _GenerateV7DSP(_DSPGenerator):
self.file.write(pdata + '-->\n')
def printSources(self, hierarchy, commonprefix):
- sorteditems = sorted(hierarchy.items(), key=lambda a: a[0].lower())
+ sorteditems = sorted(list(hierarchy.items()), key=lambda a: a[0].lower())
# First folders, then files
for key, value in sorteditems:
@@ -774,7 +774,7 @@ class _GenerateV7DSP(_DSPGenerator):
self.file.write('\t<Files>\n')
- cats = sorted([k for k in categories.keys() if self.sources[k]],
+ cats = sorted([k for k in list(categories.keys()) if self.sources[k]],
key=lambda a: a.lower())
for kind in cats:
if len(cats) > 1:
@@ -861,7 +861,7 @@ class _GenerateV7DSP(_DSPGenerator):
def Build(self):
try:
self.file = open(self.dspabs,'w')
- except IOError, detail:
+ except IOError as detail:
raise SCons.Errors.InternalError('Unable to open "' + self.dspabs + '" for writing:' + str(detail))
else:
self.PrintHeader()
@@ -1029,7 +1029,7 @@ class _GenerateV10DSP(_DSPGenerator):
self.filtersabs = self.dspabs + '.filters'
try:
self.filters_file = open(self.filtersabs, 'w')
- except IOError, detail:
+ except IOError as detail:
raise SCons.Errors.InternalError('Unable to open "' + self.filtersabs + '" for writing:' + str(detail))
self.filters_file.write('<?xml version="1.0" encoding="utf-8"?>\n'
@@ -1055,7 +1055,7 @@ class _GenerateV10DSP(_DSPGenerator):
self.file.write(pdata + '-->\n')
def printFilters(self, hierarchy, name):
- sorteditems = sorted(hierarchy.items(), key = lambda a: a[0].lower())
+ sorteditems = sorted(list(hierarchy.items()), key = lambda a: a[0].lower())
for key, value in sorteditems:
if SCons.Util.is_Dict(value):
@@ -1072,7 +1072,7 @@ class _GenerateV10DSP(_DSPGenerator):
'Resource Files': 'None',
'Other Files': 'None'}
- sorteditems = sorted(hierarchy.items(), key = lambda a: a[0].lower())
+ sorteditems = sorted(list(hierarchy.items()), key = lambda a: a[0].lower())
# First folders, then files
for key, value in sorteditems:
@@ -1098,7 +1098,7 @@ class _GenerateV10DSP(_DSPGenerator):
'Resource Files': 'r;rc;ico;cur;bmp;dlg;rc2;rct;bin;cnt;rtf;gif;jpg;jpeg;jpe',
'Other Files': ''}
- cats = sorted([k for k in categories.keys() if self.sources[k]],
+ cats = sorted([k for k in list(categories.keys()) if self.sources[k]],
key = lambda a: a.lower())
# print vcxproj.filters file first
@@ -1158,12 +1158,12 @@ class _GenerateV10DSP(_DSPGenerator):
'\t</ItemGroup>\n' % str(self.sconscript))
def Parse(self):
- print "_GenerateV10DSP.Parse()"
+ print("_GenerateV10DSP.Parse()")
def Build(self):
try:
self.file = open(self.dspabs, 'w')
- except IOError, detail:
+ except IOError as detail:
raise SCons.Errors.InternalError('Unable to open "' + self.dspabs + '" for writing:' + str(detail))
else:
self.PrintHeader()
@@ -1242,7 +1242,7 @@ class _GenerateV7DSW(_DSWGenerator):
config.platform = 'Win32'
self.configs[variant] = config
- print "Adding '" + self.name + ' - ' + config.variant + '|' + config.platform + "' to '" + str(dswfile) + "'"
+ print("Adding '" + self.name + ' - ' + config.variant + '|' + config.platform + "' to '" + str(dswfile) + "'")
if 'variant' not in env:
raise SCons.Errors.InternalError("You must specify a 'variant' argument (i.e. 'Debug' or " +\
@@ -1254,7 +1254,7 @@ class _GenerateV7DSW(_DSWGenerator):
AddConfig(self, variant)
self.platforms = []
- for key in self.configs.keys():
+ for key in list(self.configs.keys()):
platform = self.configs[key].platform
if not platform in self.platforms:
self.platforms.append(platform)
@@ -1424,7 +1424,7 @@ class _GenerateV7DSW(_DSWGenerator):
def Build(self):
try:
self.file = open(self.dswfile,'w')
- except IOError, detail:
+ except IOError as detail:
raise SCons.Errors.InternalError('Unable to open "' + self.dswfile + '" for writing:' + str(detail))
else:
self.PrintSolution()
@@ -1473,7 +1473,7 @@ class _GenerateV6DSW(_DSWGenerator):
def Build(self):
try:
self.file = open(self.dswfile,'w')
- except IOError, detail:
+ except IOError as detail:
raise SCons.Errors.InternalError('Unable to open "' + self.dswfile + '" for writing:' + str(detail))
else:
self.PrintWorkspace()
@@ -1530,8 +1530,8 @@ def GenerateProject(target, source, env):
if not dspfile is builddspfile:
try:
bdsp = open(str(builddspfile), "w+")
- except IOError, detail:
- print 'Unable to open "' + str(dspfile) + '" for writing:',detail,'\n'
+ except IOError as detail:
+ print('Unable to open "' + str(dspfile) + '" for writing:',detail,'\n')
raise
bdsp.write("This is just a placeholder file.\nThe real project file is here:\n%s\n" % dspfile.get_abspath())
@@ -1546,8 +1546,8 @@ def GenerateProject(target, source, env):
try:
bdsw = open(str(builddswfile), "w+")
- except IOError, detail:
- print 'Unable to open "' + str(dspfile) + '" for writing:',detail,'\n'
+ except IOError as detail:
+ print('Unable to open "' + str(dspfile) + '" for writing:',detail,'\n')
raise
bdsw.write("This is just a placeholder file.\nThe real workspace file is here:\n%s\n" % dswfile.get_abspath())
diff --git a/src/engine/SCons/Tool/msvsTests.py b/src/engine/SCons/Tool/msvsTests.py
index 7d966c1b..1466db6c 100644
--- a/src/engine/SCons/Tool/msvsTests.py
+++ b/src/engine/SCons/Tool/msvsTests.py
@@ -743,7 +743,7 @@ if __name__ == "__main__":
]
for test_class in test_classes:
- print "TEST: ", test_class.__doc__
+ print("TEST: ", test_class.__doc__)
back_osenv = copy.deepcopy(os.environ)
try:
# XXX: overriding the os.environ is bad, but doing it
diff --git a/src/engine/SCons/Tool/packaging/__init__.py b/src/engine/SCons/Tool/packaging/__init__.py
index 95311a2b..c3de2aae 100644
--- a/src/engine/SCons/Tool/packaging/__init__.py
+++ b/src/engine/SCons/Tool/packaging/__init__.py
@@ -72,7 +72,7 @@ def Tag(env, target, source, *more_tags, **kw_tags):
target=env.Flatten(target)
for t in target:
- for (k,v) in kw_tags.items():
+ for (k,v) in list(kw_tags.items()):
# all file tags have to start with PACKAGING_, so we can later
# differentiate between "normal" object attributes and the
# packaging attributes. As the user should not be bothered with
@@ -120,7 +120,7 @@ def Package(env, target=None, source=None, **kw):
try:
file,path,desc=imp.find_module(type, __path__)
return imp.load_module(type, file, path, desc)
- except ImportError, e:
+ except ImportError as e:
raise EnvironmentError("packager %s not available: %s"%(type,str(e)))
packagers=list(map(load_packager, PACKAGETYPE))
@@ -141,7 +141,7 @@ def Package(env, target=None, source=None, **kw):
if 'PACKAGEROOT' not in kw:
kw['PACKAGEROOT'] = default_name%kw
- except KeyError, e:
+ except KeyError as e:
raise SCons.Errors.UserError( "Missing Packagetag '%s'"%e.args[0] )
# setup the source files
@@ -157,10 +157,10 @@ def Package(env, target=None, source=None, **kw):
assert( len(target) == 0 )
- except KeyError, e:
+ except KeyError as e:
raise SCons.Errors.UserError( "Missing Packagetag '%s' for %s packager"\
% (e.args[0],packager.__name__) )
- except TypeError, e:
+ except TypeError as e:
# this exception means that a needed argument for the packager is
# missing. As our packagers get their "tags" as named function
# arguments we need to find out which one is missing.
diff --git a/src/engine/SCons/Tool/packaging/ipk.py b/src/engine/SCons/Tool/packaging/ipk.py
index 65494459..ad27a62d 100644
--- a/src/engine/SCons/Tool/packaging/ipk.py
+++ b/src/engine/SCons/Tool/packaging/ipk.py
@@ -169,7 +169,7 @@ Description: $X_IPK_DESCRIPTION
#
# close all opened files
- for f in opened_files.values():
+ for f in list(opened_files.values()):
f.close()
# call a user specified function
diff --git a/src/engine/SCons/Tool/packaging/msi.py b/src/engine/SCons/Tool/packaging/msi.py
index fe78c9c9..70fdc482 100644
--- a/src/engine/SCons/Tool/packaging/msi.py
+++ b/src/engine/SCons/Tool/packaging/msi.py
@@ -172,7 +172,7 @@ def generate_guids(root):
# find all XMl nodes matching the key, retrieve their attribute, hash their
# subtree, convert hash to string and add as a attribute to the xml node.
- for (key,value) in needs_id.items():
+ for (key,value) in list(needs_id.items()):
node_list = root.getElementsByTagName(key)
attribute = value
for node in node_list:
@@ -216,7 +216,7 @@ def build_wxsfile(target, source, env):
if 'CHANGE_SPECFILE' in env:
env['CHANGE_SPECFILE'](target, source)
- except KeyError, e:
+ except KeyError as e:
raise SCons.Errors.UserError( '"%s" package field for MSI is missing.' % e.args[0] )
#
@@ -335,7 +335,7 @@ def build_wxsfile_file_section(root, files, NAME, VERSION, VENDOR, filename_set,
}
# fill in the default tags given above.
- for k,v in [ (k, v) for (k,v) in h.items() if not hasattr(file, k) ]:
+ for k,v in [ (k, v) for (k,v) in list(h.items()) if not hasattr(file, k) ]:
setattr( file, k, v )
File = factory.createElement( 'File' )
@@ -382,7 +382,7 @@ def build_wxsfile_features_section(root, files, NAME, VERSION, SUMMARY, id_set):
Feature.attributes['Description'] = escape( SUMMARY )
Feature.attributes['Display'] = 'expand'
- for (feature, files) in create_feature_dict(files).items():
+ for (feature, files) in list(create_feature_dict(files).items()):
SubFeature = factory.createElement('Feature')
SubFeature.attributes['Level'] = '1'
diff --git a/src/engine/SCons/Tool/packaging/rpm.py b/src/engine/SCons/Tool/packaging/rpm.py
index 07857d18..4958065a 100644
--- a/src/engine/SCons/Tool/packaging/rpm.py
+++ b/src/engine/SCons/Tool/packaging/rpm.py
@@ -107,7 +107,7 @@ def collectintargz(target, source, env):
try:
#tarball = env['SOURCE_URL'].split('/')[-1]
tarball = env['SOURCE_URL'].split('/')[-1]
- except KeyError, e:
+ except KeyError as e:
raise SCons.Errors.UserError( "Missing PackageTag '%s' for RPM packager" % e.args[0] )
tarball = src_targz.package(env, source=sources, target=tarball,
@@ -143,7 +143,7 @@ def build_specfile(target, source, env):
if 'CHANGE_SPECFILE' in env:
env['CHANGE_SPECFILE'](target, source)
- except KeyError, e:
+ except KeyError as e:
raise SCons.Errors.UserError( '"%s" package field for RPM is missing.' % e.args[0] )
@@ -277,7 +277,7 @@ def build_specfile_filesection(spec, files):
for file in files:
# build the tagset
tags = {}
- for k in supported_tags.keys():
+ for k in list(supported_tags.keys()):
try:
tags[k]=getattr(file, k)
except AttributeError:
@@ -331,7 +331,7 @@ class SimpleTagCompiler(object):
for key, replacement in domestic:
try:
str = str + replacement % values[key]
- except KeyError, e:
+ except KeyError as e:
if self.mandatory:
raise e
@@ -340,11 +340,11 @@ class SimpleTagCompiler(object):
for key, replacement in international:
try:
#int_values_for_key = [ (get_country_code(k),v) for k,v in values.items() if strip_country_code(k) == key ]
- x = [t for t in values.items() if strip_country_code(t[0]) == key]
+ x = [t for t in list(values.items()) if strip_country_code(t[0]) == key]
int_values_for_key = [(get_country_code(t[0]),t[1]) for t in x]
for v in int_values_for_key:
str = str + replacement % v
- except KeyError, e:
+ except KeyError as e:
if self.mandatory:
raise e
diff --git a/src/engine/SCons/Tool/pdflatex.py b/src/engine/SCons/Tool/pdflatex.py
index 922e718a..fbffb239 100644
--- a/src/engine/SCons/Tool/pdflatex.py
+++ b/src/engine/SCons/Tool/pdflatex.py
@@ -62,7 +62,7 @@ def generate(env):
env.AppendUnique(LATEXSUFFIXES=SCons.Tool.LaTeXSuffixes)
- import pdf
+ from . import pdf
pdf.generate(env)
bld = env['BUILDERS']['PDF']
diff --git a/src/engine/SCons/Tool/pdftex.py b/src/engine/SCons/Tool/pdftex.py
index 30c56afb..e9a0bda0 100644
--- a/src/engine/SCons/Tool/pdftex.py
+++ b/src/engine/SCons/Tool/pdftex.py
@@ -85,7 +85,7 @@ def generate(env):
env.AppendUnique(LATEXSUFFIXES=SCons.Tool.LaTeXSuffixes)
- import pdf
+ from . import pdf
pdf.generate(env)
bld = env['BUILDERS']['PDF']
diff --git a/src/engine/SCons/Tool/qt.py b/src/engine/SCons/Tool/qt.py
index 716c7d52..fdfdd26c 100644
--- a/src/engine/SCons/Tool/qt.py
+++ b/src/engine/SCons/Tool/qt.py
@@ -130,12 +130,12 @@ class _Automoc(object):
if not obj.has_builder():
# binary obj file provided
if debug:
- print "scons: qt: '%s' seems to be a binary. Discarded." % str(obj)
+ print("scons: qt: '%s' seems to be a binary. Discarded." % str(obj))
continue
cpp = obj.sources[0]
if not splitext(str(cpp))[1] in cxx_suffixes:
if debug:
- print "scons: qt: '%s' is no cxx file. Discarded." % str(cpp)
+ print("scons: qt: '%s' is no cxx file. Discarded." % str(cpp))
# c or fortran source
continue
#cpp_contents = comment.sub('', cpp.get_text_contents())
@@ -148,12 +148,12 @@ class _Automoc(object):
h = find_file(hname, (cpp.get_dir(),), env.File)
if h:
if debug:
- print "scons: qt: Scanning '%s' (header of '%s')" % (str(h), str(cpp))
+ print("scons: qt: Scanning '%s' (header of '%s')" % (str(h), str(cpp)))
#h_contents = comment.sub('', h.get_text_contents())
h_contents = h.get_text_contents()
break
if not h and debug:
- print "scons: qt: no header for '%s'." % (str(cpp))
+ print("scons: qt: no header for '%s'." % (str(cpp)))
if h and q_object_search.search(h_contents):
# h file with the Q_OBJECT macro found -> add moc_cpp
moc_cpp = env.Moc(h)
@@ -161,14 +161,14 @@ class _Automoc(object):
out_sources.append(moc_o)
#moc_cpp.target_scanner = SCons.Defaults.CScan
if debug:
- print "scons: qt: found Q_OBJECT macro in '%s', moc'ing to '%s'" % (str(h), str(moc_cpp))
+ print("scons: qt: found Q_OBJECT macro in '%s', moc'ing to '%s'" % (str(h), str(moc_cpp)))
if cpp and q_object_search.search(cpp_contents):
# cpp file with Q_OBJECT macro found -> add moc
# (to be included in cpp)
moc = env.Moc(cpp)
env.Ignore(moc, moc)
if debug:
- print "scons: qt: found Q_OBJECT macro in '%s', moc'ing to '%s'" % (str(cpp), str(moc))
+ print("scons: qt: found Q_OBJECT macro in '%s', moc'ing to '%s'" % (str(cpp), str(moc)))
#moc.source_scanner = SCons.Defaults.CScan
# restore the original env attributes (FIXME)
objBuilder.env = objBuilderEnv
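
Most of the volume in this commit is the print statement turning into the print() function, as the qt.py hunks above show. Parenthesizing is all 2to3 does here; comma-separated arguments still come out space-separated, so the debug output is unchanged. A small sketch with invented values:

    debug = True
    obj = "widget.o"
    if debug:
        # Python 2: print "scons: qt: '%s' seems to be a binary. Discarded." % obj
        print("scons: qt: '%s' seems to be a binary. Discarded." % obj)
        # Two arguments print space-separated, matching the old statement:
        print("hashes: ", {".aux": "d41d8cd9"})
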
diff --git a/src/engine/SCons/Tool/rpmutils.py b/src/engine/SCons/Tool/rpmutils.py
index 90e3d744..e96c54c2 100644
--- a/src/engine/SCons/Tool/rpmutils.py
+++ b/src/engine/SCons/Tool/rpmutils.py
@@ -491,7 +491,7 @@ def updateRpmDicts(rpmrc, pyfile):
key = tokens[0]
if key in sections:
# Have we met this section before?
- if not data.has_key(tokens[0]):
+ if tokens[0] not in data:
# No, so insert it
data[key] = {}
# Insert data
@@ -509,7 +509,7 @@ def updateRpmDicts(rpmrc, pyfile):
if l.startswith('# Start of rpmrc dictionaries'):
pm = 1
# Write data sections to single dictionaries
- for key, entries in data.iteritems():
+ for key, entries in data.items():
out.write("%s = {\n" % key)
for arch in sorted(entries.keys()):
out.write(" '%s' : ['%s'],\n" % (arch, "','".join(entries[arch])))
@@ -519,7 +519,7 @@ def updateRpmDicts(rpmrc, pyfile):
pass
def usage():
- print "rpmutils.py rpmrc.in rpmutils.py"
+ print("rpmutils.py rpmrc.in rpmutils.py")
def main():
import sys
diff --git a/src/engine/SCons/Tool/sgicc.py b/src/engine/SCons/Tool/sgicc.py
index 662eb7dd..94a04976 100644
--- a/src/engine/SCons/Tool/sgicc.py
+++ b/src/engine/SCons/Tool/sgicc.py
@@ -33,7 +33,7 @@ selection method.
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
-import cc
+from . import cc
def generate(env):
"""Add Builders and construction variables for gcc to an Environment."""
diff --git a/src/engine/SCons/Tool/sgilink.py b/src/engine/SCons/Tool/sgilink.py
index 6244141e..b1e7921e 100644
--- a/src/engine/SCons/Tool/sgilink.py
+++ b/src/engine/SCons/Tool/sgilink.py
@@ -35,7 +35,7 @@ __revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import SCons.Util
-import link
+from . import link
linkers = ['CC', 'cc']
diff --git a/src/engine/SCons/Tool/suncc.py b/src/engine/SCons/Tool/suncc.py
index 458538b4..4651219b 100644
--- a/src/engine/SCons/Tool/suncc.py
+++ b/src/engine/SCons/Tool/suncc.py
@@ -34,7 +34,7 @@ __revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import SCons.Util
-import cc
+from . import cc
def generate(env):
"""
diff --git a/src/engine/SCons/Tool/sunf77.py b/src/engine/SCons/Tool/sunf77.py
index d05ce541..20d18938 100644
--- a/src/engine/SCons/Tool/sunf77.py
+++ b/src/engine/SCons/Tool/sunf77.py
@@ -35,7 +35,7 @@ __revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import SCons.Util
-from FortranCommon import add_all_to_env
+from .FortranCommon import add_all_to_env
compilers = ['sunf77', 'f77']
diff --git a/src/engine/SCons/Tool/sunf90.py b/src/engine/SCons/Tool/sunf90.py
index 93b89c00..ce1697c3 100644
--- a/src/engine/SCons/Tool/sunf90.py
+++ b/src/engine/SCons/Tool/sunf90.py
@@ -35,7 +35,7 @@ __revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import SCons.Util
-from FortranCommon import add_all_to_env
+from .FortranCommon import add_all_to_env
compilers = ['sunf90', 'f90']
diff --git a/src/engine/SCons/Tool/sunf95.py b/src/engine/SCons/Tool/sunf95.py
index c09026ca..218569c0 100644
--- a/src/engine/SCons/Tool/sunf95.py
+++ b/src/engine/SCons/Tool/sunf95.py
@@ -35,7 +35,7 @@ __revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import SCons.Util
-from FortranCommon import add_all_to_env
+from .FortranCommon import add_all_to_env
compilers = ['sunf95', 'f95']
diff --git a/src/engine/SCons/Tool/sunlink.py b/src/engine/SCons/Tool/sunlink.py
index 5996a301..af133929 100644
--- a/src/engine/SCons/Tool/sunlink.py
+++ b/src/engine/SCons/Tool/sunlink.py
@@ -37,7 +37,7 @@ import os.path
import SCons.Util
-import link
+from . import link
ccLinker = None
diff --git a/src/engine/SCons/Tool/tex.py b/src/engine/SCons/Tool/tex.py
index 5f24df0a..febec352 100644
--- a/src/engine/SCons/Tool/tex.py
+++ b/src/engine/SCons/Tool/tex.py
@@ -163,15 +163,15 @@ def FindFile(name,suffixes,paths,env,requireExt=False):
if ext:
name = name + ext
if Verbose:
- print " searching for '%s' with extensions: " % name,suffixes
+ print(" searching for '%s' with extensions: " % name,suffixes)
for path in paths:
testName = os.path.join(path,name)
if Verbose:
- print " look for '%s'" % testName
+ print(" look for '%s'" % testName)
if os.path.isfile(testName):
if Verbose:
- print " found '%s'" % testName
+ print(" found '%s'" % testName)
return env.fs.File(testName)
else:
name_ext = SCons.Util.splitext(testName)[1]
@@ -182,14 +182,14 @@ def FindFile(name,suffixes,paths,env,requireExt=False):
for suffix in suffixes:
testNameExt = testName + suffix
if Verbose:
- print " look for '%s'" % testNameExt
+ print(" look for '%s'" % testNameExt)
if os.path.isfile(testNameExt):
if Verbose:
- print " found '%s'" % testNameExt
+ print(" found '%s'" % testNameExt)
return env.fs.File(testNameExt)
if Verbose:
- print " did not find '%s'" % name
+ print(" did not find '%s'" % name)
return None
def InternalLaTeXAuxAction(XXXLaTeXAction, target = None, source= None, env=None):
@@ -249,7 +249,7 @@ def InternalLaTeXAuxAction(XXXLaTeXAction, target = None, source= None, env=None
saved_hashes[suffix] = theNode.get_csig()
if Verbose:
- print "hashes: ",saved_hashes
+ print("hashes: ",saved_hashes)
must_rerun_latex = True
@@ -268,12 +268,12 @@ def InternalLaTeXAuxAction(XXXLaTeXAction, target = None, source= None, env=None
if saved_hashes[suffix] == new_md5:
if Verbose:
- print "file %s not changed" % (targetbase+suffix)
+ print("file %s not changed" % (targetbase+suffix))
return False # unchanged
saved_hashes[suffix] = new_md5
must_rerun_latex = True
if Verbose:
- print "file %s changed, rerunning Latex, new hash = " % (targetbase+suffix), new_md5
+ print("file %s changed, rerunning Latex, new hash = " % (targetbase+suffix), new_md5)
return True # changed
# generate the file name that latex will generate
@@ -322,8 +322,8 @@ def InternalLaTeXAuxAction(XXXLaTeXAction, target = None, source= None, env=None
bcffiles = list(dups.keys())
if Verbose:
- print "auxfiles ",auxfiles
- print "bcffiles ",bcffiles
+ print("auxfiles ",auxfiles)
+ print("bcffiles ",bcffiles)
# Now decide if bibtex will need to be run.
# The information that bibtex reads from the .aux file is
@@ -339,7 +339,7 @@ def InternalLaTeXAuxAction(XXXLaTeXAction, target = None, source= None, env=None
content = open(target_aux, "rb").read()
if content.find("bibdata") != -1:
if Verbose:
- print "Need to run bibtex on ",auxfilename
+ print("Need to run bibtex on ",auxfilename)
bibfile = env.fs.File(SCons.Util.splitext(target_aux)[0])
result = BibTeXAction(bibfile, bibfile, env)
if result != 0:
@@ -362,7 +362,7 @@ def InternalLaTeXAuxAction(XXXLaTeXAction, target = None, source= None, env=None
content = open(target_bcf, "rb").read()
if content.find("bibdata") != -1:
if Verbose:
- print "Need to run biber on ",bcffilename
+ print("Need to run biber on ",bcffilename)
bibfile = env.fs.File(SCons.Util.splitext(target_bcf)[0])
result = BiberAction(bibfile, bibfile, env)
if result != 0:
@@ -373,7 +373,7 @@ def InternalLaTeXAuxAction(XXXLaTeXAction, target = None, source= None, env=None
if check_MD5(suffix_nodes['.idx'],'.idx') or (count == 1 and run_makeindex):
# We must run makeindex
if Verbose:
- print "Need to run makeindex"
+ print("Need to run makeindex")
idxfile = suffix_nodes['.idx']
result = MakeIndexAction(idxfile, idxfile, env)
if result != 0:
@@ -391,7 +391,7 @@ def InternalLaTeXAuxAction(XXXLaTeXAction, target = None, source= None, env=None
if check_MD5(suffix_nodes['.nlo'],'.nlo') or (count == 1 and run_nomenclature):
# We must run makeindex
if Verbose:
- print "Need to run makeindex for nomenclature"
+ print("Need to run makeindex for nomenclature")
nclfile = suffix_nodes['.nlo']
result = MakeNclAction(nclfile, nclfile, env)
if result != 0:
@@ -403,7 +403,7 @@ def InternalLaTeXAuxAction(XXXLaTeXAction, target = None, source= None, env=None
if check_MD5(suffix_nodes['.glo'],'.glo') or (count == 1 and run_glossaries) or (count == 1 and run_glossary):
# We must run makeindex
if Verbose:
- print "Need to run makeindex for glossary"
+ print("Need to run makeindex for glossary")
glofile = suffix_nodes['.glo']
result = MakeGlossaryAction(glofile, glofile, env)
if result != 0:
@@ -415,7 +415,7 @@ def InternalLaTeXAuxAction(XXXLaTeXAction, target = None, source= None, env=None
if check_MD5(suffix_nodes['.acn'],'.acn') or (count == 1 and run_acronyms):
# We must run makeindex
if Verbose:
- print "Need to run makeindex for acronyms"
+ print("Need to run makeindex for acronyms")
acrfile = suffix_nodes['.acn']
result = MakeAcronymsAction(acrfile, acrfile, env)
if result != 0:
@@ -428,7 +428,7 @@ def InternalLaTeXAuxAction(XXXLaTeXAction, target = None, source= None, env=None
if check_MD5(suffix_nodes[newglossary_suffix[ig][2]],newglossary_suffix[ig][2]) or (count == 1):
# We must run makeindex
if Verbose:
- print "Need to run makeindex for newglossary"
+ print("Need to run makeindex for newglossary")
newglfile = suffix_nodes[newglossary_suffix[ig][2]]
MakeNewGlossaryAction = SCons.Action.Action("$MAKENEWGLOSSARY ${SOURCE.filebase}%s -s ${SOURCE.filebase}.ist -t ${SOURCE.filebase}%s -o ${SOURCE.filebase}%s" % (newglossary_suffix[ig][2],newglossary_suffix[ig][0],newglossary_suffix[ig][1]), "$MAKENEWGLOSSARYCOMSTR")
@@ -442,26 +442,26 @@ def InternalLaTeXAuxAction(XXXLaTeXAction, target = None, source= None, env=None
if warning_rerun_re.search(logContent):
must_rerun_latex = True
if Verbose:
- print "rerun Latex due to latex or package rerun warning"
+ print("rerun Latex due to latex or package rerun warning")
if rerun_citations_re.search(logContent):
must_rerun_latex = True
if Verbose:
- print "rerun Latex due to 'Rerun to get citations correct' warning"
+ print("rerun Latex due to 'Rerun to get citations correct' warning")
if undefined_references_re.search(logContent):
must_rerun_latex = True
if Verbose:
- print "rerun Latex due to undefined references or citations"
+ print("rerun Latex due to undefined references or citations")
if (count >= int(env.subst('$LATEXRETRIES')) and must_rerun_latex):
- print "reached max number of retries on Latex ,",int(env.subst('$LATEXRETRIES'))
+ print("reached max number of retries on Latex ,",int(env.subst('$LATEXRETRIES')))
# end of while loop
# rename Latex's output to what the target name is
if not (str(target[0]) == resultfilename and os.path.isfile(resultfilename)):
if os.path.isfile(resultfilename):
- print "move %s to %s" % (resultfilename, str(target[0]), )
+ print("move %s to %s" % (resultfilename, str(target[0]), ))
shutil.move(resultfilename,str(target[0]))
# Original comment (when TEXPICTS was not restored):
@@ -515,27 +515,27 @@ def is_LaTeX(flist,env,abspath):
else:
env['ENV']['TEXINPUTS'] = savedpath
if Verbose:
- print "is_LaTeX search path ",paths
- print "files to search :",flist
+ print("is_LaTeX search path ",paths)
+ print("files to search :",flist)
# Now that we have the search path and file list, check each one
for f in flist:
if Verbose:
- print " checking for Latex source ",str(f)
+ print(" checking for Latex source ",str(f))
content = f.get_text_contents()
if LaTeX_re.search(content):
if Verbose:
- print "file %s is a LaTeX file" % str(f)
+ print("file %s is a LaTeX file" % str(f))
return 1
if Verbose:
- print "file %s is not a LaTeX file" % str(f)
+ print("file %s is not a LaTeX file" % str(f))
# now find included files
inc_files = [ ]
inc_files.extend( include_re.findall(content) )
if Verbose:
- print "files included by '%s': "%str(f),inc_files
+ print("files included by '%s': "%str(f),inc_files)
# inc_files is list of file names as given. need to find them
# using TEXINPUTS paths.
@@ -545,7 +545,7 @@ def is_LaTeX(flist,env,abspath):
# make this a list since is_LaTeX takes a list.
fileList = [srcNode,]
if Verbose:
- print "FindFile found ",srcNode
+ print("FindFile found ",srcNode)
if srcNode is not None:
file_test = is_LaTeX(fileList, env, abspath)
@@ -554,7 +554,7 @@ def is_LaTeX(flist,env,abspath):
return file_test
if Verbose:
- print " done scanning ",str(f)
+ print(" done scanning ",str(f))
return 0
@@ -619,15 +619,15 @@ def ScanFiles(theFile, target, paths, file_tests, file_tests_search, env, graphi
content = theFile.get_text_contents()
if Verbose:
- print " scanning ",str(theFile)
+ print(" scanning ",str(theFile))
for i in range(len(file_tests_search)):
if file_tests[i][0] is None:
if Verbose:
- print "scan i ",i," files_tests[i] ",file_tests[i], file_tests[i][1]
+ print("scan i ",i," files_tests[i] ",file_tests[i], file_tests[i][1])
file_tests[i][0] = file_tests_search[i].search(content)
if Verbose and file_tests[i][0]:
- print " found match for ",file_tests[i][1][-1]
+ print(" found match for ",file_tests[i][1][-1])
# for newglossary insert the suffixes in file_tests[i]
if file_tests[i][0] and file_tests[i][1][-1] == 'newglossary':
findresult = file_tests_search[i].findall(content)
@@ -638,19 +638,19 @@ def ScanFiles(theFile, target, paths, file_tests, file_tests_search, env, graphi
suffix_list = ['.'+findresult[l][0],'.'+findresult[l][2],'.'+findresult[l][3] ]
newglossary_suffix.append(suffix_list)
if Verbose:
- print " new suffixes for newglossary ",newglossary_suffix
+ print(" new suffixes for newglossary ",newglossary_suffix)
incResult = includeOnly_re.search(content)
if incResult:
aux_files.append(os.path.join(targetdir, incResult.group(1)))
if Verbose:
- print "\include file names : ", aux_files
+ print("\include file names : ", aux_files)
# recursively call this on each of the included files
inc_files = [ ]
inc_files.extend( include_re.findall(content) )
if Verbose:
- print "files included by '%s': "%str(theFile),inc_files
+ print("files included by '%s': "%str(theFile),inc_files)
# inc_files is list of file names as given. need to find them
# using TEXINPUTS paths.
@@ -659,7 +659,7 @@ def ScanFiles(theFile, target, paths, file_tests, file_tests_search, env, graphi
if srcNode is not None:
file_tests = ScanFiles(srcNode, target, paths, file_tests, file_tests_search, env, graphics_extensions, targetdir, aux_files)
if Verbose:
- print " done scanning ",str(theFile)
+ print(" done scanning ",str(theFile))
return file_tests
def tex_emitter_core(target, source, env, graphics_extensions):
@@ -689,7 +689,7 @@ def tex_emitter_core(target, source, env, graphics_extensions):
env.SideEffect(logfilename,target[0])
env.SideEffect(flsfilename,target[0])
if Verbose:
- print "side effect :",auxfilename,logfilename,flsfilename
+ print("side effect :",auxfilename,logfilename,flsfilename)
env.Clean(target[0],auxfilename)
env.Clean(target[0],logfilename)
env.Clean(target[0],flsfilename)
@@ -765,7 +765,7 @@ def tex_emitter_core(target, source, env, graphics_extensions):
else:
env['ENV']['TEXINPUTS'] = savedpath
if Verbose:
- print "search path ",paths
+ print("search path ",paths)
# scan all sources for side effect files
aux_files = []
@@ -774,7 +774,7 @@ def tex_emitter_core(target, source, env, graphics_extensions):
for (theSearch,suffix_list) in file_tests:
# add side effects if feature is present.If file is to be generated,add all side effects
if Verbose and theSearch:
- print "check side effects for ",suffix_list[-1]
+ print("check side effects for ",suffix_list[-1])
if (theSearch != None) or (not source[0].exists() ):
file_list = [targetbase,]
# for bibunit we need a list of files
@@ -788,11 +788,11 @@ def tex_emitter_core(target, source, env, graphics_extensions):
if suffix_list[-1] == 'multibib':
for multibibmatch in multibib_re.finditer(content):
if Verbose:
- print "multibib match ",multibibmatch.group(1)
+ print("multibib match ",multibibmatch.group(1))
if multibibmatch != None:
baselist = multibibmatch.group(1).split(',')
if Verbose:
- print "multibib list ", baselist
+ print("multibib list ", baselist)
for i in range(len(baselist)):
file_list.append(os.path.join(targetdir, baselist[i]))
# now define the side effects
@@ -800,14 +800,14 @@ def tex_emitter_core(target, source, env, graphics_extensions):
for suffix in suffix_list[:-1]:
env.SideEffect(file_name + suffix,target[0])
if Verbose:
- print "side effect tst :",file_name + suffix, " target is ",str(target[0])
+ print("side effect tst :",file_name + suffix, " target is ",str(target[0]))
env.Clean(target[0],file_name + suffix)
for aFile in aux_files:
aFile_base = SCons.Util.splitext(aFile)[0]
env.SideEffect(aFile_base + '.aux',target[0])
if Verbose:
- print "side effect aux :",aFile_base + '.aux'
+ print("side effect aux :",aFile_base + '.aux')
env.Clean(target[0],aFile_base + '.aux')
# read fls file to get all other files that latex creates and will read on the next pass
# remove files from list that we explicitly dealt with above
@@ -820,7 +820,7 @@ def tex_emitter_core(target, source, env, graphics_extensions):
out_files.remove(filename)
env.SideEffect(out_files,target[0])
if Verbose:
- print "side effect fls :",out_files
+ print("side effect fls :",out_files)
env.Clean(target[0],out_files)
return (target, source)
@@ -840,7 +840,7 @@ def generate(env):
generate_common(env)
- import dvi
+ from . import dvi
dvi.generate(env)
bld = env['BUILDERS']['DVI']
diff --git a/src/engine/SCons/Tool/textfile.py b/src/engine/SCons/Tool/textfile.py
index 8dc8f4b8..4897113f 100644
--- a/src/engine/SCons/Tool/textfile.py
+++ b/src/engine/SCons/Tool/textfile.py
@@ -54,6 +54,7 @@ import re
from SCons.Node import Node
from SCons.Node.Python import Value
from SCons.Util import is_String, is_Sequence, is_Dict
+import collections
def _do_subst(node, subs):
"""
@@ -96,7 +97,7 @@ def _action(target, source, env):
raise SCons.Errors.UserError('SUBST_DICT must be dict or sequence')
subs = []
for (k,v) in d:
- if callable(v):
+ if isinstance(v, collections.Callable):
v = v()
if is_String(v):
v = env.subst(v)
@@ -107,7 +108,7 @@ def _action(target, source, env):
# write the file
try:
fd = open(target[0].get_path(), "wb")
- except (OSError,IOError), e:
+ except (OSError,IOError) as e:
raise SCons.Errors.UserError("Can't write target file %s" % target[0])
# separate lines by 'linesep' only if linesep is not empty
lsep = None
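
textfile.py (and Util.py, cpp.py, and scons-time.py later on) swap the callable() builtin for an isinstance check against collections.Callable; callable() was removed in Python 3.0 and only returned in 3.2, so this is what 2to3 emits. On current Python the ABC lives in collections.abc. A sketch of the equivalent test, with a made-up substitution value:

    import collections.abc

    def resolve(value):
        # Mirrors the converted _action logic: call it if it is callable.
        if isinstance(value, collections.abc.Callable):
            value = value()
        return value

    print(resolve(lambda: "generated at build time"))   # calls the lambda
    print(resolve("plain string"))                      # passed through unchanged
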
diff --git a/src/engine/SCons/Tool/xgettext.py b/src/engine/SCons/Tool/xgettext.py
index 64436b8e..489d4d77 100644
--- a/src/engine/SCons/Tool/xgettext.py
+++ b/src/engine/SCons/Tool/xgettext.py
@@ -55,7 +55,7 @@ class _CmdRunner(object):
proc = SCons.Action._subproc(env, command, **kw)
self.out, self.err = proc.communicate()
self.status = proc.wait()
- if self.err: sys.stderr.write(unicode(self.err))
+ if self.err: sys.stderr.write(str(self.err))
return self.status
def strfunction(self, target, source, env):
@@ -153,7 +153,7 @@ from SCons.Builder import BuilderBase
class _POTBuilder(BuilderBase):
def _execute(self, env, target, source, *args):
if not target:
- if env.has_key('POTDOMAIN') and env['POTDOMAIN']:
+ if 'POTDOMAIN' in env and env['POTDOMAIN']:
domain = env['POTDOMAIN']
else:
domain = 'messages'
@@ -175,7 +175,7 @@ def _scan_xgettext_from_files(target, source, env, files = None, path = None):
files = [ files ]
if path is None:
- if env.has_key('XGETTEXTPATH'):
+ if 'XGETTEXTPATH' in env:
path = env['XGETTEXTPATH']
else:
path = []
@@ -222,7 +222,7 @@ def _pot_update_emitter(target, source, env):
import SCons.Util
import SCons.Node.FS
- if env.has_key('XGETTEXTFROM'):
+ if 'XGETTEXTFROM' in env:
xfrom = env['XGETTEXTFROM']
else:
return target, source
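
The xgettext.py hunks (and the rpmutils.py one earlier) retire dict.has_key(), which no longer exists in Python 3; membership is spelled with the in operator instead. For example, with an invented environment dict:

    env = {'POTDOMAIN': 'scons'}

    # Python 2 allowed env.has_key('POTDOMAIN'); Python 3 only has "in".
    if 'POTDOMAIN' in env and env['POTDOMAIN']:
        domain = env['POTDOMAIN']
    else:
        domain = 'messages'
    print(domain)   # -> scons
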
diff --git a/src/engine/SCons/Util.py b/src/engine/SCons/Util.py
index 822d5249..f2e5325f 100644
--- a/src/engine/SCons/Util.py
+++ b/src/engine/SCons/Util.py
@@ -33,15 +33,16 @@ import re
import types
from collections import UserDict, UserList, UserString
+import collections
# Don't "from types import ..." these because we need to get at the
# types module later to look for UnicodeType.
InstanceType = types.InstanceType
MethodType = types.MethodType
FunctionType = types.FunctionType
-try: unicode
+try: str
except NameError: UnicodeType = None
-else: UnicodeType = unicode
+else: UnicodeType = str
def dictify(keys, values, result={}):
for k, v in zip(keys, values):
@@ -111,7 +112,7 @@ class NodeList(UserList):
>>> someList.strip()
[ 'foo', 'bar' ]
"""
- def __nonzero__(self):
+ def __bool__(self):
return len(self.data) != 0
def __str__(self):
@@ -153,7 +154,7 @@ class DisplayEngine(object):
return
if append_newline: text = text + '\n'
try:
- sys.stdout.write(unicode(text))
+ sys.stdout.write(str(text))
except IOError:
# Stdout might be connected to a pipe that has been closed
# by now. The most likely reason for the pipe being closed
@@ -239,7 +240,7 @@ def print_tree(root, child_func, prune=0, showtags=0, margin=[0], visited={}):
' N = no clean\n' +
' H = no cache\n' +
'\n')
- sys.stdout.write(unicode(legend))
+ sys.stdout.write(str(legend))
tags = ['[']
tags.append(' E'[IDX(root.exists())])
@@ -264,10 +265,10 @@ def print_tree(root, child_func, prune=0, showtags=0, margin=[0], visited={}):
children = child_func(root)
if prune and rname in visited and children:
- sys.stdout.write(''.join(tags + margins + ['+-[', rname, ']']) + u'\n')
+ sys.stdout.write(''.join(tags + margins + ['+-[', rname, ']']) + '\n')
return
- sys.stdout.write(''.join(tags + margins + ['+-', rname]) + u'\n')
+ sys.stdout.write(''.join(tags + margins + ['+-', rname]) + '\n')
visited[rname] = 1
@@ -303,11 +304,11 @@ SequenceTypes = (list, tuple, UserList)
# Note that profiling data shows a speed-up when comparing
# explicitely with str and unicode instead of simply comparing
# with basestring. (at least on Python 2.5.1)
-StringTypes = (str, unicode, UserString)
+StringTypes = (str, str, UserString)
# Empirically, it is faster to check explicitely for str and
# unicode than for basestring.
-BaseStringTypes = (str, unicode)
+BaseStringTypes = (str, str)
def is_Dict(obj, isinstance=isinstance, DictTypes=DictTypes):
return isinstance(obj, DictTypes)
@@ -440,7 +441,7 @@ _semi_deepcopy_dispatch = d = {}
def semi_deepcopy_dict(x, exclude = [] ):
copy = {}
- for key, val in x.items():
+ for key, val in list(x.items()):
# The regular Python copy.deepcopy() also deepcopies the key,
# as follows:
#
@@ -465,7 +466,7 @@ def semi_deepcopy(x):
if copier:
return copier(x)
else:
- if hasattr(x, '__semi_deepcopy__') and callable(x.__semi_deepcopy__):
+ if hasattr(x, '__semi_deepcopy__') and isinstance(x.__semi_deepcopy__, collections.Callable):
return x.__semi_deepcopy__()
elif isinstance(x, UserDict):
return x.__class__(semi_deepcopy_dict(x))
@@ -718,7 +719,7 @@ else:
# raised so as to not mask possibly serious disk or
# network issues.
continue
- if stat.S_IMODE(st[stat.ST_MODE]) & 0111:
+ if stat.S_IMODE(st[stat.ST_MODE]) & 0o111:
try:
reject.index(f)
except ValueError:
@@ -979,7 +980,7 @@ class OrderedDict(UserDict):
if key not in self._keys: self._keys.append(key)
def update(self, dict):
- for (key, val) in dict.items():
+ for (key, val) in list(dict.items()):
self.__setitem__(key, val)
def values(self):
@@ -1001,7 +1002,7 @@ class Selector(OrderedDict):
# Try to perform Environment substitution on the keys of
# the dictionary before giving up.
s_dict = {}
- for (k,v) in self.items():
+ for (k,v) in list(self.items()):
if k is not None:
s_k = env.subst(k)
if s_k in s_dict:
@@ -1360,7 +1361,7 @@ def AddMethod(obj, function, name=None):
print a.listIndex(5)
"""
if name is None:
- name = function.func_name
+ name = function.__name__
else:
function = RenameFunction(function, name)
@@ -1376,10 +1377,10 @@ def RenameFunction(function, name):
Returns a function identical to the specified function, but with
the specified name.
"""
- return FunctionType(function.func_code,
- function.func_globals,
+ return FunctionType(function.__code__,
+ function.__globals__,
name,
- function.func_defaults)
+ function.__defaults__)
md5 = False
@@ -1461,7 +1462,7 @@ class Null(object):
return self
def __repr__(self):
return "Null(0x%08X)" % id(self)
- def __nonzero__(self):
+ def __bool__(self):
return False
def __getattr__(self, name):
return self
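
Three more renames recur in the Util.py hunks: the truth-value hook __nonzero__ is called __bool__ in Python 3, the function attributes func_code/func_globals/func_defaults become __code__/__globals__/__defaults__, and bare octal literals such as 0111 need the 0o prefix. A compact illustration (the Null class here is a stand-in, not SCons.Util.Null):

    class Null(object):
        def __bool__(self):             # Python 2 name: __nonzero__
            return False

    def greet(name):
        return "hi " + name

    print(bool(Null()))                                  # False, via __bool__
    print(greet.__name__, greet.__code__.co_argcount)    # py2: func_name, func_code
    print(0o111 == 73)                                   # octal needs 0o in Python 3
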
diff --git a/src/engine/SCons/UtilTests.py b/src/engine/SCons/UtilTests.py
index a1e67567..a30404c3 100644
--- a/src/engine/SCons/UtilTests.py
+++ b/src/engine/SCons/UtilTests.py
@@ -219,7 +219,7 @@ class UtilTestCase(unittest.TestCase):
assert not is_Dict(())
assert not is_Dict("")
if HasUnicode:
- exec "assert not is_Dict(u'')"
+ exec("assert not is_Dict(u'')")
def test_is_List(self):
assert is_List([])
@@ -235,12 +235,12 @@ class UtilTestCase(unittest.TestCase):
assert not is_List({})
assert not is_List("")
if HasUnicode:
- exec "assert not is_List(u'')"
+ exec("assert not is_List(u'')")
def test_is_String(self):
assert is_String("")
if HasUnicode:
- exec "assert is_String(u'')"
+ exec("assert is_String(u'')")
assert is_String(UserString(''))
try:
class mystr(str):
@@ -266,7 +266,7 @@ class UtilTestCase(unittest.TestCase):
assert not is_Tuple({})
assert not is_Tuple("")
if HasUnicode:
- exec "assert not is_Tuple(u'')"
+ exec("assert not is_Tuple(u'')")
def test_to_String(self):
"""Test the to_String() method."""
@@ -285,16 +285,16 @@ class UtilTestCase(unittest.TestCase):
assert to_String(s2) == 'foo', s2
if HasUnicode:
- s3=UserString(unicode('bar'))
+ s3=UserString(str('bar'))
assert to_String(s3) == s3, s3
- assert to_String(s3) == unicode('bar'), s3
- assert isinstance(to_String(s3), unicode), \
+ assert to_String(s3) == str('bar'), s3
+ assert isinstance(to_String(s3), str), \
type(to_String(s3))
if HasUnicode:
- s4 = unicode('baz')
- assert to_String(s4) == unicode('baz'), to_String(s4)
- assert isinstance(to_String(s4), unicode), \
+ s4 = str('baz')
+ assert to_String(s4) == str('baz'), to_String(s4)
+ assert isinstance(to_String(s4), str), \
type(to_String(s4))
def test_WhereIs(self):
@@ -313,10 +313,10 @@ class UtilTestCase(unittest.TestCase):
os.mkdir(sub2_xxx_exe)
test.write(sub3_xxx_exe, "\n")
- os.chmod(sub3_xxx_exe, 0777)
+ os.chmod(sub3_xxx_exe, 0o777)
test.write(sub4_xxx_exe, "\n")
- os.chmod(sub4_xxx_exe, 0777)
+ os.chmod(sub4_xxx_exe, 0o777)
env_path = os.environ['PATH']
@@ -681,7 +681,7 @@ bling
fobj = io.StringIO(content)
except TypeError:
# Python 2.7 and beyond require unicode strings.
- fobj = io.StringIO(unicode(content))
+ fobj = io.StringIO(str(content))
lines = LogicalLines(fobj).readlines()
assert lines == [
@@ -696,7 +696,7 @@ bling
s1 = silent_intern("spam")
# Python 3.x does not have a unicode() global function
if sys.version[0] == '2':
- s2 = silent_intern(unicode("unicode spam"))
+ s2 = silent_intern(str("unicode spam"))
s3 = silent_intern(42)
s4 = silent_intern("spam")
assert id(s1) == id(s4)
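
Many UtilTests.py changes simply substitute str for the vanished unicode builtin, since every Python 3 str is already Unicode text. The raw 2to3 output leaves some redundancies behind, such as the duplicate entry in the (str, str, UserString) tuple in Util.py, which is consistent with this commit being labelled a checkpoint. The basics:

    text = "caf\u00e9"
    print(isinstance(text, str))   # True: every Python 3 str is Unicode text
    print(str(42))                 # str() now covers what unicode() used to do
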
diff --git a/src/engine/SCons/Variables/EnumVariableTests.py b/src/engine/SCons/Variables/EnumVariableTests.py
index f4b600d7..4feb7126 100644
--- a/src/engine/SCons/Variables/EnumVariableTests.py
+++ b/src/engine/SCons/Variables/EnumVariableTests.py
@@ -122,7 +122,7 @@ class EnumVariableTestCase(unittest.TestCase):
'C' : ['C', 'three', 'three'],
}
- for k, l in table.items():
+ for k, l in list(table.items()):
x = o0.converter(k)
assert x == l[0], "o0 got %s, expected %s" % (x, l[0])
x = o1.converter(k)
@@ -186,7 +186,7 @@ class EnumVariableTestCase(unittest.TestCase):
'no_v' : [invalid, invalid, invalid],
}
- for v, l in table.items():
+ for v, l in list(table.items()):
l[0](o0, v)
l[1](o1, v)
l[2](o2, v)
diff --git a/src/engine/SCons/Variables/PathVariableTests.py b/src/engine/SCons/Variables/PathVariableTests.py
index 084154bd..2fa46eb8 100644
--- a/src/engine/SCons/Variables/PathVariableTests.py
+++ b/src/engine/SCons/Variables/PathVariableTests.py
@@ -65,7 +65,7 @@ class PathVariableTestCase(unittest.TestCase):
dne = test.workpath('does_not_exist')
try:
o.validator('X', dne, {})
- except SCons.Errors.UserError, e:
+ except SCons.Errors.UserError as e:
assert str(e) == 'Path for option X does not exist: %s' % dne, e
except:
raise Exception("did not catch expected UserError")
@@ -89,7 +89,7 @@ class PathVariableTestCase(unittest.TestCase):
f = test.workpath('file')
try:
o.validator('X', f, {})
- except SCons.Errors.UserError, e:
+ except SCons.Errors.UserError as e:
assert str(e) == 'Directory path for option X is a file: %s' % f, e
except:
raise Exception("did not catch expected UserError")
@@ -97,7 +97,7 @@ class PathVariableTestCase(unittest.TestCase):
dne = test.workpath('does_not_exist')
try:
o.validator('X', dne, {})
- except SCons.Errors.UserError, e:
+ except SCons.Errors.UserError as e:
assert str(e) == 'Directory path for option X does not exist: %s' % dne, e
except:
raise Exception("did not catch expected UserError")
@@ -122,7 +122,7 @@ class PathVariableTestCase(unittest.TestCase):
f = test.workpath('file')
try:
o.validator('X', f, {})
- except SCons.Errors.UserError, e:
+ except SCons.Errors.UserError as e:
assert str(e) == 'Path for option X is a file, not a directory: %s' % f, e
except:
raise Exception("did not catch expected UserError")
@@ -146,7 +146,7 @@ class PathVariableTestCase(unittest.TestCase):
d = test.workpath('d')
try:
o.validator('X', d, {})
- except SCons.Errors.UserError, e:
+ except SCons.Errors.UserError as e:
assert str(e) == 'File path for option X does not exist: %s' % d, e
except:
raise Exception("did not catch expected UserError")
@@ -154,7 +154,7 @@ class PathVariableTestCase(unittest.TestCase):
dne = test.workpath('does_not_exist')
try:
o.validator('X', dne, {})
- except SCons.Errors.UserError, e:
+ except SCons.Errors.UserError as e:
assert str(e) == 'File path for option X does not exist: %s' % dne, e
except:
raise Exception("did not catch expected UserError")
@@ -198,7 +198,7 @@ class PathVariableTestCase(unittest.TestCase):
dne = test.workpath('does_not_exist')
try:
o.validator('X', dne, {})
- except SCons.Errors.UserError, e:
+ except SCons.Errors.UserError as e:
expect = 'Path for option X does not exist: %s' % dne
assert str(e) == expect, e
else:
@@ -217,7 +217,7 @@ class PathVariableTestCase(unittest.TestCase):
try:
o.validator('Y', 'value', {})
- except Exception, e:
+ except Exception as e:
assert str(e) == 'my_validator() got called for Y, value!', e
else:
raise Exception("did not catch expected exception from my_validator()")
diff --git a/src/engine/SCons/Variables/VariablesTests.py b/src/engine/SCons/Variables/VariablesTests.py
index ad46bd6d..520c8e37 100644
--- a/src/engine/SCons/Variables/VariablesTests.py
+++ b/src/engine/SCons/Variables/VariablesTests.py
@@ -55,7 +55,7 @@ def check(key, value, env):
def checkSave(file, expected):
gdict = {}
ldict = {}
- exec open(file, 'rU').read() in gdict, ldict
+ exec(open(file, 'rU').read(), gdict, ldict)
assert expected == ldict, "%s\n...not equal to...\n%s" % (expected, ldict)
class VariablesTestCase(unittest.TestCase):
diff --git a/src/engine/SCons/Variables/__init__.py b/src/engine/SCons/Variables/__init__.py
index ede74803..8d15b8d3 100644
--- a/src/engine/SCons/Variables/__init__.py
+++ b/src/engine/SCons/Variables/__init__.py
@@ -36,11 +36,11 @@ import SCons.Errors
import SCons.Util
import SCons.Warnings
-from BoolVariable import BoolVariable # okay
-from EnumVariable import EnumVariable # okay
-from ListVariable import ListVariable # naja
-from PackageVariable import PackageVariable # naja
-from PathVariable import PathVariable # okay
+from .BoolVariable import BoolVariable # okay
+from .EnumVariable import EnumVariable # okay
+from .ListVariable import ListVariable # naja
+from .PackageVariable import PackageVariable # naja
+from .PathVariable import PathVariable # okay
class Variables(object):
@@ -170,7 +170,7 @@ class Variables(object):
sys.path.insert(0, dir)
try:
values['__name__'] = filename
- exec open(filename, 'rU').read() in {}, values
+ exec(open(filename, 'rU').read(), {}, values)
finally:
if dir:
del sys.path[0]
@@ -180,7 +180,7 @@ class Variables(object):
if args is None:
args = self.args
- for arg, value in args.items():
+ for arg, value in list(args.items()):
added = False
for option in self.options:
if arg in list(option.aliases) + [ option.key ]:
@@ -206,7 +206,7 @@ class Variables(object):
env[option.key] = option.converter(value)
except TypeError:
env[option.key] = option.converter(value, env)
- except ValueError, x:
+ except ValueError as x:
raise SCons.Errors.UserError('Error converting option: %s\n%s'%(option.key, x))
@@ -268,7 +268,7 @@ class Variables(object):
finally:
fh.close()
- except IOError, x:
+ except IOError as x:
raise SCons.Errors.UserError('Error writing options to file: %s\n%s' % (filename, x))
def GenerateHelpText(self, env, sort=None):
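
The Variables code also shows exec changing from a statement to a function: 'exec code in gdict, ldict' becomes exec(code, gdict, ldict), with the same globals/locals semantics. A self-contained sketch using an invented options snippet:

    gdict = {}
    ldict = {}
    exec("RELEASE = 'yes'\nDEBUG_LEVEL = 2", gdict, ldict)   # py2: exec code in gdict, ldict
    print(ldict)   # {'RELEASE': 'yes', 'DEBUG_LEVEL': 2}
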
diff --git a/src/engine/SCons/compat/__init__.py b/src/engine/SCons/compat/__init__.py
index c870fbc4..6f1a7eec 100644
--- a/src/engine/SCons/compat/__init__.py
+++ b/src/engine/SCons/compat/__init__.py
@@ -87,7 +87,7 @@ def rename_module(new, old):
rename_module('builtins', '__builtin__')
-import _scons_builtins
+from . import _scons_builtins
try:
diff --git a/src/engine/SCons/compat/_scons_subprocess.py b/src/engine/SCons/compat/_scons_subprocess.py
index eebe53d3..72581f7d 100644
--- a/src/engine/SCons/compat/_scons_subprocess.py
+++ b/src/engine/SCons/compat/_scons_subprocess.py
@@ -439,22 +439,22 @@ except TypeError:
def is_int(obj):
return isinstance(obj, type(1))
def is_int_or_long(obj):
- return type(obj) in (type(1), type(1L))
+ return type(obj) in (type(1), type(1))
else:
def is_int(obj):
return isinstance(obj, int)
def is_int_or_long(obj):
- return isinstance(obj, (int, long))
+ return isinstance(obj, int)
try:
- types.StringTypes
+ str
except AttributeError:
try:
- types.StringTypes = (str, unicode)
+ str = (str, str)
except NameError:
- types.StringTypes = (str,)
+ str = (str,)
def is_string(obj):
- return isinstance(obj, types.StringTypes)
+ return isinstance(obj, str)
_active = []
@@ -785,7 +785,7 @@ class Popen(object):
errread, errwrite):
"""Execute program (MS Windows version)"""
- if not isinstance(args, types.StringTypes):
+ if not isinstance(args, str):
args = list2cmdline(args)
# Process startup details
@@ -802,7 +802,7 @@ class Popen(object):
startupinfo.wShowWindow = SW_HIDE
comspec = os.environ.get("COMSPEC", "cmd.exe")
args = comspec + " /c " + args
- if (GetVersion() >= 0x80000000L or
+ if (GetVersion() >= 0x80000000 or
os.path.basename(comspec).lower() == "command.com"):
# Win9x, or using command.com on NT. We need to
# use the w9xpopen intermediate program. For more
@@ -830,7 +830,7 @@ class Popen(object):
env,
cwd,
startupinfo)
- except pywintypes.error, e:
+ except pywintypes.error as e:
# Translate pywintypes.error to WindowsError, which is
# a subclass of OSError. FIXME: We should really
# translate errno using _sys_errlist (or simliar), but
@@ -1215,8 +1215,8 @@ def _demo_posix():
# Example 1: Simple redirection: Get process list
#
plist = Popen(["ps"], stdout=PIPE).communicate()[0]
- print "Process list:"
- print plist
+ print("Process list:")
+ print(plist)
#
# Example 2: Change uid before executing child
@@ -1228,25 +1228,25 @@ def _demo_posix():
#
# Example 3: Connecting several subprocesses
#
- print "Looking for 'hda'..."
+ print("Looking for 'hda'...")
p1 = Popen(["dmesg"], stdout=PIPE)
p2 = Popen(["grep", "hda"], stdin=p1.stdout, stdout=PIPE)
- print repr(p2.communicate()[0])
+ print(repr(p2.communicate()[0]))
#
# Example 4: Catch execution error
#
- print
- print "Trying a weird file..."
+ print()
+ print("Trying a weird file...")
try:
- print Popen(["/this/path/does/not/exist"]).communicate()
- except OSError, e:
+ print(Popen(["/this/path/does/not/exist"]).communicate())
+ except OSError as e:
if e.errno == errno.ENOENT:
- print "The file didn't exist. I thought so..."
- print "Child traceback:"
- print e.child_traceback
+ print("The file didn't exist. I thought so...")
+ print("Child traceback:")
+ print(e.child_traceback)
else:
- print "Error", e.errno
+ print("Error", e.errno)
else:
sys.stderr.write( "Gosh. No error.\n" )
@@ -1255,15 +1255,15 @@ def _demo_windows():
#
# Example 1: Connecting several subprocesses
#
- print "Looking for 'PROMPT' in set output..."
+ print("Looking for 'PROMPT' in set output...")
p1 = Popen("set", stdout=PIPE, shell=True)
p2 = Popen('find "PROMPT"', stdin=p1.stdout, stdout=PIPE)
- print repr(p2.communicate()[0])
+ print(repr(p2.communicate()[0]))
#
# Example 2: Simple execution of program
#
- print "Executing calc..."
+ print("Executing calc...")
p = Popen("calc")
p.wait()
diff --git a/src/engine/SCons/cpp.py b/src/engine/SCons/cpp.py
index 0ba10f59..4cc771b8 100644
--- a/src/engine/SCons/cpp.py
+++ b/src/engine/SCons/cpp.py
@@ -31,6 +31,7 @@ import SCons.compat
import os
import re
+import collections
#
# First "subsystem" of regular expressions that we set up:
@@ -72,7 +73,7 @@ cpp_lines_dict = {
# the corresponding compiled regular expression that fetches the arguments
# we care about.
Table = {}
-for op_list, expr in cpp_lines_dict.items():
+for op_list, expr in list(cpp_lines_dict.items()):
e = re.compile(expr)
for op in op_list:
Table[op] = e
@@ -87,7 +88,7 @@ del op_list
override = {
'if' : 'if(?!def)',
}
-l = [override.get(x, x) for x in Table.keys()]
+l = [override.get(x, x) for x in list(Table.keys())]
# Turn the list of expressions into one big honkin' regular expression
@@ -130,7 +131,7 @@ CPP_to_Python_Ops_Sub = lambda m: CPP_to_Python_Ops_Dict[m.group(0)]
# re module, as late as version 2.2.2, empirically matches the
# "!" in "!=" first, instead of finding the longest match.
# What's up with that?
-l = sorted(CPP_to_Python_Ops_Dict.keys(), key=lambda a: len(a), reverse=True)
+l = sorted(list(CPP_to_Python_Ops_Dict.keys()), key=lambda a: len(a), reverse=True)
# Turn the list of keys into one regular expression that will allow us
# to substitute all of the operators at once.
@@ -266,7 +267,7 @@ class PreProcessor(object):
d = {
'scons_current_file' : self.scons_current_file
}
- for op in Table.keys():
+ for op in list(Table.keys()):
d[op] = getattr(self, 'do_' + op)
self.default_table = d
@@ -552,7 +553,7 @@ class PreProcessor(object):
except KeyError:
m = function_name.search(s)
s = self.cpp_namespace[m.group(1)]
- if callable(s):
+ if isinstance(s, collections.Callable):
args = function_arg_separator.split(m.group(2))
s = s(*args)
if not s:
diff --git a/src/engine/SCons/cppTests.py b/src/engine/SCons/cppTests.py
index 2f2025be..5566e530 100644
--- a/src/engine/SCons/cppTests.py
+++ b/src/engine/SCons/cppTests.py
@@ -27,7 +27,7 @@ import atexit
import sys
import unittest
-import cpp
+from . import cpp
diff --git a/src/engine/SCons/dblite.py b/src/engine/SCons/dblite.py
index f4ba90a1..89b98567 100644
--- a/src/engine/SCons/dblite.py
+++ b/src/engine/SCons/dblite.py
@@ -14,20 +14,20 @@ keep_all_files = 00000
ignore_corrupt_dbfiles = 0
def corruption_warning(filename):
- print "Warning: Discarding corrupt database:", filename
+ print("Warning: Discarding corrupt database:", filename)
-try: unicode
+try: str
except NameError:
def is_string(s):
return isinstance(s, str)
else:
def is_string(s):
- return type(s) in (str, unicode)
+ return type(s) in (str, str)
try:
- unicode('a')
+ str('a')
except NameError:
- def unicode(s): return s
+ def str(s): return s
dblite_suffix = '.dblite'
tmp_suffix = '.tmp'
@@ -77,7 +77,7 @@ class dblite(object):
statinfo = os.stat(self._file_name)
self._chown_to = statinfo.st_uid
self._chgrp_to = statinfo.st_gid
- except OSError, e:
+ except OSError as e:
# db file doesn't exist yet.
# Check os.environ for SUDO_UID, use if set
self._chown_to = int(os.environ.get('SUDO_UID', -1))
@@ -90,7 +90,7 @@ class dblite(object):
else:
try:
f = self._open(self._file_name, "rb")
- except IOError, e:
+ except IOError as e:
if (self._flag != "c"):
raise e
self._open(self._file_name, "wb", self._mode)
@@ -122,7 +122,7 @@ class dblite(object):
# (e.g. from a previous run as root). We should still be able to
# unlink() the file if the directory's writable, though, so ignore
# any OSError exception thrown by the chmod() call.
- try: self._os_chmod(self._file_name, 0777)
+ try: self._os_chmod(self._file_name, 0o777)
except OSError: pass
self._os_unlink(self._file_name)
self._os_rename(self._tmp_name, self._file_name)
@@ -151,7 +151,7 @@ class dblite(object):
if (not is_string(value)):
raise TypeError("value `%s' must be a string but is %s" % (value, type(value)))
self._dict[key] = value
- self._needs_sync = 0001
+ self._needs_sync = 0o001
def keys(self):
return list(self._dict.keys())
@@ -171,7 +171,7 @@ class dblite(object):
def __len__(self):
return len(self._dict)
-def open(file, flag=None, mode=0666):
+def open(file, flag=None, mode=0o666):
return dblite(file, flag, mode)
def _exercise():
@@ -179,26 +179,26 @@ def _exercise():
assert len(db) == 0
db["foo"] = "bar"
assert db["foo"] == "bar"
- db[unicode("ufoo")] = unicode("ubar")
- assert db[unicode("ufoo")] == unicode("ubar")
+ db[str("ufoo")] = str("ubar")
+ assert db[str("ufoo")] == str("ubar")
db.sync()
db = open("tmp", "c")
assert len(db) == 2, len(db)
assert db["foo"] == "bar"
db["bar"] = "foo"
assert db["bar"] == "foo"
- db[unicode("ubar")] = unicode("ufoo")
- assert db[unicode("ubar")] == unicode("ufoo")
+ db[str("ubar")] = str("ufoo")
+ assert db[str("ubar")] == str("ufoo")
db.sync()
db = open("tmp", "r")
assert len(db) == 4, len(db)
assert db["foo"] == "bar"
assert db["bar"] == "foo"
- assert db[unicode("ufoo")] == unicode("ubar")
- assert db[unicode("ubar")] == unicode("ufoo")
+ assert db[str("ufoo")] == str("ubar")
+ assert db[str("ubar")] == str("ufoo")
try:
db.sync()
- except IOError, e:
+ except IOError as e:
assert str(e) == "Read-only database: tmp.dblite"
else:
raise RuntimeError("IOError expected.")
@@ -208,13 +208,13 @@ def _exercise():
db.sync()
try:
db[(1,2)] = "tuple"
- except TypeError, e:
+ except TypeError as e:
assert str(e) == "key `(1, 2)' must be a string but is <type 'tuple'>", str(e)
else:
raise RuntimeError("TypeError exception expected")
try:
db["list"] = [1,2]
- except TypeError, e:
+ except TypeError as e:
assert str(e) == "value `[1, 2]' must be a string but is <type 'list'>", str(e)
else:
raise RuntimeError("TypeError exception expected")
@@ -238,11 +238,11 @@ def _exercise():
os.unlink("tmp.dblite")
try:
db = open("tmp", "w")
- except IOError, e:
+ except IOError as e:
assert str(e) == "[Errno 2] No such file or directory: 'tmp.dblite'", str(e)
else:
raise RuntimeError("IOError expected.")
- print "OK"
+ print("OK")
if (__name__ == "__main__"):
_exercise()
diff --git a/src/script/scons-time.py b/src/script/scons-time.py
index 3b215f98..4296192d 100644
--- a/src/script/scons-time.py
+++ b/src/script/scons-time.py
@@ -29,8 +29,8 @@
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-from __future__ import division
-from __future__ import nested_scopes
+
+
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
@@ -42,6 +42,7 @@ import shutil
import sys
import tempfile
import time
+import collections
try:
sorted
@@ -109,8 +110,8 @@ def HACK_for_exec(cmd, *args):
internal functions.
'''
if not args: exec(cmd)
- elif len(args) == 1: exec cmd in args[0]
- else: exec cmd in args[0], args[1]
+ elif len(args) == 1: exec(cmd, args[0])
+ else: exec(cmd, args[0], args[1])
class Plotter(object):
def increment_size(self, largest):
@@ -146,7 +147,7 @@ class Line(object):
def print_label(self, inx, x, y):
if self.label:
- print 'set label %s "%s" at %s,%s right' % (inx, self.label, x, y)
+ print('set label %s "%s" at %s,%s right' % (inx, self.label, x, y))
def plot_string(self):
if self.title:
@@ -159,15 +160,15 @@ class Line(object):
if fmt is None:
fmt = self.fmt
if self.comment:
- print '# %s' % self.comment
+ print('# %s' % self.comment)
for x, y in self.points:
# If y is None, it usually represents some kind of break
# in the line's index number. We might want to represent
# this some way rather than just drawing the line straight
# between the two points on either side.
if not y is None:
- print fmt % (x, y)
- print 'e'
+ print(fmt % (x, y))
+ print('e')
def get_x_values(self):
return [ p[0] for p in self.points ]
@@ -253,8 +254,8 @@ class Gnuplotter(Plotter):
return
if self.title:
- print 'set title "%s"' % self.title
- print 'set key %s' % self.key_location
+ print('set title "%s"' % self.title)
+ print('set key %s' % self.key_location)
min_y = self.get_min_y()
max_y = self.max_graph_value(self.get_max_y())
@@ -269,7 +270,7 @@ class Gnuplotter(Plotter):
inx += 1
plot_strings = [ self.plot_string(l) for l in self.lines ]
- print 'plot ' + ', \\\n '.join(plot_strings)
+ print('plot ' + ', \\\n '.join(plot_strings))
for line in self.lines:
line.print_points()
@@ -455,7 +456,7 @@ class SConsTimer(object):
Each message is prepended with a standard prefix of our name
plus the time.
"""
- if callable(msg):
+ if isinstance(msg, collections.Callable):
msg = msg(*args)
else:
msg = msg % args
@@ -474,7 +475,7 @@ class SConsTimer(object):
The action is called if it's a callable Python function, and
otherwise passed to os.system().
"""
- if callable(action):
+ if isinstance(action, collections.Callable):
action(*args)
else:
os.system(action % args)
@@ -540,7 +541,7 @@ class SConsTimer(object):
header_fmt = ' '.join(['%12s'] * len(columns))
line_fmt = header_fmt + ' %s'
- print header_fmt % columns
+ print(header_fmt % columns)
for file in files:
t = line_function(file, *args, **kw)
@@ -550,7 +551,7 @@ class SConsTimer(object):
if diff > 0:
t += [''] * diff
t.append(file_function(file))
- print line_fmt % tuple(t)
+ print(line_fmt % tuple(t))
def collect_results(self, files, function, *args, **kw):
results = {}
@@ -690,13 +691,13 @@ class SConsTimer(object):
"""
try:
import pstats
- except ImportError, e:
+ except ImportError as e:
sys.stderr.write('%s: func: %s\n' % (self.name, e))
sys.stderr.write('%s This version of Python is missing the profiler.\n' % self.name_spaces)
sys.stderr.write('%s Cannot use the "func" subcommand.\n' % self.name_spaces)
sys.exit(1)
statistics = pstats.Stats(file).stats
- matches = [ e for e in statistics.items() if e[0][2] == function ]
+ matches = [ e for e in list(statistics.items()) if e[0][2] == function ]
r = matches[0]
return r[0][0], r[0][1], r[0][2], r[1][3]
@@ -751,7 +752,7 @@ class SConsTimer(object):
return self.default(argv)
try:
return func(argv)
- except TypeError, e:
+ except TypeError as e:
sys.stderr.write("%s %s: %s\n" % (self.name, cmdName, e))
import traceback
traceback.print_exc(file=sys.stderr)
@@ -856,7 +857,7 @@ class SConsTimer(object):
self.title = a
if self.config_file:
- exec open(self.config_file, 'rU').read() in self.__dict__
+ exec(open(self.config_file, 'rU').read(), self.__dict__)
if self.chdir:
os.chdir(self.chdir)
@@ -889,13 +890,13 @@ class SConsTimer(object):
try:
f, line, func, time = \
self.get_function_profile(file, function_name)
- except ValueError, e:
+ except ValueError as e:
sys.stderr.write("%s: func: %s: %s\n" %
(self.name, file, e))
else:
if f.startswith(cwd_):
f = f[len(cwd_):]
- print "%.3f %s:%d(%s)" % (time, f, line, func)
+ print("%.3f %s:%d(%s)" % (time, f, line, func))
elif format == 'gnuplot':
@@ -1233,7 +1234,7 @@ class SConsTimer(object):
sys.exit(1)
if self.config_file:
- exec open(self.config_file, 'rU').read() in self.__dict__
+ exec(open(self.config_file, 'rU').read(), self.__dict__)
if args:
self.archive_list = args
@@ -1466,7 +1467,7 @@ class SConsTimer(object):
elif o in ('--title',):
self.title = a
elif o in ('--which',):
- if not a in self.time_strings.keys():
+ if not a in list(self.time_strings.keys()):
sys.stderr.write('%s: time: Unrecognized timer "%s".\n' % (self.name, a))
sys.stderr.write('%s Type "%s help time" for help.\n' % (self.name_spaces, self.name))
sys.exit(1)
diff --git a/src/script/sconsign.py b/src/script/sconsign.py
index e5e9d4f9..323d1bff 100644
--- a/src/script/sconsign.py
+++ b/src/script/sconsign.py
@@ -171,7 +171,7 @@ sys.path = libs + sys.path
import SCons.compat # so pickle will import cPickle instead
-import whichdb
+import dbm
import time
import pickle
import imp
@@ -189,8 +189,8 @@ def my_whichdb(filename):
pass
return _orig_whichdb(filename)
-_orig_whichdb = whichdb.whichdb
-whichdb.whichdb = my_whichdb
+_orig_whichdb = dbm.whichdb
+dbm.whichdb = my_whichdb
def my_import(mname):
if '.' in mname:
@@ -310,14 +310,14 @@ def printfield(name, entry, prefix=""):
outlist = field("implicit", entry, 0)
if outlist:
if Verbose:
- print " implicit:"
- print " " + outlist
+ print(" implicit:")
+ print(" " + outlist)
outact = field("action", entry, 0)
if outact:
if Verbose:
- print " action: " + outact
+ print(" action: " + outact)
else:
- print " " + outact
+ print(" " + outact)
def printentries(entries, location):
if Print_Entries:
@@ -330,9 +330,9 @@ def printentries(entries, location):
try:
ninfo = entry.ninfo
except AttributeError:
- print name + ":"
+ print(name + ":")
else:
- print nodeinfo_string(name, entry.ninfo)
+ print(nodeinfo_string(name, entry.ninfo))
printfield(name, entry.binfo)
else:
for name in sorted(entries.keys()):
@@ -340,9 +340,9 @@ def printentries(entries, location):
try:
ninfo = entry.ninfo
except AttributeError:
- print name + ":"
+ print(name + ":")
else:
- print nodeinfo_string(name, entry.ninfo)
+ print(nodeinfo_string(name, entry.ninfo))
printfield(name, entry.binfo)
class Do_SConsignDB(object):
@@ -361,7 +361,7 @@ class Do_SConsignDB(object):
# .sconsign => .sconsign.dblite
# .sconsign.dblite => .sconsign.dblite.dblite
db = self.dbm.open(fname, "r")
- except (IOError, OSError), e:
+ except (IOError, OSError) as e:
print_e = e
try:
# That didn't work, so try opening the base name,
@@ -375,7 +375,7 @@ class Do_SConsignDB(object):
# suffix-mangling).
try:
open(fname, "r")
- except (IOError, OSError), e:
+ except (IOError, OSError) as e:
# Nope, that file doesn't even exist, so report that
# fact back.
print_e = e
@@ -386,7 +386,7 @@ class Do_SConsignDB(object):
except pickle.UnpicklingError:
sys.stderr.write("sconsign: ignoring invalid `%s' file `%s'\n" % (self.dbm_name, fname))
return
- except Exception, e:
+ except Exception as e:
sys.stderr.write("sconsign: ignoring invalid `%s' file `%s': %s\n" % (self.dbm_name, fname, e))
return
@@ -403,13 +403,13 @@ class Do_SConsignDB(object):
self.printentries(dir, db[dir])
def printentries(self, dir, val):
- print '=== ' + dir + ':'
+ print('=== ' + dir + ':')
printentries(pickle.loads(val), dir)
def Do_SConsignDir(name):
try:
fp = open(name, 'rb')
- except (IOError, OSError), e:
+ except (IOError, OSError) as e:
sys.stderr.write("sconsign: %s\n" % (e))
return
try:
@@ -419,7 +419,7 @@ def Do_SConsignDir(name):
except pickle.UnpicklingError:
sys.stderr.write("sconsign: ignoring invalid .sconsign file `%s'\n" % (name))
return
- except Exception, e:
+ except Exception as e:
sys.stderr.write("sconsign: ignoring invalid .sconsign file `%s': %s\n" % (name, e))
return
printentries(sconsign.entries, args[0])
@@ -471,13 +471,13 @@ for o, a in opts:
dbm = my_import(dbm_name)
except:
sys.stderr.write("sconsign: illegal file format `%s'\n" % a)
- print helpstr
+ print(helpstr)
sys.exit(2)
Do_Call = Do_SConsignDB(a, dbm)
else:
Do_Call = Do_SConsignDir
elif o in ('-h', '--help'):
- print helpstr
+ print(helpstr)
sys.exit(0)
elif o in ('-i', '--implicit'):
Print_Flags['implicit'] = 1
@@ -497,7 +497,7 @@ if Do_Call:
Do_Call(a)
else:
for a in args:
- dbm_name = whichdb.whichdb(a)
+ dbm_name = dbm.whichdb(a)
if dbm_name:
Map_Module = {'SCons.dblite' : 'dblite'}
dbm = my_import(dbm_name)
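
sconsign.py's switch from the whichdb module to dbm reflects Python 3 folding whichdb into the dbm package; dbm.whichdb() keeps the old signature, returning None for a missing file, '' for an unrecognized format, or the submodule name otherwise. For instance (the path is made up):

    import dbm

    kind = dbm.whichdb("/tmp/example/.sconsign.dblite")
    print(kind)   # None here, since the invented path does not exist
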
diff --git a/src/test_files.py b/src/test_files.py
index 7d8e75e2..d71329b2 100644
--- a/src/test_files.py
+++ b/src/test_files.py
@@ -77,7 +77,7 @@ check = {
missing = []
no_result = []
-for directory, check_list in check.items():
+for directory, check_list in list(check.items()):
if os.path.exists(directory):
for c in check_list:
f = os.path.join(directory, c)
@@ -87,13 +87,13 @@ for directory, check_list in check.items():
no_result.append(directory)
if missing:
- print "Missing the following files:\n"
- print "\t" + "\n\t".join(missing)
+ print("Missing the following files:\n")
+ print("\t" + "\n\t".join(missing))
test.fail_test(1)
if no_result:
- print "Cannot check files, the following have apparently not been built:"
- print "\t" + "\n\t".join(no_result)
+ print("Cannot check files, the following have apparently not been built:")
+ print("\t" + "\n\t".join(no_result))
test.no_result(1)
test.pass_test()
diff --git a/src/test_interrupts.py b/src/test_interrupts.py
index fb12e2ad..1e027a1c 100644
--- a/src/test_interrupts.py
+++ b/src/test_interrupts.py
@@ -102,7 +102,7 @@ for f in files:
indent_list.append( (line_num, match.group('try_or_except') ) )
try_except_lines[match.group('indent')] = indent_list
uncaught_this_file = []
- for indent in try_except_lines.keys():
+ for indent in list(try_except_lines.keys()):
exc_keyboardint_seen = 0
exc_all_seen = 0
for (l,statement) in try_except_lines[indent] + [(-1,indent + 'try')]:
@@ -129,9 +129,9 @@ for f in files:
if expected_num != len(uncaught_this_file):
uncaughtKeyboardInterrupt = 1
msg = "%s: expected %d uncaught interrupts, got %d:"
- print msg % (f, expected_num, len(uncaught_this_file))
+ print(msg % (f, expected_num, len(uncaught_this_file)))
for line in uncaught_this_file:
- print " File %s:%d: Uncaught KeyboardInterrupt!" % (f,line)
+ print(" File %s:%d: Uncaught KeyboardInterrupt!" % (f,line))
test.fail_test(uncaughtKeyboardInterrupt)
diff --git a/src/test_pychecker.py b/src/test_pychecker.py
index f87d303e..24aa966a 100644
--- a/src/test_pychecker.py
+++ b/src/test_pychecker.py
@@ -139,7 +139,7 @@ for file in files:
mismatches.append(stderr)
if mismatches:
- print ''.join(mismatches[1:])
+ print(''.join(mismatches[1:]))
test.fail_test()
test.pass_test()
diff --git a/src/test_setup.py b/src/test_setup.py
index 29d36bf3..731fbe76 100644
--- a/src/test_setup.py
+++ b/src/test_setup.py
@@ -197,9 +197,9 @@ if not os.path.isdir(scons_version) and os.path.isfile(tar_gz):
os.system("gunzip -c %s | tar xf -" % tar_gz)
if not os.path.isdir(scons_version):
- print "Cannot test package installation, found none of the following packages:"
- print "\t" + tar_gz
- print "\t" + zip
+ print("Cannot test package installation, found none of the following packages:")
+ print("\t" + tar_gz)
+ print("\t" + zip)
test.no_result(1)
# Verify that a virgin installation installs the version library,
diff --git a/src/test_strings.py b/src/test_strings.py
index 3288d5f6..b57c7140 100644
--- a/src/test_strings.py
+++ b/src/test_strings.py
@@ -248,13 +248,13 @@ for collector in check_list:
not_built.append(collector.directory)
if missing_strings:
- print "Found the following files with missing strings:"
- print "\t" + "\n\t".join(missing_strings)
+ print("Found the following files with missing strings:")
+ print("\t" + "\n\t".join(missing_strings))
test.fail_test(1)
if not_built:
- print "Cannot check all strings, the following have apparently not been built:"
- print "\t" + "\n\t".join(not_built)
+ print("Cannot check all strings, the following have apparently not been built:")
+ print("\t" + "\n\t".join(not_built))
test.no_result(1)
test.pass_test()
diff --git a/test/AS/nasm.py b/test/AS/nasm.py
index be7db3ef..551a5ab5 100644
--- a/test/AS/nasm.py
+++ b/test/AS/nasm.py
@@ -69,7 +69,7 @@ else:
# anyway...).
nasm_format = 'elf'
format_map = {}
-for k, v in format_map.items():
+for k, v in list(format_map.items()):
if sys.platform.find(k) != -1:
nasm_format = v
break
diff --git a/test/Actions/unicode-signature.py b/test/Actions/unicode-signature.py
index 0d0c4694..0ba50c30 100644
--- a/test/Actions/unicode-signature.py
+++ b/test/Actions/unicode-signature.py
@@ -36,7 +36,7 @@ import TestSCons
test = TestSCons.TestSCons()
try:
- unicode
+ str
except NameError:
import sys
msg = "Unicode not supported by Python version %s; skipping test\n"
diff --git a/test/AddOption/help.py b/test/AddOption/help.py
index d50e5954..a3fd7be8 100644
--- a/test/AddOption/help.py
+++ b/test/AddOption/help.py
@@ -57,10 +57,10 @@ lines = test.stdout().split('\n')
missing = [e for e in expected_lines if e not in lines]
if missing:
- print "====== STDOUT:"
- print test.stdout()
- print "====== Missing the following lines in the above AddOption() help output:"
- print "\n".join(missing)
+ print("====== STDOUT:")
+ print(test.stdout())
+ print("====== Missing the following lines in the above AddOption() help output:")
+ print("\n".join(missing))
test.fail_test()
test.unlink('SConstruct')
@@ -70,10 +70,10 @@ lines = test.stdout().split('\n')
unexpected = [e for e in expected_lines if e in lines]
if unexpected:
- print "====== STDOUT:"
- print test.stdout()
- print "====== Unexpected lines in the above non-AddOption() help output:"
- print "\n".join(unexpected)
+ print("====== STDOUT:")
+ print(test.stdout())
+ print("====== Unexpected lines in the above non-AddOption() help output:")
+ print("\n".join(unexpected))
test.fail_test()
test.pass_test()
diff --git a/test/Batch/action-changed.py b/test/Batch/action-changed.py
index dc2805a4..d0314324 100644
--- a/test/Batch/action-changed.py
+++ b/test/Batch/action-changed.py
@@ -54,7 +54,7 @@ sys.exit(0)
"""
test.write('build.py', build_py_contents % (python, 'one'))
-os.chmod(test.workpath('build.py'), 0755)
+os.chmod(test.workpath('build.py'), 0o755)
test.write('SConstruct', """
env = Environment()
@@ -81,7 +81,7 @@ test.must_match('f3.out', "one\nf3.in\n")
test.up_to_date(arguments = '.')
test.write('build.py', build_py_contents % (python, 'two'))
-os.chmod(test.workpath('build.py'), 0755)
+os.chmod(test.workpath('build.py'), 0o755)
test.not_up_to_date(arguments = '.')
diff --git a/test/Chmod.py b/test/Chmod.py
index c00aea0f..8b5fbe00 100644
--- a/test/Chmod.py
+++ b/test/Chmod.py
@@ -87,19 +87,19 @@ test.write('f10', "f10\n")
test.subdir('d11')
test.subdir('d12')
-os.chmod(test.workpath('f1'), 0444)
-os.chmod(test.workpath('f1-File'), 0444)
-os.chmod(test.workpath('d2'), 0555)
-os.chmod(test.workpath('d2-Dir'), 0555)
-os.chmod(test.workpath('f3'), 0444)
-os.chmod(test.workpath('d4'), 0555)
-os.chmod(test.workpath('f5'), 0444)
-os.chmod(test.workpath('Chmod-f7.in'), 0444)
-os.chmod(test.workpath('f7.out-Chmod'), 0444)
-os.chmod(test.workpath('f9'), 0444)
-os.chmod(test.workpath('f10'), 0444)
-os.chmod(test.workpath('d11'), 0555)
-os.chmod(test.workpath('d12'), 0555)
+os.chmod(test.workpath('f1'), 0o444)
+os.chmod(test.workpath('f1-File'), 0o444)
+os.chmod(test.workpath('d2'), 0o555)
+os.chmod(test.workpath('d2-Dir'), 0o555)
+os.chmod(test.workpath('f3'), 0o444)
+os.chmod(test.workpath('d4'), 0o555)
+os.chmod(test.workpath('f5'), 0o444)
+os.chmod(test.workpath('Chmod-f7.in'), 0o444)
+os.chmod(test.workpath('f7.out-Chmod'), 0o444)
+os.chmod(test.workpath('f9'), 0o444)
+os.chmod(test.workpath('f10'), 0o444)
+os.chmod(test.workpath('d11'), 0o555)
+os.chmod(test.workpath('d12'), 0o555)
expect = test.wrap_stdout(read_str = """\
Chmod("f1", 0666)
@@ -123,68 +123,68 @@ cat(["f8.out"], ["f8.in"])
test.run(options = '-n', arguments = '.', stdout = expect)
s = stat.S_IMODE(os.stat(test.workpath('f1'))[stat.ST_MODE])
-test.fail_test(s != 0444)
+test.fail_test(s != 0o444)
s = stat.S_IMODE(os.stat(test.workpath('f1-File'))[stat.ST_MODE])
-test.fail_test(s != 0444)
+test.fail_test(s != 0o444)
s = stat.S_IMODE(os.stat(test.workpath('d2'))[stat.ST_MODE])
-test.fail_test(s != 0555)
+test.fail_test(s != 0o555)
s = stat.S_IMODE(os.stat(test.workpath('d2-Dir'))[stat.ST_MODE])
-test.fail_test(s != 0555)
+test.fail_test(s != 0o555)
test.must_not_exist('bar.out')
s = stat.S_IMODE(os.stat(test.workpath('f3'))[stat.ST_MODE])
-test.fail_test(s != 0444)
+test.fail_test(s != 0o444)
s = stat.S_IMODE(os.stat(test.workpath('d4'))[stat.ST_MODE])
-test.fail_test(s != 0555)
+test.fail_test(s != 0o555)
s = stat.S_IMODE(os.stat(test.workpath('f5'))[stat.ST_MODE])
-test.fail_test(s != 0444)
+test.fail_test(s != 0o444)
test.must_not_exist('f6.out')
test.must_not_exist('f7.out')
s = stat.S_IMODE(os.stat(test.workpath('Chmod-f7.in'))[stat.ST_MODE])
-test.fail_test(s != 0444)
+test.fail_test(s != 0o444)
s = stat.S_IMODE(os.stat(test.workpath('f7.out-Chmod'))[stat.ST_MODE])
-test.fail_test(s != 0444)
+test.fail_test(s != 0o444)
test.must_not_exist('f8.out')
s = stat.S_IMODE(os.stat(test.workpath('f9'))[stat.ST_MODE])
-test.fail_test(s != 0444)
+test.fail_test(s != 0o444)
s = stat.S_IMODE(os.stat(test.workpath('f10'))[stat.ST_MODE])
-test.fail_test(s != 0444)
+test.fail_test(s != 0o444)
s = stat.S_IMODE(os.stat(test.workpath('d11'))[stat.ST_MODE])
-test.fail_test(s != 0555)
+test.fail_test(s != 0o555)
s = stat.S_IMODE(os.stat(test.workpath('d12'))[stat.ST_MODE])
-test.fail_test(s != 0555)
+test.fail_test(s != 0o555)
test.run()
s = stat.S_IMODE(os.stat(test.workpath('f1'))[stat.ST_MODE])
-test.fail_test(s != 0666)
+test.fail_test(s != 0o666)
s = stat.S_IMODE(os.stat(test.workpath('f1-File'))[stat.ST_MODE])
-test.fail_test(s != 0666)
+test.fail_test(s != 0o666)
s = stat.S_IMODE(os.stat(test.workpath('d2'))[stat.ST_MODE])
-test.fail_test(s != 0777)
+test.fail_test(s != 0o777)
s = stat.S_IMODE(os.stat(test.workpath('d2-Dir'))[stat.ST_MODE])
-test.fail_test(s != 0777)
+test.fail_test(s != 0o777)
test.must_match('bar.out', "bar.in\n")
s = stat.S_IMODE(os.stat(test.workpath('f3'))[stat.ST_MODE])
-test.fail_test(s != 0666)
+test.fail_test(s != 0o666)
s = stat.S_IMODE(os.stat(test.workpath('d4'))[stat.ST_MODE])
-test.fail_test(s != 0777)
+test.fail_test(s != 0o777)
s = stat.S_IMODE(os.stat(test.workpath('f5'))[stat.ST_MODE])
-test.fail_test(s != 0666)
+test.fail_test(s != 0o666)
test.must_match('f6.out', "f6.in\n")
test.must_match('f7.out', "f7.in\n")
s = stat.S_IMODE(os.stat(test.workpath('Chmod-f7.in'))[stat.ST_MODE])
-test.fail_test(s != 0666)
+test.fail_test(s != 0o666)
s = stat.S_IMODE(os.stat(test.workpath('f7.out-Chmod'))[stat.ST_MODE])
-test.fail_test(s != 0666)
+test.fail_test(s != 0o666)
test.must_match('f8.out', "f8.in\n")
s = stat.S_IMODE(os.stat(test.workpath('f9'))[stat.ST_MODE])
-test.fail_test(s != 0666)
+test.fail_test(s != 0o666)
s = stat.S_IMODE(os.stat(test.workpath('f10'))[stat.ST_MODE])
-test.fail_test(s != 0666)
+test.fail_test(s != 0o666)
s = stat.S_IMODE(os.stat(test.workpath('d11'))[stat.ST_MODE])
-test.fail_test(s != 0777)
+test.fail_test(s != 0o777)
s = stat.S_IMODE(os.stat(test.workpath('d12'))[stat.ST_MODE])
-test.fail_test(s != 0777)
+test.fail_test(s != 0o777)
test.pass_test()
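The long run of mode constants above is purely syntactic: Python 3 dropped the bare leading-zero octal form, so 0755 must be written 0o755 with an unchanged numeric value. Note that the Chmod("f1", 0666) text a few hunks up is untouched because it lives inside an expected-output string, not a numeric literal. A quick sketch of the equivalence:

    import stat

    assert 0o755 == 493 == int('755', 8)
    assert 0o444 == stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH      # read-only for everyone
    assert 0o555 == 0o444 | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH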
diff --git a/test/Configure/ConfigureDryRunError.py b/test/Configure/ConfigureDryRunError.py
index b4be67e2..a8c63091 100644
--- a/test/Configure/ConfigureDryRunError.py
+++ b/test/Configure/ConfigureDryRunError.py
@@ -97,7 +97,7 @@ test.checkLogAndStdout( ["Checking for C library %s... " % lib,
newLog = test.read(test.workpath('config.log'))
if newLog != oldLog:
- print "Unexpected update of log file within a dry run"
+ print("Unexpected update of log file within a dry run")
test.fail_test()
test.pass_test()
diff --git a/test/Configure/config-h.py b/test/Configure/config-h.py
index cda6c3b9..6c70c9cb 100644
--- a/test/Configure/config-h.py
+++ b/test/Configure/config-h.py
@@ -135,19 +135,19 @@ test.run(stdout=expected_stdout)
config_h = test.read(test.workpath('config.h'))
if expected_config_h != config_h:
- print "Unexpected config.h"
- print "Expected: "
- print "---------------------------------------------------------"
- print repr(expected_config_h)
- print "---------------------------------------------------------"
- print "Found: "
- print "---------------------------------------------------------"
- print repr(config_h)
- print "---------------------------------------------------------"
- print "Stdio: "
- print "---------------------------------------------------------"
- print test.stdout()
- print "---------------------------------------------------------"
+ print("Unexpected config.h")
+ print("Expected: ")
+ print("---------------------------------------------------------")
+ print(repr(expected_config_h))
+ print("---------------------------------------------------------")
+ print("Found: ")
+ print("---------------------------------------------------------")
+ print(repr(config_h))
+ print("---------------------------------------------------------")
+ print("Stdio: ")
+ print("---------------------------------------------------------")
+ print(test.stdout())
+ print("---------------------------------------------------------")
test.fail_test()
expected_read_str = re.sub(r'\b((yes)|(no))\b',
@@ -162,19 +162,19 @@ test.run(stdout=expected_stdout)
config_h = test.read(test.workpath('config.h'))
if expected_config_h != config_h:
- print "Unexpected config.h"
- print "Expected: "
- print "---------------------------------------------------------"
- print repr(expected_config_h)
- print "---------------------------------------------------------"
- print "Found: "
- print "---------------------------------------------------------"
- print repr(config_h)
- print "---------------------------------------------------------"
- print "Stdio: "
- print "---------------------------------------------------------"
- print test.stdout()
- print "---------------------------------------------------------"
+ print("Unexpected config.h")
+ print("Expected: ")
+ print("---------------------------------------------------------")
+ print(repr(expected_config_h))
+ print("---------------------------------------------------------")
+ print("Found: ")
+ print("---------------------------------------------------------")
+ print(repr(config_h))
+ print("---------------------------------------------------------")
+ print("Stdio: ")
+ print("---------------------------------------------------------")
+ print(test.stdout())
+ print("---------------------------------------------------------")
test.fail_test()
test.pass_test()
diff --git a/test/Configure/implicit-cache.py b/test/Configure/implicit-cache.py
index 0f04b1e4..059fd4c8 100644
--- a/test/Configure/implicit-cache.py
+++ b/test/Configure/implicit-cache.py
@@ -93,11 +93,11 @@ test.run_sconsign('-d .sconf_temp -e conftest_0.c --raw .sconsign.dblite')
new_sconsign_dblite = test.stdout()
if old_sconsign_dblite != new_sconsign_dblite:
- print ".sconsign.dblite did not match:"
- print "FIRST RUN =========="
- print old_sconsign_dblite
- print "SECOND RUN =========="
- print new_sconsign_dblite
+ print(".sconsign.dblite did not match:")
+ print("FIRST RUN ==========")
+ print(old_sconsign_dblite)
+ print("SECOND RUN ==========")
+ print(new_sconsign_dblite)
test.fail_test()
test.pass_test()
diff --git a/test/Copy-Action.py b/test/Copy-Action.py
index 51635c51..ded158a7 100644
--- a/test/Copy-Action.py
+++ b/test/Copy-Action.py
@@ -84,8 +84,8 @@ test.subdir('d5')
test.write(['d5', 'f12.in'], "f12.in\n")
test.write('f 13.in', "f 13.in\n")
-os.chmod('f1.in', 0646)
-os.chmod('f4.in', 0644)
+os.chmod('f1.in', 0o646)
+os.chmod('f4.in', 0o644)
test.sleep()
diff --git a/test/Deprecated/Options/Options.py b/test/Deprecated/Options/Options.py
index 8116a636..61a12f3f 100644
--- a/test/Deprecated/Options/Options.py
+++ b/test/Deprecated/Options/Options.py
@@ -237,7 +237,7 @@ opts.Save('options.saved', env)
def checkSave(file, expected):
gdict = {}
ldict = {}
- exec open(file, 'rU').read() in gdict, ldict
+ exec(open(file, 'rU').read(), gdict, ldict)
assert expected == ldict, "%s\n...not equal to...\n%s" % (expected, ldict)
# First test with no command line options
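The checkSave() change reflects exec losing its statement form: "exec code in gdict, ldict" becomes the call exec(code, gdict, ldict), which Python 2.6+ also accepts. The 'rU' open mode survives the conversion but is deprecated in Python 3 (and removed in 3.11), so it typically becomes plain 'r' in a manual pass. A self-contained sketch of the converted idiom (the source string stands in for the saved options file):

    gdict, ldict = {}, {}
    saved = "answer = 6 * 7\n"            # stand-in for open(file).read()
    exec(saved, gdict, ldict)             # Python 3 spelling of the old exec statement
    assert ldict == {'answer': 42}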
diff --git a/test/Deprecated/SourceCode/BitKeeper/BitKeeper.py b/test/Deprecated/SourceCode/BitKeeper/BitKeeper.py
index 28b87616..9a36eac7 100644
--- a/test/Deprecated/SourceCode/BitKeeper/BitKeeper.py
+++ b/test/Deprecated/SourceCode/BitKeeper/BitKeeper.py
@@ -67,7 +67,7 @@ test.subdir('BK', 'import', ['import', 'sub'])
# Test using BitKeeper to fetch from SCCS/s.file files.
sccs = test.where_is('sccs')
if not sccs:
- print "Could not find SCCS, skipping sub-test of BitKeeper using SCCS files."
+ print("Could not find SCCS, skipping sub-test of BitKeeper using SCCS files.")
else:
test.subdir('work1',
['work1', 'SCCS'],
@@ -167,9 +167,9 @@ sub/fff.in 1.1 -> 1.2: 1 lines
rcs = test.where_is('rcs')
ci = test.where_is('ci')
if not rcs:
- print "Could not find RCS,\nskipping sub-test of BitKeeper using RCS files."
+ print("Could not find RCS,\nskipping sub-test of BitKeeper using RCS files.")
elif not ci:
- print "Could not find the RCS ci command,\nskipping sub-test of BitKeeper using RCS files."
+ print("Could not find the RCS ci command,\nskipping sub-test of BitKeeper using RCS files.")
else:
test.subdir('work2',
['work2', 'RCS'],
diff --git a/test/Deprecated/SourceCode/Subversion.py b/test/Deprecated/SourceCode/Subversion.py
index a97c86f2..ac3eb57a 100644
--- a/test/Deprecated/SourceCode/Subversion.py
+++ b/test/Deprecated/SourceCode/Subversion.py
@@ -50,7 +50,7 @@ svnadmin = test.where_is('svnadmin')
if not svn:
test.skip_test("Could not find 'svnadmin'; skipping test(s).\n")
-print "Short-circuiting this test until we support Subversion"
+print("Short-circuiting this test until we support Subversion")
test.pass_test()
test.subdir('Subversion', 'import', ['import', 'sub'], 'work1', 'work2')
diff --git a/test/GetBuildFailures/parallel.py b/test/GetBuildFailures/parallel.py
index b7576af6..101c2e2f 100644
--- a/test/GetBuildFailures/parallel.py
+++ b/test/GetBuildFailures/parallel.py
@@ -102,8 +102,8 @@ error_45 = f4_error + f5_error
error_54 = f5_error + f4_error
if test.stderr() not in [error_45, error_54]:
- print "Did not find the following output in list of expected strings:"
- print test.stderr(),
+ print("Did not find the following output in list of expected strings:")
+ print(test.stderr(), end=' ')
test.fail_test()
# We jump through hoops above to try to make sure that the individual
@@ -121,8 +121,8 @@ failed_45 = f4_failed + f5_failed
failed_54 = f5_failed + f4_failed
if test.stdout() not in [failed_45, failed_54]:
- print "Did not find the following output in list of expected strings:"
- print test.stdout(),
+ print("Did not find the following output in list of expected strings:")
+ print(test.stdout(), end=' ')
test.fail_test()
test.must_match(test.workpath('f3'), 'f3.in\n')
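The end=' ' keyword is how 2to3 renders a Python 2 print statement that ended with a trailing comma: suppress the newline and emit a single trailing space. Since test.stderr() and test.stdout() normally already end in a newline, the visible difference is only that trailing space. A minimal sketch:

    print("Did not find the following output in list of expected strings:")
    print("captured output\n", end=' ')     # old form:  print "captured output\n",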
diff --git a/test/Glob/glob-libpath.py b/test/Glob/glob-libpath.py
index b09aab95..6ee06e6b 100644
--- a/test/Glob/glob-libpath.py
+++ b/test/Glob/glob-libpath.py
@@ -77,10 +77,10 @@ test.write(['src', 'util', 'util.cpp'], """int i=0;
test.run(arguments = '-Q .')
if not test.match_re_dotall(test.stdout(), r".*(-L|/LIBPATH:)build[/\\]util.*"):
- print repr(test.stdout())+" should contain -Lbuild/util or /LIBPATH:build\\util"
+ print(repr(test.stdout())+" should contain -Lbuild/util or /LIBPATH:build\\util")
test.fail_test()
if test.match_re_dotall(test.stdout(), r".*(-L|/LIBPATH:)src[/\\]util.*"):
- print repr(test.stdout())+" should not contain -Lsrc/util or /LIBPATH:src\\util"
+ print(repr(test.stdout())+" should not contain -Lsrc/util or /LIBPATH:src\\util")
test.fail_test()
test.pass_test()
diff --git a/test/Install/Install.py b/test/Install/Install.py
index adadfd96..d66660ba 100644
--- a/test/Install/Install.py
+++ b/test/Install/Install.py
@@ -129,7 +129,7 @@ test.must_match(['work', 'f2.out'], "f2.in\n")
# if a target can not be unlinked before building it:
test.write(['work', 'f1.in'], "f1.in again again\n")
-os.chmod(test.workpath('work', 'export'), 0555)
+os.chmod(test.workpath('work', 'export'), 0o555)
f = open(f1_out, 'rb')
diff --git a/test/Interactive/version.py b/test/Interactive/version.py
index bbca9efc..295b9d3f 100644
--- a/test/Interactive/version.py
+++ b/test/Interactive/version.py
@@ -76,7 +76,7 @@ expect2 = r"""scons>>> SCons by Steven Knight et al\.:
stdout = test.stdout() + '\n'
if not test.match_re(stdout, expect1) and not test.match_re(stdout, expect2):
- print repr(stdout)
+ print(repr(stdout))
test.fail_test()
diff --git a/test/Java/multi-step.py b/test/Java/multi-step.py
index a8efcd46..add42089 100644
--- a/test/Java/multi-step.py
+++ b/test/Java/multi-step.py
@@ -557,7 +557,7 @@ test.must_exist(['buildout', 'jni', 'SampleTest.java'])
# it doesn't exist.
p = test.workpath('buildout', 'jni', 'SampleTest.class')
if not os.path.exists(p):
- print 'Warning: %s does not exist' % p
+ print('Warning: %s does not exist' % p)
test.up_to_date(arguments = '.')
diff --git a/test/MSVC/batch-longlines.py b/test/MSVC/batch-longlines.py
index ef7233b1..c584abac 100644
--- a/test/MSVC/batch-longlines.py
+++ b/test/MSVC/batch-longlines.py
@@ -1,61 +1,61 @@
-#!/usr/bin/env python
-#
-# __COPYRIGHT__
-#
-# Permission is hereby granted, free of charge, to any person obtaining
-# a copy of this software and associated documentation files (the
-# "Software"), to deal in the Software without restriction, including
-# without limitation the rights to use, copy, modify, merge, publish,
-# distribute, sublicense, and/or sell copies of the Software, and to
-# permit persons to whom the Software is furnished to do so, subject to
-# the following conditions:
-#
-# The above copyright notice and this permission notice shall be included
-# in all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
-# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
-# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-#
-
-__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
-
-"""
-Verify operation of Visual C/C++ batch builds with long lines.
-
-Only runs on Windows.
-"""
-
-import TestSCons
-
-test = TestSCons.TestSCons()
-
-test.skip_if_not_msvc()
-
-_python_ = TestSCons._python_
-
-for i in xrange(1,200):
- test.write('source-file-with-quite-a-long-name-maybe-unrealistic-but-who-cares-%05d.cxx'%i,
- '/* source file %d */\nint var%d;\n'%(i,i))
-
-test.write('SConstruct', """
-env = Environment(tools=['msvc', 'mslink'],
- MSVC_BATCH=ARGUMENTS.get('MSVC_BATCH'))
-env.SharedLibrary('mylib', Glob('source*.cxx'))
-""" % locals())
-
-test.run(arguments = 'MSVC_BATCH=1 .')
-
-test.must_exist('mylib.dll')
-
-test.pass_test()
-
-# Local Variables:
-# tab-width:4
-# indent-tabs-mode:nil
-# End:
-# vim: set expandtab tabstop=4 shiftwidth=4:
+#!/usr/bin/env python
+#
+# __COPYRIGHT__
+#
+# Permission is hereby granted, free of charge, to any person obtaining
+# a copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish,
+# distribute, sublicense, and/or sell copies of the Software, and to
+# permit persons to whom the Software is furnished to do so, subject to
+# the following conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
+# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
+# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+#
+
+__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
+
+"""
+Verify operation of Visual C/C++ batch builds with long lines.
+
+Only runs on Windows.
+"""
+
+import TestSCons
+
+test = TestSCons.TestSCons()
+
+test.skip_if_not_msvc()
+
+_python_ = TestSCons._python_
+
+for i in range(1,200):
+ test.write('source-file-with-quite-a-long-name-maybe-unrealistic-but-who-cares-%05d.cxx'%i,
+ '/* source file %d */\nint var%d;\n'%(i,i))
+
+test.write('SConstruct', """
+env = Environment(tools=['msvc', 'mslink'],
+ MSVC_BATCH=ARGUMENTS.get('MSVC_BATCH'))
+env.SharedLibrary('mylib', Glob('source*.cxx'))
+""" % locals())
+
+test.run(arguments = 'MSVC_BATCH=1 .')
+
+test.must_exist('mylib.dll')
+
+test.pass_test()
+
+# Local Variables:
+# tab-width:4
+# indent-tabs-mode:nil
+# End:
+# vim: set expandtab tabstop=4 shiftwidth=4:
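This test shows up as a wholesale delete-and-re-add rather than a one-line hunk; the only substantive change in the re-added copy is xrange becoming range, so the full rewrite most likely reflects line-ending normalisation during the conversion run (the same pattern recurs for test/ZIP/ZIPROOT.py and test/site_scons/sysdirs.py below). Nothing is lost by the rename, since Python 3's range() is itself a lazy sequence:

    # Python 2's xrange(1, 200) avoided building a 199-element list up front;
    # Python 3's range(1, 200) is equally lazy, so the loop behaves the same.
    assert sum(range(1, 200)) == 199 * 200 // 2        # 19900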
diff --git a/test/MSVC/msvc.py b/test/MSVC/msvc.py
index c68fb45f..6f5c8d64 100644
--- a/test/MSVC/msvc.py
+++ b/test/MSVC/msvc.py
@@ -181,9 +181,9 @@ slow = time.time() - start
# using precompiled headers should be faster
limit = slow*0.90
if fast >= limit:
- print "Using precompiled headers was not fast enough:"
- print "slow.obj: %.3fs" % slow
- print "fast.obj: %.3fs (expected less than %.3fs)" % (fast, limit)
+ print("Using precompiled headers was not fast enough:")
+ print("slow.obj: %.3fs" % slow)
+ print("fast.obj: %.3fs (expected less than %.3fs)" % (fast, limit))
test.fail_test()
# Modifying resource.h should cause both the resource and precompiled header to be rebuilt:
diff --git a/test/QT/QTFLAGS.py b/test/QT/QTFLAGS.py
index 008397a8..b7172fc5 100644
--- a/test/QT/QTFLAGS.py
+++ b/test/QT/QTFLAGS.py
@@ -147,7 +147,7 @@ test.must_exist(['work1', 'mmmmocFromH.cxx'],
['work1', 'mmmanother_ui_file.cxx'])
def _flagTest(test,fileToContentsStart):
- for f,c in fileToContentsStart.items():
+ for f,c in list(fileToContentsStart.items()):
if test.read(test.workpath('work1', f)).find(c) != 0:
return 1
return 0
diff --git a/test/QT/copied-env.py b/test/QT/copied-env.py
index efa91be6..59a344da 100644
--- a/test/QT/copied-env.py
+++ b/test/QT/copied-env.py
@@ -70,8 +70,8 @@ moc_MyForm = [x for x in test.stdout().split('\n') if x.find('moc_MyForm') != -1
MYLIB_IMPL = [x for x in moc_MyForm if x.find('MYLIB_IMPL') != -1]
if not MYLIB_IMPL:
- print "Did not find MYLIB_IMPL on moc_MyForm compilation line:"
- print test.stdout()
+ print("Did not find MYLIB_IMPL on moc_MyForm compilation line:")
+ print(test.stdout())
test.fail_test()
test.pass_test()
diff --git a/test/QT/warnings.py b/test/QT/warnings.py
index a861b24b..5e680f60 100644
--- a/test/QT/warnings.py
+++ b/test/QT/warnings.py
@@ -59,8 +59,8 @@ scons: warning: Generated moc file 'aaa.moc' is not included by 'aaa.cpp'
""" + TestSCons.file_expr
if not re.search(match12, test.stderr()):
- print "Did not find expected regular expression in stderr:"
- print test.stderr()
+ print("Did not find expected regular expression in stderr:")
+ print(test.stderr())
test.fail_test()
os.environ['QTDIR'] = test.QT
diff --git a/test/SConsignFile/use-dbhash.py b/test/SConsignFile/use-dbhash.py
index 45e3e36f..bd9b7996 100644
--- a/test/SConsignFile/use-dbhash.py
+++ b/test/SConsignFile/use-dbhash.py
@@ -35,7 +35,7 @@ _python_ = TestSCons._python_
test = TestSCons.TestSCons()
try:
- import dbhash
+ import dbm.bsd
except ImportError:
test.skip_test('No dbhash in this version of Python; skipping test.\n')
diff --git a/test/SConsignFile/use-dbm.py b/test/SConsignFile/use-dbm.py
index 75f088dc..90983b30 100644
--- a/test/SConsignFile/use-dbm.py
+++ b/test/SConsignFile/use-dbm.py
@@ -35,7 +35,7 @@ _python_ = TestSCons._python_
test = TestSCons.TestSCons()
try:
- import dbm
+ import dbm.ndbm
except ImportError:
test.skip_test('No dbm in this version of Python; skipping test.\n')
diff --git a/test/SConsignFile/use-dumbdbm.py b/test/SConsignFile/use-dumbdbm.py
index 9d48fe5d..84f1dd44 100644
--- a/test/SConsignFile/use-dumbdbm.py
+++ b/test/SConsignFile/use-dumbdbm.py
@@ -35,7 +35,7 @@ _python_ = TestSCons._python_
test = TestSCons.TestSCons()
try:
- import dumbdbm
+ import dbm.dumb
except ImportError:
test.skip_test('No dumbdbm in this version of Python; skipping test.\n')
diff --git a/test/SConsignFile/use-gdbm.py b/test/SConsignFile/use-gdbm.py
index 1eb36452..a7e4f59e 100644
--- a/test/SConsignFile/use-gdbm.py
+++ b/test/SConsignFile/use-gdbm.py
@@ -35,7 +35,7 @@ _python_ = TestSCons._python_
test = TestSCons.TestSCons()
try:
- import gdbm
+ import dbm.gnu
except ImportError:
test.skip_test('No gdbm in this version of Python; skipping test.\n')
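These four SConsignFile tests pick up the PEP 3108 renames of the dbm family: dumbdbm becomes dbm.dumb, dbm becomes dbm.ndbm, gdbm becomes dbm.gnu. One caveat: 2to3 maps dbhash to dbm.bsd, but stock CPython 3 ships no dbm.bsd module (the bsddb bindings were dropped), so that import always fails and use-dbhash.py will simply skip itself. A small probe of what is actually importable (illustrative, not part of any test):

    import importlib

    for name in ('dbm.gnu', 'dbm.ndbm', 'dbm.dumb', 'dbm.bsd'):
        try:
            importlib.import_module(name)
            print(name, 'available')
        except ImportError:
            print(name, 'not available')   # dbm.bsd lands here on stock CPython 3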
diff --git a/test/SHELL.py b/test/SHELL.py
index 93ed0b13..faee27fe 100644
--- a/test/SHELL.py
+++ b/test/SHELL.py
@@ -64,7 +64,7 @@ ofp.close()
sys.exit(0)
""" % locals())
-os.chmod(my_shell, 0755)
+os.chmod(my_shell, 0o755)
test.write('SConstruct', """\
env = Environment(SHELL = r'%(my_shell)s')
diff --git a/test/Scanner/unicode.py b/test/Scanner/unicode.py
index 55e22bdd..7a72804e 100644
--- a/test/Scanner/unicode.py
+++ b/test/Scanner/unicode.py
@@ -36,7 +36,7 @@ _python_ = TestSCons._python_
test = TestSCons.TestSCons()
try:
- unicode
+ str
except NameError:
import sys
msg = "Unicode not supported by Python version %s; skipping test\n"
@@ -102,28 +102,28 @@ include utf16be.k
foo.k 1 line 4
""")
-contents = unicode("""\
+contents = str("""\
ascii.k 1 line 1
include ascii.inc
ascii.k 1 line 3
""")
test.write('ascii.k', contents.encode('ascii'))
-contents = unicode("""\
+contents = str("""\
utf8.k 1 line 1
include utf8.inc
utf8.k 1 line 3
""")
test.write('utf8.k', codecs.BOM_UTF8 + contents.encode('utf-8'))
-contents = unicode("""\
+contents = str("""\
utf16le.k 1 line 1
include utf16le.inc
utf16le.k 1 line 3
""")
test.write('utf16le.k', codecs.BOM_UTF16_LE + contents.encode('utf-16-le'))
-contents = unicode("""\
+contents = str("""\
utf16be.k 1 line 1
include utf16be.inc
utf16be.k 1 line 3
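As in the unicode-signature test, the raw fixer maps the unicode() constructor to str(). The converted code still runs, because the literals are already text in Python 3 and .encode() still yields bytes, but the str(...) wrapper is now a no-op that a manual cleanup would normally drop. A minimal sketch of the post-conversion behaviour:

    import codecs

    contents = "utf8.k 1 line 1\n"                      # already text; unicode(...) added nothing
    data = codecs.BOM_UTF8 + contents.encode('utf-8')   # bytes, ready to write verbatim
    assert data.startswith(codecs.BOM_UTF8) and data.endswith(b'line 1\n')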
diff --git a/test/TEMPFILEPREFIX.py b/test/TEMPFILEPREFIX.py
index f9b3240f..8e756afb 100644
--- a/test/TEMPFILEPREFIX.py
+++ b/test/TEMPFILEPREFIX.py
@@ -45,7 +45,7 @@ print sys.argv
echo_py = test.workpath('echo.py')
st = os.stat(echo_py)
-os.chmod(echo_py, st[stat.ST_MODE]|0111)
+os.chmod(echo_py, st[stat.ST_MODE]|0o111)
test.write('SConstruct', """
import os
diff --git a/test/TEX/TEX.py b/test/TEX/TEX.py
index 24d4bdd4..863144d2 100644
--- a/test/TEX/TEX.py
+++ b/test/TEX/TEX.py
@@ -173,14 +173,14 @@ Run \texttt{latex}, then \texttt{bibtex}, then \texttt{latex} twice again \cite{
reruns = [x for x in output_lines if x.find('latex -interaction=nonstopmode -recorder rerun.tex') != -1]
if len(reruns) != 2:
- print "Expected 2 latex calls, got %s:" % len(reruns)
- print '\n'.join(reruns)
+ print("Expected 2 latex calls, got %s:" % len(reruns))
+ print('\n'.join(reruns))
test.fail_test()
bibtex = [x for x in output_lines if x.find('bibtex bibtex-test') != -1]
if len(bibtex) != 1:
- print "Expected 1 bibtex call, got %s:" % len(bibtex)
- print '\n'.join(bibtex)
+ print("Expected 1 bibtex call, got %s:" % len(bibtex))
+ print('\n'.join(bibtex))
test.fail_test()
test.pass_test()
diff --git a/test/Value.py b/test/Value.py
index f3ad4582..025c6dea 100644
--- a/test/Value.py
+++ b/test/Value.py
@@ -81,7 +81,7 @@ open(sys.argv[-1],'wb').write(" ".join(sys.argv[1:-2]))
# to make sure there's no difference in behavior.
for source_signature in ['MD5', 'timestamp-newer']:
- print "Testing Value node with source signatures:", source_signature
+ print("Testing Value node with source signatures:", source_signature)
test.write('SConstruct', SConstruct_content % locals())
diff --git a/test/Variables/Variables.py b/test/Variables/Variables.py
index 454e32e4..d0bf4320 100644
--- a/test/Variables/Variables.py
+++ b/test/Variables/Variables.py
@@ -231,7 +231,7 @@ opts.Save('variables.saved', env)
def checkSave(file, expected):
gdict = {}
ldict = {}
- exec open(file, 'rU').read() in gdict, ldict
+ exec(open(file, 'rU').read(), gdict, ldict)
assert expected == ldict, "%s\n...not equal to...\n%s" % (expected, ldict)
# First test with no command line variables
diff --git a/test/WhereIs.py b/test/WhereIs.py
index dc127b62..07c3f6c4 100644
--- a/test/WhereIs.py
+++ b/test/WhereIs.py
@@ -45,10 +45,10 @@ if sys.platform != 'win32':
os.mkdir(sub2_xxx_exe)
test.write(sub3_xxx_exe, "\n")
-os.chmod(sub3_xxx_exe, 0777)
+os.chmod(sub3_xxx_exe, 0o777)
test.write(sub4_xxx_exe, "\n")
-os.chmod(sub4_xxx_exe, 0777)
+os.chmod(sub4_xxx_exe, 0o777)
env_path = os.environ['PATH']
diff --git a/test/Win32/bad-drive.py b/test/Win32/bad-drive.py
index f2a55b8c..ff98b4e2 100644
--- a/test/Win32/bad-drive.py
+++ b/test/Win32/bad-drive.py
@@ -50,8 +50,8 @@ for i in range(len(uppercase)-1, -1, -1):
break
if bad_drive is None:
- print "All drive letters appear to be in use."
- print "Cannot test SCons handling of invalid Windows drive letters."
+ print("All drive letters appear to be in use.")
+ print("Cannot test SCons handling of invalid Windows drive letters.")
test.no_result(1);
test.write('SConstruct', """
diff --git a/test/Win32/default-drive.py b/test/Win32/default-drive.py
index f427a203..2bc14b11 100644
--- a/test/Win32/default-drive.py
+++ b/test/Win32/default-drive.py
@@ -59,7 +59,7 @@ test.write(['src', 'file.in'], "src/file.in\n")
build_file_out = test.workpath('build', 'file.out')
-print os.path.splitdrive(build_file_out)[1]
+print(os.path.splitdrive(build_file_out)[1])
test.run(chdir = 'src',
arguments = os.path.splitdrive(build_file_out)[1])
diff --git a/test/ZIP/ZIP.py b/test/ZIP/ZIP.py
index af2450b5..f2acad89 100644
--- a/test/ZIP/ZIP.py
+++ b/test/ZIP/ZIP.py
@@ -47,7 +47,7 @@ def zipfile_contains(zipfilename, names):
for name in names:
try:
info=zf.getinfo(name)
- except KeyError, e: # name not found
+ except KeyError as e: # name not found
zf.close()
return False
return True
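"except KeyError, e" is the Python 2-only spelling; Python 3 accepts only "except KeyError as e", which Python 2.6+ also understands. The bound name is unused in this helper, so a later cleanup could drop it altogether. Minimal sketch:

    d = {}
    try:
        d['missing']
    except KeyError as e:      # Python 3 spelling; 'as e' is optional when the value is unused
        print('not found:', e)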
diff --git a/test/ZIP/ZIPROOT.py b/test/ZIP/ZIPROOT.py
index 96747a7a..f3e4496b 100644
--- a/test/ZIP/ZIPROOT.py
+++ b/test/ZIP/ZIPROOT.py
@@ -1,98 +1,98 @@
-#!/usr/bin/env python
-#
-# __COPYRIGHT__
-#
-# Permission is hereby granted, free of charge, to any person obtaining
-# a copy of this software and associated documentation files (the
-# "Software"), to deal in the Software without restriction, including
-# without limitation the rights to use, copy, modify, merge, publish,
-# distribute, sublicense, and/or sell copies of the Software, and to
-# permit persons to whom the Software is furnished to do so, subject to
-# the following conditions:
-#
-# The above copyright notice and this permission notice shall be included
-# in all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
-# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
-# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-#
-
-__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
-
-import os
-import stat
-
-import TestSCons
-
-_python_ = TestSCons._python_
-
-test = TestSCons.TestSCons()
-
-import zipfile
-
-def zipfile_contains(zipfilename, names):
- """Returns True if zipfilename contains all the names, False otherwise."""
- zf=zipfile.ZipFile(zipfilename, 'r')
- if type(names)==type(''):
- names=[names]
- for name in names:
- try:
- info=zf.getinfo(name)
- except KeyError, e: # name not found
- zf.close()
- return False
- return True
-
-def zipfile_files(fname):
- """Returns all the filenames in zip file fname."""
- zf = zipfile.ZipFile(fname, 'r')
- return [x.filename for x in zf.infolist()]
-
-test.subdir('sub1')
-test.subdir(['sub1', 'sub2'])
-
-test.write('SConstruct', """
-env = Environment(tools = ['zip'])
-env.Zip(target = 'aaa.zip', source = ['sub1/file1'], ZIPROOT='sub1')
-env.Zip(target = 'bbb.zip', source = ['sub1/file2', 'sub1/sub2/file2'], ZIPROOT='sub1')
-""" % locals())
-
-test.write(['sub1', 'file1'], "file1\n")
-test.write(['sub1', 'file2'], "file2a\n")
-test.write(['sub1', 'sub2', 'file2'], "file2b\n")
-
-test.run(arguments = 'aaa.zip', stderr = None)
-
-test.must_exist('aaa.zip')
-
-# TEST: Zip file should contain 'file1', not 'sub1/file1', because of ZIPROOT.
-zf=zipfile.ZipFile('aaa.zip', 'r')
-test.fail_test(zf.testzip() is not None)
-zf.close()
-
-files=zipfile_files('aaa.zip')
-test.fail_test(zipfile_files('aaa.zip') != ['file1'],
- message='Zip file aaa.zip has wrong files: %s'%repr(files))
-
-###
-
-test.run(arguments = 'bbb.zip', stderr = None)
-
-test.must_exist('bbb.zip')
-
-# TEST: Zip file should contain 'sub2/file2', not 'sub1/sub2/file2', because of ZIPROOT.
-zf=zipfile.ZipFile('bbb.zip', 'r')
-test.fail_test(zf.testzip() is not None)
-zf.close()
-
-files=zipfile_files('bbb.zip')
-test.fail_test(zipfile_files('bbb.zip') != ['file2', 'sub2/file2'],
- message='Zip file bbb.zip has wrong files: %s'%repr(files))
-
-
-test.pass_test()
+#!/usr/bin/env python
+#
+# __COPYRIGHT__
+#
+# Permission is hereby granted, free of charge, to any person obtaining
+# a copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish,
+# distribute, sublicense, and/or sell copies of the Software, and to
+# permit persons to whom the Software is furnished to do so, subject to
+# the following conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
+# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
+# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+#
+
+__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
+
+import os
+import stat
+
+import TestSCons
+
+_python_ = TestSCons._python_
+
+test = TestSCons.TestSCons()
+
+import zipfile
+
+def zipfile_contains(zipfilename, names):
+ """Returns True if zipfilename contains all the names, False otherwise."""
+ zf=zipfile.ZipFile(zipfilename, 'r')
+ if type(names)==type(''):
+ names=[names]
+ for name in names:
+ try:
+ info=zf.getinfo(name)
+ except KeyError as e: # name not found
+ zf.close()
+ return False
+ return True
+
+def zipfile_files(fname):
+ """Returns all the filenames in zip file fname."""
+ zf = zipfile.ZipFile(fname, 'r')
+ return [x.filename for x in zf.infolist()]
+
+test.subdir('sub1')
+test.subdir(['sub1', 'sub2'])
+
+test.write('SConstruct', """
+env = Environment(tools = ['zip'])
+env.Zip(target = 'aaa.zip', source = ['sub1/file1'], ZIPROOT='sub1')
+env.Zip(target = 'bbb.zip', source = ['sub1/file2', 'sub1/sub2/file2'], ZIPROOT='sub1')
+""" % locals())
+
+test.write(['sub1', 'file1'], "file1\n")
+test.write(['sub1', 'file2'], "file2a\n")
+test.write(['sub1', 'sub2', 'file2'], "file2b\n")
+
+test.run(arguments = 'aaa.zip', stderr = None)
+
+test.must_exist('aaa.zip')
+
+# TEST: Zip file should contain 'file1', not 'sub1/file1', because of ZIPROOT.
+zf=zipfile.ZipFile('aaa.zip', 'r')
+test.fail_test(zf.testzip() is not None)
+zf.close()
+
+files=zipfile_files('aaa.zip')
+test.fail_test(zipfile_files('aaa.zip') != ['file1'],
+ message='Zip file aaa.zip has wrong files: %s'%repr(files))
+
+###
+
+test.run(arguments = 'bbb.zip', stderr = None)
+
+test.must_exist('bbb.zip')
+
+# TEST: Zip file should contain 'sub2/file2', not 'sub1/sub2/file2', because of ZIPROOT.
+zf=zipfile.ZipFile('bbb.zip', 'r')
+test.fail_test(zf.testzip() is not None)
+zf.close()
+
+files=zipfile_files('bbb.zip')
+test.fail_test(zipfile_files('bbb.zip') != ['file2', 'sub2/file2'],
+ message='Zip file bbb.zip has wrong files: %s'%repr(files))
+
+
+test.pass_test()
diff --git a/test/gnutools.py b/test/gnutools.py
index e1b7e42b..f79efe46 100644
--- a/test/gnutools.py
+++ b/test/gnutools.py
@@ -118,7 +118,7 @@ def testObject(test, obj, expect):
line1 = contents.split('\n')[0]
actual = ' '.join(line1.split())
if not expect == actual:
- print "%s: %s != %s\n" % (obj, repr(expect), repr(actual))
+ print("%s: %s != %s\n" % (obj, repr(expect), repr(actual)))
test.fail_test()
if sys.platform in ('win32', 'cygwin'):
diff --git a/test/import.py b/test/import.py
index ef5ee614..fb8a28c0 100644
--- a/test/import.py
+++ b/test/import.py
@@ -185,8 +185,8 @@ for tool in tools:
matched = 1
break
if not matched:
- print "Failed importing '%s', stderr:" % tool
- print stderr
+ print("Failed importing '%s', stderr:" % tool)
+ print(stderr)
failures.append(tool)
test.fail_test(len(failures))
diff --git a/test/long-lines/signature.py b/test/long-lines/signature.py
index bc5d11a9..ce38bec8 100644
--- a/test/long-lines/signature.py
+++ b/test/long-lines/signature.py
@@ -51,7 +51,7 @@ fp.write('FILEFLAG=%s\\n' % args[2])
fp.write('TIMESTAMP=%s\\n' % args[3])
""")
-os.chmod(build_py, 0755)
+os.chmod(build_py, 0o755)
test.write('SConstruct', """\
arg = 'a_long_ignored_argument'
diff --git a/test/option--random.py b/test/option--random.py
index a9b9b9d8..59592cb2 100644
--- a/test/option--random.py
+++ b/test/option--random.py
@@ -66,7 +66,7 @@ tries = 0
max_tries = 10
while test.stdout() == non_random_output:
if tries >= max_tries:
- print "--random generated the non-random output %s times!" % max_tries
+ print("--random generated the non-random output %s times!" % max_tries)
test.fail_test()
tries = tries + 1
test.run(arguments = '-n -Q --random')
@@ -84,7 +84,7 @@ tries = 0
max_tries = 10
while test.stdout() == non_random_output:
if tries >= max_tries:
- print "--random generated the non-random output %s times!" % max_tries
+ print("--random generated the non-random output %s times!" % max_tries)
test.fail_test()
tries = tries + 1
test.run(arguments = '-n -Q --random')
diff --git a/test/option-v.py b/test/option-v.py
index 680f5417..8985db09 100644
--- a/test/option-v.py
+++ b/test/option-v.py
@@ -55,13 +55,13 @@ expect2 = r"""SCons by Steven Knight et al.:
test.run(arguments = '-v')
stdout = test.stdout()
if not test.match_re(stdout, expect1) and not test.match_re(stdout, expect2):
- print stdout
+ print(stdout)
test.fail_test()
test.run(arguments = '--version')
stdout = test.stdout()
if not test.match_re(stdout, expect1) and not test.match_re(stdout, expect2):
- print stdout
+ print(stdout)
test.fail_test()
test.pass_test()
diff --git a/test/option/debug-count.py b/test/option/debug-count.py
index 3f8a23c0..076d7fa8 100644
--- a/test/option/debug-count.py
+++ b/test/option/debug-count.py
@@ -74,10 +74,10 @@ for args in ['-h --debug=count', '--debug=count']:
missing = [o for o in objects if find_object_count(o, stdout) is None]
if missing:
- print "Missing the following object lines from '%s' output:" % args
- print "\t", ' '.join(missing)
- print "STDOUT =========="
- print stdout
+ print("Missing the following object lines from '%s' output:" % args)
+ print("\t", ' '.join(missing))
+ print("STDOUT ==========")
+ print(stdout)
test.fail_test(1)
expect_warning = """
diff --git a/test/option/debug-time.py b/test/option/debug-time.py
index 198d71df..7448e1c0 100644
--- a/test/option/debug-time.py
+++ b/test/option/debug-time.py
@@ -20,7 +20,7 @@
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-from __future__ import division
+
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
@@ -164,7 +164,7 @@ outside of the 15%% tolerance.
""" % locals())
if failures or warnings:
- print '\n'.join([test.stdout()] + failures + warnings)
+ print('\n'.join([test.stdout()] + failures + warnings))
if failures:
test.fail_test(1)
@@ -172,7 +172,7 @@ test.run(arguments = "--debug=time . SLEEP=0")
command_time = get_command_time(test.stdout())
if command_time != 0.0:
- print "Up-to-date run received non-zero command time of %s" % command_time
+ print("Up-to-date run received non-zero command time of %s" % command_time)
test.fail_test()
@@ -201,14 +201,14 @@ outside of the 1%% tolerance.
""" % locals())
if failures:
- print '\n'.join([test.stdout()] + failures)
+ print('\n'.join([test.stdout()] + failures))
test.fail_test(1)
test.run(arguments = "-j4 --debug=time . SLEEP=1")
command_time = get_command_time(test.stdout())
if command_time != 0.0:
- print "Up-to-date run received non-zero command time of %s" % command_time
+ print("Up-to-date run received non-zero command time of %s" % command_time)
test.fail_test()
diff --git a/test/option/help-options.py b/test/option/help-options.py
index 45bbfa0d..1b23cb34 100644
--- a/test/option/help-options.py
+++ b/test/option/help-options.py
@@ -60,8 +60,8 @@ options = [x[-1] == ',' and x[:-1] or x for x in options]
lowered = [x.lower() for x in options]
ordered = sorted(lowered)
if lowered != ordered:
- print "lowered =", lowered
- print "sorted =", ordered
+ print("lowered =", lowered)
+ print("sorted =", ordered)
test.fail_test()
test.pass_test()
diff --git a/test/option/profile.py b/test/option/profile.py
index d53c6900..513da476 100644
--- a/test/option/profile.py
+++ b/test/option/profile.py
@@ -36,7 +36,7 @@ else:
# when we drop support for Python 2.6.
class StringIO(_StringIO):
def write(self, s):
- _StringIO.write(self, unicode(s))
+ _StringIO.write(self, str(s))
import TestSCons
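The StringIO subclass in this test exists only to coerce write() arguments to text. After the mechanical unicode-to-str swap it still works, but on Python 3 io.StringIO already demands str and the profiler output already is str, so the whole shim becomes a candidate for removal once 2.x support goes. A sketch of the post-conversion behaviour, with CoercingStringIO as an illustrative stand-in name:

    import io

    class CoercingStringIO(io.StringIO):
        def write(self, s):
            return super().write(str(s))   # coerce instead of raising TypeError on non-str

    buf = CoercingStringIO()
    buf.write(123)
    assert buf.getvalue() == '123'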
diff --git a/test/scons-time/run/config/python.py b/test/scons-time/run/config/python.py
index 6cf965b0..57b732d8 100644
--- a/test/scons-time/run/config/python.py
+++ b/test/scons-time/run/config/python.py
@@ -53,7 +53,7 @@ for arg in sys.argv[1:]:
print 'my_python.py: %s' % profile
""")
-os.chmod(my_python_py, 0755)
+os.chmod(my_python_py, 0o755)
test.run(arguments = 'run -f config foo.tar.gz')
diff --git a/test/scons-time/run/option/python.py b/test/scons-time/run/option/python.py
index a28e23fb..70feb70a 100644
--- a/test/scons-time/run/option/python.py
+++ b/test/scons-time/run/option/python.py
@@ -49,7 +49,7 @@ for arg in sys.argv[1:]:
sys.stdout.write('my_python.py: %s\\n' % profile)
""")
-os.chmod(my_python_py, 0755)
+os.chmod(my_python_py, 0o755)
test.run(arguments = 'run --python %s foo.tar.gz' % my_python_py)
diff --git a/test/sconsign/nonwritable.py b/test/sconsign/nonwritable.py
index 913dcf16..5ae55bbc 100644
--- a/test/sconsign/nonwritable.py
+++ b/test/sconsign/nonwritable.py
@@ -78,7 +78,7 @@ test.write(['work1', 'foo.in'], "work1/foo.in\n")
test.write(work1__sconsign_dblite, "")
-os.chmod(work1__sconsign_dblite, 0444)
+os.chmod(work1__sconsign_dblite, 0o444)
test.run(chdir='work1', arguments='.')
@@ -95,7 +95,7 @@ test.write(['work2', 'foo.in'], "work2/foo.in\n")
pickle.dump({}, open(work2_sub1__sconsign, 'wb'), 1)
pickle.dump({}, open(work2_sub2__sconsign, 'wb'), 1)
-os.chmod(work2_sub1__sconsign, 0444)
+os.chmod(work2_sub1__sconsign, 0o444)
test.run(chdir='work2', arguments='.')
diff --git a/test/sconsign/script/SConsignFile.py b/test/sconsign/script/SConsignFile.py
index a9f79f41..71a7e6dc 100644
--- a/test/sconsign/script/SConsignFile.py
+++ b/test/sconsign/script/SConsignFile.py
@@ -86,8 +86,8 @@ output.write(input.read())
sys.exit(0)
""")
-test.chmod(fake_cc_py, 0755)
-test.chmod(fake_link_py, 0755)
+test.chmod(fake_cc_py, 0o755)
+test.chmod(fake_link_py, 0o755)
# Note: We don't use os.path.join() representations of the file names
# in the expected output because paths in the .sconsign files are
diff --git a/test/sconsign/script/Signatures.py b/test/sconsign/script/Signatures.py
index cfd2a7f9..5babe676 100644
--- a/test/sconsign/script/Signatures.py
+++ b/test/sconsign/script/Signatures.py
@@ -105,8 +105,8 @@ output.write(input.read())
sys.exit(0)
""")
-test.chmod(fake_cc_py, 0755)
-test.chmod(fake_link_py, 0755)
+test.chmod(fake_cc_py, 0o755)
+test.chmod(fake_link_py, 0o755)
test.write('SConstruct', """
SConsignFile(None)
diff --git a/test/sconsign/script/no-SConsignFile.py b/test/sconsign/script/no-SConsignFile.py
index 829d650e..70b18c83 100644
--- a/test/sconsign/script/no-SConsignFile.py
+++ b/test/sconsign/script/no-SConsignFile.py
@@ -94,8 +94,8 @@ output.write(input.read())
sys.exit(0)
""")
-test.chmod(fake_cc_py, 0755)
-test.chmod(fake_link_py, 0755)
+test.chmod(fake_cc_py, 0o755)
+test.chmod(fake_link_py, 0o755)
# Note: We don't use os.path.join() representations of the file names
# in the expected output because paths in the .sconsign files are
diff --git a/test/site_scons/sysdirs.py b/test/site_scons/sysdirs.py
index c05ef676..d05f6a47 100644
--- a/test/site_scons/sysdirs.py
+++ b/test/site_scons/sysdirs.py
@@ -1,71 +1,71 @@
-#!/usr/bin/env python
-#
-# __COPYRIGHT__
-#
-# Permission is hereby granted, free of charge, to any person obtaining
-# a copy of this software and associated documentation files (the
-# "Software"), to deal in the Software without restriction, including
-# without limitation the rights to use, copy, modify, merge, publish,
-# distribute, sublicense, and/or sell copies of the Software, and to
-# permit persons to whom the Software is furnished to do so, subject to
-# the following conditions:
-#
-# The above copyright notice and this permission notice shall be included
-# in all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
-# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
-# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-#
-
-__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
-
-import TestSCons
-
-"""
-Verify site_scons system dirs are getting loaded.
-Uses an internal test fixture to get at the site_scons dirs.
-
-TODO: it would be great to test if it can actually load site_scons
-files from the system dirs, but the test harness can't put files in
-those dirs (which may not even exist on a build system).
-"""
-
-test = TestSCons.TestSCons()
-
-test.write('SConstruct', """
-import SCons.Script
-SCons.Script.Main.test_load_all_site_scons_dirs(Dir('.').path)
-""")
-
-test.run(arguments = '-Q .')
-
-import SCons.Platform
-platform = SCons.Platform.platform_default()
-if platform in ('win32', 'cygwin'):
- dir_to_check_for='Application Data'
-elif platform in ('darwin'):
- dir_to_check_for='Library'
-else:
- dir_to_check_for='.scons'
-
-if 'Loading site dir' not in test.stdout():
- print test.stdout()
- test.fail_test()
-if dir_to_check_for not in test.stdout():
- print test.stdout()
- test.fail_test()
-
-test.pass_test()
-
-# end of file
-
-# Local Variables:
-# tab-width:4
-# indent-tabs-mode:nil
-# End:
-# vim: set expandtab tabstop=4 shiftwidth=4:
+#!/usr/bin/env python
+#
+# __COPYRIGHT__
+#
+# Permission is hereby granted, free of charge, to any person obtaining
+# a copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish,
+# distribute, sublicense, and/or sell copies of the Software, and to
+# permit persons to whom the Software is furnished to do so, subject to
+# the following conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
+# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
+# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+#
+
+__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
+
+import TestSCons
+
+"""
+Verify site_scons system dirs are getting loaded.
+Uses an internal test fixture to get at the site_scons dirs.
+
+TODO: it would be great to test if it can actually load site_scons
+files from the system dirs, but the test harness can't put files in
+those dirs (which may not even exist on a build system).
+"""
+
+test = TestSCons.TestSCons()
+
+test.write('SConstruct', """
+import SCons.Script
+SCons.Script.Main.test_load_all_site_scons_dirs(Dir('.').path)
+""")
+
+test.run(arguments = '-Q .')
+
+import SCons.Platform
+platform = SCons.Platform.platform_default()
+if platform in ('win32', 'cygwin'):
+ dir_to_check_for='Application Data'
+elif platform in ('darwin'):
+ dir_to_check_for='Library'
+else:
+ dir_to_check_for='.scons'
+
+if 'Loading site dir' not in test.stdout():
+ print(test.stdout())
+ test.fail_test()
+if dir_to_check_for not in test.stdout():
+ print(test.stdout())
+ test.fail_test()
+
+test.pass_test()
+
+# end of file
+
+# Local Variables:
+# tab-width:4
+# indent-tabs-mode:nil
+# End:
+# vim: set expandtab tabstop=4 shiftwidth=4:
diff --git a/test/spaces.py b/test/spaces.py
index db1ae975..13090474 100644
--- a/test/spaces.py
+++ b/test/spaces.py
@@ -36,7 +36,7 @@ if sys.platform == 'win32':
else:
test.write('duplicate a file.sh', 'cp foo.in foo.out\n')
copy = test.workpath('duplicate a file.sh')
- os.chmod(test.workpath('duplicate a file.sh'), 0777)
+ os.chmod(test.workpath('duplicate a file.sh'), 0o777)
test.write('SConstruct', r'''
diff --git a/test/subdivide.py b/test/subdivide.py
index 2978e08b..44402c90 100644
--- a/test/subdivide.py
+++ b/test/subdivide.py
@@ -76,8 +76,8 @@ for s in sys.argv[2:]:
ofp.write(open(s, 'rb').read())
""")
-test.chmod(fake_cc_py, 0755)
-test.chmod(fake_link_py, 0755)
+test.chmod(fake_cc_py, 0o755)
+test.chmod(fake_link_py, 0o755)
test.write('SConstruct', """\
SConsignFile(None)
diff --git a/test/update-release-info/update-release-info.py b/test/update-release-info/update-release-info.py
index d3125c7b..0db65ba0 100644
--- a/test/update-release-info/update-release-info.py
+++ b/test/update-release-info/update-release-info.py
@@ -201,7 +201,7 @@ RELEASE 2.0.0.alpha.yyyymmdd - NEW DATE WILL BE INSERTED HERE
""", mode = 'r')
-years = ', '.join(map(str, range(2001, this_year + 1)))
+years = ', '.join(map(str, list(range(2001, this_year + 1))))
test.must_match(SConstruct, """
month_year = 'MONTH YEAR'
copyright_years = %s
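2to3 wraps the range() call in list() because range is lazy in Python 3; since the result is fed straight to map() and then join(), the extra list() is harmless but unnecessary and usually gets trimmed by hand:

    this_year = 2013                                           # illustrative value
    years = ', '.join(map(str, range(2001, this_year + 1)))   # list() not needed
    assert years.startswith('2001, 2002') and years.endswith('2013')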
diff --git a/timings/ElectricCloud/TimeSCons-run.py b/timings/ElectricCloud/TimeSCons-run.py
index be75d121..fbe0028c 100644
--- a/timings/ElectricCloud/TimeSCons-run.py
+++ b/timings/ElectricCloud/TimeSCons-run.py
@@ -87,7 +87,7 @@ test.run(program=test.workpath('genscons.pl'), arguments=' '.join(arguments))
# This print is nott for debugging, leave it alone!
# We want to display the output from genscons.pl's generation the build
# configuration, so the buildbot logs contain more info.
-print test.stdout()
+print(test.stdout())
test.main(chdir='sconsbld')
diff --git a/www/gen_sched_table.py b/www/gen_sched_table.py
index e67f0d74..9ac8acdc 100755
--- a/www/gen_sched_table.py
+++ b/www/gen_sched_table.py
@@ -6,13 +6,13 @@ import datetime
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
-print '<table width="100%">'
+print('<table width="100%">')
def row(*cells, **kw):
td = kw.get('tr','td')
- print ' <tr>'
+ print(' <tr>')
for cell in cells:
- print ' <%s>%s</%s>' % (td,cell,td)
- print ' </tr>'
+ print(' <%s>%s</%s>' % (td,cell,td))
+ print(' </tr>')
row('Estimated&nbsp;date', 'Type', 'Comments', tr = 'th')
if len(sys.argv) > 1:
@@ -30,7 +30,7 @@ for line in f:
incr,type,desc = line[1:].strip().split(None,2)
now = now + datetime.timedelta(int(incr))
else:
- print 'dunna understand code', line[0]
+ print('dunna understand code', line[0])
sys.exit(1)
#name = current + '.d' + str(now).replace('-','')
date = '%s-%s-%s' % (now.day,months[now.month-1],now.year)
@@ -41,7 +41,7 @@ for line in f:
else:
category = current = type
row(date, category, desc)
-print '</table>'
+print('</table>')
# Local Variables:
# tab-width:4