author     Georg Brandl <georg@python.org>    2012-04-11 12:46:24 +0200
committer  Georg Brandl <georg@python.org>    2012-04-11 12:46:24 +0200
commit     d113fbbdccda87a19675e624650dd02435bf7988 (patch)
tree       c3f0f368c5f2bfd5d750c0bb082c0b9744b70894 /Tools
parent     b2c574caec7279f6078b9ffecef84f4ed3370725 (diff)
parent     25f9ec117a47548f90748ee892b2d5c5eb9e5c79 (diff)
download   cpython-d113fbbdccda87a19675e624650dd02435bf7988.tar.gz
Merge 3.2.3 release clone.
Diffstat (limited to 'Tools')
-rw-r--r--  Tools/msi/msi.py                      |  1
-rwxr-xr-x  Tools/scripts/abitype.py              | 88
-rwxr-xr-x  Tools/scripts/find_recursionlimit.py  | 24
-rwxr-xr-x  Tools/scripts/findnocoding.py         | 54
-rwxr-xr-x  Tools/scripts/fixcid.py               |  2
-rwxr-xr-x  Tools/scripts/md5sum.py               |  2
-rwxr-xr-x  Tools/scripts/parseentities.py        |  3
-rwxr-xr-x  Tools/scripts/pdeps.py                | 10
8 files changed, 95 insertions, 89 deletions
diff --git a/Tools/msi/msi.py b/Tools/msi/msi.py
index 19cd7fb1dd..508816dd86 100644
--- a/Tools/msi/msi.py
+++ b/Tools/msi/msi.py
@@ -1021,6 +1021,7 @@ def add_files(db):
lib.add_file("check_soundcard.vbs")
lib.add_file("empty.vbs")
lib.add_file("Sine-1000Hz-300ms.aif")
+ lib.add_file("mime.types")
lib.glob("*.uue")
lib.glob("*.pem")
lib.glob("*.pck")
diff --git a/Tools/scripts/abitype.py b/Tools/scripts/abitype.py
index 4d96c8b70e..ab0ba42c36 100755
--- a/Tools/scripts/abitype.py
+++ b/Tools/scripts/abitype.py
@@ -3,34 +3,6 @@
# Usage: abitype.py < old_code > new_code
import re, sys
-############ Simplistic C scanner ##################################
-tokenizer = re.compile(
- r"(?P<preproc>#.*\n)"
- r"|(?P<comment>/\*.*?\*/)"
- r"|(?P<ident>[a-zA-Z_][a-zA-Z0-9_]*)"
- r"|(?P<ws>[ \t\n]+)"
- r"|(?P<other>.)",
- re.MULTILINE)
-
-tokens = []
-source = sys.stdin.read()
-pos = 0
-while pos != len(source):
- m = tokenizer.match(source, pos)
- tokens.append([m.lastgroup, m.group()])
- pos += len(tokens[-1][1])
- if tokens[-1][0] == 'preproc':
- # continuation lines are considered
- # only in preprocess statements
- while tokens[-1][1].endswith('\\\n'):
- nl = source.find('\n', pos)
- if nl == -1:
- line = source[pos:]
- else:
- line = source[pos:nl+1]
- tokens[-1][1] += line
- pos += len(line)
-
###### Replacement of PyTypeObject static instances ##############
# classify each token, giving it a one-letter code:
@@ -79,7 +51,7 @@ def get_fields(start, real_end):
while tokens[pos][0] in ('ws', 'comment'):
pos += 1
if tokens[pos][1] != 'PyVarObject_HEAD_INIT':
- raise Exception, '%s has no PyVarObject_HEAD_INIT' % name
+ raise Exception('%s has no PyVarObject_HEAD_INIT' % name)
while tokens[pos][1] != ')':
pos += 1
pos += 1
@@ -183,18 +155,48 @@ def make_slots(name, fields):
return '\n'.join(res)
-# Main loop: replace all static PyTypeObjects until
-# there are none left.
-while 1:
- c = classify()
- m = re.search('(SW)?TWIW?=W?{.*?};', c)
- if not m:
- break
- start = m.start()
- end = m.end()
- name, fields = get_fields(start, m)
- tokens[start:end] = [('',make_slots(name, fields))]
+if __name__ == '__main__':
+
+ ############ Simplistic C scanner ##################################
+ tokenizer = re.compile(
+ r"(?P<preproc>#.*\n)"
+ r"|(?P<comment>/\*.*?\*/)"
+ r"|(?P<ident>[a-zA-Z_][a-zA-Z0-9_]*)"
+ r"|(?P<ws>[ \t\n]+)"
+ r"|(?P<other>.)",
+ re.MULTILINE)
+
+ tokens = []
+ source = sys.stdin.read()
+ pos = 0
+ while pos != len(source):
+ m = tokenizer.match(source, pos)
+ tokens.append([m.lastgroup, m.group()])
+ pos += len(tokens[-1][1])
+ if tokens[-1][0] == 'preproc':
+ # continuation lines are considered
+ # only in preprocess statements
+ while tokens[-1][1].endswith('\\\n'):
+ nl = source.find('\n', pos)
+ if nl == -1:
+ line = source[pos:]
+ else:
+ line = source[pos:nl+1]
+ tokens[-1][1] += line
+ pos += len(line)
+
+ # Main loop: replace all static PyTypeObjects until
+ # there are none left.
+ while 1:
+ c = classify()
+ m = re.search('(SW)?TWIW?=W?{.*?};', c)
+ if not m:
+ break
+ start = m.start()
+ end = m.end()
+ name, fields = get_fields(start, m)
+ tokens[start:end] = [('',make_slots(name, fields))]
-# Output result to stdout
-for t, v in tokens:
- sys.stdout.write(v)
+ # Output result to stdout
+ for t, v in tokens:
+ sys.stdout.write(v)
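The abitype.py hunks make two separate fixes: the Python 2 statement form `raise Exception, msg` becomes the call form `raise Exception(msg)`, and the stdin-driven scanner plus the rewrite loop move under an `if __name__ == '__main__':` guard, so importing the module no longer consumes (or blocks on) stdin. The scanner itself is unchanged; it labels each token through named groups and `match.lastgroup`. A stripped-down sketch of that technique, run on a made-up input rather than anything from the patch:

    import re

    token_re = re.compile(
        r"(?P<ident>[a-zA-Z_][a-zA-Z0-9_]*)"
        r"|(?P<ws>[ \t\n]+)"
        r"|(?P<other>.)")

    source = "static PyTypeObject Foo_Type;"
    pos = 0
    while pos != len(source):
        m = token_re.match(source, pos)
        # lastgroup names the alternative that matched: 'ident', 'ws' or 'other'
        print(m.lastgroup, repr(m.group()))
        pos = m.end()
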
diff --git a/Tools/scripts/find_recursionlimit.py b/Tools/scripts/find_recursionlimit.py
index 443f052c4e..7a8660356a 100755
--- a/Tools/scripts/find_recursionlimit.py
+++ b/Tools/scripts/find_recursionlimit.py
@@ -106,14 +106,16 @@ def check_limit(n, test_func_name):
else:
print("Yikes!")
-limit = 1000
-while 1:
- check_limit(limit, "test_recurse")
- check_limit(limit, "test_add")
- check_limit(limit, "test_repr")
- check_limit(limit, "test_init")
- check_limit(limit, "test_getattr")
- check_limit(limit, "test_getitem")
- check_limit(limit, "test_cpickle")
- print("Limit of %d is fine" % limit)
- limit = limit + 100
+if __name__ == '__main__':
+
+ limit = 1000
+ while 1:
+ check_limit(limit, "test_recurse")
+ check_limit(limit, "test_add")
+ check_limit(limit, "test_repr")
+ check_limit(limit, "test_init")
+ check_limit(limit, "test_getattr")
+ check_limit(limit, "test_getitem")
+ check_limit(limit, "test_cpickle")
+ print("Limit of %d is fine" % limit)
+ limit = limit + 100
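find_recursionlimit.py keeps raising the recursion limit until the interpreter eventually gives out, so running that loop as a side effect of an import was especially unwelcome; the new `if __name__ == '__main__':` guard restricts it to direct execution. A minimal sketch of the guard pattern, using a hypothetical module name:

    # probe_demo.py -- hypothetical illustration, not part of the patch
    import sys

    def run():
        # the potentially destructive probing would live behind the guard
        print("current recursion limit:", sys.getrecursionlimit())

    if __name__ == '__main__':
        # executes for "python probe_demo.py", not for "import probe_demo"
        run()
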
diff --git a/Tools/scripts/findnocoding.py b/Tools/scripts/findnocoding.py
index 77607ce137..a494a480f0 100755
--- a/Tools/scripts/findnocoding.py
+++ b/Tools/scripts/findnocoding.py
@@ -76,29 +76,31 @@ usage = """Usage: %s [-cd] paths...
-c: recognize Python source files trying to compile them
-d: debug output""" % sys.argv[0]
-try:
- opts, args = getopt.getopt(sys.argv[1:], 'cd')
-except getopt.error as msg:
- print(msg, file=sys.stderr)
- print(usage, file=sys.stderr)
- sys.exit(1)
-
-is_python = pysource.looks_like_python
-debug = False
-
-for o, a in opts:
- if o == '-c':
- is_python = pysource.can_be_compiled
- elif o == '-d':
- debug = True
-
-if not args:
- print(usage, file=sys.stderr)
- sys.exit(1)
-
-for fullpath in pysource.walk_python_files(args, is_python):
- if debug:
- print("Testing for coding: %s" % fullpath)
- result = needs_declaration(fullpath)
- if result:
- print(fullpath)
+if __name__ == '__main__':
+
+ try:
+ opts, args = getopt.getopt(sys.argv[1:], 'cd')
+ except getopt.error as msg:
+ print(msg, file=sys.stderr)
+ print(usage, file=sys.stderr)
+ sys.exit(1)
+
+ is_python = pysource.looks_like_python
+ debug = False
+
+ for o, a in opts:
+ if o == '-c':
+ is_python = pysource.can_be_compiled
+ elif o == '-d':
+ debug = True
+
+ if not args:
+ print(usage, file=sys.stderr)
+ sys.exit(1)
+
+ for fullpath in pysource.walk_python_files(args, is_python):
+ if debug:
+ print("Testing for coding: %s" % fullpath)
+ result = needs_declaration(fullpath)
+ if result:
+ print(fullpath)
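findnocoding.py gets the same restructuring: its getopt-based option handling and the directory walk now run only when the script is executed directly, so its helper functions can be imported without touching sys.argv. For reference, a bare getopt skeleton in the same style (a hedged sketch, not the script's actual behaviour beyond the -c and -d flags it parses):

    import getopt
    import sys

    def main(argv):
        try:
            opts, args = getopt.getopt(argv, 'cd')
        except getopt.error as msg:
            print(msg, file=sys.stderr)
            return 1
        debug = any(opt == '-d' for opt, _ in opts)
        if debug:
            print("args:", args)
        return 0

    if __name__ == '__main__':
        sys.exit(main(sys.argv[1:]))
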
diff --git a/Tools/scripts/fixcid.py b/Tools/scripts/fixcid.py
index 2d4cd1ab00..87e2a0929f 100755
--- a/Tools/scripts/fixcid.py
+++ b/Tools/scripts/fixcid.py
@@ -292,7 +292,7 @@ def addsubst(substfile):
if not words: continue
if len(words) == 3 and words[0] == 'struct':
words[:2] = [words[0] + ' ' + words[1]]
- elif len(words) <> 2:
+ elif len(words) != 2:
err(substfile + '%s:%r: warning: bad line: %r' % (substfile, lineno, line))
continue
if Reverse:
diff --git a/Tools/scripts/md5sum.py b/Tools/scripts/md5sum.py
index 743da72aa8..521960c17d 100755
--- a/Tools/scripts/md5sum.py
+++ b/Tools/scripts/md5sum.py
@@ -20,7 +20,7 @@ file ... : files to sum; '-' or no files means stdin
import sys
import os
import getopt
-import md5
+from hashlib import md5
def sum(*files):
sts = 0
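The md5sum.py fix replaces the `md5` module, which was removed in Python 3, with `hashlib`; importing the constructor via `from hashlib import md5` lets the rest of the script keep calling `md5()` unchanged. A short sketch of how such a checksum is computed with hashlib (a hypothetical helper, not the script's actual loop):

    from hashlib import md5

    def file_digest(path, bufsize=8096):
        h = md5()
        with open(path, 'rb') as fp:
            while True:
                block = fp.read(bufsize)
                if not block:
                    break
                h.update(block)   # hash the file incrementally, block by block
        return h.hexdigest()
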
diff --git a/Tools/scripts/parseentities.py b/Tools/scripts/parseentities.py
index 5b0f1c6741..a042d1c24c 100755
--- a/Tools/scripts/parseentities.py
+++ b/Tools/scripts/parseentities.py
@@ -13,7 +13,6 @@
"""
import re,sys
-import TextTools
entityRE = re.compile('<!ENTITY +(\w+) +CDATA +"([^"]+)" +-- +((?:.|\n)+?) *-->')
@@ -45,7 +44,7 @@ def writefile(f,defs):
charcode = repr(charcode)
else:
charcode = repr(charcode)
- comment = TextTools.collapse(comment)
+ comment = ' '.join(comment.split())
f.write(" '%s':\t%s, \t# %s\n" % (name,charcode,comment))
f.write('\n}\n')
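parseentities.py drops a third-party dependency: the TextTools import goes away, and the whitespace collapsing it was used for is done with the standard library instead. `str.split()` with no argument splits on any run of whitespace and drops the leading and trailing parts, so joining the pieces with a single space flattens a multi-line comment onto one line, shown here on a made-up string:

    comment = "latin capital letter A\n        with acute accent"
    collapsed = ' '.join(comment.split())
    print(collapsed)   # latin capital letter A with acute accent
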
diff --git a/Tools/scripts/pdeps.py b/Tools/scripts/pdeps.py
index 938f31c164..f8218ac524 100755
--- a/Tools/scripts/pdeps.py
+++ b/Tools/scripts/pdeps.py
@@ -76,10 +76,9 @@ def process(filename, table):
nextline = fp.readline()
if not nextline: break
line = line[:-1] + nextline
- if m_import.match(line) >= 0:
- (a, b), (a1, b1) = m_import.regs[:2]
- elif m_from.match(line) >= 0:
- (a, b), (a1, b1) = m_from.regs[:2]
+ m_found = m_import.match(line) or m_from.match(line)
+ if m_found:
+ (a, b), (a1, b1) = m_found.regs[:2]
else: continue
words = line[a1:b1].split(',')
# print '#', line, words
@@ -87,6 +86,7 @@ def process(filename, table):
word = word.strip()
if word not in list:
list.append(word)
+ fp.close()
# Compute closure (this is in fact totally general)
@@ -123,7 +123,7 @@ def closure(table):
def inverse(table):
inv = {}
for key in table.keys():
- if not inv.has_key(key):
+ if key not in inv:
inv[key] = []
for item in table[key]:
store(inv, item, key)
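The pdeps.py hunks clean up three things at once: the import-scanning code now tests the match object returned by the `re` patterns for truth and reads `.regs` from it (the old `>= 0` comparison looks like a leftover from the long-gone regex module, whose match() returned a length or -1), the input file is closed explicitly once it has been scanned, and `dict.has_key()`, which no longer exists in Python 3, becomes an `in` test. A hedged sketch of the match-object idiom with made-up patterns:

    import re

    m_import = re.compile(r'import\s+(\w+)')
    m_from = re.compile(r'from\s+(\w+)\s+import')

    line = "from os import path"
    # whichever pattern matched is kept; if both fail, m_found is None
    m_found = m_import.match(line) or m_from.match(line)
    if m_found:
        print(m_found.group(1))   # os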