path: root/sphinx/pycode
author    Georg Brandl <georg@python.org>    2014-03-01 08:18:23 +0100
committer Georg Brandl <georg@python.org>    2014-03-01 08:18:23 +0100
commit    2f950d546d7304e7b9d6592d27b47f82b3a424ad (patch)
tree      54b9419c78a5f725508241c48a1ca731ee9317fa /sphinx/pycode
parent    3c649bfde0126d72894989506c40bb8ae35d7d23 (diff)
parent    4047fe8184c2984241b92754b6e6d6b639b8d09b (diff)
download  sphinx-2f950d546d7304e7b9d6592d27b47f82b3a424ad.tar.gz
Update copyright year.
Diffstat (limited to 'sphinx/pycode')
-rw-r--r--  sphinx/pycode/__init__.py        13
-rw-r--r--  sphinx/pycode/pgen2/driver.py     2
-rw-r--r--  sphinx/pycode/pgen2/grammar.py   13
-rw-r--r--  sphinx/pycode/pgen2/literals.py   3
-rw-r--r--  sphinx/pycode/pgen2/parse.c      89
-rw-r--r--  sphinx/pycode/pgen2/pgen.py      17
-rw-r--r--  sphinx/pycode/pgen2/tokenize.py   9
7 files changed, 33 insertions, 113 deletions
diff --git a/sphinx/pycode/__init__.py b/sphinx/pycode/__init__.py
index 7a6f59b1..b735fb31 100644
--- a/sphinx/pycode/__init__.py
+++ b/sphinx/pycode/__init__.py
@@ -8,6 +8,7 @@
:copyright: Copyright 2007-2014 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
+from __future__ import print_function
import sys
from os import path
@@ -17,7 +18,7 @@ from sphinx.errors import PycodeError
from sphinx.pycode import nodes
from sphinx.pycode.pgen2 import driver, token, tokenize, parse, literals
from sphinx.util import get_module_source, detect_encoding
-from sphinx.util.pycompat import next, StringIO, BytesIO, TextIOWrapper
+from sphinx.util.pycompat import StringIO, BytesIO, TextIOWrapper
from sphinx.util.docstrings import prepare_docstring, prepare_commentdoc
@@ -182,7 +183,7 @@ class ModuleAnalyzer(object):
return cls.cache['file', filename]
try:
fileobj = open(filename, 'rb')
- except Exception, err:
+ except Exception as err:
raise PycodeError('error opening %r' % filename, err)
obj = cls(fileobj, modname, filename)
cls.cache['file', filename] = obj
@@ -202,7 +203,7 @@ class ModuleAnalyzer(object):
obj = cls.for_string(source, modname)
else:
obj = cls.for_file(source, modname)
- except PycodeError, err:
+ except PycodeError as err:
cls.cache['module', modname] = err
raise
cls.cache['module', modname] = obj
@@ -245,7 +246,7 @@ class ModuleAnalyzer(object):
return
try:
self.tokens = list(tokenize.generate_tokens(self.source.readline))
- except tokenize.TokenError, err:
+ except tokenize.TokenError as err:
raise PycodeError('tokenizing failed', err)
self.source.close()
@@ -256,7 +257,7 @@ class ModuleAnalyzer(object):
self.tokenize()
try:
self.parsetree = pydriver.parse_tokens(self.tokens)
- except parse.ParseError, err:
+ except parse.ParseError as err:
raise PycodeError('parsing failed', err)
def find_attr_docs(self, scope=''):
@@ -344,4 +345,4 @@ if __name__ == '__main__':
pprint.pprint(ma.find_tags())
x3 = time.time()
#print nodes.nice_repr(ma.parsetree, number2name)
- print "tokenizing %.4f, parsing %.4f, finding %.4f" % (x1-x0, x2-x1, x3-x2)
+ print("tokenizing %.4f, parsing %.4f, finding %.4f" % (x1-x0, x2-x1, x3-x2))
diff --git a/sphinx/pycode/pgen2/driver.py b/sphinx/pycode/pgen2/driver.py
index 422671db..c531edb3 100644
--- a/sphinx/pycode/pgen2/driver.py
+++ b/sphinx/pycode/pgen2/driver.py
@@ -131,7 +131,7 @@ def load_grammar(gt="Grammar.txt", gp=None,
logger.info("Writing grammar tables to %s", gp)
try:
g.dump(gp)
- except IOError, e:
+ except IOError as e:
logger.info("Writing failed:"+str(e))
else:
g = grammar.Grammar()
diff --git a/sphinx/pycode/pgen2/grammar.py b/sphinx/pycode/pgen2/grammar.py
index 01d84346..91874fa2 100644
--- a/sphinx/pycode/pgen2/grammar.py
+++ b/sphinx/pycode/pgen2/grammar.py
@@ -11,6 +11,7 @@ token module; the Python tokenize module reports all operators as the
fallback token code OP, but the parser needs the actual token code.
"""
+from __future__ import print_function
# Python imports
import pickle
@@ -100,17 +101,17 @@ class Grammar(object):
def report(self):
"""Dump the grammar tables to standard output, for debugging."""
from pprint import pprint
- print "s2n"
+ print("s2n")
pprint(self.symbol2number)
- print "n2s"
+ print("n2s")
pprint(self.number2symbol)
- print "states"
+ print("states")
pprint(self.states)
- print "dfas"
+ print("dfas")
pprint(self.dfas)
- print "labels"
+ print("labels")
pprint(self.labels)
- print "start", self.start
+ print("start", self.start)
# Map from operator to number (since tokenize doesn't do this)
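
As the module docstring quoted in the hunk header explains, tokenize reports every operator with the generic OP code, so the grammar keeps its own operator-to-token-code table (the opmap built just below this line). For reference, Python 3.8+ exposes the same idea in the stdlib as token.EXACT_TOKEN_TYPES; a brief illustration:

    import token

    # Maps operator strings to exact token codes -- the lookup that pgen2's
    # opmap table provides by hand for its own token numbering.
    print(token.EXACT_TOKEN_TYPES['('] == token.LPAR)      # True
    print(token.tok_name[token.EXACT_TOKEN_TYPES['**']])   # DOUBLESTAR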
diff --git a/sphinx/pycode/pgen2/literals.py b/sphinx/pycode/pgen2/literals.py
index d4893702..ce4a0ebc 100644
--- a/sphinx/pycode/pgen2/literals.py
+++ b/sphinx/pycode/pgen2/literals.py
@@ -4,6 +4,7 @@
# Extended to handle raw and unicode literals by Georg Brandl.
"""Safely evaluate Python string literals without using eval()."""
+from __future__ import print_function
import re
@@ -89,7 +90,7 @@ def test():
s = repr(c)
e = evalString(s)
if e != c:
- print i, c, s, e
+ print(i, c, s, e)
if __name__ == "__main__":
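
This module's stated purpose is evaluating string literals without eval(); the hunk above only modernizes its self-test's print. For comparison, the stdlib reaches the same safety goal with ast.literal_eval, which rejects anything that is not a literal (a modern equivalent, not what this vendored module uses):

    import ast

    print(ast.literal_eval(r"'a\nb'"))        # the \n escape becomes a real newline
    try:
        ast.literal_eval("__import__('os')")  # not a literal: rejected
    except ValueError as err:
        print("rejected:", err)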
diff --git a/sphinx/pycode/pgen2/parse.c b/sphinx/pycode/pgen2/parse.c
index e09f5058..96fa6c8b 100644
--- a/sphinx/pycode/pgen2/parse.c
+++ b/sphinx/pycode/pgen2/parse.c
@@ -353,95 +353,6 @@ static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact,
static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[], PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, const char* function_name); /*proto*/
-#if PY_VERSION_HEX < 0x02050000
-#ifndef PyAnySet_CheckExact
-
-#define PyAnySet_CheckExact(ob) \
- ((ob)->ob_type == &PySet_Type || \
- (ob)->ob_type == &PyFrozenSet_Type)
-
-#define PySet_New(iterable) \
- PyObject_CallFunctionObjArgs((PyObject *)&PySet_Type, (iterable), NULL)
-
-#define Pyx_PyFrozenSet_New(iterable) \
- PyObject_CallFunctionObjArgs((PyObject *)&PyFrozenSet_Type, (iterable), NULL)
-
-#define PySet_Size(anyset) \
- PyObject_Size((anyset))
-
-#define PySet_Contains(anyset, key) \
- PySequence_Contains((anyset), (key))
-
-#define PySet_Pop(set) \
- PyObject_CallMethod(set, (char *)"pop", NULL)
-
-static INLINE int PySet_Clear(PyObject *set) {
- PyObject *ret = PyObject_CallMethod(set, (char *)"clear", NULL);
- if (!ret) return -1;
- Py_DECREF(ret); return 0;
-}
-
-static INLINE int PySet_Discard(PyObject *set, PyObject *key) {
- PyObject *ret = PyObject_CallMethod(set, (char *)"discard", (char *)"O", key);
- if (!ret) return -1;
- Py_DECREF(ret); return 0;
-}
-
-static INLINE int PySet_Add(PyObject *set, PyObject *key) {
- PyObject *ret = PyObject_CallMethod(set, (char *)"add", (char *)"O", key);
- if (!ret) return -1;
- Py_DECREF(ret); return 0;
-}
-
-#endif /* PyAnySet_CheckExact (<= Py2.4) */
-
-#if PY_VERSION_HEX < 0x02040000
-#ifndef Py_SETOBJECT_H
-#define Py_SETOBJECT_H
-
-static PyTypeObject *__Pyx_PySet_Type = NULL;
-static PyTypeObject *__Pyx_PyFrozenSet_Type = NULL;
-
-#define PySet_Type (*__Pyx_PySet_Type)
-#define PyFrozenSet_Type (*__Pyx_PyFrozenSet_Type)
-
-#define PyAnySet_Check(ob) \
- (PyAnySet_CheckExact(ob) || \
- PyType_IsSubtype((ob)->ob_type, &PySet_Type) || \
- PyType_IsSubtype((ob)->ob_type, &PyFrozenSet_Type))
-
-#define PyFrozenSet_CheckExact(ob) ((ob)->ob_type == &PyFrozenSet_Type)
-
-static int __Pyx_Py23SetsImport(void) {
- PyObject *sets=0, *Set=0, *ImmutableSet=0;
-
- sets = PyImport_ImportModule((char *)"sets");
- if (!sets) goto bad;
- Set = PyObject_GetAttrString(sets, (char *)"Set");
- if (!Set) goto bad;
- ImmutableSet = PyObject_GetAttrString(sets, (char *)"ImmutableSet");
- if (!ImmutableSet) goto bad;
- Py_DECREF(sets);
-
- __Pyx_PySet_Type = (PyTypeObject*) Set;
- __Pyx_PyFrozenSet_Type = (PyTypeObject*) ImmutableSet;
-
- return 0;
-
- bad:
- Py_XDECREF(sets);
- Py_XDECREF(Set);
- Py_XDECREF(ImmutableSet);
- return -1;
-}
-
-#else
-static int __Pyx_Py23SetsImport(void) { return 0; }
-#endif /* !Py_SETOBJECT_H */
-#endif /* < Py2.4 */
-#endif /* < Py2.5 */
-
-
static INLINE PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) {
PyObject *r;
if (!j) return NULL;
diff --git a/sphinx/pycode/pgen2/pgen.py b/sphinx/pycode/pgen2/pgen.py
index 0a04447d..ab990cef 100644
--- a/sphinx/pycode/pgen2/pgen.py
+++ b/sphinx/pycode/pgen2/pgen.py
@@ -1,7 +1,10 @@
# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
+from __future__ import print_function
+
# Pgen imports
+
from sphinx.pycode.pgen2 import grammar, token, tokenize
class PgenGrammar(grammar.Grammar):
@@ -203,10 +206,10 @@ class ParserGenerator(object):
return states # List of DFAState instances; first one is start
def dump_nfa(self, name, start, finish):
- print "Dump of NFA for", name
+ print("Dump of NFA for", name)
todo = [start]
for i, state in enumerate(todo):
- print " State", i, state is finish and "(final)" or ""
+ print(" State", i, state is finish and "(final)" or "")
for label, next in state.arcs:
if next in todo:
j = todo.index(next)
@@ -214,16 +217,16 @@ class ParserGenerator(object):
j = len(todo)
todo.append(next)
if label is None:
- print " -> %d" % j
+ print(" -> %d" % j)
else:
- print " %s -> %d" % (label, j)
+ print(" %s -> %d" % (label, j))
def dump_dfa(self, name, dfa):
- print "Dump of DFA for", name
+ print("Dump of DFA for", name)
for i, state in enumerate(dfa):
- print " State", i, state.isfinal and "(final)" or ""
+ print(" State", i, state.isfinal and "(final)" or "")
for label, next in state.arcs.iteritems():
- print " %s -> %d" % (label, dfa.index(next))
+ print(" %s -> %d" % (label, dfa.index(next)))
def simplify_dfa(self, dfa):
# This is not theoretically optimal, but works well enough.
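
One detail the dump_dfa hunk leaves untouched: the loop still iterates with state.arcs.iteritems() (visible in the context above), which Python 3 removes, so the print() conversion alone does not make this debug helper Python 3-clean. The portable spelling, sketched on a plain dict:

    arcs = {'NAME': 1, 'NUMBER': 2}

    # items() exists on Python 2 and 3; iteritems() is Python 2 only.
    for label, target in sorted(arcs.items()):
        print("    %s -> %d" % (label, target))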
diff --git a/sphinx/pycode/pgen2/tokenize.py b/sphinx/pycode/pgen2/tokenize.py
index 7ad9f012..4e94fa5c 100644
--- a/sphinx/pycode/pgen2/tokenize.py
+++ b/sphinx/pycode/pgen2/tokenize.py
@@ -23,7 +23,10 @@ Older entry points
tokenize(readline, tokeneater=printtoken)
are the same, except instead of generating tokens, tokeneater is a callback
function to which the 5 fields described above are passed as 5 arguments,
-each time a new token is found."""
+each time a new token is found.
+"""
+
+from __future__ import print_function
__author__ = 'Ka-Ping Yee <ping@lfw.org>'
__credits__ = \
@@ -146,8 +149,8 @@ class StopTokenizing(Exception): pass
def printtoken(type, token, scell, ecell, line): # for testing
srow, scol = scell
erow, ecol = ecell
- print "%d,%d-%d,%d:\t%s\t%s" % \
- (srow, scol, erow, ecol, tok_name[type], repr(token))
+ print("%d,%d-%d,%d:\t%s\t%s" %
+ (srow, scol, erow, ecol, tok_name[type], repr(token)))
def tokenize(readline, tokeneater=printtoken):
"""